HADOOP-11393. Revert HADOOP_PREFIX, go back to HADOOP_HOME (aw)
commit 0a74610d1c
parent 0064cba169
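What makes the rename safe for existing deployments is a small backward-compatibility shim: hadoop-config.sh (see the second file below) now calls `hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME`, so environments that still export only HADOOP_PREFIX keep working, with a warning. A minimal sketch of how such a shim behaves -- illustrative only, not the actual hadoop-functions.sh implementation:

    #!/usr/bin/env bash
    # Sketch of an env-var deprecation shim; function name and message wording are illustrative.
    deprecate_envvar_sketch () {
      local oldvar=$1 newvar=$2
      local oldval=${!oldvar}
      if [[ -n "${oldval}" ]]; then
        echo "WARNING: ${oldvar} has been replaced by ${newvar}. Using value of ${oldvar}." >&2
        eval "${newvar}=\"${oldval}\""
        export "${newvar?}"
      fi
    }

    # A user who still exports only HADOOP_PREFIX ends up with HADOOP_HOME set:
    export HADOOP_PREFIX=/opt/hadoop
    deprecate_envvar_sketch HADOOP_PREFIX HADOOP_HOME
    echo "HADOOP_HOME=${HADOOP_HOME}"   # -> /opt/hadoop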
@@ -47,8 +47,8 @@ function hadoop_usage
 # This script runs the hadoop core commands.

 # let's locate libexec...
-if [[ -n "${HADOOP_PREFIX}" ]]; then
-HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+if [[ -n "${HADOOP_HOME}" ]]; then
+HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
 else
 bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
 HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
@@ -84,9 +84,9 @@ case ${COMMAND} in
 # shellcheck disable=SC2086
 exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
 --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
-elif [[ -f "${HADOOP_PREFIX}/bin/hdfs" ]]; then
+elif [[ -f "${HADOOP_HOME}/bin/hdfs" ]]; then
 # shellcheck disable=SC2086
-exec "${HADOOP_PREFIX}/bin/hdfs" \
+exec "${HADOOP_HOME}/bin/hdfs" \
 --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
 else
 hadoop_error "HADOOP_HDFS_HOME not found!"
@@ -104,8 +104,8 @@ case ${COMMAND} in
 if [[ -f "${HADOOP_MAPRED_HOME}/bin/mapred" ]]; then
 exec "${HADOOP_MAPRED_HOME}/bin/mapred" \
 --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
-elif [[ -f "${HADOOP_PREFIX}/bin/mapred" ]]; then
-exec "${HADOOP_PREFIX}/bin/mapred" \
+elif [[ -f "${HADOOP_HOME}/bin/mapred" ]]; then
+exec "${HADOOP_HOME}/bin/mapred" \
 --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
 else
 hadoop_error "HADOOP_MAPRED_HOME not found!"

@@ -63,6 +63,8 @@ else
 exit 1
 fi

+hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME
+
 # allow overrides of the above and pre-defines of the below
 if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
 [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then
@@ -128,8 +130,8 @@ fi
 hadoop_shellprofiles_init

 # get the native libs in there pretty quick
-hadoop_add_javalibpath "${HADOOP_PREFIX}/build/native"
-hadoop_add_javalibpath "${HADOOP_PREFIX}/${HADOOP_COMMON_LIB_NATIVE_DIR}"
+hadoop_add_javalibpath "${HADOOP_HOME}/build/native"
+hadoop_add_javalibpath "${HADOOP_HOME}/${HADOOP_COMMON_LIB_NATIVE_DIR}"

 hadoop_shellprofiles_nativelib


@@ -21,8 +21,8 @@ function hadoop_usage
 }

 # let's locate libexec...
-if [[ -n "${HADOOP_PREFIX}" ]]; then
-HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+if [[ -n "${HADOOP_HOME}" ]]; then
+HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
 else
 this="${BASH_SOURCE-$0}"
 bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
@@ -47,7 +47,7 @@ daemonmode=$1
 shift

 if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
-hdfsscript="${HADOOP_PREFIX}/bin/hdfs"
+hdfsscript="${HADOOP_HOME}/bin/hdfs"
 else
 hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
 fi

@@ -27,8 +27,8 @@ this="${BASH_SOURCE-$0}"
 bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

 # let's locate libexec...
-if [[ -n "${HADOOP_PREFIX}" ]]; then
-HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+if [[ -n "${HADOOP_HOME}" ]]; then
+HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
 else
 HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi
@@ -51,7 +51,7 @@ daemonmode=$1
 shift

 if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
-hdfsscript="${HADOOP_PREFIX}/bin/hdfs"
+hdfsscript="${HADOOP_HOME}/bin/hdfs"
 else
 hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
 fi

@@ -278,7 +278,7 @@ function hadoop_bootstrap
 # By now, HADOOP_LIBEXEC_DIR should have been defined upstream
 # We can piggyback off of that to figure out where the default
 # HADOOP_FREFIX should be. This allows us to run without
-# HADOOP_PREFIX ever being defined by a human! As a consequence
+# HADOOP_HOME ever being defined by a human! As a consequence
 # HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful
 # env var within Hadoop.
 if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
@@ -286,8 +286,8 @@ function hadoop_bootstrap
 exit 1
 fi
 HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P)
-HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_DEFAULT_PREFIX}
-export HADOOP_PREFIX
+HADOOP_HOME=${HADOOP_HOME:-$HADOOP_DEFAULT_PREFIX}
+export HADOOP_HOME

 #
 # short-cuts. vendors may redefine these as well, preferably
@@ -302,7 +302,7 @@ function hadoop_bootstrap
 YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
 MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
 MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
-HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_PREFIX}}
+HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}}
 HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
 HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}

@@ -326,12 +326,12 @@ function hadoop_find_confdir

 # An attempt at compatibility with some Hadoop 1.x
 # installs.
-if [[ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]]; then
+if [[ -e "${HADOOP_HOME}/conf/hadoop-env.sh" ]]; then
 conf_dir="conf"
 else
 conf_dir="etc/hadoop"
 fi
-export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}"
+export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_HOME}/${conf_dir}}"

 hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
 }
@@ -524,8 +524,8 @@ function hadoop_basic_init
 hadoop_debug "Initialize CLASSPATH"

 if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
-[[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then
-export HADOOP_COMMON_HOME="${HADOOP_PREFIX}"
+[[ -d "${HADOOP_HOME}/${HADOOP_COMMON_DIR}" ]]; then
+export HADOOP_COMMON_HOME="${HADOOP_HOME}"
 fi

 # default policy file for service-level authorization
@@ -533,20 +533,20 @@ function hadoop_basic_init

 # define HADOOP_HDFS_HOME
 if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
-[[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
-export HADOOP_HDFS_HOME="${HADOOP_PREFIX}"
+[[ -d "${HADOOP_HOME}/${HDFS_DIR}" ]]; then
+export HADOOP_HDFS_HOME="${HADOOP_HOME}"
 fi

 # define HADOOP_YARN_HOME
 if [[ -z "${HADOOP_YARN_HOME}" ]] &&
-[[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
-export HADOOP_YARN_HOME="${HADOOP_PREFIX}"
+[[ -d "${HADOOP_HOME}/${YARN_DIR}" ]]; then
+export HADOOP_YARN_HOME="${HADOOP_HOME}"
 fi

 # define HADOOP_MAPRED_HOME
 if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
-[[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
-export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
+[[ -d "${HADOOP_HOME}/${MAPRED_DIR}" ]]; then
+export HADOOP_MAPRED_HOME="${HADOOP_HOME}"
 fi

 if [[ ! -d "${HADOOP_COMMON_HOME}" ]]; then
@@ -573,7 +573,7 @@ function hadoop_basic_init
 # let's define it as 'hadoop'
 HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
 HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-hadoop}
-HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_PREFIX}/logs"}
+HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"}
 HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
 HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
 HADOOP_NICENESS=${HADOOP_NICENESS:-0}
@@ -1219,7 +1219,6 @@ function hadoop_finalize_hadoop_opts
 hadoop_translate_cygwin_path HADOOP_LOG_DIR
 hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}"
 hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}"
-HADOOP_HOME=${HADOOP_PREFIX}
 hadoop_translate_cygwin_path HADOOP_HOME
 export HADOOP_HOME
 hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}"
@@ -1252,11 +1251,11 @@ function hadoop_finalize_catalina_opts

 local prefix=${HADOOP_CATALINA_PREFIX}

-hadoop_add_param CATALINA_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX}"
+hadoop_add_param CATALINA_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}"
 if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
 hadoop_add_param CATALINA_OPTS java.library.path "-Djava.library.path=${JAVA_LIBRARY_PATH}"
 fi
-hadoop_add_param CATALINA_OPTS "${prefix}.home.dir" "-D${prefix}.home.dir=${HADOOP_PREFIX}"
+hadoop_add_param CATALINA_OPTS "${prefix}.home.dir" "-D${prefix}.home.dir=${HADOOP_HOME}"
 hadoop_add_param CATALINA_OPTS "${prefix}.config.dir" "-D${prefix}.config.dir=${HADOOP_CATALINA_CONFIG}"
 hadoop_add_param CATALINA_OPTS "${prefix}.log.dir" "-D${prefix}.log.dir=${HADOOP_CATALINA_LOG}"
 hadoop_add_param CATALINA_OPTS "${prefix}.temp.dir" "-D${prefix}.temp.dir=${HADOOP_CATALINA_TEMP}"
@@ -1282,7 +1281,7 @@ function hadoop_finalize
 hadoop_finalize_hadoop_heap
 hadoop_finalize_hadoop_opts

-hadoop_translate_cygwin_path HADOOP_PREFIX
+hadoop_translate_cygwin_path HADOOP_HOME
 hadoop_translate_cygwin_path HADOOP_CONF_DIR
 hadoop_translate_cygwin_path HADOOP_COMMON_HOME
 hadoop_translate_cygwin_path HADOOP_HDFS_HOME
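The hadoop_basic_init hunks above repeat one defaulting idiom: a per-subproject home is only pointed at HADOOP_HOME when the variable is unset and the expected layout directory actually exists. A standalone sketch of that pattern follows; the default paths and the final echo are illustrative additions, not part of the patch:

    #!/usr/bin/env bash
    # Illustrative only; mirrors the defaulting idiom used in hadoop_basic_init.
    HADOOP_HOME=${HADOOP_HOME:-/opt/hadoop}      # assumed install root
    HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"}    # relative layout dir, as in hadoop-layout.sh

    # Default HADOOP_HDFS_HOME only when it is unset AND the layout dir is present.
    if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
       [[ -d "${HADOOP_HOME}/${HDFS_DIR}" ]]; then
      export HADOOP_HDFS_HOME="${HADOOP_HOME}"
    fi

    echo "HADOOP_HDFS_HOME=${HADOOP_HDFS_HOME:-<unset>}"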
@@ -26,8 +26,8 @@
 ##
 ## If you move HADOOP_LIBEXEC_DIR from some location that
 ## isn't bin/../libexec, you MUST define either HADOOP_LIBEXEC_DIR
-## or have HADOOP_PREFIX/libexec/hadoop-config.sh and
-## HADOOP_PREFIX/libexec/hadoop-layout.sh (this file) exist.
+## or have HADOOP_HOME/libexec/hadoop-config.sh and
+## HADOOP_HOME/libexec/hadoop-layout.sh (this file) exist.

 ## NOTE:
 ##
@@ -44,7 +44,7 @@
 ####

 # Default location for the common/core Hadoop project
-# export HADOOP_COMMON_HOME=${HADOOP_PREFIX}
+# export HADOOP_COMMON_HOME=${HADOOP_HOME}

 # Relative locations where components under HADOOP_COMMON_HOME are located
 # export HADOOP_COMMON_DIR="share/hadoop/common"
@@ -56,7 +56,7 @@
 ####

 # Default location for the HDFS subproject
-# export HADOOP_HDFS_HOME=${HADOOP_PREFIX}
+# export HADOOP_HDFS_HOME=${HADOOP_HOME}

 # Relative locations where components under HADOOP_HDFS_HOME are located
 # export HDFS_DIR="share/hadoop/hdfs"
@@ -67,7 +67,7 @@
 ####

 # Default location for the YARN subproject
-# export HADOOP_YARN_HOME=${HADOOP_PREFIX}
+# export HADOOP_YARN_HOME=${HADOOP_HOME}

 # Relative locations where components under HADOOP_YARN_HOME are located
 # export YARN_DIR="share/hadoop/yarn"
@@ -78,7 +78,7 @@
 ####

 # Default location for the MapReduce subproject
-# export HADOOP_MAPRED_HOME=${HADOOP_PREFIX}
+# export HADOOP_MAPRED_HOME=${HADOOP_HOME}

 # Relative locations where components under HADOOP_MAPRED_HOME are located
 # export MAPRED_DIR="share/hadoop/mapreduce"
@@ -92,6 +92,6 @@
 # note that this path only gets added for certain commands and not
 # part of the general classpath unless HADOOP_OPTIONAL_TOOLS is used
 # to configure them in
-# export HADOOP_TOOLS_HOME=${HADOOP_PREFIX}
+# export HADOOP_TOOLS_HOME=${HADOOP_HOME}
 # export HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
 # export HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}

@@ -22,7 +22,7 @@
 #
 # HADOOP_SLAVES File naming remote hosts.
 # Default is ${HADOOP_CONF_DIR}/slaves.
-# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
+# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
 # HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
 # HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
 ##
@@ -33,8 +33,8 @@ function hadoop_usage
 }

 # let's locate libexec...
-if [[ -n "${HADOOP_PREFIX}" ]]; then
-HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+if [[ -n "${HADOOP_HOME}" ]]; then
+HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
 else
 this="${BASH_SOURCE-$0}"
 bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

@@ -21,8 +21,8 @@ exit 1


 # let's locate libexec...
-if [[ -n "${HADOOP_PREFIX}" ]]; then
-HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+if [[ -n "${HADOOP_HOME}" ]]; then
+HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
 else
 this="${BASH_SOURCE-$0}"
 bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

@@ -22,8 +22,8 @@ echo "This script is deprecated. Use stop-dfs.sh and stop-yarn.sh instead."
 exit 1

 # let's locate libexec...
-if [[ -n "${HADOOP_PREFIX}" ]]; then
-HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+if [[ -n "${HADOOP_HOME}" ]]; then
+HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
 else
 this="${BASH_SOURCE-$0}"
 bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

@@ -55,14 +55,14 @@

 # Location of Hadoop. By default, Hadoop will attempt to determine
 # this location based upon its execution path.
-# export HADOOP_PREFIX=
+# export HADOOP_HOME=

 # Location of Hadoop's configuration information. i.e., where this
 # file is probably living. Many sites will also set this in the
 # same location where JAVA_HOME is defined. If this is not defined
 # Hadoop will attempt to locate it based upon its execution
 # path.
-# export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
+# export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop

 # The maximum amount of heap to use (Java -Xmx). If no unit
 # is provided, it will be converted to MB. Daemons will
@@ -186,10 +186,10 @@ esac
 # non-secure)
 #

-# Where (primarily) daemon log files are stored. # $HADOOP_PREFIX/logs
-# by default.
+# Where (primarily) daemon log files are stored.
+# ${HADOOP_HOME}/logs by default.
 # Java property: hadoop.log.dir
-# export HADOOP_LOG_DIR=${HADOOP_PREFIX}/logs
+# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs

 # A string representing this instance of hadoop. $USER by default.
 # This is used in writing log and pid files, so keep that in mind!

@@ -32,7 +32,7 @@
 @InterfaceAudience.Private
 public class TraceUtils {
 private static List<ConfigurationPair> EMPTY = Collections.emptyList();
-static final String DEFAULT_HADOOP_PREFIX = "hadoop.htrace.";
+static final String DEFAULT_HADOOP_TRACE_PREFIX = "hadoop.htrace.";

 public static HTraceConfiguration wrapHadoopConf(final String prefix,
 final Configuration conf) {
@@ -52,7 +52,7 @@ public String get(String key) {
 if (ret != null) {
 return ret;
 }
-return getInternal(DEFAULT_HADOOP_PREFIX + key);
+return getInternal(DEFAULT_HADOOP_TRACE_PREFIX + key);
 }

 @Override
@@ -86,10 +86,10 @@ Other useful configuration parameters that you can customize include:

 In most cases, you should specify the `HADOOP_PID_DIR` and `HADOOP_LOG_DIR` directories such that they can only be written to by the users that are going to run the hadoop daemons. Otherwise there is the potential for a symlink attack.

-It is also traditional to configure `HADOOP_PREFIX` in the system-wide shell environment configuration. For example, a simple script inside `/etc/profile.d`:
+It is also traditional to configure `HADOOP_HOME` in the system-wide shell environment configuration. For example, a simple script inside `/etc/profile.d`:

-HADOOP_PREFIX=/path/to/hadoop
-export HADOOP_PREFIX
+HADOOP_HOME=/path/to/hadoop
+export HADOOP_HOME

 | Daemon | Environment Variable |
 |:---- |:---- |
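The documentation's `/etc/profile.d` fragment above is the bare minimum. A slightly fuller version of such a script could look like the sketch below; the file name and the PATH line are illustrative additions, not part of the patched documentation:

    # /etc/profile.d/hadoop.sh (illustrative)
    HADOOP_HOME=/path/to/hadoop
    export HADOOP_HOME
    export PATH="${PATH}:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin"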
@@ -243,73 +243,73 @@ To start a Hadoop cluster you will need to start both the HDFS and YARN cluster.

 The first time you bring up HDFS, it must be formatted. Format a new distributed filesystem as *hdfs*:

-[hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format <cluster_name>
+[hdfs]$ $HADOOP_HOME/bin/hdfs namenode -format <cluster_name>

 Start the HDFS NameNode with the following command on the designated node as *hdfs*:

-[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start namenode
+[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start namenode

 Start a HDFS DataNode with the following command on each designated node as *hdfs*:

-[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start datanode
+[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start datanode

 If `etc/hadoop/slaves` and ssh trusted access is configured (see [Single Node Setup](./SingleCluster.html)), all of the HDFS processes can be started with a utility script. As *hdfs*:

-[hdfs]$ $HADOOP_PREFIX/sbin/start-dfs.sh
+[hdfs]$ $HADOOP_HOME/sbin/start-dfs.sh

 Start the YARN with the following command, run on the designated ResourceManager as *yarn*:

-[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon start resourcemanager
+[yarn]$ $HADOOP_HOME/bin/yarn --daemon start resourcemanager

 Run a script to start a NodeManager on each designated host as *yarn*:

-[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon start nodemanager
+[yarn]$ $HADOOP_HOME/bin/yarn --daemon start nodemanager

 Start a standalone WebAppProxy server. Run on the WebAppProxy server as *yarn*. If multiple servers are used with load balancing it should be run on each of them:

-[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon start proxyserver
+[yarn]$ $HADOOP_HOME/bin/yarn --daemon start proxyserver

 If `etc/hadoop/slaves` and ssh trusted access is configured (see [Single Node Setup](./SingleCluster.html)), all of the YARN processes can be started with a utility script. As *yarn*:

-[yarn]$ $HADOOP_PREFIX/sbin/start-yarn.sh
+[yarn]$ $HADOOP_HOME/sbin/start-yarn.sh

 Start the MapReduce JobHistory Server with the following command, run on the designated server as *mapred*:

-[mapred]$ $HADOOP_PREFIX/bin/mapred --daemon start historyserver
+[mapred]$ $HADOOP_HOME/bin/mapred --daemon start historyserver

 ### Hadoop Shutdown

 Stop the NameNode with the following command, run on the designated NameNode as *hdfs*:

-[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop namenode
+[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon stop namenode

 Run a script to stop a DataNode as *hdfs*:

-[hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop datanode
+[hdfs]$ $HADOOP_HOME/bin/hdfs --daemon stop datanode

 If `etc/hadoop/slaves` and ssh trusted access is configured (see [Single Node Setup](./SingleCluster.html)), all of the HDFS processes may be stopped with a utility script. As *hdfs*:

-[hdfs]$ $HADOOP_PREFIX/sbin/stop-dfs.sh
+[hdfs]$ $HADOOP_HOME/sbin/stop-dfs.sh

 Stop the ResourceManager with the following command, run on the designated ResourceManager as *yarn*:

-[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon stop resourcemanager
+[yarn]$ $HADOOP_HOME/bin/yarn --daemon stop resourcemanager

 Run a script to stop a NodeManager on a slave as *yarn*:

-[yarn]$ $HADOOP_PREFIX/bin/yarn --daemon stop nodemanager
+[yarn]$ $HADOOP_HOME/bin/yarn --daemon stop nodemanager

 If `etc/hadoop/slaves` and ssh trusted access is configured (see [Single Node Setup](./SingleCluster.html)), all of the YARN processes can be stopped with a utility script. As *yarn*:

-[yarn]$ $HADOOP_PREFIX/sbin/stop-yarn.sh
+[yarn]$ $HADOOP_HOME/sbin/stop-yarn.sh

 Stop the WebAppProxy server. Run on the WebAppProxy server as *yarn*. If multiple servers are used with load balancing it should be run on each of them:

-[yarn]$ $HADOOP_PREFIX/bin/yarn stop proxyserver
+[yarn]$ $HADOOP_HOME/bin/yarn stop proxyserver

 Stop the MapReduce JobHistory Server with the following command, run on the designated server as *mapred*:

-[mapred]$ $HADOOP_PREFIX/bin/mapred --daemon stop historyserver
+[mapred]$ $HADOOP_HOME/bin/mapred --daemon stop historyserver

 Web Interfaces
 --------------
@@ -39,7 +39,7 @@ All of the shell commands will accept a common set of options. For some commands
 | SHELL\_OPTION | Description |
 |:---- |:---- |
 | `--buildpaths` | Enables developer versions of jars. |
-| `--config confdir` | Overwrites the default Configuration directory. Default is `$HADOOP_PREFIX/etc/hadoop`. |
+| `--config confdir` | Overwrites the default Configuration directory. Default is `$HADOOP_HOME/etc/hadoop`. |
 | `--daemon mode` | If the command supports daemonization (e.g., `hdfs namenode`), execute in the appropriate mode. Supported modes are `start` to start the process in daemon mode, `stop` to stop the process, and `status` to determine the active status of the process. `status` will return an [LSB-compliant](http://refspecs.linuxbase.org/LSB_3.0.0/LSB-generic/LSB-generic/iniscrptact.html) result code. If no option is provided, commands that support daemonization will run in the foreground. For commands that do not support daemonization, this option is ignored. |
 | `--debug` | Enables shell level configuration debugging information |
 | `--help` | Shell script usage information. |

@@ -83,7 +83,7 @@ Apache Hadoop allows for third parties to easily add new features through a vari

 Core to this functionality is the concept of a shell profile. Shell profiles are shell snippets that can do things such as add jars to the classpath, configure Java system properties and more.

-Shell profiles may be installed in either `${HADOOP_CONF_DIR}/shellprofile.d` or `${HADOOP_PREFIX}/libexec/shellprofile.d`. Shell profiles in the `libexec` directory are part of the base installation and cannot be overriden by the user. Shell profiles in the configuration directory may be ignored if the end user changes the configuration directory at runtime.
+Shell profiles may be installed in either `${HADOOP_CONF_DIR}/shellprofile.d` or `${HADOOP_HOME}/libexec/shellprofile.d`. Shell profiles in the `libexec` directory are part of the base installation and cannot be overriden by the user. Shell profiles in the configuration directory may be ignored if the end user changes the configuration directory at runtime.

 An example of a shell profile is in the libexec directory.

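For readers of the guide, a shell profile is simply a snippet dropped into one of the two directories above that registers itself and then contributes settings through the `hadoop_add_*` helpers. A hedged sketch follows; the profile name, jar path, and file name are invented for illustration, and the exact callback naming convention should be checked against the example profile shipped in `libexec/shellprofile.d`:

    # shellprofile.d/example-mytool.sh -- illustrative sketch only
    hadoop_add_profile mytool

    function _mytool_hadoop_classpath
    {
      # Only add the jar when it is actually present in the tools tree.
      if [[ -f "${HADOOP_TOOLS_HOME}/share/mytool/mytool.jar" ]]; then
        hadoop_add_classpath "${HADOOP_TOOLS_HOME}/share/mytool/mytool.jar"
      fi
    }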
@@ -27,6 +27,7 @@ setup() {
 # shellcheck disable=SC2034
 HADOOP_SHELL_SCRIPT_DEBUG=true
 unset HADOOP_CONF_DIR
+# we unset both of these for bw compat
 unset HADOOP_HOME
 unset HADOOP_PREFIX

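The new comment in setup() is worth spelling out: if a developer's login environment still exports HADOOP_PREFIX, the deprecation shim would copy that stale value into HADOOP_HOME during bootstrap and the test fixtures would point at the wrong tree, so the helper clears both. A tiny illustration of the interaction; the assignment here is a simplified stand-in for the shim, not the real code:

    # Illustrative: a stray HADOOP_PREFIX would leak into HADOOP_HOME via the shim.
    export HADOOP_PREFIX=/some/old/install
    unset HADOOP_HOME

    # ...simplified stand-in for what the compatibility shim does at bootstrap...
    HADOOP_HOME=${HADOOP_HOME:-${HADOOP_PREFIX}}
    echo "${HADOOP_HOME}"   # /some/old/install -- hence the test setup unsets both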
@ -45,7 +45,7 @@ basicinitsetup () {
|
|||||||
unset ${j}
|
unset ${j}
|
||||||
done
|
done
|
||||||
|
|
||||||
HADOOP_PREFIX=${TMP}
|
HADOOP_HOME=${TMP}
|
||||||
}
|
}
|
||||||
|
|
||||||
check_var_values () {
|
check_var_values () {
|
||||||
|
@ -22,7 +22,7 @@ load hadoop-functions_test_helper
|
|||||||
}
|
}
|
||||||
|
|
||||||
@test "hadoop_bootstrap (libexec)" {
|
@test "hadoop_bootstrap (libexec)" {
|
||||||
unset HADOOP_PREFIX
|
unset HADOOP_HOME
|
||||||
unset HADOOP_COMMON_DIR
|
unset HADOOP_COMMON_DIR
|
||||||
unset HADOOP_COMMON_LIB_JARS_DIR
|
unset HADOOP_COMMON_LIB_JARS_DIR
|
||||||
unset HDFS_DIR
|
unset HDFS_DIR
|
||||||
@ -39,7 +39,7 @@ load hadoop-functions_test_helper
|
|||||||
hadoop_bootstrap
|
hadoop_bootstrap
|
||||||
|
|
||||||
# all of these should be set
|
# all of these should be set
|
||||||
[ -n ${HADOOP_PREFIX} ]
|
[ -n ${HADOOP_HOME} ]
|
||||||
[ -n ${HADOOP_COMMON_DIR} ]
|
[ -n ${HADOOP_COMMON_DIR} ]
|
||||||
[ -n ${HADOOP_COMMON_LIB_JARS_DIR} ]
|
[ -n ${HADOOP_COMMON_LIB_JARS_DIR} ]
|
||||||
[ -n ${HDFS_DIR} ]
|
[ -n ${HDFS_DIR} ]
|
||||||
|
@ -16,10 +16,10 @@
|
|||||||
load hadoop-functions_test_helper
|
load hadoop-functions_test_helper
|
||||||
|
|
||||||
create_fake_dirs () {
|
create_fake_dirs () {
|
||||||
HADOOP_PREFIX=${TMP}
|
HADOOP_HOME=${TMP}
|
||||||
for j in conf etc/hadoop; do
|
for j in conf etc/hadoop; do
|
||||||
mkdir -p "${HADOOP_PREFIX}/${j}"
|
mkdir -p "${HADOOP_HOME}/${j}"
|
||||||
echo "unittest=${j}" > "${HADOOP_PREFIX}/${j}/hadoop-env.sh"
|
echo "unittest=${j}" > "${HADOOP_HOME}/${j}/hadoop-env.sh"
|
||||||
done
|
done
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -32,27 +32,27 @@ create_fake_dirs () {
|
|||||||
@test "hadoop_find_confdir (bw compat: conf)" {
|
@test "hadoop_find_confdir (bw compat: conf)" {
|
||||||
create_fake_dirs
|
create_fake_dirs
|
||||||
hadoop_find_confdir
|
hadoop_find_confdir
|
||||||
echo ">${HADOOP_CONF_DIR}< >${HADOOP_PREFIX}/conf<"
|
echo ">${HADOOP_CONF_DIR}< >${HADOOP_HOME}/conf<"
|
||||||
[ "${HADOOP_CONF_DIR}" = ${HADOOP_PREFIX}/conf ]
|
[ "${HADOOP_CONF_DIR}" = ${HADOOP_HOME}/conf ]
|
||||||
}
|
}
|
||||||
|
|
||||||
@test "hadoop_find_confdir (etc/hadoop)" {
|
@test "hadoop_find_confdir (etc/hadoop)" {
|
||||||
create_fake_dirs
|
create_fake_dirs
|
||||||
rm -rf "${HADOOP_PREFIX}/conf"
|
rm -rf "${HADOOP_HOME}/conf"
|
||||||
hadoop_find_confdir
|
hadoop_find_confdir
|
||||||
[ "${HADOOP_CONF_DIR}" = ${HADOOP_PREFIX}/etc/hadoop ]
|
[ "${HADOOP_CONF_DIR}" = ${HADOOP_HOME}/etc/hadoop ]
|
||||||
}
|
}
|
||||||
|
|
||||||
@test "hadoop_verify_confdir (negative) " {
|
@test "hadoop_verify_confdir (negative) " {
|
||||||
create_fake_dirs
|
create_fake_dirs
|
||||||
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
|
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
|
||||||
run hadoop_verify_confdir
|
run hadoop_verify_confdir
|
||||||
[ -n "${output}" ]
|
[ -n "${output}" ]
|
||||||
}
|
}
|
||||||
|
|
||||||
@test "hadoop_verify_confdir (positive) " {
|
@test "hadoop_verify_confdir (positive) " {
|
||||||
create_fake_dirs
|
create_fake_dirs
|
||||||
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
|
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
|
||||||
touch "${HADOOP_CONF_DIR}/log4j.properties"
|
touch "${HADOOP_CONF_DIR}/log4j.properties"
|
||||||
run hadoop_verify_confdir
|
run hadoop_verify_confdir
|
||||||
[ -z "${output}" ]
|
[ -z "${output}" ]
|
||||||
@ -60,7 +60,7 @@ create_fake_dirs () {
|
|||||||
|
|
||||||
@test "hadoop_exec_hadoopenv (positive) " {
|
@test "hadoop_exec_hadoopenv (positive) " {
|
||||||
create_fake_dirs
|
create_fake_dirs
|
||||||
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
|
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
|
||||||
hadoop_exec_hadoopenv
|
hadoop_exec_hadoopenv
|
||||||
[ -n "${HADOOP_ENV_PROCESSED}" ]
|
[ -n "${HADOOP_ENV_PROCESSED}" ]
|
||||||
[ "${unittest}" = conf ]
|
[ "${unittest}" = conf ]
|
||||||
@ -68,7 +68,7 @@ create_fake_dirs () {
|
|||||||
|
|
||||||
@test "hadoop_exec_hadoopenv (negative) " {
|
@test "hadoop_exec_hadoopenv (negative) " {
|
||||||
create_fake_dirs
|
create_fake_dirs
|
||||||
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
|
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
|
||||||
HADOOP_ENV_PROCESSED=true
|
HADOOP_ENV_PROCESSED=true
|
||||||
hadoop_exec_hadoopenv
|
hadoop_exec_hadoopenv
|
||||||
[ -z "${unittest}" ]
|
[ -z "${unittest}" ]
|
||||||
@ -76,7 +76,7 @@ create_fake_dirs () {
|
|||||||
|
|
||||||
@test "hadoop_exec_userfuncs" {
|
@test "hadoop_exec_userfuncs" {
|
||||||
create_fake_dirs
|
create_fake_dirs
|
||||||
HADOOP_CONF_DIR=${HADOOP_PREFIX}/conf
|
HADOOP_CONF_DIR=${HADOOP_HOME}/conf
|
||||||
echo "unittest=userfunc" > "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
|
echo "unittest=userfunc" > "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
|
||||||
hadoop_exec_userfuncs
|
hadoop_exec_userfuncs
|
||||||
[ "${unittest}" = "userfunc" ]
|
[ "${unittest}" = "userfunc" ]
|
||||||
|
@ -100,7 +100,7 @@ load hadoop-functions_test_helper
|
|||||||
hadoop_finalize_hadoop_heap () { true; }
|
hadoop_finalize_hadoop_heap () { true; }
|
||||||
hadoop_finalize_hadoop_opts () { true; }
|
hadoop_finalize_hadoop_opts () { true; }
|
||||||
hadoop_translate_cygwin_path () {
|
hadoop_translate_cygwin_path () {
|
||||||
if [ $1 = HADOOP_PREFIX ]; then
|
if [ $1 = HADOOP_HOME ]; then
|
||||||
testvar=prefix;
|
testvar=prefix;
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
@ -20,7 +20,7 @@
|
|||||||
|
|
||||||
# KMS temporary directory
|
# KMS temporary directory
|
||||||
#
|
#
|
||||||
# export KMS_TEMP=${HADOOP_PREFIX}/temp
|
# export KMS_TEMP=${HADOOP_HOME}/temp
|
||||||
|
|
||||||
# The HTTP port used by KMS
|
# The HTTP port used by KMS
|
||||||
#
|
#
|
||||||
@ -59,7 +59,7 @@
|
|||||||
#
|
#
|
||||||
# Location of tomcat
|
# Location of tomcat
|
||||||
#
|
#
|
||||||
# export KMS_CATALINA_HOME=${HADOOP_PREFIX}/share/hadoop/kms/tomcat
|
# export KMS_CATALINA_HOME=${HADOOP_HOME}/share/hadoop/kms/tomcat
|
||||||
|
|
||||||
# Java System properties for KMS should be specified in this variable.
|
# Java System properties for KMS should be specified in this variable.
|
||||||
# The java.library.path and hadoop.home.dir properties are automatically
|
# The java.library.path and hadoop.home.dir properties are automatically
|
||||||
|
@ -28,7 +28,7 @@ function hadoop_subproject_init
|
|||||||
|
|
||||||
export HADOOP_CATALINA_PREFIX=kms
|
export HADOOP_CATALINA_PREFIX=kms
|
||||||
|
|
||||||
export HADOOP_CATALINA_TEMP="${KMS_TEMP:-${HADOOP_PREFIX}/temp}"
|
export HADOOP_CATALINA_TEMP="${KMS_TEMP:-${HADOOP_HOME}/temp}"
|
||||||
|
|
||||||
hadoop_deprecate_envvar KMS_CONFIG HADOOP_CONF_DIR
|
hadoop_deprecate_envvar KMS_CONFIG HADOOP_CONF_DIR
|
||||||
|
|
||||||
@ -49,7 +49,7 @@ function hadoop_subproject_init
|
|||||||
# shellcheck disable=SC2086
|
# shellcheck disable=SC2086
|
||||||
export KMS_SSL_TRUSTSTORE_PASS=${KMS_SSL_TRUSTSTORE_PASS:-"$(echo ${CATALINA_OPTS} | grep -o 'trustStorePassword=[^ ]*' | cut -f2 -d= )"}
|
export KMS_SSL_TRUSTSTORE_PASS=${KMS_SSL_TRUSTSTORE_PASS:-"$(echo ${CATALINA_OPTS} | grep -o 'trustStorePassword=[^ ]*' | cut -f2 -d= )"}
|
||||||
|
|
||||||
export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_PREFIX}/share/hadoop/kms/tomcat}"
|
export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_HOME}/share/hadoop/kms/tomcat}"
|
||||||
export HADOOP_CATALINA_HOME="${KMS_CATALINA_HOME:-${CATALINA_BASE}}"
|
export HADOOP_CATALINA_HOME="${KMS_CATALINA_HOME:-${CATALINA_BASE}}"
|
||||||
|
|
||||||
export CATALINA_OUT="${CATALINA_OUT:-${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-kms-${HOSTNAME}.out}"
|
export CATALINA_OUT="${CATALINA_OUT:-${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-kms-${HOSTNAME}.out}"
|
||||||
@ -69,8 +69,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
|
|||||||
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
|
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
|
||||||
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
|
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
|
||||||
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
|
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
|
||||||
elif [[ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]]; then
|
elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
|
||||||
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
|
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
|
||||||
else
|
else
|
||||||
echo "ERROR: Hadoop common not found." 2>&1
|
echo "ERROR: Hadoop common not found." 2>&1
|
||||||
exit 1
|
exit 1
|
||||||
|
@ -30,8 +30,8 @@ function hadoop_usage
|
|||||||
}
|
}
|
||||||
|
|
||||||
# let's locate libexec...
|
# let's locate libexec...
|
||||||
if [[ -n "${HADOOP_PREFIX}" ]]; then
|
if [[ -n "${HADOOP_HOME}" ]]; then
|
||||||
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
|
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
|
||||||
else
|
else
|
||||||
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
|
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
|
||||||
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
|
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
|
||||||
|
@ -20,7 +20,7 @@
|
|||||||
|
|
||||||
# HTTPFS temporary directory
|
# HTTPFS temporary directory
|
||||||
#
|
#
|
||||||
# export HTTPFS_TEMP=${HADOOP_PREFIX}/temp
|
# export HTTPFS_TEMP=${HADOOP_HOME}/temp
|
||||||
|
|
||||||
# The HTTP port used by HTTPFS
|
# The HTTP port used by HTTPFS
|
||||||
#
|
#
|
||||||
@ -53,7 +53,7 @@
|
|||||||
#
|
#
|
||||||
# Location of tomcat
|
# Location of tomcat
|
||||||
#
|
#
|
||||||
# export HTTPFS_CATALINA_HOME=${HADOOP_PREFIX}/share/hadoop/httpfs/tomcat
|
# export HTTPFS_CATALINA_HOME=${HADOOP_HOME}/share/hadoop/httpfs/tomcat
|
||||||
|
|
||||||
# Java System properties for HTTPFS should be specified in this variable.
|
# Java System properties for HTTPFS should be specified in this variable.
|
||||||
# The java.library.path and hadoop.home.dir properties are automatically
|
# The java.library.path and hadoop.home.dir properties are automatically
|
||||||
|
@ -28,7 +28,7 @@ function hadoop_subproject_init
|
|||||||
|
|
||||||
export HADOOP_CATALINA_PREFIX=httpfs
|
export HADOOP_CATALINA_PREFIX=httpfs
|
||||||
|
|
||||||
export HADOOP_CATALINA_TEMP="${HTTPFS_TEMP:-${HADOOP_PREFIX}/temp}"
|
export HADOOP_CATALINA_TEMP="${HTTPFS_TEMP:-${HADOOP_HOME}/temp}"
|
||||||
|
|
||||||
hadoop_deprecate_envvar HTTPFS_CONFIG HADOOP_CONF_DIR
|
hadoop_deprecate_envvar HTTPFS_CONFIG HADOOP_CONF_DIR
|
||||||
|
|
||||||
@ -47,7 +47,7 @@ function hadoop_subproject_init
|
|||||||
|
|
||||||
export HADOOP_CATALINA_SSL_KEYSTORE_FILE="${HTTPFS_SSL_KEYSTORE_FILE:-${HOME}/.keystore}"
|
export HADOOP_CATALINA_SSL_KEYSTORE_FILE="${HTTPFS_SSL_KEYSTORE_FILE:-${HOME}/.keystore}"
|
||||||
|
|
||||||
export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_PREFIX}/share/hadoop/httpfs/tomcat}"
|
export CATALINA_BASE="${CATALINA_BASE:-${HADOOP_HOME}/share/hadoop/httpfs/tomcat}"
|
||||||
export HADOOP_CATALINA_HOME="${HTTPFS_CATALINA_HOME:-${CATALINA_BASE}}"
|
export HADOOP_CATALINA_HOME="${HTTPFS_CATALINA_HOME:-${CATALINA_BASE}}"
|
||||||
|
|
||||||
export CATALINA_OUT="${CATALINA_OUT:-${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-httpfs-${HOSTNAME}.out}"
|
export CATALINA_OUT="${CATALINA_OUT:-${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-httpfs-${HOSTNAME}.out}"
|
||||||
@ -67,8 +67,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
|
|||||||
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
|
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
|
||||||
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
|
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
|
||||||
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
|
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
|
||||||
elif [[ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]]; then
|
elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
|
||||||
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
|
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
|
||||||
else
|
else
|
||||||
echo "ERROR: Hadoop common not found." 2>&1
|
echo "ERROR: Hadoop common not found." 2>&1
|
||||||
exit 1
|
exit 1
|
||||||
|
@ -30,8 +30,8 @@ function hadoop_usage
|
|||||||
}
|
}
|
||||||
|
|
||||||
# let's locate libexec...
|
# let's locate libexec...
|
||||||
if [[ -n "${HADOOP_PREFIX}" ]]; then
|
if [[ -n "${HADOOP_HOME}" ]]; then
|
||||||
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
|
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
|
||||||
else
|
else
|
||||||
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
|
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
|
||||||
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
|
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
|
||||||
|
@ -35,9 +35,9 @@ Requirements
|
|||||||
BUILDING
|
BUILDING
|
||||||
|
|
||||||
fuse-dfs executable can be built by setting `require.fuse` option to true using Maven. For example:
|
fuse-dfs executable can be built by setting `require.fuse` option to true using Maven. For example:
|
||||||
in HADOOP_PREFIX: `mvn package -Pnative -Drequire.fuse=true -DskipTests -Dmaven.javadoc.skip=true`
|
in HADOOP_HOME: `mvn package -Pnative -Drequire.fuse=true -DskipTests -Dmaven.javadoc.skip=true`
|
||||||
|
|
||||||
The executable `fuse_dfs` will be located at HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/
|
The executable `fuse_dfs` will be located at HADOOP_HOME/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/
|
||||||
|
|
||||||
Common build problems include not finding the libjvm.so in JAVA_HOME/jre/lib/OS_ARCH/server or not finding fuse in FUSE_HOME or /usr/local.
|
Common build problems include not finding the libjvm.so in JAVA_HOME/jre/lib/OS_ARCH/server or not finding fuse in FUSE_HOME or /usr/local.
|
||||||
|
|
||||||
@ -109,7 +109,7 @@ NOTE - you cannot export this with a FUSE module built into the kernel
|
|||||||
|
|
||||||
RECOMMENDATIONS
|
RECOMMENDATIONS
|
||||||
|
|
||||||
1. From /bin, `ln -s HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/fuse_dfs* .`
|
1. From /bin, `ln -s HADOOP_HOME/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs/fuse_dfs* .`
|
||||||
|
|
||||||
2. Always start with debug on so you can see if you are missing a classpath or something like that.
|
2. Always start with debug on so you can see if you are missing a classpath or something like that.
|
||||||
|
|
||||||
|
@ -16,12 +16,12 @@
|
|||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
|
|
||||||
if [ "$HADOOP_PREFIX" = "" ]; then
|
if [ "$HADOOP_HOME" = "" ]; then
|
||||||
echo "HADOOP_PREFIX is empty. Set it to the root directory of Hadoop source code"
|
echo "HADOOP_HOME is empty. Set it to the root directory of Hadoop source code"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
export FUSEDFS_PATH="$HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs"
|
export FUSEDFS_PATH="$HADOOP_HOME/hadoop-hdfs-project/hadoop-hdfs-native-client/target/main/native/fuse-dfs"
|
||||||
export LIBHDFS_PATH="$HADOOP_PREFIX/hadoop-hdfs-project/hadoop-hdfs-native-client/target/usr/local/lib"
|
export LIBHDFS_PATH="$HADOOP_HOME/hadoop-hdfs-project/hadoop-hdfs-native-client/target/usr/local/lib"
|
||||||
|
|
||||||
if [ "$OS_ARCH" = "" ]; then
|
if [ "$OS_ARCH" = "" ]; then
|
||||||
export OS_ARCH=amd64
|
export OS_ARCH=amd64
|
||||||
@ -38,12 +38,12 @@ fi
|
|||||||
while IFS= read -r -d '' file
|
while IFS= read -r -d '' file
|
||||||
do
|
do
|
||||||
export CLASSPATH=$CLASSPATH:$file
|
export CLASSPATH=$CLASSPATH:$file
|
||||||
done < <(find "$HADOOP_PREFIX/hadoop-client" -name "*.jar" -print0)
|
done < <(find "$HADOOP_HOME/hadoop-client" -name "*.jar" -print0)
|
||||||
|
|
||||||
while IFS= read -r -d '' file
|
while IFS= read -r -d '' file
|
||||||
do
|
do
|
||||||
export CLASSPATH=$CLASSPATH:$file
|
export CLASSPATH=$CLASSPATH:$file
|
||||||
done < <(find "$HADOOP_PREFIX/hhadoop-hdfs-project" -name "*.jar" -print0)
|
done < <(find "$HADOOP_HOME/hhadoop-hdfs-project" -name "*.jar" -print0)
|
||||||
|
|
||||||
export CLASSPATH=$HADOOP_CONF_DIR:$CLASSPATH
|
export CLASSPATH=$HADOOP_CONF_DIR:$CLASSPATH
|
||||||
export PATH=$FUSEDFS_PATH:$PATH
|
export PATH=$FUSEDFS_PATH:$PATH
|
||||||
|
@ -52,8 +52,8 @@ if [ ! -f "$excludeFilenameLocal" ] ; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
namenodes=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)
|
namenodes=$("$HADOOP_HOME/bin/hdfs" getconf -namenodes)
|
||||||
excludeFilenameRemote=$("$HADOOP_PREFIX/bin/hdfs" getconf -excludeFile)
|
excludeFilenameRemote=$("$HADOOP_HOME/bin/hdfs" getconf -excludeFile)
|
||||||
|
|
||||||
if [ "$excludeFilenameRemote" = '' ] ; then
|
if [ "$excludeFilenameRemote" = '' ] ; then
|
||||||
echo \
|
echo \
|
||||||
|
@ -60,8 +60,8 @@ function hadoop_usage
|
|||||||
}
|
}
|
||||||
|
|
||||||
# let's locate libexec...
|
# let's locate libexec...
|
||||||
if [[ -n "${HADOOP_PREFIX}" ]]; then
|
if [[ -n "${HADOOP_HOME}" ]]; then
|
||||||
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
|
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
|
||||||
else
|
else
|
||||||
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
|
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
|
||||||
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
|
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
|
||||||
|
@ -49,7 +49,7 @@ function hadoop_subproject_init
|
|||||||
|
|
||||||
hadoop_deprecate_envvar HADOOP_HDFS_IDENT_STRING HADOOP_IDENT_STRING
|
hadoop_deprecate_envvar HADOOP_HDFS_IDENT_STRING HADOOP_IDENT_STRING
|
||||||
|
|
||||||
HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_PREFIX}"
|
HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME}"
|
||||||
|
|
||||||
# turn on the defaults
|
# turn on the defaults
|
||||||
export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}
|
export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}
|
||||||
@ -71,8 +71,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
|
|||||||
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
|
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
|
||||||
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
|
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
|
||||||
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
|
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
|
||||||
elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
|
elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
|
||||||
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
|
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
|
||||||
else
|
else
|
||||||
echo "ERROR: Hadoop common not found." 2>&1
|
echo "ERROR: Hadoop common not found." 2>&1
|
||||||
exit 1
|
exit 1
|
||||||
|
@ -21,8 +21,8 @@
|
|||||||
# for dfsadmin to support multiple namenodes.
|
# for dfsadmin to support multiple namenodes.
|
||||||
|
|
||||||
# let's locate libexec...
|
# let's locate libexec...
|
||||||
if [[ -n "${HADOOP_PREFIX}" ]]; then
|
if [[ -n "${HADOOP_HOME}" ]]; then
|
||||||
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
|
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
|
||||||
else
|
else
|
||||||
this="${BASH_SOURCE-$0}"
|
this="${BASH_SOURCE-$0}"
|
||||||
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
|
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
|
||||||
|
@ -30,8 +30,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -29,8 +29,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -26,8 +26,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -28,8 +28,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -28,8 +28,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -26,8 +26,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -150,13 +150,13 @@ Here is an example configuration with two Namenodes:

**Step 1**: Format a Namenode using the following command:

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format [-clusterId <cluster_id>]
    [hdfs]$ $HADOOP_HOME/bin/hdfs namenode -format [-clusterId <cluster_id>]

Choose a unique cluster\_id which will not conflict other clusters in your environment. If a cluster\_id is not provided, then a unique one is auto generated.

**Step 2**: Format additional Namenodes using the following command:

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format -clusterId <cluster_id>
    [hdfs]$ $HADOOP_HOME/bin/hdfs namenode -format -clusterId <cluster_id>

Note that the cluster\_id in step 2 must be same as that of the cluster\_id in step 1. If they are different, the additional Namenodes will not be part of the federated cluster.
@ -164,7 +164,7 @@ Note that the cluster\_id in step 2 must be same as that of the cluster\_id in s

Older releases only support a single Namenode. Upgrade the cluster to newer release in order to enable federation. During upgrade you can provide a ClusterID as follows:

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start namenode -upgrade -clusterId <cluster_ID>
    [hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start namenode -upgrade -clusterId <cluster_ID>

If cluster\_id is not provided, it is auto generated.
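To make the relationship between the two formatting steps concrete, here is a minimal sketch; the host roles and the cluster ID value are illustrative, and both invocations deliberately pass the same ID so the additional Namenode joins the same federation:

```bash
# Illustrative only: format two federated NameNodes with one shared cluster ID.
CLUSTER_ID="demo-federation-01"   # hypothetical value; omit -clusterId to auto-generate one

# Step 1 (run as hdfs on the first NameNode host)
"$HADOOP_HOME/bin/hdfs" namenode -format -clusterId "$CLUSTER_ID"

# Step 2 (run as hdfs on each additional NameNode host, reusing the SAME ID)
"$HADOOP_HOME/bin/hdfs" namenode -format -clusterId "$CLUSTER_ID"
```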
@ -187,7 +187,7 @@ Perform the following steps:
* Refresh the Datanodes to pickup the newly added Namenode by running
  the following command against all the Datanodes in the cluster:

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs dfsadmin -refreshNamenodes <datanode_host_name>:<datanode_rpc_port>
    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsadmin -refreshNamenodes <datanode_host_name>:<datanode_rpc_port>

Managing the cluster
--------------------
@ -196,11 +196,11 @@ Managing the cluster

To start the cluster run the following command:

    [hdfs]$ $HADOOP_PREFIX/sbin/start-dfs.sh
    [hdfs]$ $HADOOP_HOME/sbin/start-dfs.sh

To stop the cluster run the following command:

    [hdfs]$ $HADOOP_PREFIX/sbin/stop-dfs.sh
    [hdfs]$ $HADOOP_HOME/sbin/stop-dfs.sh

These commands can be run from any node where the HDFS configuration is available. The command uses the configuration to determine the Namenodes in the cluster and then starts the Namenode process on those nodes. The Datanodes are started on the nodes specified in the `slaves` file. The script can be used as a reference for building your own scripts to start and stop the cluster.
@ -208,7 +208,7 @@ These commands can be run from any node where the HDFS configuration is availabl

The Balancer has been changed to work with multiple Namenodes. The Balancer can be run using the command:

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start balancer [-policy <policy>]
    [hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start balancer [-policy <policy>]

The policy parameter can be any of the following:
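For a federated cluster the policy of interest is usually `blockpool`, which balances at the block pool level (the default policy is `datanode`). A hedged example invocation, assuming the daemon-style start shown above:

```bash
# Illustrative: balance storage at the block pool level across a federated cluster
"$HADOOP_HOME/bin/hdfs" --daemon start balancer -policy blockpool
```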
@ -227,11 +227,11 @@ Decommissioning is similar to prior releases. The nodes that need to be decomiss

**Step 1**: To distribute an exclude file to all the Namenodes, use the following command:

    [hdfs]$ $HADOOP_PREFIX/sbin/distribute-exclude.sh <exclude_file>
    [hdfs]$ $HADOOP_HOME/sbin/distribute-exclude.sh <exclude_file>

**Step 2**: Refresh all the Namenodes to pick up the new exclude file:

    [hdfs]$ $HADOOP_PREFIX/sbin/refresh-namenodes.sh
    [hdfs]$ $HADOOP_HOME/sbin/refresh-namenodes.sh

The above command uses HDFS configuration to determine the configured Namenodes in the cluster and refreshes them to pick up the new exclude file.
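Putting the two steps together, one decommissioning pass might look like the following sketch; the exclude-file path and host name are placeholders:

```bash
# Hypothetical exclude file listing the DataNodes to decommission
echo "dn7.example.com" >> /tmp/dfs.exclude

# Step 1: push the exclude file to every configured NameNode
"$HADOOP_HOME/sbin/distribute-exclude.sh" /tmp/dfs.exclude

# Step 2: ask all NameNodes to re-read their host lists
"$HADOOP_HOME/sbin/refresh-namenodes.sh"
```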
@ -475,7 +475,7 @@ There are also several other configuration parameters which may be set to contro

After the configuration keys have been added, the next step is to initialize required state in ZooKeeper. You can do so by running the following command from one of the NameNode hosts.

    [hdfs]$ $HADOOP_PREFIX/bin/zkfc -formatZK
    [hdfs]$ $HADOOP_HOME/bin/zkfc -formatZK

This will create a znode in ZooKeeper inside of which the automatic failover system stores its data.
@ -487,7 +487,7 @@ Since automatic failover has been enabled in the configuration, the `start-dfs.s

If you manually manage the services on your cluster, you will need to manually start the `zkfc` daemon on each of the machines that runs a NameNode. You can start the daemon by running:

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start zkfc
    [hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start zkfc

### Securing access to ZooKeeper
@ -523,7 +523,7 @@ There are also several other configuration parameters which may be set to contro

After the configuration keys have been added, the next step is to initialize required state in ZooKeeper. You can do so by running the following command from one of the NameNode hosts.

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs zkfc -formatZK
    [hdfs]$ $HADOOP_HOME/bin/hdfs zkfc -formatZK

This will create a znode in ZooKeeper inside of which the automatic failover system stores its data.
@ -535,7 +535,7 @@ Since automatic failover has been enabled in the configuration, the `start-dfs.s

If you manually manage the services on your cluster, you will need to manually start the `zkfc` daemon on each of the machines that runs a NameNode. You can start the daemon by running:

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start zkfc
    [hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start zkfc

### Securing access to ZooKeeper
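Taken together with the `-formatZK` step above, a manually managed setup amounts to something like this sketch (the second command is run once on every host that runs a NameNode):

```bash
# One-time initialization of the failover state in ZooKeeper (any one NameNode host)
"$HADOOP_HOME/bin/hdfs" zkfc -formatZK

# On each NameNode host, start the ZooKeeper failover controller daemon
"$HADOOP_HOME/bin/hdfs" --daemon start zkfc
```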
@ -215,7 +215,7 @@ Three daemons are required to provide NFS service: rpcbind (or portmap), mountd

2. Start Hadoop's portmap (needs root privileges):

    [root]> $HADOOP_PREFIX/bin/hdfs --daemon start portmap
    [root]> $HADOOP_HOME/bin/hdfs --daemon start portmap

3. Start mountd and nfsd.
@ -224,12 +224,12 @@ Three daemons are required to provide NFS service: rpcbind (or portmap), mountd
While in secure mode, any user can start NFS gateway
as long as the user has read access to the Kerberos keytab defined in "nfs.keytab.file".

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon start nfs3
    [hdfs]$ $HADOOP_HOME/bin/hdfs --daemon start nfs3

4. Stop NFS gateway services.

    [hdfs]$ $HADOOP_PREFIX/bin/hdfs --daemon stop nfs3
    [hdfs]$ $HADOOP_HOME/bin/hdfs --daemon stop nfs3
    [root]> $HADOOP_PREFIX/bin/hdfs --daemon stop portmap
    [root]> $HADOOP_HOME/bin/hdfs --daemon stop portmap

Optionally, you can forgo running the Hadoop-provided portmap daemon and instead use the system portmap daemon on all operating systems if you start the NFS Gateway as root. This will allow the HDFS NFS Gateway to work around the aforementioned bug and still register using the system portmap daemon. To do so, just start the NFS gateway daemon as you normally would, but make sure to do so as the "root" user, and also set the "HADOOP\_PRIVILEGED\_NFS\_USER" environment variable to an unprivileged user. In this mode the NFS Gateway will start as root to perform its initial registration with the system portmap, and then will drop privileges back to the user specified by the HADOOP\_PRIVILEGED\_NFS\_USER afterward and for the rest of the duration of the lifetime of the NFS Gateway process. Note that if you choose this route, you should skip steps 1 and 2 above.
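A sketch of that optional root-started variant; the account name is illustrative, and the system rpcbind/portmap is assumed to be running already, so the Hadoop portmap from steps 1 and 2 is skipped:

```bash
# Run as root so the gateway can register with the system portmap,
# then let it drop privileges to an unprivileged account for the rest of its lifetime.
export HADOOP_PRIVILEGED_NFS_USER=nfsserver   # hypothetical unprivileged user

"$HADOOP_HOME/bin/hdfs" --daemon start nfs3   # invoked as root
```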
@ -62,7 +62,7 @@ private String getHostPortForNN(MiniDFSCluster cluster) {
public void testCreateAndDestroySpanReceiver() throws Exception {
Configuration conf = new Configuration();
conf = new Configuration();
conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
conf.set(TraceUtils.DEFAULT_HADOOP_TRACE_PREFIX +
Tracer.SPAN_RECEIVER_CLASSES_KEY, "");
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@ -65,10 +65,10 @@ public static void shutdown() throws IOException {
public void testShortCircuitTraceHooks() throws IOException {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
conf = new Configuration();
conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
conf.set(TraceUtils.DEFAULT_HADOOP_TRACE_PREFIX +
Tracer.SPAN_RECEIVER_CLASSES_KEY,
SetSpanReceiver.class.getName());
conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
conf.set(TraceUtils.DEFAULT_HADOOP_TRACE_PREFIX +
Tracer.SAMPLER_CLASSES_KEY,
"AlwaysSampler");
conf.setLong("dfs.blocksize", 100 * 1024);
@ -37,8 +37,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -47,7 +47,7 @@ function hadoop_subproject_init

hadoop_deprecate_envvar HADOOP_MAPRED_ROOT_LOGGER HADOOP_ROOT_LOGGER

HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_PREFIX}"
HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_HOME}"

hadoop_deprecate_envvar HADOOP_MAPRED_IDENT_STRING HADOOP_IDENT_STRING
}
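The `:-` expansion and the `hadoop_deprecate_envvar` calls above express the same fall-through idea: a subproject home defaults to the top-level `HADOOP_HOME` unless it is explicitly set, and legacy variable names are carried over to their replacements (typically with a warning). A stripped-down sketch of that pattern, illustrative only and not the actual hadoop-functions.sh implementation:

```bash
# Simplified illustration of the deprecate-and-fall-back pattern.
function demo_deprecate_envvar
{
  local oldvar=$1
  local newvar=$2
  if [[ -n "${!oldvar}" ]]; then
    echo "WARNING: ${oldvar} has been replaced by ${newvar}." 1>&2
    # copy the legacy value into the new variable name
    eval "${newvar}=\"\${${oldvar}}\""
  fi
}

demo_deprecate_envvar HADOOP_PREFIX HADOOP_HOME
HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_HOME}"
```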
@ -62,8 +62,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
else
echo "ERROR: Hadoop common not found." 2>&1
exit 1
@ -21,8 +21,8 @@ function hadoop_usage
}

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
@ -315,7 +315,7 @@ private static void setupPipesJob(JobConf conf) throws IOException {
// <path>#<executable>
if (exec.contains("#")) {
// set default gdb commands for map and reduce task
String defScript = "$HADOOP_PREFIX/src/c++/pipes/debug/pipes-default-script";
String defScript = "$HADOOP_HOME/src/c++/pipes/debug/pipes-default-script";
setIfUnset(conf, MRJobConfig.MAP_DEBUG_SCRIPT,defScript);
setIfUnset(conf, MRJobConfig.REDUCE_DEBUG_SCRIPT,defScript);
}
@ -450,7 +450,7 @@ public static void main(String[] args) {
}

//Copy the executables over to the remote filesystem
String hadoopHome = System.getenv("HADOOP_PREFIX");
String hadoopHome = System.getenv("HADOOP_HOME");
fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/libhdfs.so." + HDFS_LIB_VERSION),
HDFS_SHLIB);
fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_read"), HDFS_READ);
@ -179,7 +179,7 @@ private void runSortValidatorTest(final JobClient jc,

private String normalizeCommandPath(String command) {
final String hadoopHome;
if ((hadoopHome = System.getenv("HADOOP_PREFIX")) != null) {
if ((hadoopHome = System.getenv("HADOOP_HOME")) != null) {
command = hadoopHome + "/" + command;
}
return command;
@ -450,7 +450,7 @@ public int compare(AppInfo o1, AppInfo o2) {
fi
export HADOOP_CLIENT_OPTS="-Xmx1024m"
export HADOOP_CLASSPATH=/dist/share/hadoop/tools/lib/hadoop-archive-logs-2.8.0-SNAPSHOT.jar:/dist/share/hadoop/tools/lib/hadoop-archives-2.8.0-SNAPSHOT.jar
"$HADOOP_PREFIX"/bin/hadoop org.apache.hadoop.tools.HadoopArchiveLogsRunner -appId "$appId" -user "$user" -workingDir /tmp/logs/archive-logs-work -remoteRootLogDir /tmp/logs -suffix logs
"$HADOOP_HOME"/bin/hadoop org.apache.hadoop.tools.HadoopArchiveLogsRunner -appId "$appId" -user "$user" -workingDir /tmp/logs/archive-logs-work -remoteRootLogDir /tmp/logs -suffix logs
*/
@VisibleForTesting
void generateScript(File localScript, Path workingDir,
@ -484,7 +484,7 @@ void generateScript(File localScript, Path workingDir,
fw.write("m\"\n");
fw.write("export HADOOP_CLASSPATH=");
fw.write(classpath);
fw.write("\n\"$HADOOP_PREFIX\"/bin/hadoop ");
fw.write("\n\"$HADOOP_HOME\"/bin/hadoop ");
fw.write(HadoopArchiveLogsRunner.class.getName());
fw.write(" -appId \"$appId\" -user \"$user\" -workingDir ");
fw.write(workingDir.toString());
@ -294,14 +294,14 @@ private void _testGenerateScript(boolean proxy) throws Exception {
Assert.assertTrue(lines[14].startsWith("export HADOOP_CLASSPATH="));
if (proxy) {
Assert.assertEquals(
"\"$HADOOP_PREFIX\"/bin/hadoop org.apache.hadoop.tools." +
"\"$HADOOP_HOME\"/bin/hadoop org.apache.hadoop.tools." +
"HadoopArchiveLogsRunner -appId \"$appId\" -user \"$user\" " +
"-workingDir " + workingDir.toString() + " -remoteRootLogDir " +
remoteRootLogDir.toString() + " -suffix " + suffix,
lines[15]);
} else {
Assert.assertEquals(
"\"$HADOOP_PREFIX\"/bin/hadoop org.apache.hadoop.tools." +
"\"$HADOOP_HOME\"/bin/hadoop org.apache.hadoop.tools." +
"HadoopArchiveLogsRunner -appId \"$appId\" -user \"$user\" " +
"-workingDir " + workingDir.toString() + " -remoteRootLogDir " +
remoteRootLogDir.toString() + " -suffix " + suffix + " -noProxy",
@ -20,7 +20,7 @@ B.a31 B.a32
*****************************
*** Invoke SampleDataJoin ***
*****************************
[:~]$ $HADOOP_PREFIX/bin/hadoop jar hadoop-datajoin-examples.jar org.apache.hadoop.contrib.utils.join.DataJoinJob datajoin/input datajoin/output Text 1 org.apache.hadoop.contrib.utils.join.SampleDataJoinMapper org.apache.hadoop.contrib.utils.join.SampleDataJoinReducer org.apache.hadoop.contrib.utils.join.SampleTaggedMapOutput Text
[:~]$ $HADOOP_HOME/bin/hadoop jar hadoop-datajoin-examples.jar org.apache.hadoop.contrib.utils.join.DataJoinJob datajoin/input datajoin/output Text 1 org.apache.hadoop.contrib.utils.join.SampleDataJoinMapper org.apache.hadoop.contrib.utils.join.SampleDataJoinReducer org.apache.hadoop.contrib.utils.join.SampleTaggedMapOutput Text
Using TextInputFormat: Text
Using TextOutputFormat: Text
07/06/01 19:58:23 INFO mapred.FileInputFormat: Total input paths to process : 2
@ -1,3 +1,4 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -9,6 +10,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
core=`find . -name 'core*'`
core=$(find . -name 'core*')
#Only pipes programs have 5th argument as program name.
gdb -quiet $5 -c $core -x $HADOOP_PREFIX/src/c++/pipes/debug/pipes-default-gdb-commands.txt
gdb -quiet "${5}" -c "${core}" -x "${HADOOP_HOME}/src/c++/pipes/debug/pipes-default-gdb-commands.txt"
@ -77,8 +77,8 @@ function run_sls_generator()
}

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
@ -71,8 +71,8 @@ function parse_args()
function calculate_classpath
{
hadoop_add_to_classpath_tools hadoop-sls
hadoop_debug "Injecting ${HADOOP_PREFIX}/share/hadoop/tools/sls/html into CLASSPATH"
hadoop_debug "Injecting ${HADOOP_TOOLS_DIR}/sls/html into CLASSPATH"
hadoop_add_classpath "${HADOOP_PREFIX}/share/hadoop/tools/sls/html"
hadoop_add_classpath "${HADOOP_TOOLS_DIR}/sls/html"
}

function run_simulation() {
@ -105,8 +105,8 @@ function run_simulation() {
}

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
@ -91,7 +91,7 @@ public int run(String[] args) throws Exception {
}

private void printUsage() {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar"
+ " dumptb <glob-pattern>");
System.out.println(" Dumps all files that match the given pattern to " +
"standard output as typed bytes.");
@ -56,7 +56,7 @@ public static void main(String[] args) throws Exception {
}

private static void printUsage() {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar"
+ " [options]");
System.out.println("Options:");
System.out.println(" dumptb <glob-pattern> Dumps all files that match the"
@ -89,7 +89,7 @@ public int run(String[] args) throws Exception {
}

private void printUsage() {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar"
+ " loadtb <path>");
System.out.println(" Reads typed bytes from standard input" +
" and stores them in a sequence file in");
@ -502,7 +502,7 @@ public void exitUsage(boolean detailed) {
}

private void printUsage(boolean detailed) {
System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar"
System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar"
+ " [options]");
System.out.println("Options:");
System.out.println(" -input <path> DFS input file(s) for the Map"
@ -551,7 +551,7 @@ private void printUsage(boolean detailed) {
System.out.println();
System.out.println("For more details about these options:");
System.out.println("Use " +
"$HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar -info");
"$HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar -info");
return;
}
System.out.println();
@ -611,7 +611,7 @@ private void printUsage(boolean detailed) {
System.out.println(" -D stream.non.zero.exit.is.failure=false");
System.out.println("Use a custom hadoop streaming build along with standard"
+ " hadoop install:");
System.out.println(" $HADOOP_PREFIX/bin/hadoop jar " +
System.out.println(" $HADOOP_HOME/bin/hadoop jar " +
"/path/my-hadoop-streaming.jar [...]\\");
System.out.println(" [...] -D stream.shipped.hadoopstreaming=" +
"/path/my-hadoop-streaming.jar");
@ -625,7 +625,7 @@ private void printUsage(boolean detailed) {
System.out.println(" -cmdenv EXAMPLE_DIR=/home/example/dictionaries/");
System.out.println();
System.out.println("Shortcut:");
System.out.println(" setenv HSTREAMING \"$HADOOP_PREFIX/bin/hadoop jar " +
System.out.println(" setenv HSTREAMING \"$HADOOP_HOME/bin/hadoop jar " +
"hadoop-streaming.jar\"");
System.out.println();
System.out.println("Example: $HSTREAMING -mapper " +
@ -648,9 +648,9 @@ public void fail(String message) {
// --------------------------------------------

protected String getHadoopClientHome() {
String h = env_.getProperty("HADOOP_PREFIX"); // standard Hadoop
String h = env_.getProperty("HADOOP_HOME"); // standard Hadoop
if (h == null) {
//fail("Missing required environment variable: HADOOP_PREFIX");
//fail("Missing required environment variable: HADOOP_HOME");
h = "UNDEF";
}
return h;
@ -674,8 +674,8 @@ protected String packageJobJar() throws IOException {
// usually found in: build/contrib or build/hadoop-<version>-dev-streaming.jar

// First try an explicit spec: it's too hard to find our own location in this case:
// $HADOOP_PREFIX/bin/hadoop jar /not/first/on/classpath/custom-hadoop-streaming.jar
// $HADOOP_HOME/bin/hadoop jar /not/first/on/classpath/custom-hadoop-streaming.jar
// where findInClasspath() would find the version of hadoop-streaming.jar in $HADOOP_PREFIX
// where findInClasspath() would find the version of hadoop-streaming.jar in $HADOOP_HOME
String runtimeClasses = config_.get("stream.shipped.hadoopstreaming"); // jar or class dir

if (runtimeClasses == null) {
@ -26,8 +26,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -26,8 +26,8 @@ function hadoop_usage
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -51,8 +51,8 @@ function hadoop_usage


# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
@ -53,7 +53,7 @@ function hadoop_subproject_init

hadoop_deprecate_envvar YARN_SLAVES HADOOP_SLAVES

HADOOP_YARN_HOME="${HADOOP_YARN_HOME:-$HADOOP_PREFIX}"
HADOOP_YARN_HOME="${HADOOP_YARN_HOME:-$HADOOP_HOME}"

# YARN-1429 added the completely superfluous YARN_USER_CLASSPATH
# env var. We're going to override HADOOP_USER_CLASSPATH to keep
@ -74,8 +74,8 @@ if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
. "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
. "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
. "${HADOOP_HOME}/libexec/hadoop-config.sh"
else
echo "ERROR: Hadoop common not found." 2>&1
exit 1
@ -21,8 +21,8 @@ function hadoop_usage
}

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
@ -25,8 +25,8 @@ this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

# let's locate libexec...
if [[ -n "${HADOOP_PREFIX}" ]]; then
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
@ -169,7 +169,7 @@ public void testContainerLaunchInvalidImage() throws IOException {
String appSubmitter = "nobody";
String appId = "APP_ID";
String containerId = "CONTAINER_ID";
String testImage = "testrepo.com/test-image rm -rf $HADOOP_PREFIX/*";
String testImage = "testrepo.com/test-image rm -rf $HADOOP_HOME/*";

Container container = mock(Container.class, RETURNS_DEEP_STUBS);
ContainerId cId = mock(ContainerId.class, RETURNS_DEEP_STUBS);
@ -142,7 +142,7 @@ Step 2. Pick a custom Docker image if you want. In this example, we'll use seque
Step 3. Run.

```bash
hadoop jar $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-${project.version}.jar \
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-${project.version}.jar \
teragen \
-Dmapreduce.map.env="yarn.nodemanager.docker-container-executor.image-name=sequenceiq/hadoop-docker:2.4.1" \
-Dyarn.app.mapreduce.am.env="yarn.nodemanager.docker-container-executor.image-name=sequenceiq/hadoop-docker:2.4.1" \