HADOOP-13673. Update scripts to be smarter when running with privilege

Signed-off-by: Andrew Wang <wang@apache.org>
Signed-off-by: Ravi Prakash <raviprak@apache.org>

parent acda1a5fee
commit 0eb4b513b7
@@ -183,13 +183,24 @@ else
   exit 1
 fi
 
-if [ $# = 0 ]; then
+# now that we have support code, let's abs MYNAME so we can use it later
+MYNAME=$(hadoop_abs "${MYNAME}")
+
+if [[ $# = 0 ]]; then
   hadoop_exit_with_usage 1
 fi
 
 HADOOP_SUBCMD=$1
 shift
 
+if hadoop_need_reexec hadoop "${HADOOP_SUBCMD}"; then
+  hadoop_uservar_su hadoop "${HADOOP_SUBCMD}" \
+    "${MYNAME}" \
+    "--reexec" \
+    "${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi
+
 hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
 
 HADOOP_SUBCMD_ARGS=("$@")
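For orientation, the round trip produced by this block can be traced roughly as follows. This is a hypothetical walk-through, not part of the patch; the script path is a placeholder and `HADOOP_DISTCP_USER=jane` is the example used in the shell guide later in this change:

```bash
# Hypothetical trace: root runs "hadoop distcp ..." with HADOOP_DISTCP_USER=jane.
#
# 1. hadoop_need_reexec hadoop distcp   -> returns 0 (privileged, jane != root)
# 2. hadoop_uservar_su hadoop distcp /path/to/bin/hadoop --reexec distcp ...
#       -> su -l jane -- /path/to/bin/hadoop --reexec distcp ...
# 3. The second invocation parses --reexec and sets HADOOP_REEXECED_CMD=true,
#    so hadoop_need_reexec now returns 1 and the command proceeds as jane.
```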
@@ -41,6 +41,42 @@ function hadoop_debug
   fi
 }
 
+## @description  Given a filename or dir, return the absolute version of it
+## @description  This works as an alternative to readlink, which isn't
+## @description  portable.
+## @audience     public
+## @stability    stable
+## @param        fsobj
+## @replaceable  no
+## @return       0 success
+## @return       1 failure
+## @return       stdout abspath
+function hadoop_abs
+{
+  declare obj=$1
+  declare dir
+  declare fn
+  declare dirret
+
+  if [[ ! -e ${obj} ]]; then
+    return 1
+  elif [[ -d ${obj} ]]; then
+    dir=${obj}
+  else
+    dir=$(dirname -- "${obj}")
+    fn=$(basename -- "${obj}")
+    fn="/${fn}"
+  fi
+
+  dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
+  dirret=$?
+  if [[ ${dirret} = 0 ]]; then
+    echo "${dir}${fn}"
+    return 0
+  fi
+  return 1
+}
+
 ## @description  Given variable $1 delete $2 from it
 ## @audience     public
 ## @stability    stable
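As a quick illustration of the new helper, here is a minimal sketch (not part of the patch; the working directory, file names, and libexec location are assumptions):

```bash
# hadoop_abs resolves a possibly relative path to an absolute one without
# depending on a portable "readlink -f".
. "${HADOOP_HOME}/libexec/hadoop-functions.sh"   # assumed install layout

cd /tmp && mkdir -p demo/sub && touch demo/sub/file

hadoop_abs "demo/sub/../sub/file"        # prints /tmp/demo/sub/file
hadoop_abs "missing" || echo "non-existent objects return 1"
```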
@@ -79,6 +115,101 @@ function hadoop_verify_entry
   [[ ${!1} =~ \ ${2}\ ]]
 }
 
+## @description  Check if we are running with privilege
+## @description  by default, this implementation looks for
+## @description  EUID=0.  For OSes that have true privilege
+## @description  separation, this should be something more complex
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @return       1 = no priv
+## @return       0 = priv
+function hadoop_privilege_check
+{
+  [[ "${EUID}" = 0 ]]
+}
+
+## @description  Execute a command via su when running as root
+## @description  if the given user is found or exit with
+## @description  failure if not.
+## @description  otherwise just run it.  (This is intended to
+## @description  be used by the start-*/stop-* scripts.)
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        user
+## @param        commandstring
+## @return       exitstatus
+function hadoop_su
+{
+  declare user=$1
+  shift
+  declare idret
+
+  if hadoop_privilege_check; then
+    id -u "${user}" >/dev/null 2>&1
+    idret=$?
+    if [[ ${idret} != 0 ]]; then
+      hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting."
+      return 1
+    else
+      su -l "${user}" -- "$@"
+    fi
+  else
+    "$@"
+  fi
+}
+
+## @description  Execute a command via su when running as root
+## @description  with extra support for commands that might
+## @description  legitimately start as root (e.g., datanode)
+## @description  (This is intended to
+## @description  be used by the start-*/stop-* scripts.)
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+## @param        user
+## @param        commandstring
+## @return       exitstatus
+function hadoop_uservar_su
+{
+
+  ## startup matrix:
+  #
+  # if $EUID != 0, then exec
+  # if $EUID =0 then
+  #    if hdfs_subcmd_user is defined, call hadoop_su to exec
+  #    if hdfs_subcmd_user is not defined, error
+  #
+  # For secure daemons, this means both the secure and insecure env vars need to be
+  # defined.  e.g., HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs
+  # This function will pick up the "normal" var, switch to that user, then
+  # execute the command which will then pick up the "secure" version.
+  #
+
+  declare program=$1
+  declare command=$2
+  shift 2
+
+  declare uprogram
+  declare ucommand
+  declare uvar
+
+  if hadoop_privilege_check; then
+    uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
+
+    if [[ -n "${!uvar}" ]]; then
+      hadoop_su "${!uvar}" "$@"
+    else
+      hadoop_error "ERROR: Attempting to launch ${program} ${command} as root"
+      hadoop_error "ERROR: but there is no ${uvar} defined. Aborting launch."
+      return 1
+    fi
+  else
+    "$@"
+  fi
+}
+
 ## @description  Add a subcommand to the usage output
 ## @audience     private
 ## @stability    evolving
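To see how the two helpers are meant to compose, here is an illustrative sketch (the value of `HDFS_NAMENODE_USER` is an example; the variable name follows the (program)_(SUBCOMMAND)_USER convention documented later in this change):

```bash
# When run by root, switch to the hdfs account before starting the namenode;
# when run without privilege, just run the command in place.
export HDFS_NAMENODE_USER=hdfs
hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" --daemon start namenode
```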
@@ -343,6 +474,9 @@ function hadoop_bootstrap
   # daemonization
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false
 
+  # by default, we have not been self-re-execed
+  HADOOP_REEXECED_CMD=false
+
   # shellcheck disable=SC2034
   HADOOP_SUBCMD_SECURESERVICE=false
 
@@ -624,9 +758,10 @@ function hadoop_basic_init
   fi
 
   # if for some reason the shell doesn't have $USER defined
-  # let's define it as 'hadoop'
+  # (e.g., ssh'd in to execute a command)
+  # let's get the effective username and use that
+  USER=${USER:-$(id -nu)}
   HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
-  HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-hadoop}
   HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"}
   HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
   HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
@@ -1400,8 +1535,7 @@ function hadoop_verify_secure_prereq
   # and you are using pfexec, you'll probably want to change
   # this.
 
-  # ${EUID} comes from the shell itself!
-  if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
+  if ! hadoop_privilege_check && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
     hadoop_error "ERROR: You must be a privileged user in order to run a secure service."
     exit 1
   else
@@ -1994,20 +2128,18 @@ function hadoop_secure_daemon_handler
   esac
 }
 
-## @description  Verify that ${USER} is allowed to execute the
-## @description  given subcommand.
+## @description  Get the environment variable used to validate users
 ## @audience     public
 ## @stability    stable
 ## @replaceable  yes
 ## @param        subcommand
-## @return       will exit on failure conditions
-function hadoop_verify_user
+## @return       string
+function hadoop_get_verify_uservar
 {
   declare program=$1
   declare command=$2
   declare uprogram
   declare ucommand
-  declare uvar
 
   if [[ -z "${BASH_VERSINFO[0]}" ]] \
      || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
@@ -2018,7 +2150,25 @@ function hadoop_verify_user
     ucommand=${command^^}
   fi
 
-  uvar="${uprogram}_${ucommand}_USER"
+  echo "${uprogram}_${ucommand}_USER"
+}
+
+## @description  Verify that ${USER} is allowed to execute the
+## @description  given subcommand.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        command
+## @param        subcommand
+## @return       return 0 on success
+## @return       exit 1 on failure
+function hadoop_verify_user
+{
+  declare program=$1
+  declare command=$2
+  declare uvar
+
+  uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
 
   if [[ -n ${!uvar} ]]; then
     if [[ ${!uvar} != "${USER}" ]]; then
@@ -2026,6 +2176,42 @@ function hadoop_verify_user
       exit 1
     fi
   fi
+  return 0
+}
+
+## @description  Verify that ${USER} is allowed to execute the
+## @description  given subcommand.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        subcommand
+## @return       1 on no re-exec needed
+## @return       0 on need to re-exec
+function hadoop_need_reexec
+{
+  declare program=$1
+  declare command=$2
+  declare uvar
+
+  # we've already been re-execed, bail
+
+  if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
+    return 1
+  fi
+
+  # if we have privilege, and the _USER is defined, and _USER is
+  # set to someone who isn't us, then yes, we should re-exec.
+  # otherwise no, don't re-exec and let the system deal with it.
+
+  if hadoop_privilege_check; then
+    uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
+    if [[ -n ${!uvar} ]]; then
+      if [[ ${!uvar} != "${USER}" ]]; then
+        return 0
+      fi
+    fi
+  fi
+  return 1
 }
 
 ## @description  Add custom (program)_(command)_OPTS to HADOOP_OPTS.
@@ -2228,6 +2414,15 @@ function hadoop_parse_args
       shift
       ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
     ;;
+    --reexec)
+      shift
+      if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
+        hadoop_error "ERROR: re-exec fork bomb prevention: --reexec already called"
+        exit 1
+      fi
+      HADOOP_REEXECED_CMD=true
+      ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
+    ;;
     --workers)
       shift
       # shellcheck disable=SC2034
@@ -15,10 +15,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-echo "This script is deprecated. Use start-dfs.sh and start-yarn.sh instead."
+## @description  catch the ctrl-c
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+function hadoop_abort_startall()
+{
   exit 1
+}
 
 
 # let's locate libexec...
 if [[ -n "${HADOOP_HOME}" ]]; then
@@ -38,6 +42,16 @@ else
   echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
   exit 1
 fi
 
+if ! hadoop_privilege_check; then
+  trap hadoop_abort_startall INT
+  hadoop_error "WARNING: Attempting to start all Apache Hadoop daemons as ${USER} in 10 seconds."
+  hadoop_error "WARNING: This is not a recommended production deployment configuration."
+  hadoop_error "WARNING: Use CTRL-C to abort."
+  sleep 10
+  trap - INT
+fi
+
 # start hdfs daemons if hdfs is present
 if [[ -f "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" ]]; then
   "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" --config "${HADOOP_CONF_DIR}"
@@ -49,4 +63,3 @@ if [[ -f "${HADOOP_YARN_HOME}/sbin/start-yarn.sh" ]]; then
 fi
 
 
-
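The warn-and-wait block added above boils down to a small pattern, sketched here standalone for clarity (the message text is illustrative):

```bash
# Give the operator ten seconds to abort with CTRL-C before continuing with a
# non-recommended configuration (starting every daemon as one unprivileged user).
trap 'exit 1' INT
echo "WARNING: continuing in 10 seconds, use CTRL-C to abort" >&2
sleep 10
trap - INT
```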
@@ -15,12 +15,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+## @description  catch the ctrl-c
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+function hadoop_abort_stopall()
+{
+  exit 1
+}
+
 # Stop all hadoop daemons.  Run this on master node.
 
-echo "This script is deprecated. Use stop-dfs.sh and stop-yarn.sh instead."
-exit 1
 
 # let's locate libexec...
 if [[ -n "${HADOOP_HOME}" ]]; then
   HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
@@ -40,6 +45,14 @@ else
   exit 1
 fi
 
+if ! hadoop_privilege_check; then
+  trap hadoop_abort_stopall INT
+  hadoop_error "WARNING: Stopping all Apache Hadoop daemons as ${USER} in 10 seconds."
+  hadoop_error "WARNING: Use CTRL-C to abort."
+  sleep 10
+  trap - INT
+fi
+
 # stop hdfs daemons if hdfs is present
 if [[ -f "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" ]]; then
   "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" --config "${HADOOP_CONF_DIR}"
@@ -103,6 +103,15 @@ In addition, daemons that run in an extra security mode also support `(command)_
 
 Apache Hadoop provides a way to do a user check per-subcommand. While this method is easily circumvented and should not be considered a security-feature, it does provide a mechanism by which to prevent accidents. For example, setting `HDFS_NAMENODE_USER=hdfs` will make the `hdfs namenode` and `hdfs --daemon start namenode` commands verify that the user running the commands are the hdfs user by checking the `USER` environment variable. This also works for non-daemons. Setting `HADOOP_DISTCP_USER=jane` will verify that `USER` is set to `jane` before being allowed to execute the `hadoop distcp` command.
 
+If a \_USER environment variable exists and commands are run with a privilege (e.g., as root; see hadoop_privilege_check in the API documentation), execution will switch to the specified user. For commands that support user account switching for security and therefore have a SECURE\_USER variable, the base \_USER variable needs to be the user that is expected to be used to switch to the SECURE\_USER account. For example:
+
+```bash
+HDFS_DATANODE_USER=root
+HDFS_DATANODE_SECURE_USER=hdfs
+```
+
+Be aware that if the \-\-workers flag is used, the user switch happens *after* ssh is invoked. The multi-daemon start and stop commands in sbin will, however, switch (if appropriate) prior and will therefore use the keys of the specified \_USER.
+
 ## Developer and Advanced Administrator Environment
 
 ### Shell Profiles
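For the common non-secure case, the same mechanism is typically driven from hadoop-env.sh. The values below are illustrative only and simply follow the (program)_(SUBCOMMAND)_USER naming convention described above:

```bash
# etc/hadoop/hadoop-env.sh (illustrative): when root runs the sbin start/stop
# scripts, each daemon is started via su as the account named here.
HDFS_NAMENODE_USER=hdfs
HDFS_SECONDARYNAMENODE_USER=hdfs
YARN_RESOURCEMANAGER_USER=yarn
YARN_NODEMANAGER_USER=yarn
```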
@@ -16,9 +16,9 @@
 
 setup() {
 
-  TMP="${BATS_TEST_DIRNAME}/../../../target/test-dir/bats.$$.${RANDOM}"
-  mkdir -p ${TMP}
-  TMP=$(cd -P -- "${TMP}" >/dev/null && pwd -P)
+  RELTMP="${BATS_TEST_DIRNAME}/../../../target/test-dir/bats.$$.${RANDOM}"
+  mkdir -p ${RELTMP}
+  TMP=$(cd -P -- "${RELTMP}" >/dev/null && pwd -P)
   export TMP
   TESTBINDIR=$(cd -P -- "$(pwd)" >/dev/null && pwd -P)
   HADOOP_LIBEXEC_DIR=${TESTBINDIR}/../../main/bin
@@ -0,0 +1,65 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+create_fake () {
+  mkdir ${TMP}/j
+  touch ${TMP}/j/k
+  ln -s j ${TMP}/l
+}
+
+
+@test "hadoop_abs (simple not exist)" {
+  run hadoop_abs fake
+  [ "${status}" -eq 1 ]
+}
+
+@test "hadoop_abs (simple dir)" {
+  create_fake
+  run hadoop_abs "${TMP}/j"
+  [ "${output}" = "${TMP}/j" ]
+}
+
+@test "hadoop_abs (simple file)" {
+  create_fake
+  run hadoop_abs "${TMP}/j/k"
+  [ "${output}" = "${TMP}/j/k" ]
+}
+
+@test "hadoop_abs (relative file1)" {
+  create_fake
+  run hadoop_abs "${TMP}/j/../j/k"
+  [ "${output}" = "${TMP}/j/k" ]
+}
+
+@test "hadoop_abs (relative file2)" {
+  create_fake
+  run hadoop_abs "${RELTMP}/j/../j/k"
+  [ "${output}" = "${TMP}/j/k" ]
+}
+
+@test "hadoop_abs (relative dir)" {
+  create_fake
+  fred=$(cd -P -- ".." >/dev/null && pwd -P)
+  run hadoop_abs ".."
+  [ "${output}" = "${fred}" ]
+}
+
+@test "hadoop_abs (symlink)" {
+  create_fake
+  run hadoop_abs "${TMP}/l"
+  [ "${output}" = "${TMP}/j" ]
+}
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_get_verify_uservar" {
+  run hadoop_get_verify_uservar cool program
+  [ "${output}" = "COOL_PROGRAM_USER" ]
+}
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_privilege_check " {
+  if [ ${EUID} = 0 ]; then
+    result=0
+  else
+    result=1
+  fi
+  run hadoop_privilege_check
+  [ "${status}" = "${result}" ]
+}
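These shell tests can be exercised directly with the bats test runner, assuming bats is installed; the file name below is illustrative since the new test file paths are not shown here:

```bash
# "run", "@test", "${status}" and "${output}" above are bats primitives.
bats hadoop_abs.bats
```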
@@ -239,6 +239,9 @@ else
   exit 1
 fi
 
+# now that we have support code, let's abs MYNAME so we can use it later
+MYNAME=$(hadoop_abs "${MYNAME}")
+
 if [[ $# = 0 ]]; then
   hadoop_exit_with_usage 1
 fi
@@ -246,6 +249,14 @@ fi
 HADOOP_SUBCMD=$1
 shift
 
+if hadoop_need_reexec hdfs "${HADOOP_SUBCMD}"; then
+  hadoop_uservar_su hdfs "${HADOOP_SUBCMD}" \
+    "${MYNAME}" \
+    "--reexec" \
+    "${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi
+
 hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
 
 HADOOP_SUBCMD_ARGS=("$@")
@@ -20,6 +20,21 @@
 # Optinally upgrade or rollback dfs state.
 # Run this on master node.
 
+## startup matrix:
+#
+# if $EUID != 0, then exec
+# if $EUID =0 then
+#    if hdfs_subcmd_user is defined, su to that user, exec
+#    if hdfs_subcmd_user is not defined, error
+#
+# For secure daemons, this means both the secure and insecure env vars need to be
+# defined.  e.g., HDFS_DATANODE_USER=root HADOOP_SECURE_DN_USER=hdfs
+#
+
+## @description  usage info
+## @audience     private
+## @stability    evolving
+## @replaceable  no
 function hadoop_usage
 {
   echo "Usage: start-dfs.sh [-upgrade|-rollback] [-clusterId]"
@@ -45,7 +60,6 @@ else
   exit 1
 fi
 
-
 # get arguments
 if [[ $# -ge 1 ]]; then
   startOpt="$1"
@@ -77,32 +91,25 @@ if [[ -z "${NAMENODES}" ]]; then
 fi
 
 echo "Starting namenodes on [${NAMENODES}]"
-"${HADOOP_HDFS_HOME}/bin/hdfs" \
+hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${NAMENODES}" \
   --daemon start \
   namenode ${nameStartOpt}
 
+HADOOP_JUMBO_RETCOUNTER=$?
+
 #---------------------------------------------------------
 # datanodes (using default workers file)
 
-if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
-  [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
-  hadoop_error "ERROR: Attempting to start secure cluster, skipping datanodes. "
-  hadoop_error "ERROR: Run start-secure-dns.sh as root or configure "
-  hadoop_error "ERROR: \${HADOOP_SECURE_COMMAND} to complete startup."
-else
-
 echo "Starting datanodes"
-  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --daemon start \
   datanode ${dataStartOpt}
-fi
+(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
 
 #---------------------------------------------------------
 # secondary namenodes (if any)
@@ -113,8 +120,8 @@ if [[ -n "${SECONDARY_NAMENODES}" ]]; then
 
   if [[ "${NAMENODES}" =~ , ]]; then
 
-    hadoop_error "ERROR: Highly available NameNode is configured."
-    hadoop_error "ERROR: Skipping SecondaryNameNode."
+    hadoop_error "WARNING: Highly available NameNode is configured."
+    hadoop_error "WARNING: Skipping SecondaryNameNode."
 
   else
 
@@ -124,12 +131,13 @@ if [[ -n "${SECONDARY_NAMENODES}" ]]; then
 
     echo "Starting secondary namenodes [${SECONDARY_NAMENODES}]"
 
-    "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
       --workers \
       --config "${HADOOP_CONF_DIR}" \
       --hostnames "${SECONDARY_NAMENODES}" \
       --daemon start \
       secondarynamenode
+    (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
   fi
 fi
 
@@ -143,12 +151,13 @@ case "${SHARED_EDITS_DIR}" in
     JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
     echo "Starting journal nodes [${JOURNAL_NODES}]"
 
-    "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
       --workers \
       --config "${HADOOP_CONF_DIR}" \
       --hostnames "${JOURNAL_NODES}" \
       --daemon start \
       journalnode
+    (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
   ;;
 esac
 
@@ -158,12 +167,15 @@ AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automati
 if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
   echo "Starting ZK Failover Controllers on NN hosts [${NAMENODES}]"
 
-  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+  hadoop_uservar_su hdfs zkfc "${HADOOP_HDFS_HOME}/bin/hdfs" \
     --workers \
     --config "${HADOOP_CONF_DIR}" \
     --hostnames "${NAMENODES}" \
     --daemon start \
     zkfc
+  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
 fi
 
+exit ${HADOOP_JUMBO_RETCOUNTER}
+
 # eof
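The HADOOP_JUMBO_RETCOUNTER lines above all follow the same pattern, sketched here in isolation (the command names are placeholders):

```bash
# Accumulate each daemon's exit status so the script itself exits non-zero
# if any of the start commands failed.
HADOOP_JUMBO_RETCOUNTER=0
start_first_daemon
(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
start_second_daemon
(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
exit ${HADOOP_JUMBO_RETCOUNTER}
```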
17  hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-secure-dns.sh  (Normal file → Executable file)
@@ -17,8 +17,12 @@
 
 # Run as root to start secure datanodes in a security-enabled cluster.
 
-function hadoop_usage {
+## @description  usage info
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+function hadoop_usage()
+{
   echo "Usage: start-secure-dns.sh"
 }
 
@@ -42,12 +46,9 @@ else
   exit 1
 fi
 
-if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
-  exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
-    --config "${HADOOP_CONF_DIR}" \
-    --workers \
-    --daemon start \
-    datanode
-else
-  echo hadoop_usage_and_exit 1
-fi
+echo "Starting datanodes"
+hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+  --workers \
+  --config "${HADOOP_CONF_DIR}" \
+  --daemon start \
+  datanode
@@ -19,9 +19,13 @@
 # Stop hadoop dfs daemons.
 # Run this on master node.
 
+## @description  usage info
+## @audience     private
+## @stability    evolving
+## @replaceable  no
 function hadoop_usage
 {
-  echo "Usage: stop-dfs.sh [-upgrade|-rollback] [-clusterId]"
+  echo "Usage: stop-dfs.sh"
 }
 
 this="${BASH_SOURCE-$0}"
@@ -55,7 +59,7 @@ fi
 
 echo "Stopping namenodes on [${NAMENODES}]"
 
-"${HADOOP_HDFS_HOME}/bin/hdfs" \
+hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${NAMENODES}" \
@@ -65,21 +69,13 @@ echo "Stopping namenodes on [${NAMENODES}]"
 #---------------------------------------------------------
 # datanodes (using default workers file)
 
-if [[ -n "${HADOOP_SECURE_DN_USER}" ]] &&
-  [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
-  echo "ERROR: Attempting to stop secure cluster, skipping datanodes. "
-  echo "Run stop-secure-dns.sh as root or configure "
-  echo "\${HADOOP_SECURE_COMMAND} to complete stop."
-else
-
 echo "Stopping datanodes"
 
-  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --daemon stop \
   datanode
-fi
 
 #---------------------------------------------------------
 # secondary namenodes (if any)
@@ -93,7 +89,7 @@ fi
 if [[ -n "${SECONDARY_NAMENODES}" ]]; then
   echo "Stopping secondary namenodes [${SECONDARY_NAMENODES}]"
 
-  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+  hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
     --workers \
     --config "${HADOOP_CONF_DIR}" \
     --hostnames "${SECONDARY_NAMENODES}" \
@@ -111,7 +107,7 @@ case "${SHARED_EDITS_DIR}" in
     JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
     echo "Stopping journal nodes [${JOURNAL_NODES}]"
 
-    "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
       --workers \
       --config "${HADOOP_CONF_DIR}" \
       --hostnames "${JOURNAL_NODES}" \
@@ -126,7 +122,7 @@ AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automati
 if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
   echo "Stopping ZK Failover Controllers on NN hosts [${NAMENODES}]"
 
-  "${HADOOP_HDFS_HOME}/bin/hdfs" \
+  hadoop_uservar_su hdfs zkfc "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${NAMENODES}" \
16  hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-secure-dns.sh  (Normal file → Executable file)
@@ -17,8 +17,12 @@
 
 # Run as root to stop secure datanodes in a security-enabled cluster.
 
-function hadoop_usage {
+## @description  usage info
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+function hadoop_usage()
+{
   echo "Usage: stop-secure-dns.sh"
 }
 
@@ -42,12 +46,8 @@ else
   exit 1
 fi
 
-if [[ "${EUID}" -eq 0 ]] && [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
-  exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
-    --config "${HADOOP_CONF_DIR}" \
-    --workers \
-    --daemon stop \
-    datanode
-else
-  echo hadoop_usage_and_exit 1
-fi
+hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+  --workers \
+  --config "${HADOOP_CONF_DIR}" \
+  --daemon stop \
+  datanode
@@ -121,6 +121,8 @@ else
   exit 1
 fi
 
+# now that we have support code, let's abs MYNAME so we can use it later
+MYNAME=$(hadoop_abs "${MYNAME}")
+
 if [ $# = 0 ]; then
   hadoop_exit_with_usage 1
@@ -129,6 +131,14 @@ fi
 HADOOP_SUBCMD=$1
 shift
 
+if hadoop_need_reexec mapred "${HADOOP_SUBCMD}"; then
+  hadoop_uservar_su mapred "${HADOOP_SUBCMD}" \
+    "${MYNAME}" \
+    "--reexec" \
+    "${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi
+
 hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
 
 HADOOP_SUBCMD_ARGS=("$@")
@@ -15,14 +15,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-MYNAME="${BASH_SOURCE-$0}"
+## @description  usage info
+## @audience     private
+## @stability    evolving
+## @replaceable  no
 function hadoop_usage
 {
   hadoop_generate_usage "${MYNAME}" false
 }
 
+MYNAME="${BASH_SOURCE-$0}"
+
 bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
 
 # let's locate libexec...
@@ -42,14 +45,17 @@ else
   exit 1
 fi
 
+HADOOP_JUMBO_RETCOUNTER=0
+
 # start resourceManager
 HARM=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.enabled 2>&-)
 if [[ ${HARM} = "false" ]]; then
   echo "Starting resourcemanager"
-  "${HADOOP_YARN_HOME}/bin/yarn" \
+  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
     --config "${HADOOP_CONF_DIR}" \
     --daemon start \
     resourcemanager
+  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
 else
   logicals=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.rm-ids 2>&-)
   logicals=${logicals//,/ }
@@ -59,30 +65,35 @@ else
     RMHOSTS="${RMHOSTS} ${rmhost}"
   done
   echo "Starting resourcemanagers on [${RMHOSTS}]"
-  "${HADOOP_YARN_HOME}/bin/yarn" \
+  hadoop_uservar_su yarn "${HADOOP_YARN_HOME}/bin/yarn" \
     --config "${HADOOP_CONF_DIR}" \
     --daemon start \
     --workers \
     --hostnames "${RMHOSTS}" \
     resourcemanager
+  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
 fi
 
 # start nodemanager
 echo "Starting nodemanagers"
-"${HADOOP_YARN_HOME}/bin/yarn" \
+hadoop_uservar_su yarn nodemanager "${HADOOP_YARN_HOME}/bin/yarn" \
   --config "${HADOOP_CONF_DIR}" \
   --workers \
   --daemon start \
   nodemanager
+(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
 
 
 # start proxyserver
 PROXYSERVER=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.web-proxy.address 2>&- | cut -f1 -d:)
 if [[ -n ${PROXYSERVER} ]]; then
-  "${HADOOP_YARN_HOME}/bin/yarn" \
+  hadoop_uservar_su yarn proxyserver "${HADOOP_YARN_HOME}/bin/yarn" \
     --config "${HADOOP_CONF_DIR}" \
     --workers \
     --hostnames "${PROXYSERVER}" \
     --daemon start \
     proxyserver
+  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
 fi
 
+exit ${HADOOP_JUMBO_RETCOUNTER}
@@ -15,14 +15,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-MYNAME="${BASH_SOURCE-$0}"
+## @description  usage info
+## @audience     private
+## @stability    evolving
+## @replaceable  no
 function hadoop_usage
 {
   hadoop_generate_usage "${MYNAME}" false
 }
 
+MYNAME="${BASH_SOURCE-$0}"
+
 bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
 
 # let's locate libexec...
@@ -44,7 +47,7 @@ fi
 
 # stop nodemanager
 echo "Stopping nodemanagers"
-"${HADOOP_YARN_HOME}/bin/yarn" \
+hadoop_uservar_su yarn nodemanager "${HADOOP_YARN_HOME}/bin/yarn" \
   --config "${HADOOP_CONF_DIR}" \
   --workers \
   --daemon stop \
@@ -54,7 +57,7 @@ echo "Stopping nodemanagers"
 HARM=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.enabled 2>&-)
 if [[ ${HARM} = "false" ]]; then
   echo "Stopping resourcemanager"
-  "${HADOOP_YARN_HOME}/bin/yarn" \
+  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
     --config "${HADOOP_CONF_DIR}" \
     --daemon stop \
     resourcemanager
@@ -67,7 +70,7 @@ else
     RMHOSTS="${RMHOSTS} ${rmhost}"
   done
   echo "Stopping resourcemanagers on [${RMHOSTS}]"
-  "${HADOOP_YARN_HOME}/bin/yarn" \
+  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
     --config "${HADOOP_CONF_DIR}" \
     --daemon stop \
     --workers \
@@ -79,7 +82,7 @@ fi
 PROXYSERVER=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.web-proxy.address 2>&- | cut -f1 -d:)
 if [[ -n ${PROXYSERVER} ]]; then
   echo "Stopping proxy server [${PROXYSERVER}]"
-  "${HADOOP_YARN_HOME}/bin/yarn" \
+  hadoop_uservar_su yarn proxyserver "${HADOOP_YARN_HOME}/bin/yarn" \
     --config "${HADOOP_CONF_DIR}" \
     --workers \
     --hostnames "${PROXYSERVER}" \
@@ -219,6 +219,9 @@ else
   exit 1
 fi
 
+# now that we have support code, let's abs MYNAME so we can use it later
+MYNAME=$(hadoop_abs "${MYNAME}")
+
 # if no args specified, show usage
 if [[ $# = 0 ]]; then
   hadoop_exit_with_usage 1
@@ -228,6 +231,14 @@ fi
 HADOOP_SUBCMD=$1
 shift
 
+if hadoop_need_reexec yarn "${HADOOP_SUBCMD}"; then
+  hadoop_uservar_su yarn "${HADOOP_SUBCMD}" \
+    "${MYNAME}" \
+    "--reexec" \
+    "${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi
+
 hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
 
 HADOOP_SUBCMD_ARGS=("$@")