#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Stop hadoop dfs daemons.
# Run this on the master node.

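# Daemons are stopped in this order: namenodes, datanodes, secondary
# namenodes, quorum journal nodes (if any), and finally the ZK failover
# controllers when automatic HA is enabled.
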
## @description  usage info
## @audience     private
## @stability    evolving
## @replaceable  no
function hadoop_usage
{
  echo "Usage: stop-dfs.sh"
}

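# (hadoop_usage is picked up by the Hadoop shell framework sourced below,
# e.g. when the script is invoked with --help)
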
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)

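# "bin" is the directory holding this script, with symlinks resolved (-P),
# so the libexec lookup below works no matter how the script was invoked.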
# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
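# (HADOOP_LIBEXEC_DIR may be preset in the environment before calling this
# script, e.g. HADOOP_LIBEXEC_DIR=/opt/hadoop/libexec ./sbin/stop-dfs.sh,
# where the path is illustrative only)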
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." >&2
  exit 1
fi

#---------------------------------------------------------
# namenodes

NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -namenodes 2>/dev/null)

if [[ -z "${NAMENODES}" ]]; then
  NAMENODES=$(hostname)
fi

echo "Stopping namenodes on [${NAMENODES}]"

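# hadoop_uservar_su (defined in hadoop-functions.sh) runs the command as the
# matching service user, e.g. ${HDFS_NAMENODE_USER} here, when the script is
# executed as root; otherwise it runs the command as the current user.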
hadoop_uservar_su hdfs namenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --hostnames "${NAMENODES}" \
    --daemon stop \
    namenode

#---------------------------------------------------------
# datanodes (using default workers file)

echo "Stopping datanodes"

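# no --hostnames here: with --workers alone, the host list is read from the
# workers file (by default ${HADOOP_CONF_DIR}/workers)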
hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/hdfs" \
    --workers \
    --config "${HADOOP_CONF_DIR}" \
    --daemon stop \
    datanode

#---------------------------------------------------------
# secondary namenodes (if any)

SECONDARY_NAMENODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)

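# getconf prints 0.0.0.0 when dfs.namenode.secondary.http-address is left at
# its default wildcard value; treat that as "the local host"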
if [[ "${SECONDARY_NAMENODES}" == "0.0.0.0" ]]; then
|
|
SECONDARY_NAMENODES=$(hostname)
|
|
fi
|
|
|
|
if [[ -n "${SECONDARY_NAMENODES}" ]]; then
|
|
echo "Stopping secondary namenodes [${SECONDARY_NAMENODES}]"
|
|
|
|
hadoop_uservar_su hdfs secondarynamenode "${HADOOP_HDFS_HOME}/bin/hdfs" \
|
|
--workers \
|
|
--config "${HADOOP_CONF_DIR}" \
|
|
--hostnames "${SECONDARY_NAMENODES}" \
|
|
--daemon stop \
|
|
secondarynamenode
|
|
fi
|
|
|
|
#---------------------------------------------------------
# quorumjournal nodes (if any)

JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-)

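# 2>&- above closes stderr, presumably to hide getconf warnings on clusters
# with no quorum journal configuration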
if [[ "{$JOURNAL_NODES-}" != $'\n' ]]; then
|
|
echo "Stopping journal nodes [${JOURNAL_NODES}]"
|
|
|
|
hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
|
|
--workers \
|
|
--config "${HADOOP_CONF_DIR}" \
|
|
--hostnames "${JOURNAL_NODES}" \
|
|
--daemon stop \
|
|
journalnode
|
|
fi
|
|
|
|
#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled
AUTOHA_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled | tr '[:upper:]' '[:lower:]')
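# tr lower-cases the value so "TRUE"/"True" in the configuration also match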
if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
|
|
echo "Stopping ZK Failover Controllers on NN hosts [${NAMENODES}]"
|
|
|
|
hadoop_uservar_su hdfs zkfc "${HADOOP_HDFS_HOME}/bin/hdfs" \
|
|
--workers \
|
|
--config "${HADOOP_CONF_DIR}" \
|
|
--hostnames "${NAMENODES}" \
|
|
--daemon stop \
|
|
zkfc
|
|
fi
|
|
|
|
# eof