#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Start hadoop hdfs and ozone daemons.
# Run this on master node.
## @description usage info
## @audience private
## @stability evolving
## @replaceable no
function hadoop_usage
{
  # This launcher takes no options; print the single supported invocation.
  printf '%s\n' "Usage: start-ozone.sh"
}
# Resolve the directory this script lives in so sibling scripts
# (start-dfs.sh) and the default libexec dir can be located.
# NB: "this" and "bin" are read further down in this file — keep the names.
this="${BASH_SOURCE-$0}"
bin=$(dirname -- "${this}")
bin=$(cd -P -- "${bin}" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  # shellcheck disable=SC1090
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  # Fix: was "2>&1", which duplicates stderr onto stdout and leaves the
  # message on stdout; ">&2" correctly sends the diagnostic to stderr.
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." >&2
  exit 1
fi
# Probe the cluster security configuration, normalized to lower case.
# NOTE(review): "2>&-" closes stderr for tr only, not for the hdfs call —
# presumably the intent was to silence hdfs; confirm before changing.
SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)

# Ozone (at this version) cannot run on a secured cluster; refuse to start.
if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
  echo "Ozone is not supported in a security enabled cluster."
  exit 1
fi
#---------------------------------------------------------
# Check if ozone is enabled
OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
if [[ "${OZONE_ENABLED}" != "true" ]]; then
  echo "Operation is not supported because ozone is not enabled."
  # Fix: was "exit -1" — exit statuses are 0-255, so -1 is invalid/non-portable
  # (bash maps it to 255); use 1, consistent with every other exit in this file.
  exit 1
fi
#---------------------------------------------------------
# Start hdfs before starting ozone daemons
if [[ -f "${bin}/start-dfs.sh" ]]; then
  "${bin}/start-dfs.sh"
else
  # Fix: was "2>&1", which sends the error to stdout; ">&2" routes it to stderr.
  echo "ERROR: Cannot execute ${bin}/start-dfs.sh." >&2
  exit 1
fi
#---------------------------------------------------------
# Ozone keyspacemanager nodes
KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -keyspacemanagers 2>/dev/null)
# A wildcard bind address means "this host": substitute the real hostname
# so the workers invocation targets a resolvable node.
if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
  KSM_NODES=$(hostname)
fi
# Fix: log AFTER the 0.0.0.0 substitution so the printed host list matches
# the hosts actually targeted (previously it could print "0.0.0.0").
echo "Starting key space manager nodes [${KSM_NODES}]"

# Start the ksm daemon(s) as the hdfs user on each KSM host.
hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/hdfs" \
  --workers \
  --config "${HADOOP_CONF_DIR}" \
  --hostnames "${KSM_NODES}" \
  --daemon start \
  ksm

# Seed the aggregate return counter with the ksm start status;
# must immediately follow the command above so $? is not clobbered.
HADOOP_JUMBO_RETCOUNTER=$?
#---------------------------------------------------------
|
|
|
|
# Ozone storagecontainermanager nodes
|
|
|
|
SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -storagecontainermanagers 2>/dev/null)
|
|
|
|
echo "Starting storage container manager nodes [${SCM_NODES}]"
|
|
|
|
hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/hdfs" \
|
|
|
|
--workers \
|
|
|
|
--config "${HADOOP_CONF_DIR}" \
|
|
|
|
--hostnames "${SCM_NODES}" \
|
|
|
|
--daemon start \
|
|
|
|
scm
|
|
|
|
|
|
|
|
(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
|
|
|
|
|
|
|
|
exit ${HADOOP_JUMBO_RETCOUNTER}
|