#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

MYNAME="${BASH_SOURCE-$0}"
HADOOP_SHELL_EXECNAME="${MYNAME##*/}"

## @description build up the hdfs command's usage text.
## @audience public
## @stability stable
## @replaceable no
function hadoop_usage
{
  hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
  hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon"
  hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in worker mode"
  hadoop_add_option "--loglevel level" "set the log4j level for this command"
  hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
  hadoop_add_option "--workers" "turn on worker mode"

  hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility"
  hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache"
  hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
  hadoop_add_subcommand "crypto" admin "configure HDFS encryption zones"
  hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
  hadoop_add_subcommand "debug" admin "run a Debug Admin to execute HDFS debug commands"
  hadoop_add_subcommand "dfs" client "run a filesystem command on the file system"
  hadoop_add_subcommand "dfsadmin" admin "run a DFS admin client"
  hadoop_add_subcommand "dfsrouter" daemon "run the DFS router"
  hadoop_add_subcommand "dfsrouteradmin" admin "manage Router-based federation"
  hadoop_add_subcommand "diskbalancer" daemon "distribute data evenly among disks on a given node"
  hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
  hadoop_add_subcommand "ec" admin "run an HDFS ErasureCoding CLI"
  hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode"
  hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
  hadoop_add_subcommand "getconf" client "get config values from configuration"
  hadoop_add_subcommand "groups" client "get the groups which users belong to"
  hadoop_add_subcommand "haadmin" admin "run a DFS HA admin client"
  hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode"
  hadoop_add_subcommand "journalnode" daemon "run the DFS journalnode"
  hadoop_add_subcommand "lsSnapshottableDir" client "list all snapshottable dirs owned by the current user"
  hadoop_add_subcommand "mover" daemon "run a utility to move block replicas across storage types"
  hadoop_add_subcommand "namenode" daemon "run the DFS namenode"
  hadoop_add_subcommand "nfs3" daemon "run an NFS version 3 gateway"
  hadoop_add_subcommand "oev" admin "apply the offline edits viewer to an edits file"
  hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage"
  hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage"
  hadoop_add_subcommand "portmap" daemon "run a portmap service"
  hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
  hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
  hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
  hadoop_add_subcommand "version" client "print the version"
  hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"

  hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
}
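
# Illustration only (not part of the upstream command table): a new entry
# would be registered with the same helper used above, e.g. a hypothetical
# subcommand named "example" classified as a client command:
#
#   hadoop_add_subcommand "example" client "describe what example does"
#
# hadoop_generate_usage is then expected to render the registered options and
# subcommands into the help text shown when hdfs is run with no arguments or
# with an unrecognized one.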

## @description Default command handler for hadoop command
## @audience public
## @stability stable
## @replaceable no
## @param CLI arguments
function hdfscmd_case
{
  subcmd=$1
  shift

  case ${subcmd} in
    balancer)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.balancer.Balancer
    ;;
    cacheadmin)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CacheAdmin
    ;;
    classpath)
      hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
    ;;
    crypto)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CryptoAdmin
    ;;
    datanode)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
      hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR
      hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR
    ;;
    debug)
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DebugAdmin'
    ;;
    dfs)
      HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell
    ;;
    dfsadmin)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSAdmin
    ;;
    dfsrouter)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.DFSRouter'
    ;;
    dfsrouteradmin)
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin'
    ;;
    diskbalancer)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI
    ;;
    envvars)
      echo "JAVA_HOME='${JAVA_HOME}'"
      echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'"
      echo "HDFS_DIR='${HDFS_DIR}'"
      echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'"
      echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
      echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
      echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
      echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
      exit 0
    ;;
    ec)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.ECAdmin
    ;;
    fetchdt)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
    ;;
    fsck)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck
    ;;
    getconf)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetConf
    ;;
    groups)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetGroups
    ;;
    haadmin)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSHAAdmin
    ;;
    journalnode)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
    ;;
    jmxget)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.JMXGet
    ;;
    lsSnapshottableDir)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
    ;;
    mover)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.mover.Mover
    ;;
    namenode)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.NameNode'
      hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
    ;;
    nfs3)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_SECURE_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
      hadoop_deprecate_envvar HADOOP_SECURE_NFS3_LOG_DIR HADOOP_SECURE_LOG_DIR
      hadoop_deprecate_envvar HADOOP_SECURE_NFS3_PID_DIR HADOOP_SECURE_PID_DIR
    ;;
    oev)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
    ;;
    oiv)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
    ;;
    oiv_legacy)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
    ;;
    portmap)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
    ;;
    secondarynamenode)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
      hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
    ;;
    snapshotDiff)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
    ;;
    storagepolicies)
      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
    ;;
    version)
      HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
    ;;
    zkfc)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
    ;;
    *)
      HADOOP_CLASSNAME="${subcmd}"
      if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
        hadoop_exit_with_usage 1
      fi
    ;;
  esac
}
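
# Note on the table above: each branch only sets globals such as
# HADOOP_CLASSNAME, HADOOP_SECURE_CLASSNAME, and
# HADOOP_SUBCMD_SUPPORTDAEMONIZATION; nothing is executed here. The catch-all
# branch also lets a fully qualified class name be passed straight through,
# e.g. (hypothetical class, for illustration only):
#
#   hdfs org.example.MyTool --some-arg
#
# which would run org.example.MyTool if it is on the classpath and passes
# hadoop_validate_classname.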

# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
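
# For example (path assumed for illustration): with HADOOP_HOME=/opt/hadoop
# the framework scripts are looked up under /opt/hadoop/libexec; without
# HADOOP_HOME the directory is derived from this script's own location.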

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
  # shellcheck source=./hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." >&2
  exit 1
fi
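
# hdfs-config.sh pulls in the shared shell framework; the hadoop_* helpers
# used throughout this script (hadoop_add_option, hadoop_add_subcommand,
# hadoop_exit_with_usage, hadoop_generic_java_subcmd_handler, and so on) are
# assumed to be defined by the files it sources.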

# now that we have support code, let's abs MYNAME so we can use it later
MYNAME=$(hadoop_abs "${MYNAME}")

if [[ $# = 0 ]]; then
  hadoop_exit_with_usage 1
fi

HADOOP_SUBCMD=$1
shift

if hadoop_need_reexec hdfs "${HADOOP_SUBCMD}"; then
  hadoop_uservar_su hdfs "${HADOOP_SUBCMD}" \
    "${MYNAME}" \
    "--reexec" \
    "${HADOOP_USER_PARAMS[@]}"
  exit $?
fi
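
# Sketch of the intent here (the actual logic lives in the sourced framework,
# so details are assumed): hadoop_need_reexec decides whether the command has
# to be re-run as a different user, for instance a secure daemon launched as
# root, and hadoop_uservar_su re-invokes this same script ("${MYNAME}") with
# --reexec appended, presumably so the second pass does not re-enter this
# block.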

hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

HADOOP_SUBCMD_ARGS=("$@")

if declare -f hdfs_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
  hadoop_debug "Calling dynamically: hdfs_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}"
  "hdfs_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
else
  hdfscmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
fi
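
# The declare -f test above lets a deployment replace a subcommand by defining
# a function named hdfs_subcommand_<name> in a file the shell framework
# sources (for example hadoop-user-functions.sh; the exact location is
# assumed). A minimal sketch with a hypothetical subcommand:
#
#   function hdfs_subcommand_hello
#   {
#     echo "hello from an overridden subcommand"
#     exit 0
#   }
#
# With that defined, "hdfs hello" would call the function with the remaining
# CLI arguments instead of falling through to hdfscmd_case.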

hadoop_add_client_opts

if [[ ${HADOOP_WORKER_MODE} = true ]]; then
  hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/hdfs" "${HADOOP_USER_PARAMS[@]}"
  exit $?
fi
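
# Worker mode fans the same command out to the hosts supplied via --workers,
# --hostnames, or --hosts (see hadoop_usage above). A typical invocation is
# assumed to look like:
#
#   hdfs --workers --daemon start datanode
#
# which would start a datanode daemon on each configured worker host and then
# exit with the status of the remote execution.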

hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

# everything is in globals at this point, so call the generic handler
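# (HADOOP_CLASSNAME, HADOOP_SECURE_CLASSNAME, HADOOP_SUBCMD,
# HADOOP_SUBCMD_ARGS, HADOOP_SUBCMD_SUPPORTDAEMONIZATION, and the HADOOP_OPTS
# additions made above); the handler is assumed to either exec the JVM in the
# foreground or daemonize it, depending on the --daemon option.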
hadoop_generic_java_subcmd_handler