HADOOP-13595. Rework hadoop_usage to be broken up by clients/daemons/etc. Contributed by Allen Wittenauer.
commit 1a1bf6b7d0 (parent 8ce8672b6b)
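The heart of the change: `hadoop_add_subcommand` now takes a subcommand type (`client`, `admin`, or `daemon`) between the name and the description, and `hadoop_generate_usage` groups the usage listing by that type. A minimal sketch of the new calling convention, using registrations that appear in the diff below:

```bash
# old form: name + description
hadoop_add_subcommand "fs" "run a generic filesystem user client"

# new form: name + type + description; the type drives the grouped usage output
hadoop_add_subcommand "fs" client "run a generic filesystem user client"
hadoop_add_subcommand "dfsadmin" admin "run a DFS admin client"
hadoop_add_subcommand "namenode" daemon "run the DFS namenode"
```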
@@ -30,20 +30,20 @@ function hadoop_usage
   hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
   hadoop_add_option "workers" "turn on worker mode"
 
-  hadoop_add_subcommand "checknative" "check native Hadoop and compression libraries availability"
+  hadoop_add_subcommand "checknative" client "check native Hadoop and compression libraries availability"
-  hadoop_add_subcommand "classpath" "prints the class path needed to get the Hadoop jar and the required libraries"
+  hadoop_add_subcommand "classpath" client "prints the class path needed to get the Hadoop jar and the required libraries"
-  hadoop_add_subcommand "conftest" "validate configuration XML files"
+  hadoop_add_subcommand "conftest" client "validate configuration XML files"
-  hadoop_add_subcommand "credential" "interact with credential providers"
+  hadoop_add_subcommand "credential" client "interact with credential providers"
-  hadoop_add_subcommand "daemonlog" "get/set the log level for each daemon"
+  hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon"
-  hadoop_add_subcommand "dtutil" "operations related to delegation tokens"
+  hadoop_add_subcommand "dtutil" client "operations related to delegation tokens"
-  hadoop_add_subcommand "envvars" "display computed Hadoop environment variables"
+  hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
-  hadoop_add_subcommand "fs" "run a generic filesystem user client"
+  hadoop_add_subcommand "fs" client "run a generic filesystem user client"
-  hadoop_add_subcommand "jar <jar>" "run a jar file. NOTE: please use \"yarn jar\" to launch YARN applications, not this command."
+  hadoop_add_subcommand "jar <jar>" client "run a jar file. NOTE: please use \"yarn jar\" to launch YARN applications, not this command."
-  hadoop_add_subcommand "jnipath" "prints the java.library.path"
+  hadoop_add_subcommand "jnipath" client "prints the java.library.path"
-  hadoop_add_subcommand "kerbname" "show auth_to_local principal conversion"
+  hadoop_add_subcommand "kerbname" client "show auth_to_local principal conversion"
-  hadoop_add_subcommand "key" "manage keys via the KeyProvider"
+  hadoop_add_subcommand "key" client "manage keys via the KeyProvider"
-  hadoop_add_subcommand "trace" "view and modify Hadoop tracing settings"
+  hadoop_add_subcommand "trace" client "view and modify Hadoop tracing settings"
-  hadoop_add_subcommand "version" "print the version"
+  hadoop_add_subcommand "version" client "print the version"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
 }

@@ -18,6 +18,7 @@
 # be done outside of a function
 declare -a HADOOP_SUBCMD_USAGE
 declare -a HADOOP_OPTION_USAGE
+declare -a HADOOP_SUBCMD_USAGE_TYPES
 
 ## @description Print a message to stderr
 ## @audience public

@@ -115,6 +116,89 @@ function hadoop_verify_entry
   [[ ${!1} =~ \ ${2}\ ]]
 }
 
+## @description Check if an array has a given value
+## @audience public
+## @stability stable
+## @replaceable yes
+## @param element
+## @param array
+## @returns 0 = yes
+## @returns 1 = no
+function hadoop_array_contains
+{
+  declare element=$1
+  shift
+  declare val
+
+  if [[ "$#" -eq 0 ]]; then
+    return 1
+  fi
+
+  for val in "${@}"; do
+    if [[ "${val}" == "${element}" ]]; then
+      return 0
+    fi
+  done
+  return 1
+}
+
+## @description Add the `appendstring` if `checkstring` is not
+## @description present in the given array
+## @audience public
+## @stability stable
+## @replaceable yes
+## @param envvar
+## @param appendstring
+function hadoop_add_array_param
+{
+  declare arrname=$1
+  declare add=$2
+
+  declare arrref="${arrname}[@]"
+  declare array=("${!arrref}")
+
+  if ! hadoop_array_contains "${add}" "${array[@]}"; then
+    #shellcheck disable=SC1083,SC2086
+    eval ${arrname}=\(\"\${array[@]}\" \"${add}\" \)
+    hadoop_debug "$1 accepted $2"
+  else
+    hadoop_debug "$1 declined $2"
+  fi
+}
+
+## @description Sort an array (must not contain regexps)
+## @description present in the given array
+## @audience public
+## @stability stable
+## @replaceable yes
+## @param arrayvar
+function hadoop_sort_array
+{
+  declare arrname=$1
+  declare arrref="${arrname}[@]"
+  declare array=("${!arrref}")
+  declare oifs
+
+  declare globstatus
+  declare -a sa
+
+  globstatus=$(set -o | grep noglob | awk '{print $NF}')
+
+  set -f
+  oifs=${IFS}
+
+  # shellcheck disable=SC2034
+  IFS=$'\n' sa=($(sort <<<"${array[*]}"))
+
+  # shellcheck disable=SC1083
+  eval "${arrname}"=\(\"\${sa[@]}\"\)
+
+  IFS=${oifs}
+  if [[ "${globstatus}" = off ]]; then
+    set +f
+  fi
+}
+
 ## @description Check if we are running with priv
 ## @description by default, this implementation looks for
 ## @description EUID=0. For OSes that have true priv
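The three helpers added here are general-purpose array utilities used by the usage machinery later in this commit. A short usage sketch, assuming `hadoop-functions.sh` has been sourced; the array name and values are illustrative:

```bash
types=("client" "daemon")

# membership test: succeeds (returns 0) if the first argument is among the rest
if hadoop_array_contains "client" "${types[@]}"; then
  echo "client already registered"
fi

# append by array *name*, but only if the value is not already present
hadoop_add_array_param types "admin"     # types is now: client daemon admin
hadoop_add_array_param types "client"    # declined; array unchanged

# in-place sort of the named array (values must not contain regexp metacharacters)
hadoop_sort_array types                  # types is now: admin client daemon
```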
@@ -220,13 +304,20 @@ function hadoop_uservar_su
 ## @stability evolving
 ## @replaceable no
 ## @param subcommand
+## @param subcommandtype
 ## @param subcommanddesc
 function hadoop_add_subcommand
 {
-  local subcmd=$1
-  local text=$2
-  HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${text}"
+  declare subcmd=$1
+  declare subtype=$2
+  declare text=$3
+
+  hadoop_debug "${subcmd} as a ${subtype}"
+
+  hadoop_add_array_param HADOOP_SUBCMD_USAGE_TYPES "${subtype}"
+
+  # done in this order so that sort works later
+  HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${subtype}@${text}"
   ((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1))
 }
 
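Each registration is now recorded as a single `@`-delimited `name@type@description` string, and the distinct types seen are collected once each so the usage generator can iterate over them. Roughly, after two calls (the values simply follow the format in the function above):

```bash
hadoop_add_subcommand "fs" client "run a generic filesystem user client"
hadoop_add_subcommand "namenode" daemon "run the DFS namenode"

# HADOOP_SUBCMD_USAGE[0] == "fs@client@run a generic filesystem user client"
# HADOOP_SUBCMD_USAGE[1] == "namenode@daemon@run the DFS namenode"
# HADOOP_SUBCMD_USAGE_TYPES == ("client" "daemon")
```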
@@ -253,17 +344,22 @@ function hadoop_reset_usage
 {
   HADOOP_SUBCMD_USAGE=()
   HADOOP_OPTION_USAGE=()
+  HADOOP_SUBCMD_USAGE_TYPES=()
   HADOOP_SUBCMD_USAGE_COUNTER=0
   HADOOP_OPTION_USAGE_COUNTER=0
 }
 
 ## @description Print a screen-size aware two-column output
+## @description if reqtype is not null, only print those requested
 ## @audience private
 ## @stability evolving
 ## @replaceable no
+## @param reqtype
 ## @param array
 function hadoop_generic_columnprinter
 {
+  declare reqtype=$1
+  shift
   declare -a input=("$@")
   declare -i i=0
   declare -i counter=0
@@ -275,11 +371,13 @@ function hadoop_generic_columnprinter
   declare -i foldsize
   declare -a tmpa
   declare numcols
+  declare brup
 
   if [[ -n "${COLUMNS}" ]]; then
     numcols=${COLUMNS}
   else
     numcols=$(tput cols) 2>/dev/null
+    COLUMNS=${numcols}
   fi
 
   if [[ -z "${numcols}"
@@ -292,7 +390,8 @@ function hadoop_generic_columnprinter
   while read -r line; do
     tmpa[${counter}]=${line}
     ((counter=counter+1))
-    option=$(echo "${line}" | cut -f1 -d'@')
+    IFS='@' read -ra brup <<< "${line}"
+    option="${brup[0]}"
     if [[ ${#option} -gt ${maxoptsize} ]]; then
       maxoptsize=${#option}
     fi
@@ -304,8 +403,22 @@ function hadoop_generic_columnprinter
   ((foldsize=numcols-maxoptsize))
 
   until [[ $i -eq ${#tmpa[@]} ]]; do
-    option=$(echo "${tmpa[$i]}" | cut -f1 -d'@')
-    giventext=$(echo "${tmpa[$i]}" | cut -f2 -d'@')
+    IFS='@' read -ra brup <<< "${tmpa[$i]}"
+
+    option="${brup[0]}"
+    cmdtype="${brup[1]}"
+    giventext="${brup[2]}"
+
+    if [[ -n "${reqtype}" ]]; then
+      if [[ "${cmdtype}" != "${reqtype}" ]]; then
+        ((i=i+1))
+        continue
+      fi
+    fi
+
+    if [[ -z "${giventext}" ]]; then
+      giventext=${cmdtype}
+    fi
 
     while read -r line; do
       printf "%-${maxoptsize}s %-s\n" "${option}" "${line}"
@@ -325,13 +438,14 @@ function hadoop_generic_columnprinter
 ## @param [text to use in place of SUBCOMMAND]
 function hadoop_generate_usage
 {
-  local cmd=$1
+  declare cmd=$1
-  local takesclass=$2
+  declare takesclass=$2
-  local subcmdtext=${3:-"SUBCOMMAND"}
+  declare subcmdtext=${3:-"SUBCOMMAND"}
-  local haveoptions
+  declare haveoptions
-  local optstring
+  declare optstring
-  local havesubs
+  declare havesubs
-  local subcmdstring
+  declare subcmdstring
+  declare cmdtype
 
   cmd=${cmd##*/}
 
@@ -358,7 +472,7 @@ function hadoop_generate_usage
     echo " OPTIONS is none or any of:"
     echo ""
 
-    hadoop_generic_columnprinter "${HADOOP_OPTION_USAGE[@]}"
+    hadoop_generic_columnprinter "" "${HADOOP_OPTION_USAGE[@]}"
   fi
 
   if [[ "${havesubs}" = true ]]; then
@@ -366,7 +480,18 @@ function hadoop_generate_usage
     echo " ${subcmdtext} is one of:"
     echo ""
 
-    hadoop_generic_columnprinter "${HADOOP_SUBCMD_USAGE[@]}"
+    if [[ "${#HADOOP_SUBCMD_USAGE_TYPES[@]}" -gt 0 ]]; then
+
+      hadoop_sort_array HADOOP_SUBCMD_USAGE_TYPES
+      for subtype in "${HADOOP_SUBCMD_USAGE_TYPES[@]}"; do
+        #shellcheck disable=SC2086
+        cmdtype="$(tr '[:lower:]' '[:upper:]' <<< ${subtype:0:1})${subtype:1}"
+        printf "\n %s Commands:\n\n" "${cmdtype}"
+        hadoop_generic_columnprinter "${subtype}" "${HADOOP_SUBCMD_USAGE[@]}"
+      done
+    else
+      hadoop_generic_columnprinter "" "${HADOOP_SUBCMD_USAGE[@]}"
+    fi
     echo ""
     echo "${subcmdtext} may print help when invoked w/o parameters or with -h."
   fi
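With at least one type registered, the usage listing is now printed in per-type sections, each type name capitalized as a heading and the sections emitted in sorted order. A rough sketch of the shape of the output, using entries from this diff (not captured from a real run; column widths depend on the terminal):

    Admin Commands:

    daemonlog     get/set the log level for each daemon

    Client Commands:

    checknative   check native Hadoop and compression libraries availability
    fs            run a generic filesystem user client

    Daemon Commands:

    kms           run KMS, the Key Management Server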
@@ -180,11 +180,11 @@ It is also possible to add the new subcommands to the usage output. The `hadoop_
 
 ```bash
 if [[ "${HADOOP_SHELL_EXECNAME}" = "yarn" ]]; then
-  hadoop_add_subcommand "hello" "Print some text to the screen"
+  hadoop_add_subcommand "hello" client "Print some text to the screen"
 fi
 ```
 
-This functionality may also be use to override the built-ins. For example, defining:
+We set the subcommand type to be "client" as there are no special restrictions, extra capabilities, etc. This functionality may also be use to override the built-ins. For example, defining:
 
 ```bash
 function hdfs_subcommand_fetchdt
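For context, the override mechanism the guide refers to works by defining a function named `<command>_subcommand_<subcommand>`; the wrapper scripts only register their default handler when no such function exists, which is the same pattern behind the `if ! declare -f ...` guards later in this commit. A hedged sketch of such an override, with an illustrative body and class name rather than the guide's own text:

```bash
function hdfs_subcommand_fetchdt
{
  # take over the built-in "hdfs fetchdt" handler; the class below is a placeholder
  HADOOP_CLASSNAME=org.example.AlternateDtFetcher
}
```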
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_add_array_param (empty)" {
+  hadoop_add_array_param ARRAY value
+  [ "${ARRAY[0]}" = value ]
+}
+
+@test "hadoop_add_array_param (exist)" {
+  ARRAY=("val2")
+  hadoop_add_array_param ARRAY val1
+  [ "${ARRAY[0]}" = val2 ]
+  [ "${ARRAY[1]}" = val1 ]
+}
+
+@test "hadoop_add_array_param (double exist)" {
+  ARRAY=("val2" "val1")
+  hadoop_add_array_param ARRAY val3
+  [ "${ARRAY[0]}" = val2 ]
+  [ "${ARRAY[1]}" = val1 ]
+  [ "${ARRAY[2]}" = val3 ]
+}

@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_array_contains (empty)" {
+  run hadoop_array_contains value "${ARRAY[@]}"
+  [ "${status}" = 1 ]
+}
+
+@test "hadoop_array_contains (exist)" {
+  ARRAY=("value")
+  run hadoop_array_contains value "${ARRAY[@]}"
+  [ "${status}" = 0 ]
+}
+
+@test "hadoop_array_contains (notexist)" {
+  ARRAY=("different")
+  run hadoop_array_contains value "${ARRAY[@]}"
+  [ "${status}" = 1 ]
+}
+
+@test "hadoop_array_contains (exist, multi)" {
+  ARRAY=("val1" "val2" "val3")
+  for j in val1 val2 val3; do
+    run hadoop_array_contains "${j}" "${ARRAY[@]}"
+    [ "${status}" = 0 ]
+  done
+}
+
+@test "hadoop_array_contains (multi, not exist)" {
+  ARRAY=("val1" "val2" "val3")
+  run hadoop_array_contains value "${ARRAY[@]}"
+  [ "${status}" = 1 ]
+}

@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_sort_array (empty)" {
+  hadoop_sort_array ARRAY
+}
+
+@test "hadoop_sort_array (single value)" {
+  ARRAY=("value")
+  hadoop_sort_array ARRAY
+}
+
+@test "hadoop_sort_array (multiple value)" {
+  ARRAY=("b" "c" "a")
+  preifsod=$(echo "${IFS}" | od -c)
+  hadoop_sort_array ARRAY
+  postifsod=$(echo "${IFS}" | od -c)
+
+  [ "${ARRAY[0]}" = "a" ]
+  [ "${ARRAY[1]}" = "b" ]
+  [ "${ARRAY[2]}" = "c" ]
+  [ "${preifsod}" = "${postifsod}" ]
+}

@@ -16,7 +16,7 @@
 # limitations under the License.
 
 if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
-  hadoop_add_subcommand "kms" "run KMS, the Key Management Server"
+  hadoop_add_subcommand "kms" daemon "run KMS, the Key Management Server"
 fi
 
 ## @description Command handler for kms subcommand

@@ -54,4 +54,4 @@ function hadoop_subcommand_kms
      [[ "${HADOOP_DAEMON_MODE}" == "start" ]]; then
     hadoop_mkdir "${KMS_TEMP:-${HADOOP_HOME}/temp}"
   fi
 }

@@ -16,7 +16,7 @@
 # limitations under the License.
 
 if [[ "${HADOOP_SHELL_EXECNAME}" = hdfs ]]; then
-  hadoop_add_subcommand "httpfs" "run HttpFS server, the HDFS HTTP Gateway"
+  hadoop_add_subcommand "httpfs" daemon "run HttpFS server, the HDFS HTTP Gateway"
 fi
 
 ## @description Command handler for httpfs subcommand

hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
|
hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
|
||||||
hadoop_add_option "--workers" "turn on worker mode"
|
hadoop_add_option "--workers" "turn on worker mode"
|
||||||
|
|
||||||
hadoop_add_subcommand "balancer" "run a cluster balancing utility"
|
hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility"
|
||||||
hadoop_add_subcommand "cacheadmin" "configure the HDFS cache"
|
hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache"
|
||||||
hadoop_add_subcommand "classpath" "prints the class path needed to get the hadoop jar and the required libraries"
|
hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
|
||||||
hadoop_add_subcommand "crypto" "configure HDFS encryption zones"
|
hadoop_add_subcommand "crypto" admin "configure HDFS encryption zones"
|
||||||
hadoop_add_subcommand "datanode" "run a DFS datanode"
|
hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
|
||||||
hadoop_add_subcommand "debug" "run a Debug Admin to execute HDFS debug commands"
|
hadoop_add_subcommand "debug" admin "run a Debug Admin to execute HDFS debug commands"
|
||||||
hadoop_add_subcommand "dfs" "run a filesystem command on the file system"
|
hadoop_add_subcommand "dfs" client "run a filesystem command on the file system"
|
||||||
hadoop_add_subcommand "dfsadmin" "run a DFS admin client"
|
hadoop_add_subcommand "dfsadmin" admin "run a DFS admin client"
|
||||||
hadoop_add_subcommand "diskbalancer" "Distributes data evenly among disks on a given node"
|
hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among disks on a given node"
|
||||||
hadoop_add_subcommand "envvars" "display computed Hadoop environment variables"
|
hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
|
||||||
hadoop_add_subcommand "ec" "run a HDFS ErasureCoding CLI"
|
hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
|
||||||
hadoop_add_subcommand "fetchdt" "fetch a delegation token from the NameNode"
|
hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode"
|
||||||
hadoop_add_subcommand "fsck" "run a DFS filesystem checking utility"
|
hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
|
||||||
hadoop_add_subcommand "getconf" "get config values from configuration"
|
hadoop_add_subcommand "getconf" client "get config values from configuration"
|
||||||
hadoop_add_subcommand "groups" "get the groups which users belong to"
|
hadoop_add_subcommand "groups" client "get the groups which users belong to"
|
||||||
hadoop_add_subcommand "haadmin" "run a DFS HA admin client"
|
hadoop_add_subcommand "haadmin" admin "run a DFS HA admin client"
|
||||||
hadoop_add_subcommand "jmxget" "get JMX exported values from NameNode or DataNode."
|
hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode."
|
||||||
hadoop_add_subcommand "journalnode" "run the DFS journalnode"
|
hadoop_add_subcommand "journalnode" daemon "run the DFS journalnode"
|
||||||
hadoop_add_subcommand "lsSnapshottableDir" "list all snapshottable dirs owned by the current user"
|
hadoop_add_subcommand "lsSnapshottableDir" client "list all snapshottable dirs owned by the current user"
|
||||||
hadoop_add_subcommand "mover" "run a utility to move block replicas across storage types"
|
hadoop_add_subcommand "mover" daemon "run a utility to move block replicas across storage types"
|
||||||
hadoop_add_subcommand "namenode" "run the DFS namenode"
|
hadoop_add_subcommand "namenode" daemon "run the DFS namenode"
|
||||||
hadoop_add_subcommand "nfs3" "run an NFS version 3 gateway"
|
hadoop_add_subcommand "nfs3" daemon "run an NFS version 3 gateway"
|
||||||
hadoop_add_subcommand "oev" "apply the offline edits viewer to an edits file"
|
hadoop_add_subcommand "oev" admin "apply the offline edits viewer to an edits file"
|
||||||
hadoop_add_subcommand "oiv" "apply the offline fsimage viewer to an fsimage"
|
hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage"
|
||||||
hadoop_add_subcommand "oiv_legacy" "apply the offline fsimage viewer to a legacy fsimage"
|
hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage"
|
||||||
hadoop_add_subcommand "portmap" "run a portmap service"
|
hadoop_add_subcommand "portmap" daemon "run a portmap service"
|
||||||
hadoop_add_subcommand "secondarynamenode" "run the DFS secondary namenode"
|
hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
|
||||||
hadoop_add_subcommand "snapshotDiff" "diff two snapshots of a directory or diff the current directory contents with a snapshot"
|
hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
|
||||||
hadoop_add_subcommand "storagepolicies" "list/get/set block storage policies"
|
hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
|
||||||
hadoop_add_subcommand "version" "print the version"
|
hadoop_add_subcommand "version" client "print the version"
|
||||||
hadoop_add_subcommand "zkfc" "run the ZK Failover Controller daemon"
|
hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
|
||||||
hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
|
hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -24,15 +24,15 @@ HADOOP_SHELL_EXECNAME="${MYNAME##*/}"
|
|||||||
@@ -24,15 +24,15 @@ HADOOP_SHELL_EXECNAME="${MYNAME##*/}"
 ## @replaceable no
 function hadoop_usage
 {
-  hadoop_add_subcommand "classpath" "prints the class path needed for running mapreduce subcommands"
+  hadoop_add_subcommand "classpath" client "prints the class path needed for running mapreduce subcommands"
-  hadoop_add_subcommand "envvars" "display computed Hadoop environment variables"
+  hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
-  hadoop_add_subcommand "historyserver" "run job history servers as a standalone daemon"
+  hadoop_add_subcommand "historyserver" daemon "run job history servers as a standalone daemon"
-  hadoop_add_subcommand "hsadmin" "job history server admin interface"
+  hadoop_add_subcommand "hsadmin" admin "job history server admin interface"
-  hadoop_add_subcommand "job" "manipulate MapReduce jobs"
+  hadoop_add_subcommand "job" client "manipulate MapReduce jobs"
-  hadoop_add_subcommand "pipes" "run a Pipes job"
+  hadoop_add_subcommand "pipes" client "run a Pipes job"
-  hadoop_add_subcommand "queue" "get information regarding JobQueues"
+  hadoop_add_subcommand "queue" client "get information regarding JobQueues"
-  hadoop_add_subcommand "sampler" "sampler"
+  hadoop_add_subcommand "sampler" client "sampler"
-  hadoop_add_subcommand "version" "print the version"
+  hadoop_add_subcommand "version" client "print the version"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
 }

@@ -18,7 +18,7 @@
 if ! declare -f mapred_subcommand_archive-logs >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = mapred ]]; then
-    hadoop_add_subcommand "archive-logs" "combine aggregated logs into hadoop archives"
+    hadoop_add_subcommand "archive-logs" client "combine aggregated logs into hadoop archives"
  fi
 
   # this can't be indented otherwise shelldocs won't get it

@@ -18,7 +18,7 @@
 if ! declare -f hadoop_subcommand_archive >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
-    hadoop_add_subcommand "archive" "create a Hadoop archive"
+    hadoop_add_subcommand "archive" client "create a Hadoop archive"
   fi
 
   # this can't be indented otherwise shelldocs won't get it

@@ -39,7 +39,7 @@ fi
 if ! declare -f mapred_subcommand_archive >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = mapred ]]; then
-    hadoop_add_subcommand "archive" "create a Hadoop archive"
+    hadoop_add_subcommand "archive" client "create a Hadoop archive"
   fi
 
   # this can't be indented otherwise shelldocs won't get it

@@ -18,7 +18,7 @@
 if ! declare -f hadoop_subcommand_distcp >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
-    hadoop_add_subcommand "distcp" "copy file or directories recursively"
+    hadoop_add_subcommand "distcp" client "copy file or directories recursively"
   fi
 
   # this can't be indented otherwise shelldocs won't get it

@@ -39,7 +39,7 @@ fi
 if ! declare -f mapred_subcommand_distcp >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = mapred ]]; then
-    hadoop_add_subcommand "distcp" "copy file or directories recursively"
+    hadoop_add_subcommand "distcp" client "copy file or directories recursively"
   fi
 
   # this can't be indented otherwise shelldocs won't get it

@@ -18,7 +18,7 @@
 if ! declare -f hadoop_subcommand_distch >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
-    hadoop_add_subcommand "distch" "distributed metadata changer"
+    hadoop_add_subcommand "distch" client "distributed metadata changer"
   fi
 
   # this can't be indented otherwise shelldocs won't get it

@@ -18,7 +18,7 @@
 if ! declare -f hadoop_subcommand_gridmix >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
-    hadoop_add_subcommand "gridmix" "submit a mix of synthetic job, modeling a profiled from production load"
+    hadoop_add_subcommand "gridmix" client "submit a mix of synthetic job, modeling a profiled from production load"
   fi
 
 ## @description gridmix command for hadoop

@@ -18,7 +18,7 @@
 if ! declare -f hadoop_subcommand_rumenfolder >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
-    hadoop_add_subcommand "rumenfolder" "scale a rumen input trace"
+    hadoop_add_subcommand "rumenfolder" client "scale a rumen input trace"
   fi
 
 ## @description rumenfolder command for hadoop

@@ -37,7 +37,7 @@ fi
 if ! declare -f hadoop_subcommand_rumentrace >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = hadoop ]]; then
-    hadoop_add_subcommand "rumentrace" "convert logs into a rumen trace"
+    hadoop_add_subcommand "rumentrace" client "convert logs into a rumen trace"
   fi
 
 ## @description rumentrace command for hadoop

@@ -18,7 +18,7 @@
 if ! declare -f mapred_subcommand_streaming >/dev/null 2>/dev/null; then
 
   if [[ "${HADOOP_SHELL_EXECNAME}" = mapred ]]; then
-    hadoop_add_subcommand "streaming" "launch a mapreduce streaming job"
+    hadoop_add_subcommand "streaming" client "launch a mapreduce streaming job"
   fi
 
 ## @description streaming command for mapred

hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
|
hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
|
||||||
hadoop_add_option "--workers" "turn on worker mode"
|
hadoop_add_option "--workers" "turn on worker mode"
|
||||||
|
|
||||||
hadoop_add_subcommand "application" "prints application(s) report/kill application"
|
hadoop_add_subcommand "application" client "prints application(s) report/kill application"
|
||||||
hadoop_add_subcommand "applicationattempt" "prints applicationattempt(s) report"
|
hadoop_add_subcommand "applicationattempt" client "prints applicationattempt(s) report"
|
||||||
hadoop_add_subcommand "classpath" "prints the class path needed to get the hadoop jar and the required libraries"
|
hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
|
||||||
hadoop_add_subcommand "cluster" "prints cluster information"
|
hadoop_add_subcommand "cluster" client "prints cluster information"
|
||||||
hadoop_add_subcommand "container" "prints container(s) report"
|
hadoop_add_subcommand "container" client "prints container(s) report"
|
||||||
hadoop_add_subcommand "daemonlog" "get/set the log level for each daemon"
|
hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon"
|
||||||
hadoop_add_subcommand "envvars" "display computed Hadoop environment variables"
|
hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
|
||||||
hadoop_add_subcommand "jar <jar>" "run a jar file"
|
hadoop_add_subcommand "jar <jar>" client "run a jar file"
|
||||||
hadoop_add_subcommand "logs" "dump container logs"
|
hadoop_add_subcommand "logs" client "dump container logs"
|
||||||
hadoop_add_subcommand "node" "prints node report(s)"
|
hadoop_add_subcommand "node" admin "prints node report(s)"
|
||||||
hadoop_add_subcommand "nodemanager" "run a nodemanager on each worker"
|
hadoop_add_subcommand "nodemanager" daemon "run a nodemanager on each worker"
|
||||||
hadoop_add_subcommand "proxyserver" "run the web app proxy server"
|
hadoop_add_subcommand "proxyserver" daemon "run the web app proxy server"
|
||||||
hadoop_add_subcommand "queue" "prints queue information"
|
hadoop_add_subcommand "queue" client "prints queue information"
|
||||||
hadoop_add_subcommand "resourcemanager" "run the ResourceManager"
|
hadoop_add_subcommand "resourcemanager" daemon "run the ResourceManager"
|
||||||
hadoop_add_subcommand "rmadmin" "admin tools"
|
hadoop_add_subcommand "rmadmin" admin "admin tools"
|
||||||
hadoop_add_subcommand "router" "run the Router daemon"
|
hadoop_add_subcommand "router" daemon "run the Router daemon"
|
||||||
hadoop_add_subcommand "scmadmin" "SharedCacheManager admin tools"
|
hadoop_add_subcommand "scmadmin" admin "SharedCacheManager admin tools"
|
||||||
hadoop_add_subcommand "sharedcachemanager" "run the SharedCacheManager daemon"
|
hadoop_add_subcommand "sharedcachemanager" admin "run the SharedCacheManager daemon"
|
||||||
hadoop_add_subcommand "timelinereader" "run the timeline reader server"
|
hadoop_add_subcommand "timelinereader" client "run the timeline reader server"
|
||||||
hadoop_add_subcommand "timelineserver" "run the timeline server"
|
hadoop_add_subcommand "timelineserver" daemon "run the timeline server"
|
||||||
hadoop_add_subcommand "top" "view cluster information"
|
hadoop_add_subcommand "top" client "view cluster information"
|
||||||
hadoop_add_subcommand "version" "print the version"
|
hadoop_add_subcommand "version" client "print the version"
|
||||||
hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
|
hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
|
||||||
}
|
}
|
||||||
|
|
||||||
|