HDDS-2058. Remove hadoop dependencies in ozone build

Closes #1376
Nanda kumar 2019-08-30 10:19:19 +02:00 committed by Márton Elek
parent 5b557de721
commit 22a58615a2
20 changed files with 4546 additions and 157 deletions


@@ -258,9 +258,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<protocVersion>${protobuf.version}</protocVersion>
<protocCommand>${protoc.path}</protocCommand>
<imports>
<param>
${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
</param>
<param>${basedir}/src/main/proto</param>
</imports>
<source>


@@ -0,0 +1,317 @@
@echo off
@rem Licensed to the Apache Software Foundation (ASF) under one or more
@rem contributor license agreements. See the NOTICE file distributed with
@rem this work for additional information regarding copyright ownership.
@rem The ASF licenses this file to You under the Apache License, Version 2.0
@rem (the "License"); you may not use this file except in compliance with
@rem the License. You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem included in all the hadoop scripts with source command
@rem should not be executable directly
@rem also should not be passed any arguments, since we need original %*
if not defined HADOOP_COMMON_DIR (
set HADOOP_COMMON_DIR=share\hadoop\common
)
if not defined HADOOP_COMMON_LIB_JARS_DIR (
set HADOOP_COMMON_LIB_JARS_DIR=share\hadoop\common\lib
)
if not defined HADOOP_COMMON_LIB_NATIVE_DIR (
set HADOOP_COMMON_LIB_NATIVE_DIR=lib\native
)
if not defined HDFS_DIR (
set HDFS_DIR=share\hadoop\hdfs
)
if not defined HDFS_LIB_JARS_DIR (
set HDFS_LIB_JARS_DIR=share\hadoop\hdfs\lib
)
if not defined YARN_DIR (
set YARN_DIR=share\hadoop\yarn
)
if not defined YARN_LIB_JARS_DIR (
set YARN_LIB_JARS_DIR=share\hadoop\yarn\lib
)
if not defined MAPRED_DIR (
set MAPRED_DIR=share\hadoop\mapreduce
)
if not defined MAPRED_LIB_JARS_DIR (
set MAPRED_LIB_JARS_DIR=share\hadoop\mapreduce\lib
)
@rem the root of the Hadoop installation
set HADOOP_HOME=%~dp0
for %%i in (%HADOOP_HOME%.) do (
set HADOOP_HOME=%%~dpi
)
if "%HADOOP_HOME:~-1%" == "\" (
set HADOOP_HOME=%HADOOP_HOME:~0,-1%
)
if not exist %HADOOP_HOME%\share\hadoop\common\hadoop-common-*.jar (
@echo +================================================================+
@echo ^| Error: HADOOP_HOME is not set correctly ^|
@echo +----------------------------------------------------------------+
@echo ^| Please set your HADOOP_HOME variable to the absolute path of ^|
@echo ^| the directory that contains the hadoop distribution ^|
@echo +================================================================+
exit /b 1
)
if not defined HADOOP_CONF_DIR (
set HADOOP_CONF_DIR=%HADOOP_HOME%\etc\hadoop
)
@rem
@rem Allow alternate conf dir location.
@rem
if "%1" == "--config" (
set HADOOP_CONF_DIR=%2
shift
shift
)
@rem
@rem check to see if it is specified whether to use the workers or the
@rem masters file
@rem
if "%1" == "--hosts" (
set HADOOP_WORKERS=%HADOOP_CONF_DIR%\%2
shift
shift
)
@rem
@rem Set log level. Default to INFO.
@rem
if "%1" == "--loglevel" (
set HADOOP_LOGLEVEL=%2
shift
shift
)
if exist %HADOOP_CONF_DIR%\hadoop-env.cmd (
call %HADOOP_CONF_DIR%\hadoop-env.cmd
)
@rem
@rem setup java environment variables
@rem
if not defined JAVA_HOME (
echo Error: JAVA_HOME is not set.
goto :eof
)
if not exist %JAVA_HOME%\bin\java.exe (
echo Error: JAVA_HOME is incorrectly set.
echo Please update %HADOOP_CONF_DIR%\hadoop-env.cmd
goto :eof
)
set JAVA=%JAVA_HOME%\bin\java
@rem some Java parameters
set JAVA_HEAP_MAX=-Xmx1000m
@rem
@rem check envvars which might override default args
@rem
if defined HADOOP_HEAPSIZE (
set JAVA_HEAP_MAX=-Xmx%HADOOP_HEAPSIZE%m
)
@rem
@rem CLASSPATH initially contains %HADOOP_CONF_DIR%
@rem
set CLASSPATH=%HADOOP_CONF_DIR%
if not defined HADOOP_COMMON_HOME (
if exist %HADOOP_HOME%\share\hadoop\common (
set HADOOP_COMMON_HOME=%HADOOP_HOME%
)
)
@rem
@rem for releases, add core hadoop jar & webapps to CLASSPATH
@rem
if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\webapps (
set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%
)
if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR% (
set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR%\*
)
set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\*
@rem
@rem default log directory % file
@rem
if not defined HADOOP_LOG_DIR (
set HADOOP_LOG_DIR=%HADOOP_HOME%\logs
)
if not defined HADOOP_LOGFILE (
set HADOOP_LOGFILE=hadoop.log
)
if not defined HADOOP_LOGLEVEL (
set HADOOP_LOGLEVEL=INFO
)
if not defined HADOOP_ROOT_LOGGER (
set HADOOP_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
)
@rem
@rem default policy file for service-level authorization
@rem
if not defined HADOOP_POLICYFILE (
set HADOOP_POLICYFILE=hadoop-policy.xml
)
@rem
@rem Determine the JAVA_PLATFORM
@rem
for /f "delims=" %%A in ('%JAVA% -Xmx32m %HADOOP_JAVA_PLATFORM_OPTS% -classpath "%CLASSPATH%" org.apache.hadoop.util.PlatformName') do set JAVA_PLATFORM=%%A
@rem replace space with underscore
set JAVA_PLATFORM=%JAVA_PLATFORM: =_%
@rem
@rem setup 'java.library.path' for native hadoop code if necessary
@rem
@rem Check if we're running hadoop directly from the build
if exist %HADOOP_COMMON_HOME%\target\bin (
if defined JAVA_LIBRARY_PATH (
set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\target\bin
) else (
set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\target\bin
)
)
@rem For the distro case, check the bin folder
if exist %HADOOP_COMMON_HOME%\bin (
if defined JAVA_LIBRARY_PATH (
set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\bin
) else (
set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin
)
)
@rem
@rem setup a default TOOL_PATH
@rem
set TOOL_PATH=%HADOOP_HOME%\share\hadoop\tools\lib\*
set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.dir=%HADOOP_LOG_DIR%
set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.file=%HADOOP_LOGFILE%
set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.home.dir=%HADOOP_HOME%
set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.id.str=%HADOOP_IDENT_STRING%
set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.root.logger=%HADOOP_ROOT_LOGGER%
if defined JAVA_LIBRARY_PATH (
set HADOOP_OPTS=%HADOOP_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
)
set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.policy.file=%HADOOP_POLICYFILE%
@rem
@rem Disable ipv6 as it can cause issues
@rem
set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
@rem
@rem put hdfs in classpath if present
@rem
if not defined HADOOP_HDFS_HOME (
if exist %HADOOP_HOME%\%HDFS_DIR% (
set HADOOP_HDFS_HOME=%HADOOP_HOME%
)
)
if exist %HADOOP_HDFS_HOME%\%HDFS_DIR%\webapps (
set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%
)
if exist %HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR% (
set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR%\*
)
set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%\*
@rem
@rem put yarn in classpath if present
@rem
if not defined HADOOP_YARN_HOME (
if exist %HADOOP_HOME%\%YARN_DIR% (
set HADOOP_YARN_HOME=%HADOOP_HOME%
)
)
if exist %HADOOP_YARN_HOME%\%YARN_DIR%\webapps (
set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%
)
if exist %HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR% (
set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
)
set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%\*
@rem
@rem put mapred in classpath if present AND different from YARN
@rem
if not defined HADOOP_MAPRED_HOME (
if exist %HADOOP_HOME%\%MAPRED_DIR% (
set HADOOP_MAPRED_HOME=%HADOOP_HOME%
)
)
if not "%HADOOP_MAPRED_HOME%\%MAPRED_DIR%" == "%HADOOP_YARN_HOME%\%YARN_DIR%" (
if exist %HADOOP_MAPRED_HOME%\%MAPRED_DIR%\webapps (
set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%
)
if exist %HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR% (
set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\*
)
set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%\*
)
@rem
@rem add user-specified CLASSPATH last
@rem
if defined HADOOP_CLASSPATH (
if not defined HADOOP_USE_CLIENT_CLASSLOADER (
if defined HADOOP_USER_CLASSPATH_FIRST (
set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
) else (
set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
)
)
)
:eof


@@ -0,0 +1,165 @@
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####
# IMPORTANT
####
## The hadoop-config.sh tends to get executed by non-Hadoop scripts.
## Those parts expect this script to parse/manipulate $@. In order
## to maintain backward compatibility, this means a surprising
## lack of functions for bits that would be much better off in
## a function.
##
## In other words, yes, there are some bad things that happen here and
## unless we break the rest of the ecosystem, we can't change it. :(
# included in all the hadoop scripts with source command
# should not be executable directly
# also should not be passed any arguments, since we need original $*
#
# after doing more config, caller should also exec finalize
# function to finish last minute/default configs for
# settings that might be different between daemons & interactive
# you must be this high to ride the ride
if [[ -z "${BASH_VERSINFO[0]}" ]] \
|| [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \
|| [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then
echo "bash v3.2+ is required. Sorry."
exit 1
fi
# In order to get partially bootstrapped, we need to figure out where
# we are located. Chances are good that our caller has already done
# this work for us, but just in case...
if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
_hadoop_common_this="${BASH_SOURCE-$0}"
HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hadoop_common_this}")" >/dev/null && pwd -P)
fi
# get our functions defined for usage later
if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
[[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
. "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
. "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
else
echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2
exit 1
fi
hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME
# allow overrides of the above and pre-defines of the below
if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
[[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example
. "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh"
elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example
. "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh"
fi
#
# IMPORTANT! We are not executing user provided code yet!
#
# Let's go! Base definitions so we can move forward
hadoop_bootstrap
# let's find our conf.
#
# first, check and process params passed to us
# we process this in-line so that we can directly modify $@
# if something downstream is processing that directly,
# we need to make sure our params have been ripped out
# note that we do many of them here for various utilities.
# this provides consistency and forces a more consistent
# user experience
# save these off in case our caller needs them
# shellcheck disable=SC2034
HADOOP_USER_PARAMS=("$@")
hadoop_parse_args "$@"
shift "${HADOOP_PARSE_COUNTER}"
#
# Setup the base-line environment
#
hadoop_find_confdir
hadoop_exec_hadoopenv
hadoop_import_shellprofiles
hadoop_exec_userfuncs
#
# IMPORTANT! User provided code is now available!
#
hadoop_exec_user_hadoopenv
hadoop_verify_confdir
hadoop_deprecate_envvar HADOOP_SLAVES HADOOP_WORKERS
hadoop_deprecate_envvar HADOOP_SLAVE_NAMES HADOOP_WORKER_NAMES
hadoop_deprecate_envvar HADOOP_SLAVE_SLEEP HADOOP_WORKER_SLEEP
# do all the OS-specific startup bits here
# this allows us to get a decent JAVA_HOME,
# call crle for LD_LIBRARY_PATH, etc.
hadoop_os_tricks
hadoop_java_setup
hadoop_basic_init
# inject any sub-project overrides, defaults, etc.
if declare -F hadoop_subproject_init >/dev/null ; then
hadoop_subproject_init
fi
hadoop_shellprofiles_init
# get the native libs in there pretty quick
hadoop_add_javalibpath "${HADOOP_HOME}/build/native"
hadoop_add_javalibpath "${HADOOP_HOME}/${HADOOP_COMMON_LIB_NATIVE_DIR}"
hadoop_shellprofiles_nativelib
# get the basic java class path for these subprojects
# in as quickly as possible since other stuff
# will definitely depend upon it.
hadoop_add_common_to_classpath
hadoop_shellprofiles_classpath
# user API commands can now be run since the runtime
# environment has been configured
hadoop_exec_hadooprc
#
# backwards compatibility. new stuff should
# call this when they are ready
#
if [[ -z "${HADOOP_NEW_CONFIG}" ]]; then
hadoop_finalize
fi
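
As the header comments note, hadoop-config.sh is meant to be sourced rather than executed, and a caller that sets HADOOP_NEW_CONFIG is expected to run hadoop_finalize itself. A minimal sketch of a hypothetical caller, following the same pattern workers.sh uses later in this commit (it assumes HADOOP_HOME is already set):

#!/usr/bin/env bash
# hypothetical wrapper script, not part of this commit
HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-${HADOOP_HOME}/libexec}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
# command-specific classpath and _OPTS additions would go here,
# then finalize before launching the JVM:
hadoop_finalize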


@@ -0,0 +1,77 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run a Hadoop command on all slave hosts.
function hadoop_usage
{
echo "Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] (start|stop|status) <hadoop-command> <args...>"
}
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
. "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
else
echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
exit 1
fi
if [[ $# = 0 ]]; then
hadoop_exit_with_usage 1
fi
daemonmode=$1
shift
if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
hdfsscript="${HADOOP_HOME}/bin/hdfs"
else
hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
fi
hadoop_error "WARNING: Use of this script to ${daemonmode} HDFS daemons is deprecated."
hadoop_error "WARNING: Attempting to execute replacement \"hdfs --workers --daemon ${daemonmode}\" instead."
#
# Original input was usually:
# hadoop-daemons.sh (shell options) (start|stop) (datanode|...) (daemon options)
# we're going to turn this into
# hdfs --workers --daemon (start|stop) (rest of options)
#
for (( i = 0; i < ${#HADOOP_USER_PARAMS[@]}; i++ ))
do
if [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^start$ ]] ||
[[ "${HADOOP_USER_PARAMS[$i]}" =~ ^stop$ ]] ||
[[ "${HADOOP_USER_PARAMS[$i]}" =~ ^status$ ]]; then
unset HADOOP_USER_PARAMS[$i]
fi
done
${hdfsscript} --workers --daemon "${daemonmode}" "${HADOOP_USER_PARAMS[@]}"
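
The loop above strips the start/stop/status token from the saved HADOOP_USER_PARAMS so that the mode can be passed through --daemon instead. An illustrative before/after of the rewrite described in the comment (the daemon name is an example):

# legacy invocation handled by this deprecated wrapper:
#   hadoop-daemons.sh start datanode
# command it delegates to:
#   hdfs --workers --daemon start datanode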

File diff suppressed because it is too large.


@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run a shell command on all worker hosts.
#
# Environment Variables
#
# HADOOP_WORKERS File naming remote hosts.
# Default is ${HADOOP_CONF_DIR}/workers.
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
# HADOOP_WORKER_SLEEP Seconds to sleep between spawning remote commands.
# HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
##
function hadoop_usage
{
echo "Usage: workers.sh [--config confdir] command..."
}
# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi
HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
. "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
else
echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
exit 1
fi
# if no args specified, show usage
if [[ $# -le 0 ]]; then
hadoop_exit_with_usage 1
fi
hadoop_connect_to_hosts "$@"
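
A hedged usage example based on the usage string and environment variables documented above (the host names and the remote command are illustrative):

# ${HADOOP_CONF_DIR}/workers lists one host per line, e.g.
#   node1.example.com
#   node2.example.com
# run the same command on every worker host over ssh:
HADOOP_SSH_OPTS="-o ConnectTimeout=10" ./workers.sh --config /etc/hadoop uptime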


@@ -0,0 +1,20 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
</configuration>


@@ -0,0 +1,90 @@
@echo off
@rem Licensed to the Apache Software Foundation (ASF) under one or more
@rem contributor license agreements. See the NOTICE file distributed with
@rem this work for additional information regarding copyright ownership.
@rem The ASF licenses this file to You under the Apache License, Version 2.0
@rem (the "License"); you may not use this file except in compliance with
@rem the License. You may obtain a copy of the License at
@rem
@rem http://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem Set Hadoop-specific environment variables here.
@rem The only required environment variable is JAVA_HOME. All others are
@rem optional. When running a distributed configuration it is best to
@rem set JAVA_HOME in this file, so that it is correctly defined on
@rem remote nodes.
@rem The java implementation to use. Required.
set JAVA_HOME=%JAVA_HOME%
@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
@rem set JSVC_HOME=%JSVC_HOME%
@rem set HADOOP_CONF_DIR=
@rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
if exist %HADOOP_HOME%\contrib\capacity-scheduler (
if not defined HADOOP_CLASSPATH (
set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
) else (
set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
)
)
@rem The maximum amount of heap to use, in MB. Default is 1000.
@rem set HADOOP_HEAPSIZE=
@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
@rem Extra Java runtime options. Empty by default.
@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
@rem Command specific options appended to HADOOP_OPTS when specified
if not defined HADOOP_SECURITY_LOGGER (
set HADOOP_SECURITY_LOGGER=INFO,RFAS
)
if not defined HDFS_AUDIT_LOGGER (
set HDFS_AUDIT_LOGGER=INFO,NullAppender
)
set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
@rem On secure datanodes, user to run the datanode as after dropping privileges
set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
@rem Where log files are stored. %HADOOP_HOME%/logs by default.
@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
@rem Where log files are stored in the secure data environment.
set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
@rem
@rem Router-based HDFS Federation specific parameters
@rem Specify the JVM options to be used when starting the RBF Routers.
@rem These options will be appended to the options specified as HADOOP_OPTS
@rem and therefore may override any similar flags set in HADOOP_OPTS
@rem
@rem set HADOOP_DFSROUTER_OPTS=""
@rem
@rem The directory where pid files are stored. /tmp by default.
@rem NOTE: this should be set to a directory that can only be written to by
@rem the user that will run the hadoop daemons. Otherwise there is the
@rem potential for a symlink attack.
set HADOOP_PID_DIR=%HADOOP_PID_DIR%
set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
@rem A string representing this instance of hadoop. %USERNAME% by default.
set HADOOP_IDENT_STRING=%USERNAME%


@@ -0,0 +1,439 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set Hadoop-specific environment variables here.
##
## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS.
## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS. THEREFORE,
## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE
## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh.
##
## Precedence rules:
##
## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults
##
## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults
##
# Many of the options here are built from the perspective that users
# may want to provide OVERWRITING values on the command line.
# For example:
#
# JAVA_HOME=/usr/java/testing hdfs dfs -ls
#
# Therefore, the vast majority (BUT NOT ALL!) of these defaults
# are configured for substitution and not append. If append
# is preferable, modify this file accordingly.
###
# Generic settings for HADOOP
###
# Technically, the only required environment variable is JAVA_HOME.
# All others are optional. However, the defaults are probably not
# preferred. Many sites configure these options outside of Hadoop,
# such as in /etc/profile.d
# The java implementation to use. By default, this environment
# variable is REQUIRED on ALL platforms except OS X!
# export JAVA_HOME=
# Location of Hadoop. By default, Hadoop will attempt to determine
# this location based upon its execution path.
# export HADOOP_HOME=
# Location of Hadoop's configuration information. i.e., where this
# file is living. If this is not defined, Hadoop will attempt to
# locate it based upon its execution path.
#
# NOTE: It is recommended that this variable not be set here but in
# /etc/profile.d or equivalent. Some options (such as
# --config) may react strangely otherwise.
#
# export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
# The maximum amount of heap to use (Java -Xmx). If no unit
# is provided, it will be converted to MB. Daemons will
# prefer any Xmx setting in their respective _OPT variable.
# There is no default; the JVM will autoscale based upon machine
# memory size.
# export HADOOP_HEAPSIZE_MAX=
# The minimum amount of heap to use (Java -Xms). If no unit
# is provided, it will be converted to MB. Daemons will
# prefer any Xms setting in their respective _OPT variable.
# There is no default; the JVM will autoscale based upon machine
# memory size.
# export HADOOP_HEAPSIZE_MIN=
# Enable extra debugging of Hadoop's JAAS binding, used to set up
# Kerberos security.
# export HADOOP_JAAS_DEBUG=true
# Extra Java runtime options for all Hadoop commands. We don't support
# IPv6 yet/still, so by default the preference is set to IPv4.
# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
# For Kerberos debugging, an extended option set logs more information
# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
# Some parts of the shell code may do special things dependent upon
# the operating system. We have to set this here. See the next
# section as to why....
export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
# Extra Java runtime options for some Hadoop commands
# and clients (i.e., hdfs dfs -blah). These get appended to HADOOP_OPTS for
# such commands. In most cases, this should be left empty and
# let users supply it on the command line.
# export HADOOP_CLIENT_OPTS=""
#
# A note about classpaths.
#
# By default, Apache Hadoop overrides Java's CLASSPATH
# environment variable. It is configured such
# that it starts out blank with new entries added after passing
# a series of checks (file/dir exists, not already listed aka
de-duplication). During de-duplication, wildcards and/or
# directories are *NOT* expanded to keep it simple. Therefore,
# if the computed classpath has two specific mentions of
# awesome-methods-1.0.jar, only the first one added will be seen.
# If two directories are in the classpath that both contain
# awesome-methods-1.0.jar, then Java will pick up both versions.
# An additional, custom CLASSPATH. Site-wide configs should be
# handled via the shellprofile functionality, utilizing the
# hadoop_add_classpath function for greater control and much
# harder for apps/end-users to accidentally override.
# Similarly, end users should utilize ${HOME}/.hadooprc .
# This variable should ideally only be used as a short-cut,
# interactive way for temporary additions on the command line.
# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine"
# Should HADOOP_CLASSPATH be first in the official CLASSPATH?
# export HADOOP_USER_CLASSPATH_FIRST="yes"
# If HADOOP_USE_CLIENT_CLASSLOADER is set, the classpath along
# with the main jar are handled by a separate isolated
# client classloader when 'hadoop jar', 'yarn jar', or 'mapred job'
# is utilized. If it is set, HADOOP_CLASSPATH and
# HADOOP_USER_CLASSPATH_FIRST are ignored.
# export HADOOP_USE_CLIENT_CLASSLOADER=true
# HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of
# system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER
# is enabled. Names ending in '.' (period) are treated as package names, and
# names starting with a '-' are treated as negative matches. For example,
# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
# Enable optional, bundled Hadoop features
# This is a comma delimited list. It may NOT be overridden via .hadooprc
# Entries may be added/removed as needed.
# export HADOOP_OPTIONAL_TOOLS="@@@HADOOP_OPTIONAL_TOOLS@@@"
###
# Options for remote shell connectivity
###
# There are some optional components of hadoop that allow for
# command and control of remote hosts. For example,
# start-dfs.sh will attempt to bring up all NNs, DNs, etc.
# Options to pass to SSH when one of the "log into a host and
# start/stop daemons" scripts is executed
# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"
# The built-in ssh handler will limit itself to 10 simultaneous connections.
# For pdsh users, this sets the fanout size ( -f )
# Change this to increase/decrease as necessary.
# export HADOOP_SSH_PARALLEL=10
# Filename which contains all of the hosts for any remote execution
# helper scripts such as workers.sh, start-dfs.sh, etc.
# export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers"
###
# Options for all daemons
###
#
#
# Many options may also be specified as Java properties. It is
# very common, and in many cases, desirable, to hard-set these
# in daemon _OPTS variables. Where applicable, the appropriate
# Java property is also identified. Note that many are re-used
# or set differently in certain contexts (e.g., secure vs
# non-secure)
#
# Where (primarily) daemon log files are stored.
# ${HADOOP_HOME}/logs by default.
# Java property: hadoop.log.dir
# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
# A string representing this instance of hadoop. $USER by default.
# This is used in writing log and pid files, so keep that in mind!
# Java property: hadoop.id.str
# export HADOOP_IDENT_STRING=$USER
# How many seconds to pause after stopping a daemon
# export HADOOP_STOP_TIMEOUT=5
# Where pid files are stored. /tmp by default.
# export HADOOP_PID_DIR=/tmp
# Default log4j setting for interactive commands
# Java property: hadoop.root.logger
# export HADOOP_ROOT_LOGGER=INFO,console
# Default log4j setting for daemons spawned explicitly by
# --daemon option of hadoop, hdfs, mapred and yarn command.
# Java property: hadoop.root.logger
# export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
# Default log level and output location for security-related messages.
# You will almost certainly want to change this on a per-daemon basis via
# the Java property (i.e., -Dhadoop.security.logger=foo). (Note that the
# defaults for the NN and 2NN override this by default.)
# Java property: hadoop.security.logger
# export HADOOP_SECURITY_LOGGER=INFO,NullAppender
# Default process priority level
# Note that sub-processes will also run at this level!
# export HADOOP_NICENESS=0
# Default name for the service level authorization file
# Java property: hadoop.policy.file
# export HADOOP_POLICYFILE="hadoop-policy.xml"
#
# NOTE: this is not used by default! <-----
# You can define variables right here and then re-use them later on.
# For example, it is common to use the same garbage collection settings
# for all the daemons. So one could define:
#
# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
#
# .. and then use it as in option b) under the NameNode section below.
###
# Secure/privileged execution
###
#
# Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons
# on privileged ports. This functionality can be replaced by providing
# custom functions. See hadoop-functions.sh for more information.
#
# The jsvc implementation to use. Jsvc is required to run secure datanodes
# that bind to privileged ports to provide authentication of data transfer
# protocol. Jsvc is not required if SASL is configured for authentication of
# data transfer protocol using non-privileged ports.
# export JSVC_HOME=/usr/bin
#
# This directory contains pids for secure and privileged processes.
#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR}
#
# This directory contains the logs for secure and privileged processes.
# Java property: hadoop.log.dir
# export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR}
#
# When running a secure daemon, the default value of HADOOP_IDENT_STRING
# ends up being a bit bogus. Therefore, by default, the code will
# replace HADOOP_IDENT_STRING with HADOOP_xx_SECURE_USER. If one wants
# to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
# export HADOOP_SECURE_IDENT_PRESERVE="true"
###
# NameNode specific parameters
###
# Default log level and output location for file system related change
# messages. For non-namenode daemons, the Java property must be set in
# the appropriate _OPTS if one wants something other than INFO,NullAppender
# Java property: hdfs.audit.logger
# export HDFS_AUDIT_LOGGER=INFO,NullAppender
# Specify the JVM options to be used when starting the NameNode.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# a) Set JMX options
# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
#
# b) Set garbage collection logs
# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
#
# c) ... or set them directly
# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
# this is the default:
# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
###
# SecondaryNameNode specific parameters
###
# Specify the JVM options to be used when starting the SecondaryNameNode.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# This is the default:
# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
###
# DataNode specific parameters
###
# Specify the JVM options to be used when starting the DataNode.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# This is the default:
# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports
# to provide authentication of data transfer protocol. This **MUST NOT** be
# defined if SASL is configured for authentication of data transfer protocol
# using non-privileged ports.
# This will replace the hadoop.id.str Java property in secure mode.
# export HDFS_DATANODE_SECURE_USER=hdfs
# Supplemental options for secure datanodes
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
###
# NFS3 Gateway specific parameters
###
# Specify the JVM options to be used when starting the NFS3 Gateway.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_NFS3_OPTS=""
# Specify the JVM options to be used when starting the Hadoop portmapper.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_PORTMAP_OPTS="-Xmx512m"
# Supplemental options for privileged gateways
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server"
# On privileged gateways, user to run the gateway as after dropping privileges
# This will replace the hadoop.id.str Java property in secure mode.
# export HDFS_NFS3_SECURE_USER=nfsserver
###
# ZKFailoverController specific parameters
###
# Specify the JVM options to be used when starting the ZKFailoverController.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_ZKFC_OPTS=""
###
# QuorumJournalNode specific parameters
###
# Specify the JVM options to be used when starting the QuorumJournalNode.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_JOURNALNODE_OPTS=""
###
# HDFS Balancer specific parameters
###
# Specify the JVM options to be used when starting the HDFS Balancer.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_BALANCER_OPTS=""
###
# HDFS Mover specific parameters
###
# Specify the JVM options to be used when starting the HDFS Mover.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_MOVER_OPTS=""
###
# Router-based HDFS Federation specific parameters
# Specify the JVM options to be used when starting the RBF Routers.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_DFSROUTER_OPTS=""
###
# Ozone Manager specific parameters
###
# Specify the JVM options to be used when starting the Ozone Manager.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_OM_OPTS=""
###
# HDFS StorageContainerManager specific parameters
###
# Specify the JVM options to be used when starting the HDFS Storage Container Manager.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HDFS_STORAGECONTAINERMANAGER_OPTS=""
###
# Advanced Users Only!
###
#
# When building Hadoop, one can add the class paths to the commands
# via this special env var:
# export HADOOP_ENABLE_BUILD_PATHS="true"
#
# To prevent accidents, shell commands can be (superficially) locked
# to only allow certain users to execute certain subcommands.
# It uses the format of (command)_(subcommand)_USER.
#
# For example, to limit who can execute the namenode command,
# export HDFS_NAMENODE_USER=hdfs
###
# Registry DNS specific parameters
###
# For privileged registry DNS, user to run as after dropping privileges
# This will replace the hadoop.id.str Java property in secure mode.
# export HADOOP_REGISTRYDNS_SECURE_USER=yarn
# Supplemental options for privileged registry DNS
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
# export HADOOP_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server"
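
A hedged example of the re-use pattern described in the "NOTE: this is not used by default!" block above, combining the shared GC settings with option b) from the NameNode section (the log file name is illustrative):

# example only, not part of the shipped file:
export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-nn.log-$(date +'%Y%m%d%H%M')"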


@@ -0,0 +1,99 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# syntax: [prefix].[source|sink].[instance].[options]
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
# default sampling period, in seconds
*.period=10
# The namenode-metrics.out will contain metrics from all context
#namenode.sink.file.filename=namenode-metrics.out
# Specifying a special sampling period for namenode:
#namenode.sink.*.period=8
#datanode.sink.file.filename=datanode-metrics.out
#resourcemanager.sink.file.filename=resourcemanager-metrics.out
#nodemanager.sink.file.filename=nodemanager-metrics.out
#mrappmaster.sink.file.filename=mrappmaster-metrics.out
#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
# the following example split metrics of different
# context to different sinks (in this case files)
#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
#nodemanager.sink.file_jvm.context=jvm
#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
#nodemanager.sink.file_mapred.context=mapred
#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
#
# Below are for sending metrics to Ganglia
#
# for Ganglia 3.0 support
# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
#
# for Ganglia 3.1 support
# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
# *.sink.ganglia.period=10
# default for supportsparse is false
# *.sink.ganglia.supportsparse=true
#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
# Tag values to use for the ganglia prefix. If not defined no tags are used.
# If '*' all tags are used. If specifying multiple tags separate them with
# commas. Note that the last segment of the property name is the context name.
#
# A typical use of tags is separating the metrics by the HDFS rpc port
# and HDFS service rpc port.
# For example:
# With following HDFS configuration:
# dfs.namenode.rpc-address is set as namenodeAddress:9110
# dfs.namenode.servicerpc-address is set as namenodeAddress:9111
# If no tags are used, the following metric would be gathered:
# rpc.rpc.NumOpenConnections
# If using "*.sink.ganglia.tagsForPrefix.rpc=port",
# the following metrics would be gathered:
# rpc.rpc.port=9110.NumOpenConnections
# rpc.rpc.port=9111.NumOpenConnections
#
#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
#*.sink.ganglia.tagsForPrefix.dfs=HAState,IsOutOfSync
#*.sink.ganglia.tagsForPrefix.rpc=port
#*.sink.ganglia.tagsForPrefix.rpcdetailed=port
#*.sink.ganglia.tagsForPrefix.metricssystem=*
#*.sink.ganglia.tagsForPrefix.ugi=*
#*.sink.ganglia.tagsForPrefix.mapred=
#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649


@@ -0,0 +1,275 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>security.client.protocol.acl</name>
<value>*</value>
<description>ACL for ClientProtocol, which is used by user code
via the DistributedFileSystem.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.client.datanode.protocol.acl</name>
<value>*</value>
<description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
for block recovery.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.datanode.protocol.acl</name>
<value>*</value>
<description>ACL for DatanodeProtocol, which is used by datanodes to
communicate with the namenode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.inter.datanode.protocol.acl</name>
<value>*</value>
<description>ACL for InterDatanodeProtocol, the inter-datanode protocol
for updating generation timestamp.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.namenode.protocol.acl</name>
<value>*</value>
<description>ACL for NamenodeProtocol, the protocol used by the secondary
namenode to communicate with the namenode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.admin.operations.protocol.acl</name>
<value>*</value>
<description>ACL for AdminOperationsProtocol. Used for admin commands.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.refresh.user.mappings.protocol.acl</name>
<value>*</value>
<description>ACL for RefreshUserMappingsProtocol. Used to refresh
users mappings. The ACL is a comma-separated list of user and
group names. The user and group list is separated by a blank. For
e.g. "alice,bob users,wheel". A special value of "*" means all
users are allowed.</description>
</property>
<property>
<name>security.refresh.policy.protocol.acl</name>
<value>*</value>
<description>ACL for RefreshAuthorizationPolicyProtocol, used by the
dfsadmin and mradmin commands to refresh the security policy in-effect.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.ha.service.protocol.acl</name>
<value>*</value>
<description>ACL for HAService protocol used by HAAdmin to manage the
active and stand-by states of namenode.</description>
</property>
<property>
<name>security.router.admin.protocol.acl</name>
<value>*</value>
<description>ACL for RouterAdmin Protocol. The ACL is a comma-separated
list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.
</description>
</property>
<property>
<name>security.zkfc.protocol.acl</name>
<value>*</value>
<description>ACL for access to the ZK Failover Controller
</description>
</property>
<property>
<name>security.qjournal.service.protocol.acl</name>
<value>*</value>
<description>ACL for QJournalProtocol, used by the NN to communicate with
JNs when using the QuorumJournalManager for edit logs.</description>
</property>
<property>
<name>security.interqjournal.service.protocol.acl</name>
<value>*</value>
<description>ACL for InterQJournalProtocol, used by the JN to
communicate with other JN
</description>
</property>
<property>
<name>security.mrhs.client.protocol.acl</name>
<value>*</value>
<description>ACL for HSClientProtocol, used by job clients to
communicate with the MR History Server to query job status etc.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<!-- YARN Protocols -->
<property>
<name>security.resourcetracker.protocol.acl</name>
<value>*</value>
<description>ACL for ResourceTrackerProtocol, used by the
ResourceManager and NodeManager to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.resourcemanager-administration.protocol.acl</name>
<value>*</value>
<description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.applicationclient.protocol.acl</name>
<value>*</value>
<description>ACL for ApplicationClientProtocol, used by the ResourceManager
and applications submission clients to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.applicationmaster.protocol.acl</name>
<value>*</value>
<description>ACL for ApplicationMasterProtocol, used by the ResourceManager
and ApplicationMasters to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.containermanagement.protocol.acl</name>
<value>*</value>
<description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
and ApplicationMasters to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.resourcelocalizer.protocol.acl</name>
<value>*</value>
<description>ACL for ResourceLocalizer protocol, used by the NodeManager
and ResourceLocalizer to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.task.protocol.acl</name>
<value>*</value>
<description>ACL for TaskUmbilicalProtocol, used by the map and reduce
tasks to communicate with the parent tasktracker.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.client.protocol.acl</name>
<value>*</value>
<description>ACL for MRClientProtocol, used by job clients to
communicate with the MR ApplicationMaster to query job status etc.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.applicationhistory.protocol.acl</name>
<value>*</value>
<description>ACL for ApplicationHistoryProtocol, used by the timeline
server and the generic history service client to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.collector-nodemanager.protocol.acl</name>
<value>*</value>
<description>ACL for CollectorNodemanagerProtocol, used by nodemanager
if timeline service v2 is enabled, for the timeline collector and nodemanager
to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.applicationmaster-nodemanager.applicationmaster.protocol.acl</name>
<value>*</value>
<description>ACL for ApplicationMasterProtocol, used by the Nodemanager
and ApplicationMasters to communicate.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.distributedscheduling.protocol.acl</name>
<value>*</value>
<description>ACL for DistributedSchedulingAMProtocol, used by the Nodemanager
and Resourcemanager to communicate.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
</configuration>


@@ -0,0 +1,78 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* These .proto interfaces are private and stable.
* Please see http://wiki.apache.org/hadoop/Compatibility
* for what changes are allowed for a *stable* .proto interface.
*/
option java_package = "org.apache.hadoop.fs";
option java_outer_classname = "FSProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.fs;
message FsPermissionProto {
required uint32 perm = 1; // UNIX-style mode bits
}
/*
* FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but
* cross-serialization is not an explicitly supported use case. Unlike HDFS,
* most fields are optional and do not define defaults.
*/
message FileStatusProto {
enum FileType {
FT_DIR = 1;
FT_FILE = 2;
FT_SYMLINK = 3;
}
enum Flags {
HAS_ACL = 0x01; // has ACLs
HAS_CRYPT = 0x02; // encrypted
HAS_EC = 0x04; // erasure coded
SNAPSHOT_ENABLED = 0x08; // snapshot enabled
}
required FileType fileType = 1;
required string path = 2;
optional uint64 length = 3;
optional FsPermissionProto permission = 4;
optional string owner = 5;
optional string group = 6;
optional uint64 modification_time = 7;
optional uint64 access_time = 8;
optional string symlink = 9;
optional uint32 block_replication = 10;
optional uint64 block_size = 11;
// locations = 12
// alias = 13
// childrenNum = 14
optional bytes encryption_data = 15;
// storagePolicy = 16
optional bytes ec_data = 17;
optional uint32 flags = 18 [default = 0];
}
/**
* Placeholder type for consistent basic FileSystem operations.
*/
message LocalFileSystemPathHandleProto {
optional uint64 mtime = 1;
optional string path = 2;
}
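Because the file sets java_package to org.apache.hadoop.fs and java_outer_classname to FSProtos, protoc produces builder-style Java bindings for these messages. A minimal sketch of constructing a FileStatusProto, assuming the standard protobuf-java generated API (the path, sizes, owner, and group values below are made up):

import org.apache.hadoop.fs.FSProtos.FileStatusProto;
import org.apache.hadoop.fs.FSProtos.FsPermissionProto;

public class FileStatusProtoExample {
  public static void main(String[] args) {
    // The flags field is a bit set built from the Flags enum values.
    int flags = FileStatusProto.Flags.HAS_ACL_VALUE
        | FileStatusProto.Flags.HAS_EC_VALUE;

    FileStatusProto status = FileStatusProto.newBuilder()
        .setFileType(FileStatusProto.FileType.FT_FILE)   // required
        .setPath("/data/example.txt")                    // required; hypothetical path
        .setLength(1024L)
        .setPermission(FsPermissionProto.newBuilder().setPerm(0644).build())
        .setOwner("alice")
        .setGroup("users")
        .setBlockReplication(3)
        .setBlockSize(128L * 1024 * 1024)
        .setFlags(flags)
        .build();

    // Wire-format round trip, as with any protobuf message.
    byte[] bytes = status.toByteArray();
    System.out.println("serialized size: " + bytes.length);
  }
}

Keeping the optional attributes in a single flags bit set keeps the message compact and leaves room for new flags without adding fields.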

View File

@ -0,0 +1,73 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* These .proto interfaces are private and stable.
* Please see http://wiki.apache.org/hadoop/Compatibility
* for what changes are allowed for a *stable* .proto interface.
*/
option java_package = "org.apache.hadoop.security.proto";
option java_outer_classname = "SecurityProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.common;
/**
* Security token identifier
*/
message TokenProto {
required bytes identifier = 1;
required bytes password = 2;
required string kind = 3;
required string service = 4;
}
message CredentialsKVProto {
required string alias = 1;
optional hadoop.common.TokenProto token = 2;
optional bytes secret = 3;
}
message CredentialsProto {
repeated hadoop.common.CredentialsKVProto tokens = 1;
repeated hadoop.common.CredentialsKVProto secrets = 2;
}
message GetDelegationTokenRequestProto {
required string renewer = 1;
}
message GetDelegationTokenResponseProto {
optional hadoop.common.TokenProto token = 1;
}
message RenewDelegationTokenRequestProto {
required hadoop.common.TokenProto token = 1;
}
message RenewDelegationTokenResponseProto {
required uint64 newExpiryTime = 1;
}
message CancelDelegationTokenRequestProto {
required hadoop.common.TokenProto token = 1;
}
message CancelDelegationTokenResponseProto { // void response
}
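These messages back the generic delegation-token RPCs. A small sketch of building a token and wrapping it in a renew request and a credentials bundle with the generated Java classes, assuming the standard protobuf-java API (the identifier, kind, service, and alias values are placeholders):

import com.google.protobuf.ByteString;
import org.apache.hadoop.security.proto.SecurityProtos.CredentialsKVProto;
import org.apache.hadoop.security.proto.SecurityProtos.CredentialsProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;

public class SecurityProtosExample {
  public static void main(String[] args) {
    // All four TokenProto fields are required.
    TokenProto token = TokenProto.newBuilder()
        .setIdentifier(ByteString.copyFromUtf8("token-identifier"))  // placeholder bytes
        .setPassword(ByteString.copyFromUtf8("token-password"))      // placeholder bytes
        .setKind("OzoneToken")                                       // hypothetical token kind
        .setService("om:9862")                                       // hypothetical service address
        .build();

    RenewDelegationTokenRequestProto renew =
        RenewDelegationTokenRequestProto.newBuilder().setToken(token).build();

    // A credentials bundle pairs aliases with tokens and/or secrets.
    CredentialsProto creds = CredentialsProto.newBuilder()
        .addTokens(CredentialsKVProto.newBuilder()
            .setAlias("om-token")
            .setToken(token))
        .build();

    System.out.println("renew request bytes: " + renew.toByteArray().length);
    System.out.println("credentials tokens:  " + creds.getTokensCount());
  }
}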

View File

@ -0,0 +1,82 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone</artifactId>
<version>0.4.1-SNAPSHOT</version>
</parent>
<artifactId>hadoop-ozone-assemblies</artifactId>
<name>Apache Hadoop Ozone Assemblies</name>
<description>Apache Hadoop Ozone Assemblies</description>
<properties>
<failIfNoTests>false</failIfNoTests>
</properties>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<inherited>false</inherited>
<configuration>
<rules>
<requireMavenVersion>
<version>${enforced.maven.version}</version>
</requireMavenVersion>
<requireJavaVersion>
<version>${enforced.java.version}</version>
</requireJavaVersion>
</rules>
</configuration>
<executions>
<execution>
<id>clean</id>
<goals>
<goal>enforce</goal>
</goals>
<phase>pre-clean</phase>
</execution>
<execution>
<id>default</id>
<goals>
<goal>enforce</goal>
</goals>
<phase>validate</phase>
</execution>
<execution>
<id>site</id>
<goals>
<goal>enforce</goal>
</goals>
<phase>pre-site</phase>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
</plugin>
</plugins>
</build>
</project>

View File

@ -17,39 +17,50 @@
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
<id>hadoop-src</id>
<id>ozone-src</id>
<formats>
<format>tar.gz</format>
</formats>
<includeBaseDirectory>true</includeBaseDirectory>
<files>
<file>
<source>pom.ozone.xml</source>
<outputDirectory>/</outputDirectory>
<destName>pom.xml</destName>
</file>
</files>
<fileSets>
<fileSet>
<directory>.</directory>
<includes>
<include>LICENCE.txt</include>
<include>pom.ozone.xml</include>
<include>LICENSE.txt</include>
<include>README.txt</include>
<include>NOTICE.txt</include>
</includes>
</fileSet>
<fileSet>
<directory>.</directory>
<directory>hadoop-hdds</directory>
<useDefaultExcludes>true</useDefaultExcludes>
<excludes>
<exclude>.git/**</exclude>
<exclude>**/.gitignore</exclude>
<exclude>**/.svn</exclude>
<exclude>**/*.iws</exclude>
<exclude>**/*.ipr</exclude>
<exclude>**/*.iml</exclude>
<exclude>**/.classpath</exclude>
<exclude>**/.project</exclude>
<exclude>**/.settings</exclude>
<exclude>**/*.iml</exclude>
<exclude>**/target/**</exclude>
</excludes>
</fileSet>
<fileSet>
<directory>hadoop-ozone</directory>
<useDefaultExcludes>true</useDefaultExcludes>
<excludes>
<exclude>**/ozone-recon-web/build/**</exclude>
<exclude>**/ozone-recon-web/node_modules/**</exclude>
<exclude>**/.classpath</exclude>
<exclude>**/.project</exclude>
<exclude>**/.settings</exclude>
<exclude>**/*.iml</exclude>
<exclude>**/target/**</exclude>
<!-- until the code that does this is fixed -->
<exclude>**/*.log</exclude>
<exclude>**/build/**</exclude>
<exclude>**/file:/**</exclude>
<exclude>**/SecurityAuth.audit*</exclude>
</excludes>
</fileSet>
</fileSets>

View File

@ -138,9 +138,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<protocVersion>${protobuf.version}</protocVersion>
<protocCommand>${protoc.path}</protocCommand>
<imports>
<param>
${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
</param>
<param>
${basedir}/../../hadoop-hdds/common/src/main/proto/
</param>

View File

@ -86,7 +86,7 @@ run mkdir -p ./etc
run mkdir -p ./libexec
run mkdir -p ./tests
run cp -r "${ROOT}/hadoop-common-project/hadoop-common/src/main/conf" "etc/hadoop"
run cp -r "${ROOT}/hadoop-hdds/common/src/main/conf/" "etc/hadoop"
run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties" "etc/hadoop"
run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties" "etc/hadoop"
run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties" "etc/hadoop"
@ -97,15 +97,15 @@ run cp "${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-nodegroup
run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone" "bin/"
run cp -r "${ROOT}/hadoop-ozone/dist/src/main/dockerbin" "bin/docker"
run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh" "libexec/"
run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd" "libexec/"
run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh" "libexec/"
run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-config.sh" "libexec/"
run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-config.cmd" "libexec/"
run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-functions.sh" "libexec/"
run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone-config.sh" "libexec/"
run cp -r "${ROOT}/hadoop-ozone/common/src/main/shellprofile.d" "libexec/"
run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh" "sbin/"
run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/workers.sh" "sbin/"
run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh" "sbin/"
run cp "${ROOT}/hadoop-hdds/common/src/main/bin/workers.sh" "sbin/"
run cp "${ROOT}/hadoop-ozone/common/src/main/bin/start-ozone.sh" "sbin/"
run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/"

View File

@ -38,8 +38,8 @@ function run()
fi
}
run tar -c -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
run gzip -f "ozone-${VERSION}.tar"
run tar -c -f "hadoop-ozone-${VERSION}.tar" "ozone-${VERSION}"
run gzip -f "hadoop-ozone-${VERSION}.tar"
echo
echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"
echo "Ozone dist tar available at: ${BASEDIR}/hadoop-ozone-${VERSION}.tar.gz"
echo

View File

@ -83,8 +83,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
</properties>
<modules>
<module>hadoop-ozone</module>
<module>hadoop-hdds</module>
<module>hadoop-ozone</module>
</modules>
<profiles>
@ -109,12 +109,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<configuration>
<appendAssemblyId>false</appendAssemblyId>
<attach>false</attach>
<finalName>hadoop-${project.version}-src</finalName>
<outputDirectory>hadoop-dist/target</outputDirectory>
<finalName>hadoop-ozone-${project.version}-src</finalName>
<outputDirectory>hadoop-ozone/dist/target</outputDirectory>
<!-- Not using descriptorRef and hadoop-assembly dependency -->
<!-- to avoid making hadoop-main to depend on a module -->
<descriptors>
<descriptor>hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml</descriptor>
<descriptor>hadoop-ozone/assemblies/src/main/resources/assemblies/ozone-src.xml</descriptor>
</descriptors>
</configuration>
</execution>
@ -134,63 +134,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<configuration>
<target>
<echo/>
<echo>Hadoop source tar available at: ${basedir}/hadoop-dist/target/hadoop-${project.version}-src.tar.gz</echo>
<echo/>
</target>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>hdds-src</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<inherited>false</inherited>
<executions>
<execution>
<id>src-dist</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<appendAssemblyId>false</appendAssemblyId>
<attach>false</attach>
<finalName>hadoop-${project.version}-src-with-hdds</finalName>
<outputDirectory>hadoop-dist/target</outputDirectory>
<!-- Not using descriptorRef and hadoop-assembly dependency -->
<!-- to avoid making hadoop-main to depend on a module -->
<descriptors>
<descriptor>hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml</descriptor>
</descriptors>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<inherited>false</inherited>
<executions>
<execution>
<id>src-dist-msg</id>
<phase>package</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<echo/>
<echo>Hadoop source tar (including HDDS) available at: ${basedir}/hadoop-dist/target/hadoop-${project.version}-src-with-hdds.tar.gz</echo>
<echo>Hadoop Ozone source tar available at: ${basedir}/hadoop-ozone/dist/target/hadoop-ozone-${project.version}-src.tar.gz</echo>
<echo/>
</target>
</configuration>
@ -296,15 +240,5 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
</plugins>
</build>
</profile>
<profile>
<id>hdds</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<modules>
<module>hadoop-ozone</module>
<module>hadoop-hdds</module>
</modules>
</profile>
</profiles>
</project>

58
pom.xml
View File

@ -587,62 +587,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x
</plugins>
</build>
</profile>
<profile>
<id>hdds-src</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<inherited>false</inherited>
<executions>
<execution>
<id>src-dist</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
<configuration>
<appendAssemblyId>false</appendAssemblyId>
<attach>false</attach>
<finalName>hadoop-${project.version}-src-with-hdds</finalName>
<outputDirectory>hadoop-dist/target</outputDirectory>
<!-- Not using descriptorRef and hadoop-assembly dependency -->
<!-- to avoid making hadoop-main to depend on a module -->
<descriptors>
<descriptor>hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml</descriptor>
</descriptors>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<inherited>false</inherited>
<executions>
<execution>
<id>src-dist-msg</id>
<phase>package</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<echo/>
<echo>Hadoop source tar (including HDDS) available at: ${basedir}/hadoop-dist/target/hadoop-${project.version}-src-with-hdds.tar.gz</echo>
<echo/>
</target>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>submarine-src</id>
<activation>
@ -802,8 +746,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x
<activeByDefault>false</activeByDefault>
</activation>
<modules>
<module>hadoop-ozone</module>
<module>hadoop-hdds</module>
<module>hadoop-ozone</module>
</modules>
</profile>
<profile>