Merge trunk into HA branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1196458 13f79535-47bb-0310-9956-ffa450edef68
Author: Aaron Myers
Date:   2011-11-02 05:34:31 +00:00
Commit: 571d7990f4
990 changed files with 55880 additions and 37998 deletions

.gitignore

@ -7,3 +7,4 @@
.project
.settings
target
hadoop-hdfs-project/hadoop-hdfs/downloads


@ -1,5 +1,4 @@
Build instructions for Hadoop Common/HDFS using Maven
Build instructions for Hadoop
----------------------------------------------------------------------------------
Requirements:
@ -9,19 +8,24 @@ Requirements:
* Maven 3.0
* Forrest 0.8 (if generating docs)
* Findbugs 1.3.9 (if running findbugs)
* ProtocolBuffer 2.4.1+ (for MapReduce)
* Autotools (if compiling native code)
* Internet connection for first build (to fetch all Maven and Hadoop dependencies)
----------------------------------------------------------------------------------
Maven modules:
Maven main modules:
hadoop (Main Hadoop project)
- hadoop-project (Parent POM for all Hadoop Maven modules.)
(All plugin & dependency versions are defined here.)
- hadoop-project-dist (Parent POM for modules that generate distributions.)
- hadoop-annotations (Generates the Hadoop doclet used to generate the Javadocs)
- hadoop-common (Hadoop Common)
- hadoop-hdfs (Hadoop HDFS)
- hadoop-assemblies (Maven assemblies used by the different modules)
- hadoop-common-project (Hadoop Common)
- hadoop-hdfs-project (Hadoop HDFS)
- hadoop-mapreduce-project (Hadoop MapReduce)
- hadoop-tools (Hadoop tools like Streaming, Distcp, etc.)
- hadoop-dist (Hadoop distribution assembler)
----------------------------------------------------------------------------------
Where to run Maven from?
@ -45,6 +49,7 @@ Maven build goals:
* Run Rat : mvn apache-rat:check
* Build javadocs : mvn javadoc:javadoc
* Build distribution : mvn package [-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar]
* Change Hadoop version : mvn versions:set -DnewVersion=NEWVERSION
Build options:
@ -52,15 +57,34 @@ Maven build goals:
* Use -Dsnappy.prefix=(/usr/local) & -Dbundle.snappy=(false) to compile
Snappy JNI bindings and to bundle Snappy SO files
* Use -Pdocs to generate & bundle the documentation in the distribution (using -Pdist)
* Use -Psrc to bundle the source in the distribution (using -Pdist)
* Use -Psrc to create a project source TAR.GZ
* Use -Dtar to create a TAR with the distribution (using -Pdist)
Test options:
* Use -DskipTests to skip tests when running the following Maven goals:
'package', 'install', 'deploy' or 'verify'
* -Dtest=<TESTCLASSNAME>,....
* -Dtest=<TESTCLASSNAME>,<TESTCLASSNAME#METHODNAME>,....
* -Dtest.exclude=<TESTCLASSNAME>
* -Dtest.exclude.pattern=**/<TESTCLASSNAME1>.java,**/<TESTCLASSNAME2>.java
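
For example, a single test class or method can be selected while building ('TestFooBar' is a placeholder name used here for illustration, not an actual Hadoop test class):
$ mvn install -Dtest=TestFooBar
$ mvn install -Dtest=TestFooBar#testBaz
$ mvn install -Dtest.exclude=TestFooBar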
----------------------------------------------------------------------------------
Building distributions:
Create binary distribution without native code and without documentation:
$ mvn package -Pdist -DskipTests -Dtar
Create binary distribution with native code and with documentation:
$ mvn package -Pdist,native,docs -DskipTests -Dtar
Create source distribution:
$ mvn package -Psrc -DskipTests
Create source and binary distributions with native code and documentation:
$ mvn package -Pdist,native,docs,src -DskipTests -Dtar
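
If the build succeeds, the generated tarballs can be sanity-checked by listing the build output; hadoop-dist/target is assumed here as the usual output location of the hadoop-dist module:
$ ls hadoop-dist/target/hadoop-*.tar.gz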
----------------------------------------------------------------------------------


@ -1,72 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
PATCH_FILE=$1
if [ -z "$PATCH_FILE" ]; then
echo usage: $0 patch-file
exit 1
fi
PATCH=${PATCH:-patch} # allow overriding patch binary
# Cleanup handler for temporary files
TOCLEAN=""
cleanup() {
rm $TOCLEAN
exit $1
}
trap "cleanup 1" HUP INT QUIT TERM
# Allow passing "-" for stdin patches
if [ "$PATCH_FILE" == "-" ]; then
PATCH_FILE=/tmp/tmp.in.$$
cat /dev/fd/0 > $PATCH_FILE
TOCLEAN="$TOCLEAN $PATCH_FILE"
fi
# Come up with a list of changed files into $TMP
TMP=/tmp/tmp.paths.$$
TOCLEAN="$TOCLEAN $TMP"
grep '^+++\|^---' $PATCH_FILE | cut -c '5-' | grep -v /dev/null | sort | uniq > $TMP
# Assume p0 to start
PLEVEL=0
# if all of the lines start with a/ or b/, then this is a git patch that
# was generated without --no-prefix
if ! grep -qv '^a/\|^b/' $TMP ; then
echo Looks like this is a git patch. Stripping a/ and b/ prefixes
echo and incrementing PLEVEL
PLEVEL=$[$PLEVEL + 1]
sed -i -e 's,^[ab]/,,' $TMP
fi
# if all of the lines start with common/, hdfs/, or mapreduce/, this is
# relative to the hadoop root instead of the subproject root, so we need
# to chop off another layer
PREFIX_DIRS=$(cut -d '/' -f 1 $TMP | sort | uniq)
if [[ "$PREFIX_DIRS" =~ ^(hdfs|common|mapreduce)$ ]]; then
echo Looks like this is relative to project root. Increasing PLEVEL
PLEVEL=$[$PLEVEL + 1]
elif ! echo "$PREFIX_DIRS" | grep -vxq 'common\|hdfs\|mapreduce' ; then
echo Looks like this is a cross-subproject patch. Not supported!
exit 1
fi
echo Going to apply patch with: $PATCH -p$PLEVEL
$PATCH -p$PLEVEL -E < $PATCH_FILE
cleanup 0
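
A minimal usage sketch for this helper, with an illustrative patch file name (the last line only demonstrates the PATCH override described above):
$ ./smart-apply-patch.sh HDFS-0000.patch
$ cat HDFS-0000.patch | ./smart-apply-patch.sh -
$ PATCH=gpatch ./smart-apply-patch.sh HDFS-0000.patch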


@ -1,701 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#set -x
ulimit -n 1024
### Setup some variables.
### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process
### Read variables from properties file
bindir=$(dirname $0)
. $bindir/../test-patch.properties
###############################################################################
parseArgs() {
case "$1" in
HUDSON)
### Set HUDSON to true to indicate that this script is being run by Hudson
HUDSON=true
if [[ $# != 16 ]] ; then
echo "ERROR: usage $0 HUDSON <PATCH_DIR> <SUPPORT_DIR> <PS_CMD> <WGET_CMD> <JIRACLI> <SVN_CMD> <GREP_CMD> <PATCH_CMD> <FINDBUGS_HOME> <FORREST_HOME> <ECLIPSE_HOME> <WORKSPACE_BASEDIR> <JIRA_PASSWD> <CURL_CMD> <DEFECT> "
cleanupAndExit 0
fi
PATCH_DIR=$2
SUPPORT_DIR=$3
PS=$4
WGET=$5
JIRACLI=$6
SVN=$7
GREP=$8
PATCH=$9
FINDBUGS_HOME=${10}
FORREST_HOME=${11}
ECLIPSE_HOME=${12}
BASEDIR=${13}
JIRA_PASSWD=${14}
CURL=${15}
defect=${16}
### Retrieve the defect number
if [ -z "$defect" ] ; then
echo "Could not determine the patch to test. Exiting."
cleanupAndExit 0
fi
if [ ! -e "$PATCH_DIR" ] ; then
mkdir -p $PATCH_DIR
fi
ECLIPSE_PROPERTY="-Declipse.home=$ECLIPSE_HOME"
;;
DEVELOPER)
### Set HUDSON to false to indicate that this script is being run by a developer
HUDSON=false
if [[ $# != 9 ]] ; then
echo "ERROR: usage $0 DEVELOPER <PATCH_FILE> <SCRATCH_DIR> <SVN_CMD> <GREP_CMD> <PATCH_CMD> <FINDBUGS_HOME> <FORREST_HOME> <WORKSPACE_BASEDIR>"
cleanupAndExit 0
fi
### PATCH_FILE contains the location of the patchfile
PATCH_FILE=$2
if [[ ! -e "$PATCH_FILE" ]] ; then
echo "Unable to locate the patch file $PATCH_FILE"
cleanupAndExit 0
fi
PATCH_DIR=$3
### Check if $PATCH_DIR exists. If it does not exist, create a new directory
if [[ ! -e "$PATCH_DIR" ]] ; then
mkdir "$PATCH_DIR"
if [[ $? == 0 ]] ; then
echo "$PATCH_DIR has been created"
else
echo "Unable to create $PATCH_DIR"
cleanupAndExit 0
fi
fi
SVN=$4
GREP=$5
PATCH=$6
FINDBUGS_HOME=$7
FORREST_HOME=$8
BASEDIR=$9
### Obtain the patch filename to append it to the version number
defect=`basename $PATCH_FILE`
;;
*)
echo "ERROR: usage $0 HUDSON [args] | DEVELOPER [args]"
cleanupAndExit 0
;;
esac
}
###############################################################################
checkout () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Testing patch for ${defect}."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
### When run by a developer, if the workspace contains modifications, do not continue
status=`$SVN stat --ignore-externals | sed -e '/^X[ ]*/D'`
if [[ $HUDSON == "false" ]] ; then
if [[ "$status" != "" ]] ; then
echo "ERROR: can't run in a workspace that contains the following modifications"
echo "$status"
cleanupAndExit 1
fi
else
cd $BASEDIR
$SVN revert -R .
rm -rf `$SVN status --no-ignore`
$SVN update
fi
return $?
}
###############################################################################
setup () {
### Download latest patch file (ignoring .htm and .html) when run from patch process
if [[ $HUDSON == "true" ]] ; then
$WGET -q -O $PATCH_DIR/jira http://issues.apache.org/jira/browse/$defect
if [[ `$GREP -c 'Patch Available' $PATCH_DIR/jira` == 0 ]] ; then
echo "$defect is not \"Patch Available\". Exiting."
cleanupAndExit 0
fi
relativePatchURL=`$GREP -o '"/jira/secure/attachment/[0-9]*/[^"]*' $PATCH_DIR/jira | $GREP -v -e 'htm[l]*$' | sort | tail -1 | $GREP -o '/jira/secure/attachment/[0-9]*/[^"]*'`
patchURL="http://issues.apache.org${relativePatchURL}"
patchNum=`echo $patchURL | $GREP -o '[0-9]*/' | $GREP -o '[0-9]*'`
echo "$defect patch is being downloaded at `date` from"
echo "$patchURL"
$WGET -q -O $PATCH_DIR/patch $patchURL
VERSION=${SVN_REVISION}_${defect}_PATCH-${patchNum}
JIRA_COMMENT="Here are the results of testing the latest attachment
$patchURL
against trunk revision ${SVN_REVISION}."
### Copy in any supporting files needed by this process
cp -r $SUPPORT_DIR/lib/* ./lib
#PENDING: cp -f $SUPPORT_DIR/etc/checkstyle* ./src/test
### Copy the patch file to $PATCH_DIR
else
VERSION=PATCH-${defect}
cp $PATCH_FILE $PATCH_DIR/patch
if [[ $? == 0 ]] ; then
echo "Patch file $PATCH_FILE copied to $PATCH_DIR"
else
echo "Could not copy $PATCH_FILE to $PATCH_DIR"
cleanupAndExit 0
fi
fi
### exit if warnings are NOT defined in the properties file
if [ -z "$OK_FINDBUGS_WARNINGS" ] || [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]]; then
echo "Please define the following properties in test-patch.properties file"
echo "OK_FINDBUGS_WARNINGS"
echo "OK_RELEASEAUDIT_WARNINGS"
echo "OK_JAVADOC_WARNINGS"
cleanupAndExit 1
fi
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Pre-build trunk to verify trunk stability and javac warnings"
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
echo "$ANT_HOME/bin/ant -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -D${PROJECT_NAME}PatchProcess= clean tar > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
$ANT_HOME/bin/ant -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -D${PROJECT_NAME}PatchProcess= clean tar > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
if [[ $? != 0 ]] ; then
echo "Trunk compilation is broken?"
cleanupAndExit 1
fi
}
###############################################################################
### Check for @author tags in the patch
checkAuthor () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Checking there are no @author tags in the patch."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
authorTags=`$GREP -c -i '@author' $PATCH_DIR/patch`
echo "There appear to be $authorTags @author tags in the patch."
if [[ $authorTags != 0 ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 @author. The patch appears to contain $authorTags @author tags which the Hadoop community has agreed to not allow in code contributions."
return 1
fi
JIRA_COMMENT="$JIRA_COMMENT
+1 @author. The patch does not contain any @author tags."
return 0
}
###############################################################################
### Check for tests in the patch
checkTests () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Checking there are new or changed tests in the patch."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
testReferences=`$GREP -c -i '/test' $PATCH_DIR/patch`
echo "There appear to be $testReferences test files referenced in the patch."
if [[ $testReferences == 0 ]] ; then
if [[ $HUDSON == "true" ]] ; then
patchIsDoc=`$GREP -c -i 'title="documentation' $PATCH_DIR/jira`
if [[ $patchIsDoc != 0 ]] ; then
echo "The patch appears to be a documentation patch that doesn't require tests."
JIRA_COMMENT="$JIRA_COMMENT
+0 tests included. The patch appears to be a documentation patch that doesn't require tests."
return 0
fi
fi
JIRA_COMMENT="$JIRA_COMMENT
-1 tests included. The patch doesn't appear to include any new or modified tests.
Please justify why no new tests are needed for this patch.
Also please list what manual steps were performed to verify this patch."
return 1
fi
JIRA_COMMENT="$JIRA_COMMENT
+1 tests included. The patch appears to include $testReferences new or modified tests."
return 0
}
cleanUpXml () {
cd $BASEDIR/conf
for file in `ls *.xml.template`
do
rm -f `basename $file .template`
done
cd $BASEDIR
}
###############################################################################
### Attempt to apply the patch
applyPatch () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Applying patch."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
export PATCH
$bindir/smart-apply-patch.sh $PATCH_DIR/patch
if [[ $? != 0 ]] ; then
echo "PATCH APPLICATION FAILED"
JIRA_COMMENT="$JIRA_COMMENT
-1 patch. The patch command could not apply the patch."
return 1
fi
return 0
}
###############################################################################
### Check there are no javadoc warnings
checkJavadocWarnings () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Determining number of patched javadoc warnings."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= clean javadoc | tee $PATCH_DIR/patchJavadocWarnings.txt"
$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= clean javadoc | tee $PATCH_DIR/patchJavadocWarnings.txt
javadocWarnings=`$GREP -o '\[javadoc\] [0-9]* warning' $PATCH_DIR/patchJavadocWarnings.txt | awk '{total += $2} END {print total}'`
echo ""
echo ""
echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
### if current warnings greater than OK_JAVADOC_WARNINGS
if [[ $javadocWarnings > $OK_JAVADOC_WARNINGS ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 javadoc. The javadoc tool appears to have generated `expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages."
return 1
fi
JIRA_COMMENT="$JIRA_COMMENT
+1 javadoc. The javadoc tool did not generate any warning messages."
return 0
}
###############################################################################
### Check there are no changes in the number of Javac warnings
checkJavacWarnings () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Determining number of patched javac warnings."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= clean tar > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
$ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= clean tar > $PATCH_DIR/patchJavacWarnings.txt 2>&1
if [[ $? != 0 ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 javac. The patch appears to cause tar ant target to fail."
return 1
fi
### Compare trunk and patch javac warning numbers
if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then
trunkJavacWarnings=`$GREP -o '\[javac\] [0-9]* warning' $PATCH_DIR/trunkJavacWarnings.txt | awk '{total += $2} END {print total}'`
patchJavacWarnings=`$GREP -o '\[javac\] [0-9]* warning' $PATCH_DIR/patchJavacWarnings.txt | awk '{total += $2} END {print total}'`
echo "There appear to be $trunkJavacWarnings javac compiler warnings before the patch and $patchJavacWarnings javac compiler warnings after applying the patch."
if [[ $patchJavacWarnings != "" && $trunkJavacWarnings != "" ]] ; then
if [[ $patchJavacWarnings -gt $trunkJavacWarnings ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 javac. The applied patch generated $patchJavacWarnings javac compiler warnings (more than the trunk's current $trunkJavacWarnings warnings)."
return 1
fi
fi
fi
JIRA_COMMENT="$JIRA_COMMENT
+1 javac. The applied patch does not increase the total number of javac compiler warnings."
return 0
}
###############################################################################
### Check there are no changes in the number of release audit (RAT) warnings
checkReleaseAuditWarnings () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Determining number of patched release audit warnings."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/patchReleaseAuditWarnings.txt 2>&1"
$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/patchReleaseAuditWarnings.txt 2>&1
### Compare trunk and patch release audit warning numbers
if [[ -f $PATCH_DIR/patchReleaseAuditWarnings.txt ]] ; then
patchReleaseAuditWarnings=`$GREP -c '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt`
echo ""
echo ""
echo "There appear to be $OK_RELEASEAUDIT_WARNINGS release audit warnings before the patch and $patchReleaseAuditWarnings release audit warnings after applying the patch."
if [[ $patchReleaseAuditWarnings != "" && $OK_RELEASEAUDIT_WARNINGS != "" ]] ; then
if [[ $patchReleaseAuditWarnings -gt $OK_RELEASEAUDIT_WARNINGS ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 release audit. The applied patch generated $patchReleaseAuditWarnings release audit warnings (more than the trunk's current $OK_RELEASEAUDIT_WARNINGS warnings)."
$GREP '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt > $PATCH_DIR/patchReleaseAuditProblems.txt
echo "Lines that start with ????? in the release audit report indicate files that do not have an Apache license header." >> $PATCH_DIR/patchReleaseAuditProblems.txt
JIRA_COMMENT_FOOTER="Release audit warnings: $BUILD_URL/artifact/trunk/patchprocess/patchReleaseAuditProblems.txt
$JIRA_COMMENT_FOOTER"
return 1
fi
fi
fi
JIRA_COMMENT="$JIRA_COMMENT
+1 release audit. The applied patch does not increase the total number of release audit warnings."
return 0
}
###############################################################################
### Check there are no changes in the number of Checkstyle warnings
checkStyle () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Determining number of patched checkstyle warnings."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
echo "THIS IS NOT IMPLEMENTED YET"
echo ""
echo ""
echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= checkstyle"
$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= checkstyle
JIRA_COMMENT_FOOTER="Checkstyle results: $BUILD_URL/artifact/trunk/build/test/checkstyle-errors.html
$JIRA_COMMENT_FOOTER"
### TODO: calculate actual patchStyleErrors
# patchStyleErrors=0
# if [[ $patchStyleErrors != 0 ]] ; then
# JIRA_COMMENT="$JIRA_COMMENT
#
# -1 checkstyle. The patch generated $patchStyleErrors code style errors."
# return 1
# fi
# JIRA_COMMENT="$JIRA_COMMENT
#
# +1 checkstyle. The patch generated 0 code style errors."
return 0
}
###############################################################################
### Check there are no changes in the number of Findbugs warnings
checkFindbugsWarnings () {
findbugs_version=`${FINDBUGS_HOME}/bin/findbugs -version`
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Determining number of patched Findbugs warnings."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=$FINDBUGS_HOME -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs"
$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=${FINDBUGS_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs
if [ $? != 0 ] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 findbugs. The patch appears to cause Findbugs (version ${findbugs_version}) to fail."
return 1
fi
JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/build/test/findbugs/newPatchFindbugsWarnings.html
$JIRA_COMMENT_FOOTER"
cp $BASEDIR/build/test/findbugs/*.xml $PATCH_DIR/patchFindbugsWarnings.xml
$FINDBUGS_HOME/bin/setBugDatabaseInfo -timestamp "01/01/2000" \
$PATCH_DIR/patchFindbugsWarnings.xml \
$PATCH_DIR/patchFindbugsWarnings.xml
findbugsWarnings=`$FINDBUGS_HOME/bin/filterBugs -first "01/01/2000" $PATCH_DIR/patchFindbugsWarnings.xml \
$BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.xml | /usr/bin/awk '{print $1}'`
$FINDBUGS_HOME/bin/convertXmlToText -html \
$BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.xml \
$BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.html
cp $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.html $PATCH_DIR/newPatchFindbugsWarnings.html
cp $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.xml $PATCH_DIR/newPatchFindbugsWarnings.xml
### if current warnings greater than OK_FINDBUGS_WARNINGS
if [[ $findbugsWarnings > $OK_FINDBUGS_WARNINGS ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 findbugs. The patch appears to introduce `expr $(($findbugsWarnings-$OK_FINDBUGS_WARNINGS))` new Findbugs (version ${findbugs_version}) warnings."
return 1
fi
JIRA_COMMENT="$JIRA_COMMENT
+1 findbugs. The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings."
return 0
}
###############################################################################
### Run the test-core target
runCoreTests () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Running core tests."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
### Kill any rogue build processes from the last attempt
$PS auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
PreTestTarget=""
if [[ $defect == MAPREDUCE-* ]] ; then
PreTestTarget="create-c++-configure"
fi
echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME $PreTestTarget test-core"
$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME $PreTestTarget test-core
if [[ $? != 0 ]] ; then
### Find and format names of failed tests
failed_tests=`grep -l -E "<failure|<error" $WORKSPACE/trunk/build/test/*.xml | sed -e "s|.*build/test/TEST-| |g" | sed -e "s|\.xml||g"`
JIRA_COMMENT="$JIRA_COMMENT
-1 core tests. The patch failed these core unit tests:
$failed_tests"
return 1
fi
JIRA_COMMENT="$JIRA_COMMENT
+1 core tests. The patch passed core unit tests."
return 0
}
###############################################################################
### Run the test-contrib target
runContribTests () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Running contrib tests."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
if [[ `$GREP -c 'test-contrib' build.xml` == 0 ]] ; then
echo "No contrib tests in this project."
return 0
fi
### Kill any rogue build processes from the last attempt
$PS auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib"
$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib
if [[ $? != 0 ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 contrib tests. The patch failed contrib unit tests."
return 1
fi
JIRA_COMMENT="$JIRA_COMMENT
+1 contrib tests. The patch passed contrib unit tests."
return 0
}
###############################################################################
### Run the inject-system-faults target
checkInjectSystemFaults () {
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Checking the integrity of system test framework code."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
### Kill any rogue build processes from the last attempt
$PS auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults"
$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults
if [[ $? != 0 ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 system test framework. The patch failed system test framework compile."
return 1
fi
JIRA_COMMENT="$JIRA_COMMENT
+1 system test framework. The patch passed system test framework compile."
return 0
}
###############################################################################
### Submit a comment to the defect's Jira
submitJiraComment () {
local result=$1
### Do not output the value of JIRA_COMMENT_FOOTER when run by a developer
if [[ $HUDSON == "false" ]] ; then
JIRA_COMMENT_FOOTER=""
fi
if [[ $result == 0 ]] ; then
comment="+1 overall. $JIRA_COMMENT
$JIRA_COMMENT_FOOTER"
else
comment="-1 overall. $JIRA_COMMENT
$JIRA_COMMENT_FOOTER"
fi
### Output the test result to the console
echo "
$comment"
if [[ $HUDSON == "true" ]] ; then
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Adding comment to Jira."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
### Update Jira with a comment
export USER=hudson
$JIRACLI -s https://issues.apache.org/jira -a addcomment -u hadoopqa -p $JIRA_PASSWD --comment "$comment" --issue $defect
$JIRACLI -s https://issues.apache.org/jira -a logout -u hadoopqa -p $JIRA_PASSWD
fi
}
###############################################################################
### Cleanup files
cleanupAndExit () {
local result=$1
if [[ $HUDSON == "true" ]] ; then
if [ -e "$PATCH_DIR" ] ; then
mv $PATCH_DIR $BASEDIR
fi
fi
echo ""
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Finished build."
echo "======================================================================"
echo "======================================================================"
echo ""
echo ""
exit $result
}
###############################################################################
###############################################################################
###############################################################################
JIRA_COMMENT=""
JIRA_COMMENT_FOOTER="Console output: $BUILD_URL/console
This message is automatically generated."
### Check if arguments to the script have been specified properly or not
parseArgs $@
cd $BASEDIR
checkout
RESULT=$?
if [[ $HUDSON == "true" ]] ; then
if [[ $RESULT != 0 ]] ; then
exit 100
fi
fi
setup
checkAuthor
RESULT=$?
if [[ $HUDSON == "true" ]] ; then
cleanUpXml
fi
checkTests
(( RESULT = RESULT + $? ))
applyPatch
if [[ $? != 0 ]] ; then
submitJiraComment 1
cleanupAndExit 1
fi
checkJavadocWarnings
(( RESULT = RESULT + $? ))
checkJavacWarnings
(( RESULT = RESULT + $? ))
### Checkstyle not implemented yet
#checkStyle
#(( RESULT = RESULT + $? ))
checkFindbugsWarnings
(( RESULT = RESULT + $? ))
checkReleaseAuditWarnings
(( RESULT = RESULT + $? ))
### Do not call these when run by a developer
if [[ $HUDSON == "true" ]] ; then
runCoreTests
(( RESULT = RESULT + $? ))
runContribTests
(( RESULT = RESULT + $? ))
fi
checkInjectSystemFaults
(( RESULT = RESULT + $? ))
JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/
$JIRA_COMMENT_FOOTER"
submitJiraComment $RESULT
cleanupAndExit $RESULT
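
A rough sketch of a developer-mode run, following the DEVELOPER usage string above (the patch name and all tool/workspace paths are illustrative, and test-patch.properties must still be present one directory above the script):
$ ./test-patch.sh DEVELOPER /tmp/HADOOP-0000.patch /tmp/patch-scratch \
    /usr/bin/svn /bin/grep /usr/bin/patch \
    /opt/findbugs-1.3.9 /opt/forrest-0.8 /path/to/hadoop-checkout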


@ -598,19 +598,22 @@ runTests () {
echo ""
echo ""
echo "$MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess"
$MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess
echo "$MVN clean install -Pnative -D${PROJECT_NAME}PatchProcess"
$MVN clean install -Pnative -D${PROJECT_NAME}PatchProcess
if [[ $? != 0 ]] ; then
### Find and format names of failed tests
failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-| |g" | sed -e "s|\.xml||g"`
fi
if [[ -n "$failed_tests" ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
-1 core tests. The patch failed these unit tests:
$failed_tests"
else
JIRA_COMMENT="$JIRA_COMMENT
-1 core tests. The patch failed the unit tests build"
fi
return 1
fi
JIRA_COMMENT="$JIRA_COMMENT


@ -107,10 +107,6 @@
<directory>${project.build.directory}/site</directory>
<outputDirectory>/share/doc/hadoop/${hadoop.component}</outputDirectory>
</fileSet>
<fileSet>
<directory>${project.build.directory}/src</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/src</outputDirectory>
</fileSet>
</fileSets>
<dependencySets>
<dependencySet>


@ -0,0 +1,79 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
<id>hadoop-mapreduce-dist</id>
<formats>
<format>dir</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<!-- TODO: this layout is wrong. We need module specific bin files in module specific dirs -->
<fileSets>
<fileSet>
<directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/target/native/target/usr/local/bin</directory>
<outputDirectory>bin</outputDirectory>
<fileMode>0755</fileMode>
</fileSet>
<fileSet>
<directory>hadoop-yarn/bin</directory>
<outputDirectory>bin</outputDirectory>
<includes>
<include>*</include>
</includes>
<fileMode>0755</fileMode>
</fileSet>
<fileSet>
<directory>bin</directory>
<outputDirectory>bin</outputDirectory>
<includes>
<include>*</include>
</includes>
<fileMode>0755</fileMode>
</fileSet>
<fileSet>
<directory>hadoop-yarn/conf</directory>
<outputDirectory>conf</outputDirectory>
<includes>
<include>**/*</include>
</includes>
</fileSet>
</fileSets>
<moduleSets>
<moduleSet>
<excludes>
<exclude>org.apache.hadoop:hadoop-yarn-server-tests</exclude>
</excludes>
<binaries>
<outputDirectory>modules</outputDirectory>
<includeDependencies>false</includeDependencies>
<unpack>false</unpack>
</binaries>
</moduleSet>
</moduleSets>
<dependencySets>
<dependencySet>
<useProjectArtifact>false</useProjectArtifact>
<outputDirectory>/lib</outputDirectory>
<!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
<excludes>
<exclude>org.apache.hadoop:hadoop-common</exclude>
<exclude>org.apache.hadoop:hadoop-hdfs</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>


@ -19,18 +19,29 @@
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
<id>hadoop-src</id>
<formats>
<format>dir</format>
<format>tar.gz</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<includeBaseDirectory>true</includeBaseDirectory>
<fileSets>
<fileSet>
<directory>${project.basedir}</directory>
<outputDirectory>src/</outputDirectory>
<directory>.</directory>
<useDefaultExcludes>true</useDefaultExcludes>
<excludes>
<exclude>.git/**</exclude>
<exclude>**/.gitignore</exclude>
<exclude>**/.svn</exclude>
<exclude>**/*.iws</exclude>
<exclude>**/*.ipr</exclude>
<exclude>**/*.iml</exclude>
<exclude>**/.classpath</exclude>
<exclude>**/.project</exclude>
<exclude>**/.settings</exclude>
<exclude>**/target/**</exclude>
<!-- until the code that does this is fixed -->
<exclude>**/*.log</exclude>
<exclude>**/build/**</exclude>
<exclude>**/target/**</exclude>
<exclude>**/file:/**</exclude>
<exclude>**/SecurityAuth.audit*</exclude>
</excludes>
</fileSet>
</fileSets>


@ -0,0 +1,63 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.classification.tools;
import com.sun.javadoc.DocErrorReporter;
import com.sun.javadoc.LanguageVersion;
import com.sun.javadoc.RootDoc;
import com.sun.tools.doclets.standard.Standard;
/**
* A <a href="http://java.sun.com/javase/6/docs/jdk/api/javadoc/doclet/">Doclet</a>
* that only includes class-level elements that are annotated with
* {@link org.apache.hadoop.classification.InterfaceAudience.Public}.
* Class-level elements with no annotation are excluded.
* In addition, all elements that are annotated with
* {@link org.apache.hadoop.classification.InterfaceAudience.Private} or
* {@link org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate}
* are also excluded.
* It delegates to the Standard Doclet, and takes the same options.
*/
public class IncludePublicAnnotationsStandardDoclet {
public static LanguageVersion languageVersion() {
return LanguageVersion.JAVA_1_5;
}
public static boolean start(RootDoc root) {
System.out.println(
IncludePublicAnnotationsStandardDoclet.class.getSimpleName());
RootDocProcessor.treatUnannotatedClassesAsPrivate = true;
return Standard.start(RootDocProcessor.process(root));
}
public static int optionLength(String option) {
Integer length = StabilityOptions.optionLength(option);
if (length != null) {
return length;
}
return Standard.optionLength(option);
}
public static boolean validOptions(String[][] options,
DocErrorReporter reporter) {
StabilityOptions.validOptions(options, reporter);
String[][] filteredOptions = StabilityOptions.filterOptions(options);
return Standard.validOptions(filteredOptions, reporter);
}
}
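
Because the doclet delegates to the Standard doclet and accepts its options, it can be handed to the javadoc tool directly; a hedged sketch, where the doclet classpath and source locations are assumptions rather than values taken from this commit:
$ javadoc -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsStandardDoclet \
    -docletpath hadoop-annotations.jar \
    -sourcepath src/main/java -subpackages org.apache.hadoop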


@ -50,6 +50,7 @@
class RootDocProcessor {
static String stability = StabilityOptions.UNSTABLE_OPTION;
static boolean treatUnannotatedClassesAsPrivate = false;
public static RootDoc process(RootDoc root) {
return (RootDoc) process(root, RootDoc.class);
@ -201,6 +202,17 @@ private static boolean exclude(Doc doc) {
}
}
}
for (AnnotationDesc annotation : annotations) {
String qualifiedTypeName =
annotation.annotationType().qualifiedTypeName();
if (qualifiedTypeName.equals(
InterfaceAudience.Public.class.getCanonicalName())) {
return false;
}
}
}
if (treatUnannotatedClassesAsPrivate) {
return doc.isClass() || doc.isInterface() || doc.isAnnotationType();
}
return false;
}


@ -151,15 +151,13 @@ public void init(Properties config) throws ServletException {
throw new ServletException("Keytab does not exist: " + keytab);
}
String nameRules = config.getProperty(NAME_RULES, "DEFAULT");
KerberosName.setRules(nameRules);
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(principal));
Subject subject = new Subject(false, principals, new HashSet<Object>(), new HashSet<Object>());
KerberosConfiguration kerberosConfiguration = new KerberosConfiguration(keytab, principal);
LOG.info("Login using keytab "+keytab+", for principal "+principal);
loginContext = new LoginContext("", subject, null, kerberosConfiguration);
loginContext.login();


@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#banner {
height: 93px;
background: none;
}
#bannerLeft img {
margin-left: 30px;
margin-top: 10px;
}
#bannerRight img {
margin: 17px;
}


@ -13,16 +13,10 @@
-->
<project name="Hadoop Auth">
<version position="right"/>
<bannerLeft>
<name>&nbsp;</name>
</bannerLeft>
<skin>
<groupId>org.apache.maven.skins</groupId>
<artifactId>maven-stylus-skin</artifactId>
<version>1.1</version>
<version>1.2</version>
</skin>
<body>


@ -16,9 +16,6 @@ Trunk (unreleased changes)
HADOOP-7635. RetryInvocationHandler should release underlying resources on
close (atm)
HADOOP-7668. Add a NetUtils method that can tell if an InetAddress
belongs to local host. (suresh)
HADOOP-7687 Make getProtocolSignature public (sanjay)
HADOOP-7693. Enhance AvroRpcEngine to support the new #addProtocol
@ -30,6 +27,34 @@ Trunk (unreleased changes)
HADOOP-7717. Move handling of concurrent client fail-overs to
RetryInvocationHandler (atm)
HADOOP-6490. Use StringUtils over String#replace in Path#normalizePath.
(Uma Maheswara Rao G via harsh)
HADOOP-7736. Remove duplicate Path#normalizePath call. (harsh)
HADOOP-7664. Remove warnings when overriding final parameter configuration
if the override value is the same as the final parameter value.
(Ravi Prakash via suresh)
HADOOP-7737. normalize hadoop-mapreduce & hadoop-dist dist/tar build with
common/hdfs. (tucu)
HADOOP-7743. Add Maven profile to create a full source tarball. (tucu)
HADOOP-7729. Send back valid HTTP response if user hits IPC port with
HTTP GET. (todd)
HADOOP-7758. Make GlobFilter class public. (tucu)
HADOOP-7728. Enable task memory management to be configurable in hadoop
config setup script. (ramya)
HADOOP-7424. Log an error if the topology script doesn't handle multiple args.
(Uma Maheswara Rao G via eli)
HADOOP-7792. Add verifyToken method to AbstractDelegationTokenSecretManager.
(jitendra)
BUGS
HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
@ -37,7 +62,8 @@ Trunk (unreleased changes)
HADOOP-7610. Fix for hadoop debian package (Eric Yang via gkesavan)
HADOOP-7641. Add Apache License to template config files (Eric Yang via atm)
HADOOP-7641. Add Apache License to template config files.
(Eric Yang via atm)
HADOOP-7621. alfredo config should be in a file not readable by users
(Alejandro Abdelnur via atm)
@ -45,16 +71,31 @@ Trunk (unreleased changes)
HADOOP-7669 Fix newly introduced release audit warning.
(Uma Maheswara Rao G via stevel)
HADOOP-6220. HttpServer wraps InterruptedExceptions by IOExceptions if interrupted
in startup (stevel)
HADOOP-6220. HttpServer wraps InterruptedExceptions by IOExceptions
if interrupted in startup (stevel)
HADOOP-7703. Improved excpetion handling of shutting down web server.
HADOOP-7703. Improved exception handling of shutting down web server.
(Devaraj K via Eric Yang)
HADOOP-7704. Reduce number of object created by JMXJsonServlet.
(Devaraj K via Eric Yang)
Release 0.23.0 - Unreleased
HADOOP-7695. RPC.stopProxy can throw unintended exception while logging
error (atm)
HADOOP-7769. TestJMXJsonServlet is failing. (tomwhite)
HADOOP-7770. ViewFS getFileChecksum throws FileNotFoundException for files in
/tmp and /user. (Ravi Prakash via jitendra)
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
HADOOP-7773. Add support for protocol buffer based RPC engine.
(suresh)
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
@ -122,6 +163,9 @@ Release 0.23.0 - Unreleased
IMPROVEMENTS
HADOOP-7655. Provide a small validation script that smoke tests the installed
cluster. (Arpit Gupta via mattf)
HADOOP-7042. Updates to test-patch.sh to include failed test names and
improve other messaging. (nigel)
@ -435,6 +479,47 @@ Release 0.23.0 - Unreleased
HADOOP-7720. Added parameter for HBase user to setup config script.
(Arpit Gupta via Eric Yang)
HADOOP-7624. Set things up for a top level hadoop-tools module. (tucu)
HADOOP-7627. Improve MetricsAsserts to give more understandable output
on failure. (todd)
HADOOP-7642. create hadoop-dist module where TAR stitching would happen.
(Thomas White via tucu)
HADOOP-7709. Running a set of methods in a Single Test Class.
(Jonathan Eagles via mahadev)
HADOOP-7705. Add a log4j back end that can push out JSON data,
one per line. (stevel)
HADOOP-7749. Add a NetUtils createSocketAddr call which provides more
help in exception messages. (todd)
HADOOP-7762. Common side of MR-2736. (eli)
HADOOP-7668. Add a NetUtils method that can tell if an InetAddress
belongs to local host. (suresh)
HADOOP-7509. Improve exception message thrown when Authentication is
required. (Ravi Prakash via suresh)
HADOOP-7745. Fix wrong variable name in exception message introduced
in HADOOP-7509. (Ravi Prakash via suresh)
MAPREDUCE-2764. Fix renewal of dfs delegation tokens. (Owen via jitendra)
HADOOP-7360. Preserve relative paths that do not contain globs in FsShell.
(Daryn Sharp and Kihwal Lee via szetszwo)
HADOOP-7771. FsShell -copyToLocal, -get, etc. commands throw NPE if the
destination directory does not exist. (John George and Daryn Sharp
via szetszwo)
HADOOP-7782. Aggregate project javadocs. (tomwhite)
HADOOP-7789. Improvements to site navigation. (acmurthy)
OPTIMIZATIONS
HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
@ -443,8 +528,20 @@ Release 0.23.0 - Unreleased
HADOOP-7445. Implement bulk checksum verification using efficient native
code. (todd)
HADOOP-7753. Support fadvise and sync_file_range in NativeIO. Add
ReadaheadPool infrastructure for use in HDFS and MR. (todd)
HADOOP-7446. Implement CRC32C native code using SSE4.2 instructions.
(Kihwal Lee and todd via todd)
HADOOP-7763. Add top-level navigation to APT docs. (tomwhite)
HADOOP-7785. Add equals, hashcode, toString to DataChecksum (todd)
BUG FIXES
HADOOP-7740. Fixed security audit logger configuration. (Arpit Gupta via Eric Yang)
HADOOP-7630. hadoop-metrics2.properties should have a property *.period
set to a default value for metrics. (Eric Yang via mattf)
@ -681,6 +778,26 @@ Release 0.23.0 - Unreleased
HADOOP-7708. Fixed hadoop-setup-conf.sh to handle config files
consistently. (Eric Yang)
HADOOP-7724. Fixed hadoop-setup-conf.sh to put proxy user in
core-site.xml. (Arpit Gupta via Eric Yang)
HADOOP-7755. Detect MapReduce PreCommit Trunk builds silently failing
when running test-patch.sh. (Jonathan Eagles via tomwhite)
HADOOP-7744. Ensure failed tests exit with proper error code. (Jonathan
Eagles via acmurthy)
HADOOP-7764. Allow HttpServer to set both ACL list and path spec filters.
(Jonathan Eagles via acmurthy)
HADOOP-7766. The auth to local mappings are not being respected, with webhdfs
and security enabled. (jitendra)
HADOOP-7721. Add log before login in KerberosAuthenticationHandler.
(jitendra)
HADOOP-7778. FindBugs warning in Token.getKind(). (tomwhite)
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
@ -933,6 +1050,10 @@ Release 0.22.0 - Unreleased
HADOOP-7325. The hadoop command should not accept class names starting with
a hyphen. (Brock Noland via todd)
HADOOP-7772. javadoc the topology classes (stevel)
HADOOP-7786. Remove HDFS-specific config keys defined in FsConfig. (eli)
OPTIMIZATIONS
HADOOP-6884. Add LOG.isDebugEnabled() guard for each LOG.debug(..).


@ -270,4 +270,8 @@
<!-- backward compatibility -->
<Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS"/>
</Match>
<Match>
<!-- protobuf generated code -->
<Class name="org.apache.hadoop.ipc.protobuf.HadoopRpcProtos"/>
</Match>
</FindBugsFilter>


@ -338,6 +338,7 @@
TODO: from a previous run is present
-->
<delete dir="${test.build.data}"/>
<mkdir dir="${test.build.data}"/>
<mkdir dir="${hadoop.log.dir}"/>
<copy toDir="${project.build.directory}/test-classes">
@ -346,6 +347,18 @@
</target>
</configuration>
</execution>
<execution>
<phase>pre-site</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<tasks>
<copy file="src/main/resources/core-default.xml" todir="src/site/resources"/>
<copy file="src/main/xsl/configuration.xsl" todir="src/site/resources"/>
</tasks>
</configuration>
</execution>
</executions>
</plugin>
<plugin>


@ -63,22 +63,6 @@ case $COMMAND in
fi
;;
#mapred commands
mradmin|jobtracker|tasktracker|pipes|job|queue)
echo "DEPRECATED: Use of this script to execute mapred command is deprecated."
echo "Instead use the mapred command for it."
echo ""
#try to locate mapred and if present, delegate to it.
if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
exec "${HADOOP_MAPRED_HOME}"/bin/mapred $*
elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
exec "${HADOOP_PREFIX}"/bin/mapred $*
else
echo "MAPRED not found."
exit
fi
;;
classpath)
if $cygwin; then
CLASSPATH=`cygpath -p -w "$CLASSPATH"`
@ -119,6 +103,9 @@ case $COMMAND in
fi
shift
#make sure security appender is turned off
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
if $cygwin; then
CLASSPATH=`cygpath -p -w "$CLASSPATH"`
fi


@ -217,7 +217,6 @@ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_PREFIX"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,console}"
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
fi
@ -248,24 +247,8 @@ if $cygwin; then
HADOOP_HDFS_HOME=`cygpath -w "$HADOOP_HDFS_HOME"`
fi
# set mapred home if mapred is present
if [ "$HADOOP_MAPRED_HOME" = "" ]; then
if [ -d "${HADOOP_PREFIX}/share/hadoop/mapreduce" ]; then
HADOOP_MAPRED_HOME=$HADOOP_PREFIX
fi
fi
if [ -d "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/webapps" ]; then
CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/webapps
fi
if [ -d "$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib" ]; then
CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib'/*'
fi
# cygwin path translation
if $cygwin; then
HADOOP_MAPRED_HOME=`cygpath -w "$HADOOP_MAPRED_HOME"`
TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
fi


@ -29,8 +29,3 @@ bin=`cd "$bin"; pwd`
if [ -f "${HADOOP_HDFS_HOME}"/bin/start-dfs.sh ]; then
"${HADOOP_HDFS_HOME}"/bin/start-dfs.sh --config $HADOOP_CONF_DIR
fi
# start mapred daemons if mapred is present
if [ -f "${HADOOP_MAPRED_HOME}"/bin/start-mapred.sh ]; then
"${HADOOP_MAPRED_HOME}"/bin/start-mapred.sh --config $HADOOP_CONF_DIR
fi


@ -29,9 +29,3 @@ bin=`cd "$bin"; pwd`
if [ -f "${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh ]; then
"${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh --config $HADOOP_CONF_DIR
fi
# stop mapred daemons if mapred is present
if [ -f "${HADOOP_MAPRED_HOME}"/bin/stop-mapred.sh ]; then
"${HADOOP_MAPRED_HOME}"/bin/stop-mapred.sh --config $HADOOP_CONF_DIR
fi


@ -51,7 +51,6 @@
#*.sink.ganglia.tagsForPrefix.dfs=
#*.sink.ganglia.tagsForPrefix.rpc=
#*.sink.ganglia.tagsForPrefix.mapred=
#*.sink.ganglia.tagsForPrefix.fairscheduler=
#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649


@ -627,7 +627,7 @@
</tr>
<tr>
<td>conf/hdfs-site.xml</td>
<td>dfs.block.size</td>
<td>dfs.blocksize</td>
<td>134217728</td>
<td>HDFS blocksize of 128MB for large file-systems.</td>
</tr>


@ -1580,7 +1580,7 @@ private void loadProperty(Properties properties, Object name, String attr,
if (!finalParameters.contains(attr)) {
properties.setProperty(attr, value);
updatingResource.put(attr, name.toString());
} else {
} else if (!value.equals(properties.getProperty(attr))) {
LOG.warn(name+":an attempt to override final parameter: "+attr
+"; Ignoring.");
}


@ -93,5 +93,18 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
/** Default value for IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY */
public static final int IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT =
256 * 1024;
/**
* Service Authorization
*/
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY =
"security.refresh.policy.protocol.acl";
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_GET_USER_MAPPINGS =
"security.get.user.mappings.protocol.acl";
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_USER_MAPPINGS =
"security.refresh.user.mappings.protocol.acl";
}


@ -44,6 +44,8 @@
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.permission.FsPermission;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RpcClientException;
import org.apache.hadoop.ipc.RpcServerException;
@ -443,7 +445,9 @@ public static FileContext getFileContext(final URI defaultFsUri,
*/
public static FileContext getFileContext(final Configuration aConf)
throws UnsupportedFileSystemException {
return getFileContext(URI.create(FsConfig.getDefaultFsURI(aConf)), aConf);
return getFileContext(
URI.create(aConf.get(FS_DEFAULT_NAME_KEY, FS_DEFAULT_NAME_DEFAULT)),
aConf);
}
/**


@ -1,114 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_HOME_DIR_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_HOME_DIR_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
/**
* This class is a thin layer to manage the FS-related keys in
* a configuration object.
* It provides convenience static methods to set and get the keys from
* a configuration.
*
*/
final class FsConfig {
private FsConfig() {}
// Configuration keys and default values in the config file
// TBD note we should deprecate the keys constants elsewhere
// The Keys
static final String FS_REPLICATION_FACTOR_KEY = "dfs.replication";
static final String FS_BLOCK_SIZE_KEY = "dfs.block.size";
// The default values
// Default values of SERVER_DEFAULT(-1) implies use the ones from
// the target file system where files are created.
static final short FS_DEFAULT_REPLICATION_FACTOR = 3;
static final long FS_DEFAULT_BLOCK_SIZE = 32 * 1024 * 1024;
public static String getDefaultFsURI(final Configuration conf) {
return conf.get(FS_DEFAULT_NAME_KEY, FS_DEFAULT_NAME_DEFAULT);
}
public static String getHomeDir(final Configuration conf) {
return conf.get(FS_HOME_DIR_KEY, FS_HOME_DIR_DEFAULT);
}
public static short getDefaultReplicationFactor(final Configuration conf) {
return (short)
conf.getInt(FS_REPLICATION_FACTOR_KEY, FS_DEFAULT_REPLICATION_FACTOR);
}
public static long getDefaultBlockSize(final Configuration conf) {
return conf.getLong(FS_BLOCK_SIZE_KEY, FS_DEFAULT_BLOCK_SIZE);
}
public static int getDefaultIOBuffersize(final Configuration conf) {
return conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
}
public static Class<?> getImplClass(URI uri, Configuration conf) {
String scheme = uri.getScheme();
if (scheme == null) {
throw new IllegalArgumentException("No scheme");
}
return conf.getClass("fs." + uri.getScheme() + ".impl", null);
}
/**
* The Setters: see the note on the javadoc for the class above.
*/
public static void setDefaultFS(final Configuration conf, String uri) {
conf.set(FS_DEFAULT_NAME_KEY, uri);
}
public static void setHomeDir(final Configuration conf, String path) {
conf.set(FS_HOME_DIR_KEY, path);
}
public static void setDefaultReplicationFactor(final Configuration conf,
short rf) {
conf.setInt(FS_REPLICATION_FACTOR_KEY, rf);
}
public static void setDefaultBlockSize(final Configuration conf, long bs) {
conf.setLong(FS_BLOCK_SIZE_KEY, bs);
}
public static void setDefaultIOBuffersize(final Configuration conf, int bs) {
conf.setInt(IO_FILE_BUFFER_SIZE_KEY, bs);
}
}

View File

@@ -21,8 +21,15 @@
import java.util.regex.PatternSyntaxException;
import java.io.IOException;
// A class that could decide if a string matches the glob or not
class GlobFilter implements PathFilter {
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A filter for POSIX glob patterns with brace expansions.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class GlobFilter implements PathFilter {
private final static PathFilter DEFAULT_FILTER = new PathFilter() {
public boolean accept(Path file) {
return true;
@@ -32,11 +39,24 @@ public boolean accept(Path file) {
private PathFilter userFilter = DEFAULT_FILTER;
private GlobPattern pattern;
GlobFilter(String filePattern) throws IOException {
/**
* Creates a glob filter with the specified file pattern.
*
* @param filePattern the file pattern.
* @throws IOException thrown if the file pattern is incorrect.
*/
public GlobFilter(String filePattern) throws IOException {
init(filePattern, DEFAULT_FILTER);
}
GlobFilter(String filePattern, PathFilter filter) throws IOException {
/**
* Creates a glob filter with the specified file pattern and a user filter.
*
* @param filePattern the file pattern.
* @param filter user filter in addition to the glob pattern.
* @throws IOException thrown if the file pattern is incorrect.
*/
public GlobFilter(String filePattern, PathFilter filter) throws IOException {
init(filePattern, filter);
}
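Since GlobFilter is now public, it can be used directly wherever a PathFilter is expected. A small hedged usage sketch; the pattern and file names are illustrative only:

import java.io.IOException;
import org.apache.hadoop.fs.GlobFilter;
import org.apache.hadoop.fs.Path;

public class GlobFilterDemo {
  public static void main(String[] args) throws IOException {
    // glob with brace expansion: matches either extension
    GlobFilter filter = new GlobFilter("*.{txt,log}");
    System.out.println(filter.accept(new Path("events.log")));  // expected: true
    System.out.println(filter.accept(new Path("image.png")));   // expected: false
  }
}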

View File

@@ -18,10 +18,12 @@
package org.apache.hadoop.fs;
import java.net.*;
import java.io.*;
import org.apache.avro.reflect.Stringable;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.avro.reflect.Stringable;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -75,7 +77,7 @@ public Path(Path parent, Path child) {
}
URI resolved = parentUri.resolve(child.uri);
initialize(resolved.getScheme(), resolved.getAuthority(),
normalizePath(resolved.getPath()), resolved.getFragment());
resolved.getPath(), resolved.getFragment());
}
private void checkPathArg( String path ) {
@@ -157,8 +159,8 @@ private void initialize(String scheme, String authority, String path,
private String normalizePath(String path) {
// remove double slashes & backslashes
path = path.replace("//", "/");
path = path.replace("\\", "/");
path = StringUtils.replace(path, "//", "/");
path = StringUtils.replace(path, "\\", "/");
// trim trailing slash from non-root path (ignoring windows drive)
int minLength = hasWindowsDrive(path, true) ? 4 : 1;

View File

@@ -55,6 +55,7 @@ abstract public class Command extends Configured {
protected int exitCode = 0;
protected int numErrors = 0;
protected boolean recursive = false;
private int depth = 0;
protected ArrayList<Exception> exceptions = new ArrayList<Exception>();
private static final Log LOG = LogFactory.getLog(Command.class);
@@ -86,6 +87,10 @@ protected boolean isRecursive() {
return recursive;
}
protected int getDepth() {
return depth;
}
/**
* Execute the command on the input path
*
@@ -269,6 +274,7 @@ protected void processArgument(PathData item) throws IOException {
protected void processPathArgument(PathData item) throws IOException {
// null indicates that the call is not via recursion, ie. there is
// no parent directory that was expanded
depth = 0;
processPaths(null, item);
}
@@ -326,7 +332,12 @@ protected void processPath(PathData item) throws IOException {
* @throws IOException if anything goes wrong...
*/
protected void recursePath(PathData item) throws IOException {
try {
depth++;
processPaths(item, item.getDirectoryContents());
} finally {
depth--;
}
}
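The new depth counter is reset for each top-level argument and incremented around every recursion level, so subclasses can call getDepth() to tell an original argument (depth 0) apart from an expanded child. A minimal stand-alone sketch of the same try/finally pattern, with illustrative names that are not part of this commit:

import java.io.File;

public class DepthTrackingWalker {
  private int depth = 0;

  protected int getDepth() {
    return depth;
  }

  // mirrors Command.recursePath: the counter is restored even if a child throws
  protected void recurse(File dir) {
    try {
      depth++;
      File[] children = dir.listFiles();
      if (children == null) {
        return;  // not a directory, or the listing failed
      }
      for (File child : children) {
        if (child.isDirectory()) {
          recurse(child);
        }
      }
    } finally {
      depth--;
    }
  }
}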
/**

View File

@@ -20,13 +20,18 @@
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.LinkedList;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.PathExceptions.PathExistsException;
import org.apache.hadoop.fs.shell.PathExceptions.PathIOException;
import org.apache.hadoop.fs.shell.PathExceptions.PathIsDirectoryException;
import org.apache.hadoop.fs.shell.PathExceptions.PathIsNotDirectoryException;
import org.apache.hadoop.fs.shell.PathExceptions.PathNotFoundException;
import org.apache.hadoop.fs.shell.PathExceptions.PathOperationException;
import org.apache.hadoop.io.IOUtils;
/**
* Provides: argument processing to ensure the destination is valid
@@ -106,43 +111,31 @@ protected void processArguments(LinkedList<PathData> args)
}
@Override
protected void processPaths(PathData parent, PathData ... items)
protected void processPathArgument(PathData src)
throws IOException {
PathData savedDst = dst;
try {
// modify dst as we descend to append the basename of the
// current directory being processed
if (parent != null) dst = dst.getPathDataForChild(parent);
super.processPaths(parent, items);
} finally {
dst = savedDst;
if (src.stat.isDirectory() && src.fs.equals(dst.fs)) {
PathData target = getTargetPath(src);
String srcPath = src.fs.makeQualified(src.path).toString();
String dstPath = dst.fs.makeQualified(target.path).toString();
if (dstPath.equals(srcPath)) {
PathIOException e = new PathIOException(src.toString(),
"are identical");
e.setTargetPath(dstPath.toString());
throw e;
}
if (dstPath.startsWith(srcPath+Path.SEPARATOR)) {
PathIOException e = new PathIOException(src.toString(),
"is a subdirectory of itself");
e.setTargetPath(target.toString());
throw e;
}
}
super.processPathArgument(src);
}
@Override
protected void processPath(PathData src) throws IOException {
PathData target;
// if the destination is a directory, make target a child path,
// else use the destination as-is
if (dst.exists && dst.stat.isDirectory()) {
target = dst.getPathDataForChild(src);
} else {
target = dst;
}
if (target.exists && !overwrite) {
throw new PathExistsException(target.toString());
}
try {
// invoke processPath with both a source and resolved target
processPath(src, target);
} catch (PathIOException e) {
// add the target unless it already has one
if (e.getTargetPath() == null) {
e.setTargetPath(target.toString());
}
throw e;
}
processPath(src, getTargetPath(src));
}
/**
@@ -151,6 +144,103 @@ protected void processPath(PathData src) throws IOException {
* @param target for the operation
* @throws IOException if anything goes wrong
*/
protected abstract void processPath(PathData src, PathData target)
throws IOException;
protected void processPath(PathData src, PathData dst) throws IOException {
if (src.stat.isSymlink()) {
// TODO: remove when FileContext is supported, this needs to either
// copy the symlink or deref the symlink
throw new PathOperationException(src.toString());
} else if (src.stat.isFile()) {
copyFileToTarget(src, dst);
} else if (src.stat.isDirectory() && !isRecursive()) {
throw new PathIsDirectoryException(src.toString());
}
}
@Override
protected void recursePath(PathData src) throws IOException {
PathData savedDst = dst;
try {
// modify dst as we descend to append the basename of the
// current directory being processed
dst = getTargetPath(src);
if (dst.exists) {
if (!dst.stat.isDirectory()) {
throw new PathIsNotDirectoryException(dst.toString());
}
} else {
if (!dst.fs.mkdirs(dst.path)) {
// too bad we have no clue what failed
PathIOException e = new PathIOException(dst.toString());
e.setOperation("mkdir");
throw e;
}
dst.refreshStatus(); // need to update stat to know it exists now
}
super.recursePath(src);
} finally {
dst = savedDst;
}
}
protected PathData getTargetPath(PathData src) throws IOException {
PathData target;
// on the first loop, the dst may be a directory or a file, so only create
// a child path if dst is a dir; after recursion, it's always a dir
if ((getDepth() > 0) || (dst.exists && dst.stat.isDirectory())) {
target = dst.getPathDataForChild(src);
} else {
target = dst;
}
return target;
}
/**
* Copies the source file to the target.
* @param src item to copy
* @param target where to copy the item
* @throws IOException if copy fails
*/
protected void copyFileToTarget(PathData src, PathData target) throws IOException {
copyStreamToTarget(src.fs.open(src.path), target);
}
/**
* Copies the stream contents to a temporary file. If the copy is
* successful, the temporary file will be renamed to the real path,
* else the temporary file will be deleted.
* @param in the input stream for the copy
* @param target where to store the contents of the stream
* @throws IOException if copy fails
*/
protected void copyStreamToTarget(InputStream in, PathData target)
throws IOException {
if (target.exists && (target.stat.isDirectory() || !overwrite)) {
throw new PathExistsException(target.toString());
}
PathData tempFile = null;
try {
tempFile = target.createTempFile(target+"._COPYING_");
FSDataOutputStream out = target.fs.create(tempFile.path, true);
IOUtils.copyBytes(in, out, getConf(), true);
// the rename method with an option to delete the target is deprecated
if (target.exists && !target.fs.delete(target.path, false)) {
// too bad we don't know why it failed
PathIOException e = new PathIOException(target.toString());
e.setOperation("delete");
throw e;
}
if (!tempFile.fs.rename(tempFile.path, target.path)) {
// too bad we don't know why it failed
PathIOException e = new PathIOException(tempFile.toString());
e.setOperation("rename");
e.setTargetPath(target.toString());
throw e;
}
tempFile = null;
} finally {
if (tempFile != null) {
tempFile.fs.delete(tempFile.path, false);
}
}
}
}
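copyStreamToTarget above writes the data to a ._COPYING_ temporary next to the destination and only renames it into place once the copy has succeeded, so an interrupted copy never leaves a partial destination file behind. A hedged stand-alone sketch of the same write-then-rename pattern against the plain FileSystem API (class and method names are illustrative):

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class SafeStreamCopy {
  public static void copy(FileSystem fs, InputStream in, Path target,
      Configuration conf) throws IOException {
    Path tmp = new Path(target.getParent(), target.getName() + "._COPYING_");
    FSDataOutputStream out = fs.create(tmp, true);
    try {
      IOUtils.copyBytes(in, out, conf, true);  // closes both streams
      if (!fs.rename(tmp, target)) {
        throw new IOException("rename " + tmp + " -> " + target + " failed");
      }
      tmp = null;  // success, nothing left to clean up
    } finally {
      if (tmp != null) {
        fs.delete(tmp, false);  // best-effort cleanup of the temporary
      }
    }
  }
}

Like the command code above, this relies on the caller having removed any existing destination first, since rename does not overwrite an existing path.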

View File

@@ -26,15 +26,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.PathExceptions.PathExistsException;
import org.apache.hadoop.fs.shell.PathExceptions.PathIOException;
import org.apache.hadoop.fs.shell.PathExceptions.PathOperationException;
import org.apache.hadoop.io.IOUtils;
/** Various commands for copy files */
@InterfaceAudience.Private
@@ -97,18 +89,10 @@ protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f");
cf.parse(args);
setOverwrite(cf.getOpt("f"));
// should have a -r option
setRecursive(true);
getRemoteDestination(args);
}
@Override
protected void processPath(PathData src, PathData target)
throws IOException {
if (!FileUtil.copy(src.fs, src.path, target.fs, target.path, false, overwrite, getConf())) {
// we have no idea what the error is... FileUtils masks it and in
// some cases won't even report an error
throw new PathIOException(src.toString());
}
}
}
/**
@@ -128,15 +112,12 @@ public static class Get extends CommandWithDestination {
* It must be at least three characters long, required by
* {@link java.io.File#createTempFile(String, String, File)}.
*/
private static final String COPYTOLOCAL_PREFIX = "_copyToLocal_";
private boolean copyCrc;
private boolean verifyChecksum;
private LocalFileSystem localFs;
@Override
protected void processOptions(LinkedList<String> args)
throws IOException {
localFs = FileSystem.getLocal(getConf());
CommandFormat cf = new CommandFormat(
1, Integer.MAX_VALUE, "crc", "ignoreCrc");
cf.parse(args);
@@ -148,7 +129,7 @@ protected void processOptions(LinkedList<String> args)
}
@Override
protected void processPath(PathData src, PathData target)
protected void copyFileToTarget(PathData src, PathData target)
throws IOException {
src.fs.setVerifyChecksum(verifyChecksum);
@@ -157,51 +138,11 @@ protected void processPath(PathData src, PathData target)
copyCrc = false;
}
File targetFile = localFs.pathToFile(target.path);
if (src.stat.isFile()) {
// copy the file and maybe its crc
copyFileToLocal(src, target.path);
super.copyFileToTarget(src, target);
if (copyCrc) {
copyCrcToLocal(src, target.path);
// should we delete real file if crc copy fails?
super.copyFileToTarget(src.getChecksumFile(), target.getChecksumFile());
}
} else if (src.stat.isDirectory()) {
// create the remote directory structure locally
if (!targetFile.mkdirs()) {
throw new PathIOException(target.toString());
}
} else {
throw new PathOperationException(src.toString());
}
}
private void copyFileToLocal(PathData src, Path target)
throws IOException {
File targetFile = localFs.pathToFile(target);
File tmpFile = FileUtil.createLocalTempFile(
targetFile, COPYTOLOCAL_PREFIX, true);
// too bad we can't tell exactly why it failed...
if (!FileUtil.copy(src.fs, src.path, tmpFile, false, getConf())) {
PathIOException e = new PathIOException(src.toString());
e.setOperation("copy");
e.setTargetPath(tmpFile.toString());
throw e;
}
// too bad we can't tell exactly why it failed...
if (!tmpFile.renameTo(targetFile)) {
PathIOException e = new PathIOException(tmpFile.toString());
e.setOperation("rename");
e.setTargetPath(targetFile.toString());
throw e;
}
}
private void copyCrcToLocal(PathData src, Path target)
throws IOException {
ChecksumFileSystem srcFs = (ChecksumFileSystem)src.fs;
Path srcPath = srcFs.getChecksumFile(src.path);
src = new PathData(srcFs.getRawFileSystem(), srcPath);
copyFileToLocal(src, localFs.getChecksumFile(target));
}
}
@@ -221,6 +162,8 @@ protected void processOptions(LinkedList<String> args) throws IOException {
cf.parse(args);
setOverwrite(cf.getOpt("f"));
getRemoteDestination(args);
// should have a -r option
setRecursive(true);
}
// commands operating on local paths have no need for glob expansion
@@ -236,30 +179,11 @@ protected void processArguments(LinkedList<PathData> args)
throws IOException {
// NOTE: this logic should be better, mimics previous implementation
if (args.size() == 1 && args.get(0).toString().equals("-")) {
if (dst.exists && !overwrite) {
throw new PathExistsException(dst.toString());
}
copyFromStdin();
copyStreamToTarget(System.in, getTargetPath(args.get(0)));
return;
}
super.processArguments(args);
}
@Override
protected void processPath(PathData src, PathData target)
throws IOException {
target.fs.copyFromLocalFile(false, overwrite, src.path, target.path);
}
/** Copies from stdin to the destination file. */
protected void copyFromStdin() throws IOException {
FSDataOutputStream out = dst.fs.create(dst.path);
try {
IOUtils.copyBytes(System.in, out, getConf(), false);
} finally {
out.close();
}
}
}
public static class CopyFromLocal extends Put {

View File

@@ -81,6 +81,6 @@ protected void processOptions(LinkedList<String> args) {
@Override
protected void processPath(PathData src) throws IOException {
ContentSummary summary = src.fs.getContentSummary(src.path);
out.println(summary.toString(showQuotas) + src.path);
out.println(summary.toString(showQuotas) + src);
}
}

View File

@@ -113,7 +113,7 @@ protected void processPath(PathData item) throws IOException {
stat.getGroup(),
formatSize(stat.getLen()),
dateFormat.format(new Date(stat.getModificationTime())),
item.path.toUri().getPath()
item
);
out.println(line);
}

View File

@@ -21,27 +21,34 @@
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.PathExceptions.PathIOException;
import org.apache.hadoop.fs.shell.PathExceptions.PathIsDirectoryException;
import org.apache.hadoop.fs.shell.PathExceptions.PathIsNotDirectoryException;
import org.apache.hadoop.fs.shell.PathExceptions.PathNotFoundException;
/**
* Encapsulates a Path (path), its FileStatus (stat), and its FileSystem (fs).
* The stat field will be null if the path does not exist.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@InterfaceStability.Unstable
public class PathData {
protected String string = null;
protected final URI uri;
public final FileSystem fs;
public final Path path;
public FileStatus stat;
public final FileSystem fs;
public boolean exists;
/**
@@ -53,10 +60,7 @@ public class PathData {
* @throws IOException if anything goes wrong...
*/
public PathData(String pathString, Configuration conf) throws IOException {
this.string = pathString;
this.path = new Path(pathString);
this.fs = path.getFileSystem(conf);
setStat(getStat(fs, path));
this(FileSystem.get(URI.create(pathString), conf), pathString);
}
/**
@@ -68,85 +72,127 @@ public PathData(String pathString, Configuration conf) throws IOException {
* @throws IOException if anything goes wrong...
*/
public PathData(File localPath, Configuration conf) throws IOException {
this.string = localPath.toString();
this.path = new Path(this.string);
this.fs = FileSystem.getLocal(conf);
setStat(getStat(fs, path));
this(FileSystem.getLocal(conf), localPath.toString());
}
/**
* Creates an object to wrap the given parameters as fields.
* @param fs the FileSystem
* @param path a Path
* @param stat the FileStatus (may be null if the path doesn't exist)
*/
public PathData(FileSystem fs, Path path, FileStatus stat) {
this.string = path.toString();
this.path = path;
this.fs = fs;
setStat(stat);
}
/**
* Convenience ctor that looks up the file status for a path. If the path
* Looks up the file status for a path. If the path
* doesn't exist, then the status will be null
* @param fs the FileSystem for the path
* @param path the pathname to lookup
* @param pathString a string for a path
* @throws IOException if anything goes wrong
*/
public PathData(FileSystem fs, Path path) throws IOException {
this(fs, path, getStat(fs, path));
private PathData(FileSystem fs, String pathString) throws IOException {
this(fs, pathString, lookupStat(fs, pathString, true));
}
/**
* Creates an object to wrap the given parameters as fields. The string
* used to create the path will be recorded since the Path object does not
* return exactly the same string used to initialize it. If the FileStatus
* is not null, then its Path will be used to initialized the path, else
* the string of the path will be used.
* return exactly the same string used to initialize it.
* @param fs the FileSystem
* @param pathString a String of the path
* @param stat the FileStatus (may be null if the path doesn't exist)
*/
public PathData(FileSystem fs, String pathString, FileStatus stat) {
this.string = pathString;
this.path = (stat != null) ? stat.getPath() : new Path(pathString);
private PathData(FileSystem fs, String pathString, FileStatus stat)
throws IOException {
this.fs = fs;
this.uri = stringToUri(pathString);
this.path = fs.makeQualified(new Path(uri));
setStat(stat);
}
// need a static method for the ctor above
private static FileStatus getStat(FileSystem fs, Path path)
/**
* Get the FileStatus info
* @param ignoreFNF if true, stat will be null if the path doesn't exist
* @return FileStatus for the given path
* @throws IOException if anything goes wrong
*/
private static
FileStatus lookupStat(FileSystem fs, String pathString, boolean ignoreFNF)
throws IOException {
FileStatus status = null;
try {
status = fs.getFileStatus(path);
} catch (FileNotFoundException e) {} // ignore FNF
status = fs.getFileStatus(new Path(pathString));
} catch (FileNotFoundException e) {
if (!ignoreFNF) throw new PathNotFoundException(pathString);
}
// TODO: should consider wrapping other exceptions into Path*Exceptions
return status;
}
private void setStat(FileStatus theStat) {
stat = theStat;
private void setStat(FileStatus stat) {
this.stat = stat;
exists = (stat != null);
}
/**
* Convenience ctor that extracts the path from the given file status
* @param fs the FileSystem for the FileStatus
* @param stat the FileStatus
*/
public PathData(FileSystem fs, FileStatus stat) {
this(fs, stat.getPath(), stat);
}
/**
* Updates the path's file status
* @return the updated FileStatus
* @throws IOException if anything goes wrong...
*/
public FileStatus refreshStatus() throws IOException {
setStat(fs.getFileStatus(path));
return stat;
FileStatus status = null;
try {
status = lookupStat(fs, toString(), false);
} finally {
// always set the status. the caller must get the correct result
// if it catches the exception and later interrogates the status
setStat(status);
}
return status;
}
protected enum FileTypeRequirement {
SHOULD_NOT_BE_DIRECTORY, SHOULD_BE_DIRECTORY
};
/**
* Ensure that the file exists and that it is, or is not, a directory
* as required by typeRequirement.
* @param typeRequirement Set it to the desired requirement.
* @throws PathIOException if the file doesn't exist or the type does not
* match what was specified in typeRequirement.
*/
private void checkIfExists(FileTypeRequirement typeRequirement)
throws PathIOException {
if (!exists) {
throw new PathNotFoundException(toString());
}
if ((typeRequirement == FileTypeRequirement.SHOULD_BE_DIRECTORY)
&& !stat.isDirectory()) {
throw new PathIsNotDirectoryException(toString());
} else if ((typeRequirement == FileTypeRequirement.SHOULD_NOT_BE_DIRECTORY)
&& stat.isDirectory()) {
throw new PathIsDirectoryException(toString());
}
}
/**
* Return the corresponding crc data for a file. Avoids exposing the fs
* contortions to the caller.
* @return PathData of the crc file
* @throws IOException if anything goes wrong
*/
public PathData getChecksumFile() throws IOException {
checkIfExists(FileTypeRequirement.SHOULD_NOT_BE_DIRECTORY);
ChecksumFileSystem srcFs = (ChecksumFileSystem)fs;
Path srcPath = srcFs.getChecksumFile(path);
return new PathData(srcFs.getRawFileSystem(), srcPath.toString());
}
/**
* Returns a temporary file for this PathData with the given extension.
* The file will be deleted on exit.
* @param extension for the temporary file
* @return PathData
* @throws IOException shouldn't happen
*/
public PathData createTempFile(String extension) throws IOException {
PathData tmpFile = new PathData(fs, uri+"._COPYING_");
fs.deleteOnExit(tmpFile.path);
return tmpFile;
}
/**
@@ -156,18 +202,13 @@ public FileStatus refreshStatus() throws IOException {
* @throws IOException if anything else goes wrong...
*/
public PathData[] getDirectoryContents() throws IOException {
if (!stat.isDirectory()) {
throw new PathIsNotDirectoryException(string);
}
checkIfExists(FileTypeRequirement.SHOULD_BE_DIRECTORY);
FileStatus[] stats = fs.listStatus(path);
PathData[] items = new PathData[stats.length];
for (int i=0; i < stats.length; i++) {
// preserve relative paths
String basename = stats[i].getPath().getName();
String parent = string;
if (!parent.endsWith(Path.SEPARATOR)) parent += Path.SEPARATOR;
items[i] = new PathData(fs, parent + basename, stats[i]);
String child = getStringForChildPath(stats[i].getPath());
items[i] = new PathData(fs, child, stats[i]);
}
return items;
}
@@ -179,11 +220,29 @@ public PathData[] getDirectoryContents() throws IOException {
* @throws IOException if this object does not exist or is not a directory
*/
public PathData getPathDataForChild(PathData child) throws IOException {
if (!stat.isDirectory()) {
throw new PathIsNotDirectoryException(string);
checkIfExists(FileTypeRequirement.SHOULD_BE_DIRECTORY);
return new PathData(fs, getStringForChildPath(child.path));
}
return new PathData(fs, new Path(path, child.path.getName()));
/**
* Given a child of this directory, use the directory's path and the child's
* basename to construct the string for the child. This preserves relative
* paths, since Path would otherwise fully qualify them.
* @param child a path contained within this directory
* @return String of the path relative to this directory
*/
private String getStringForChildPath(Path childPath) {
String basename = childPath.getName();
if (Path.CUR_DIR.equals(toString())) {
return basename;
}
// check getPath() so scheme slashes aren't considered part of the path
String separator = uri.getPath().endsWith(Path.SEPARATOR)
? "" : Path.SEPARATOR;
return uri + separator + basename;
}
protected enum PathType { HAS_SCHEME, SCHEMELESS_ABSOLUTE, RELATIVE };
/**
* Expand the given path as a glob pattern. Non-existent paths do not
@@ -207,35 +266,184 @@ public static PathData[] expandAsGlob(String pattern, Configuration conf)
if (stats == null) {
// not a glob & file not found, so add the path with a null stat
items = new PathData[]{ new PathData(fs, pattern, null) };
} else if (
// this is very ugly, but needed to avoid breaking hdfs tests...
// if a path has no authority, then the FileStatus from globStatus
// will add the "-fs" authority into the path, so we need to sub
// it back out to satisfy the tests
stats.length == 1
&&
stats[0].getPath().equals(fs.makeQualified(globPath)))
{
// if the fq path is identical to the pattern passed, use the pattern
// to initialize the string value
items = new PathData[]{ new PathData(fs, pattern, stats[0]) };
} else {
// figure out what type of glob path was given, will convert globbed
// paths to match the type to preserve relativity
PathType globType;
URI globUri = globPath.toUri();
if (globUri.getScheme() != null) {
globType = PathType.HAS_SCHEME;
} else if (new File(globUri.getPath()).isAbsolute()) {
globType = PathType.SCHEMELESS_ABSOLUTE;
} else {
globType = PathType.RELATIVE;
}
// convert stats to PathData
items = new PathData[stats.length];
int i=0;
for (FileStatus stat : stats) {
items[i++] = new PathData(fs, stat);
URI matchUri = stat.getPath().toUri();
String globMatch = null;
switch (globType) {
case HAS_SCHEME: // use as-is, but remove authority if necessary
if (globUri.getAuthority() == null) {
matchUri = removeAuthority(matchUri);
}
globMatch = matchUri.toString();
break;
case SCHEMELESS_ABSOLUTE: // take just the uri's path
globMatch = matchUri.getPath();
break;
case RELATIVE: // make it relative to the current working dir
URI cwdUri = fs.getWorkingDirectory().toUri();
globMatch = relativize(cwdUri, matchUri, stat.isDirectory());
break;
}
items[i++] = new PathData(fs, globMatch, stat);
}
}
return items;
}
private static URI removeAuthority(URI uri) {
try {
uri = new URI(
uri.getScheme(), "",
uri.getPath(), uri.getQuery(), uri.getFragment()
);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e.getLocalizedMessage());
}
return uri;
}
private static String relativize(URI cwdUri, URI srcUri, boolean isDir) {
String uriPath = srcUri.getPath();
String cwdPath = cwdUri.getPath();
if (cwdPath.equals(uriPath)) {
return Path.CUR_DIR;
}
// find common ancestor
int lastSep = findLongestDirPrefix(cwdPath, uriPath, isDir);
StringBuilder relPath = new StringBuilder();
// take the remaining path fragment after the ancestor
if (lastSep < uriPath.length()) {
relPath.append(uriPath.substring(lastSep+1));
}
// if cwd has a path fragment after the ancestor, convert them to ".."
if (lastSep < cwdPath.length()) {
while (lastSep != -1) {
if (relPath.length() != 0) relPath.insert(0, Path.SEPARATOR);
relPath.insert(0, "..");
lastSep = cwdPath.indexOf(Path.SEPARATOR, lastSep+1);
}
}
return relPath.toString();
}
private static int findLongestDirPrefix(String cwd, String path, boolean isDir) {
// add the path separator to dirs to simplify finding the longest match
if (!cwd.endsWith(Path.SEPARATOR)) {
cwd += Path.SEPARATOR;
}
if (isDir && !path.endsWith(Path.SEPARATOR)) {
path += Path.SEPARATOR;
}
// find longest directory prefix
int len = Math.min(cwd.length(), path.length());
int lastSep = -1;
for (int i=0; i < len; i++) {
if (cwd.charAt(i) != path.charAt(i)) break;
if (cwd.charAt(i) == Path.SEPARATOR_CHAR) lastSep = i;
}
return lastSep;
}
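A short worked example of the relativization above: with a working directory of /user/alice/work and a globbed file at /user/bob/data/file, findLongestDirPrefix stops after the common "/user" component, relativize keeps the remainder "bob/data/file", and the two leftover cwd components ("alice" and "work") each become "..", giving "../../bob/data/file". If the two paths are identical, the method short-circuits to "." (Path.CUR_DIR).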
/**
* Returns the printable version of the path that is either the path
* as given on the commandline, or the full path
* @return String of the path
*/
public String toString() {
return (string != null) ? string : path.toString();
String scheme = uri.getScheme();
// No interpretation of symbols. Just decode % escaped chars.
String decodedRemainder = uri.getSchemeSpecificPart();
if (scheme == null) {
return decodedRemainder;
} else {
StringBuilder buffer = new StringBuilder();
buffer.append(scheme);
buffer.append(":");
buffer.append(decodedRemainder);
return buffer.toString();
}
}
/**
* Get the path to a local file
* @return File representing the local path
* @throws IllegalArgumentException if this.fs is not the LocalFileSystem
*/
public File toFile() {
if (!(fs instanceof LocalFileSystem)) {
throw new IllegalArgumentException("Not a local path: " + path);
}
return ((LocalFileSystem)fs).pathToFile(path);
}
/** Construct a URI from a String with unescaped special characters
* that have non-standard semantics, e.g. /, ?, #. Custom parsing
* is needed to prevent misbehaviors.
* @param pathString The input path in string form
* @return URI
*/
private static URI stringToUri(String pathString) {
// We can't use 'new URI(String)' directly. Since it doesn't do quoting
// internally, the internal parser may fail or break the string at wrong
// places. Use of multi-argument ctors will quote those chars for us,
// but we need to do our own parsing and assembly.
// parse uri components
String scheme = null;
String authority = null;
int start = 0;
// parse uri scheme, if any
int colon = pathString.indexOf(':');
int slash = pathString.indexOf('/');
if (colon > 0 && (slash == colon +1)) {
// has a non zero-length scheme
scheme = pathString.substring(0, colon);
start = colon + 1;
}
// parse uri authority, if any
if (pathString.startsWith("//", start) &&
(pathString.length()-start > 2)) {
start += 2;
int nextSlash = pathString.indexOf('/', start);
int authEnd = nextSlash > 0 ? nextSlash : pathString.length();
authority = pathString.substring(start, authEnd);
start = authEnd;
}
// uri path is the rest of the string. ? or # are not interpreted,
// but any occurrence of them will be quoted by the URI ctor.
String path = pathString.substring(start, pathString.length());
// Construct the URI
try {
return new URI(scheme, authority, path, null, null);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
}
}
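stringToUri above splits the scheme and authority by hand precisely so that characters like '#' and '?' in the remainder are treated as path characters rather than URI delimiters. A small stand-alone demonstration of the difference; the host and path are placeholders:

import java.net.URI;
import java.net.URISyntaxException;

public class UriQuotingDemo {
  public static void main(String[] args) throws URISyntaxException {
    // single-argument constructor: '#' starts a fragment, so the path loses "#1"
    URI parsed = new URI("hdfs://nn1:8020/data/file#1");
    System.out.println(parsed.getPath());      // /data/file
    System.out.println(parsed.getFragment());  // 1

    // multi-argument constructor (what stringToUri uses): '#' stays in the path
    URI assembled = new URI("hdfs", "nn1:8020", "/data/file#1", null, null);
    System.out.println(assembled.getPath());   // /data/file#1
  }
}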

View File

@@ -318,7 +318,7 @@ public FileChecksum getFileChecksum(final Path f)
IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(f), true);
return res.targetFileSystem.getFileChecksum(f);
return res.targetFileSystem.getFileChecksum(res.remainingPath);
}
@Override

View File

@@ -316,7 +316,7 @@ public FileChecksum getFileChecksum(final Path f)
UnresolvedLinkException, IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(f), true);
return res.targetFileSystem.getFileChecksum(f);
return res.targetFileSystem.getFileChecksum(res.remainingPath);
}
@Override

View File

@@ -123,7 +123,7 @@ public HttpServer(String name, String bindAddress, int port, boolean findPort
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, Connector connector) throws IOException {
this(name, bindAddress, port, findPort, conf, null, connector);
this(name, bindAddress, port, findPort, conf, null, connector, null);
}
/**
@@ -142,11 +142,7 @@ public HttpServer(String name, String bindAddress, int port,
*/
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
this(name, bindAddress, port, findPort, conf, null, null);
for (String path : pathSpecs) {
LOG.info("adding path spec: " + path);
addFilterPathMapping(path, webAppContext);
}
this(name, bindAddress, port, findPort, conf, null, null, pathSpecs);
}
/**
@@ -160,19 +156,20 @@ public HttpServer(String name, String bindAddress, int port,
*/
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf) throws IOException {
this(name, bindAddress, port, findPort, conf, null, null);
this(name, bindAddress, port, findPort, conf, null, null, null);
}
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl)
throws IOException {
this(name, bindAddress, port, findPort, conf, adminsAcl, null);
this(name, bindAddress, port, findPort, conf, adminsAcl, null, null);
}
/**
* Create a status server on the given port.
* The jsp scripts are taken from src/webapps/<name>.
* @param name The name of the server
* @param bindAddress The address for this server
* @param port The port to use on the server
* @param findPort whether the server should start at the given port and
* increment by 1 until it finds a free port.
@@ -182,6 +179,26 @@ public HttpServer(String name, String bindAddress, int port,
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector) throws IOException {
this(name, bindAddress, port, findPort, conf, adminsAcl, connector, null);
}
/**
* Create a status server on the given port.
* The jsp scripts are taken from src/webapps/<name>.
* @param name The name of the server
* @param bindAddress The address for this server
* @param port The port to use on the server
* @param findPort whether the server should start at the given port and
* increment by 1 until it finds a free port.
* @param conf Configuration
* @param adminsAcl {@link AccessControlList} of the admins
* @param connector A jetty connection listener
* @param pathSpecs Path specifications that this httpserver will be serving.
* These will be added to any filters.
*/
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector, String[] pathSpecs) throws IOException {
webServer = new Server();
this.findPort = findPort;
this.adminsAcl = adminsAcl;
@@ -230,7 +247,15 @@ public HttpServer(String name, String bindAddress, int port,
c.initFilter(this, conf);
}
}
addDefaultServlets();
if (pathSpecs != null) {
for (String path : pathSpecs) {
LOG.info("adding path spec: " + path);
addFilterPathMapping(path, webAppContext);
}
}
}
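The new overload above lets callers hand in path specifications that the configured filters will also cover, instead of adding the mappings after construction as the old four-argument constructor did. A hedged construction sketch; the bind address, port and path specs are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;

public class FilteredHttpServer {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // extra paths that the server's filters should cover as well
    String[] pathSpecs = new String[] { "/jmx", "/metrics" };
    HttpServer server = new HttpServer("example", "0.0.0.0", 0,
        true, conf, null, null, pathSpecs);
    server.start();
    // ... serve requests, then shut down ...
    server.stop();
  }
}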
/**

View File

@@ -0,0 +1,237 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.lang.reflect.Field;
import java.nio.ByteOrder;
import java.security.AccessController;
import java.security.PrivilegedAction;
import sun.misc.Unsafe;
import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;
/**
* Utility code to do optimized byte-array comparison.
* This is borrowed and slightly modified from Guava's {@link UnsignedBytes}
* class to be able to compare arrays that start at non-zero offsets.
*/
abstract class FastByteComparisons {
/**
* Lexicographically compare two byte arrays.
*/
public static int compareTo(byte[] b1, int s1, int l1, byte[] b2, int s2,
int l2) {
return LexicographicalComparerHolder.BEST_COMPARER.compareTo(
b1, s1, l1, b2, s2, l2);
}
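compareTo orders the given sub-ranges lexicographically as unsigned bytes, with a shorter range that is a prefix of the other sorting first. A tiny illustration; since the class is package-private, this demo is assumed to live in org.apache.hadoop.io:

package org.apache.hadoop.io;

public class FastByteComparisonsDemo {
  public static void main(String[] args) {
    byte[] a = { 1, 2, 3, 4 };
    byte[] b = { 9, 2, 3, 9 };
    // {2,3,4} vs {2,3,9}: differs at the last byte, so the result is negative
    System.out.println(FastByteComparisons.compareTo(a, 1, 3, b, 1, 3) < 0);
    // {2,3} vs {2,3,9}: equal prefix, the shorter range sorts first
    System.out.println(FastByteComparisons.compareTo(a, 1, 2, b, 1, 3) < 0);
  }
}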
private interface Comparer<T> {
abstract public int compareTo(T buffer1, int offset1, int length1,
T buffer2, int offset2, int length2);
}
private static Comparer<byte[]> lexicographicalComparerJavaImpl() {
return LexicographicalComparerHolder.PureJavaComparer.INSTANCE;
}
/**
* Provides a lexicographical comparer implementation; either a Java
* implementation or a faster implementation based on {@link Unsafe}.
*
* <p>Uses reflection to gracefully fall back to the Java implementation if
* {@code Unsafe} isn't available.
*/
private static class LexicographicalComparerHolder {
static final String UNSAFE_COMPARER_NAME =
LexicographicalComparerHolder.class.getName() + "$UnsafeComparer";
static final Comparer<byte[]> BEST_COMPARER = getBestComparer();
/**
* Returns the Unsafe-using Comparer, or falls back to the pure-Java
* implementation if unable to do so.
*/
static Comparer<byte[]> getBestComparer() {
try {
Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);
// yes, UnsafeComparer does implement Comparer<byte[]>
@SuppressWarnings("unchecked")
Comparer<byte[]> comparer =
(Comparer<byte[]>) theClass.getEnumConstants()[0];
return comparer;
} catch (Throwable t) { // ensure we really catch *everything*
return lexicographicalComparerJavaImpl();
}
}
private enum PureJavaComparer implements Comparer<byte[]> {
INSTANCE;
@Override
public int compareTo(byte[] buffer1, int offset1, int length1,
byte[] buffer2, int offset2, int length2) {
// Short circuit equal case
if (buffer1 == buffer2 &&
offset1 == offset2 &&
length1 == length2) {
return 0;
}
// Bring WritableComparator code local
int end1 = offset1 + length1;
int end2 = offset2 + length2;
for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) {
int a = (buffer1[i] & 0xff);
int b = (buffer2[j] & 0xff);
if (a != b) {
return a - b;
}
}
return length1 - length2;
}
}
@SuppressWarnings("unused") // used via reflection
private enum UnsafeComparer implements Comparer<byte[]> {
INSTANCE;
static final Unsafe theUnsafe;
/** The offset to the first element in a byte array. */
static final int BYTE_ARRAY_BASE_OFFSET;
static {
theUnsafe = (Unsafe) AccessController.doPrivileged(
new PrivilegedAction<Object>() {
@Override
public Object run() {
try {
Field f = Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return f.get(null);
} catch (NoSuchFieldException e) {
// It doesn't matter what we throw;
// it's swallowed in getBestComparer().
throw new Error();
} catch (IllegalAccessException e) {
throw new Error();
}
}
});
BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class);
// sanity check - this should never fail
if (theUnsafe.arrayIndexScale(byte[].class) != 1) {
throw new AssertionError();
}
}
static final boolean littleEndian =
ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);
/**
* Returns true if x1 is less than x2, when both values are treated as
* unsigned.
*/
static boolean lessThanUnsigned(long x1, long x2) {
return (x1 + Long.MIN_VALUE) < (x2 + Long.MIN_VALUE);
}
/**
* Lexicographically compare two arrays.
*
* @param buffer1 left operand
* @param buffer2 right operand
* @param offset1 Where to start comparing in the left buffer
* @param offset2 Where to start comparing in the right buffer
* @param length1 How much to compare from the left buffer
* @param length2 How much to compare from the right buffer
* @return 0 if equal, < 0 if left is less than right, etc.
*/
@Override
public int compareTo(byte[] buffer1, int offset1, int length1,
byte[] buffer2, int offset2, int length2) {
// Short circuit equal case
if (buffer1 == buffer2 &&
offset1 == offset2 &&
length1 == length2) {
return 0;
}
int minLength = Math.min(length1, length2);
int minWords = minLength / Longs.BYTES;
int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;
/*
* Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a
* time is no slower than comparing 4 bytes at a time even on 32-bit.
* On the other hand, it is substantially faster on 64-bit.
*/
for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) {
long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i);
long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i);
long diff = lw ^ rw;
if (diff != 0) {
if (!littleEndian) {
return lessThanUnsigned(lw, rw) ? -1 : 1;
}
// Use binary search
int n = 0;
int y;
int x = (int) diff;
if (x == 0) {
x = (int) (diff >>> 32);
n = 32;
}
y = x << 16;
if (y == 0) {
n += 16;
} else {
x = y;
}
y = x << 8;
if (y == 0) {
n += 8;
}
return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL));
}
}
// The epilogue to cover the last (minLength % 8) elements.
for (int i = minWords * Longs.BYTES; i < minLength; i++) {
int result = UnsignedBytes.compare(
buffer1[offset1 + i],
buffer2[offset2 + i]);
if (result != 0) {
return result;
}
}
return length1 - length2;
}
}
}
}

View File

@@ -0,0 +1,242 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.FileDescriptor;
import java.io.IOException;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.nativeio.NativeIO;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Manages a pool of threads which can issue readahead requests on file descriptors.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ReadaheadPool {
static final Log LOG = LogFactory.getLog(ReadaheadPool.class);
private static final int POOL_SIZE = 4;
private static final int MAX_POOL_SIZE = 16;
private static final int CAPACITY = 1024;
private final ThreadPoolExecutor pool;
private static ReadaheadPool instance;
/**
* Return the singleton instance for the current process.
*/
public static ReadaheadPool getInstance() {
synchronized (ReadaheadPool.class) {
if (instance == null && NativeIO.isAvailable()) {
instance = new ReadaheadPool();
}
return instance;
}
}
private ReadaheadPool() {
pool = new ThreadPoolExecutor(POOL_SIZE, MAX_POOL_SIZE, 3L, TimeUnit.SECONDS,
new ArrayBlockingQueue<Runnable>(CAPACITY));
pool.setRejectedExecutionHandler(new ThreadPoolExecutor.DiscardOldestPolicy());
pool.setThreadFactory(new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("Readahead Thread #%d")
.build());
}
/**
* Issue a request to readahead on the given file descriptor.
*
* @param identifier a textual identifier that will be used in error
* messages (e.g. the file name)
* @param fd the file descriptor to read ahead
* @param curPos the current offset at which reads are being issued
* @param readaheadLength the configured length to read ahead
* @param maxOffsetToRead the maximum offset that will be readahead
* (useful if, for example, only some segment of the file is
* requested by the user). Pass {@link Long#MAX_VALUE} to allow
* readahead to the end of the file.
* @param lastReadahead the result returned by the previous invocation
* of this function on this file descriptor, or null if this is
* the first call
* @return an object representing this outstanding request, or null
* if no readahead was performed
*/
public ReadaheadRequest readaheadStream(
String identifier,
FileDescriptor fd,
long curPos,
long readaheadLength,
long maxOffsetToRead,
ReadaheadRequest lastReadahead) {
Preconditions.checkArgument(curPos <= maxOffsetToRead,
"Readahead position %s higher than maxOffsetToRead %s",
curPos, maxOffsetToRead);
if (readaheadLength <= 0) {
return null;
}
long lastOffset = Long.MIN_VALUE;
if (lastReadahead != null) {
lastOffset = lastReadahead.getOffset();
}
// trigger each readahead when we have reached the halfway mark
// in the previous readahead. This gives the system time
// to satisfy the readahead before we start reading the data.
long nextOffset = lastOffset + readaheadLength / 2;
if (curPos >= nextOffset) {
// cancel any currently pending readahead, to avoid
// piling things up in the queue. Each reader should have at most
// one outstanding request in the queue.
if (lastReadahead != null) {
lastReadahead.cancel();
lastReadahead = null;
}
long length = Math.min(readaheadLength,
maxOffsetToRead - curPos);
if (length <= 0) {
// we've reached the end of the stream
return null;
}
return submitReadahead(identifier, fd, curPos, length);
} else {
return lastReadahead;
}
}
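readaheadStream is designed to be called from inside a read loop, passing back whatever request it returned on the previous call so that a new readahead is submitted only once the reader crosses the halfway point of the last one. A hedged usage sketch; the buffer size, window size and identifier are illustrative:

import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.io.ReadaheadPool;
import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;

public class ReadaheadLoop {
  public static void readFully(String name, FileInputStream in, long fileLength)
      throws IOException {
    ReadaheadPool pool = ReadaheadPool.getInstance();  // null if no native code
    FileDescriptor fd = in.getFD();
    ReadaheadRequest last = null;
    byte[] buf = new byte[64 * 1024];
    long pos = 0;
    int n;
    while ((n = in.read(buf)) > 0) {
      pos += n;
      if (pool != null) {
        // 4 MB readahead window, never past the end of the file
        last = pool.readaheadStream(name, fd, pos, 4 * 1024 * 1024,
            fileLength, last);
      }
    }
    if (last != null) {
      last.cancel();  // drop any outstanding request before the fd is closed
    }
  }
}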
/**
* Submit a request to readahead on the given file descriptor.
* @param identifier a textual identifier used in error messages, etc.
* @param fd the file descriptor to readahead
* @param off the offset at which to start the readahead
* @param len the number of bytes to read
* @return an object representing this pending request
*/
public ReadaheadRequest submitReadahead(
String identifier, FileDescriptor fd, long off, long len) {
ReadaheadRequestImpl req = new ReadaheadRequestImpl(
identifier, fd, off, len);
pool.execute(req);
if (LOG.isTraceEnabled()) {
LOG.trace("submit readahead: " + req);
}
return req;
}
/**
* An outstanding readahead request that has been submitted to
* the pool. This request may be pending or may have been
* completed.
*/
public interface ReadaheadRequest {
/**
* Cancels the request for readahead. This should be used
* if the reader no longer needs the requested data, <em>before</em>
* closing the related file descriptor.
*
* It is safe to use even if the readahead request has already
* been fulfilled.
*/
public void cancel();
/**
* @return the requested offset
*/
public long getOffset();
/**
* @return the requested length
*/
public long getLength();
}
private static class ReadaheadRequestImpl implements Runnable, ReadaheadRequest {
private final String identifier;
private final FileDescriptor fd;
private final long off, len;
private volatile boolean canceled = false;
private ReadaheadRequestImpl(String identifier, FileDescriptor fd, long off, long len) {
this.identifier = identifier;
this.fd = fd;
this.off = off;
this.len = len;
}
public void run() {
if (canceled) return;
// There's a very narrow race here that the file will close right at
// this instant. But if that happens, we'll likely receive an EBADF
// error below, and see that it's canceled, ignoring the error.
// It's also possible that we'll end up requesting readahead on some
// other FD, which may be wasted work, but won't cause a problem.
try {
NativeIO.posixFadviseIfPossible(fd, off, len,
NativeIO.POSIX_FADV_WILLNEED);
} catch (IOException ioe) {
if (canceled) {
// no big deal - the reader canceled the request and closed
// the file.
return;
}
LOG.warn("Failed readahead on " + identifier,
ioe);
}
}
@Override
public void cancel() {
canceled = true;
// We could attempt to remove it from the work queue, but that would
// add complexity. In practice, the work queues remain very short,
// so removing canceled requests has no gain.
}
@Override
public long getOffset() {
return off;
}
@Override
public long getLength() {
return len;
}
@Override
public String toString() {
return "ReadaheadRequestImpl [identifier='" + identifier + "', fd=" + fd
+ ", off=" + off + ", len=" + len + "]";
}
}
}

View File

@@ -151,16 +151,7 @@ public int compare(Object a, Object b) {
/** Lexicographic order of binary data. */
public static int compareBytes(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
int end1 = s1 + l1;
int end2 = s2 + l2;
for (int i = s1, j = s2; i < end1 && j < end2; i++, j++) {
int a = (b1[i] & 0xff);
int b = (b2[j] & 0xff);
if (a != b) {
return a - b;
}
}
return l1 - l2;
return FastByteComparisons.compareTo(b1, s1, l1, b2, s2, l2);
}
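The change above routes compareBytes through the new FastByteComparisons helper while keeping the same lexicographic contract for callers. A short hedged example of the public entry point using Text's serialized bytes:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;

public class CompareBytesDemo {
  public static void main(String[] args) {
    Text a = new Text("alpha");
    Text b = new Text("beta");
    // negative because "alpha" sorts before "beta" as unsigned bytes
    int cmp = WritableComparator.compareBytes(
        a.getBytes(), 0, a.getLength(),
        b.getBytes(), 0, b.getLength());
    System.out.println(cmp < 0);  // expected: true
  }
}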
/** Compute hash for binary data. */

View File

@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,11 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.snappy;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
public class TestSimulatorSerialJobSubmission extends TestSimulatorEndToEnd {
public TestSimulatorSerialJobSubmission() {
super();
policy = SimulatorJobSubmissionPolicy.SERIAL;
}
}

View File

@@ -46,10 +46,41 @@ public class NativeIO {
public static final int O_FSYNC = O_SYNC;
public static final int O_NDELAY = O_NONBLOCK;
// Flags for posix_fadvise() from bits/fcntl.h
/* No further special treatment. */
public static final int POSIX_FADV_NORMAL = 0;
/* Expect random page references. */
public static final int POSIX_FADV_RANDOM = 1;
/* Expect sequential page references. */
public static final int POSIX_FADV_SEQUENTIAL = 2;
/* Will need these pages. */
public static final int POSIX_FADV_WILLNEED = 3;
/* Don't need these pages. */
public static final int POSIX_FADV_DONTNEED = 4;
/* Data will be accessed once. */
public static final int POSIX_FADV_NOREUSE = 5;
/* Wait upon writeout of all pages
in the range before performing the
write. */
public static final int SYNC_FILE_RANGE_WAIT_BEFORE = 1;
/* Initiate writeout of all those
dirty pages in the range which are
not presently under writeback. */
public static final int SYNC_FILE_RANGE_WRITE = 2;
/* Wait upon writeout of all pages in
the range after performing the
write. */
public static final int SYNC_FILE_RANGE_WAIT_AFTER = 4;
private static final Log LOG = LogFactory.getLog(NativeIO.class);
private static boolean nativeLoaded = false;
private static boolean workaroundNonThreadSafePasswdCalls = false;
private static boolean fadvisePossible = true;
private static boolean syncFileRangePossible = true;
static final String WORKAROUND_NON_THREADSAFE_CALLS_KEY =
"hadoop.workaround.non.threadsafe.getpwuid";
@@ -88,9 +119,58 @@ public static boolean isAvailable() {
/** Wrapper around chmod(2) */
public static native void chmod(String path, int mode) throws IOException;
/** Wrapper around posix_fadvise(2) */
static native void posix_fadvise(
FileDescriptor fd, long offset, long len, int flags) throws NativeIOException;
/** Wrapper around sync_file_range(2) */
static native void sync_file_range(
FileDescriptor fd, long offset, long nbytes, int flags) throws NativeIOException;
/** Initialize the JNI method ID and class ID cache */
private static native void initNative();
/**
* Call posix_fadvise on the given file descriptor. See the manpage
* for this syscall for more information. On systems where this
* call is not available, does nothing.
*
* @throws NativeIOException if there is an error with the syscall
*/
public static void posixFadviseIfPossible(
FileDescriptor fd, long offset, long len, int flags)
throws NativeIOException {
if (nativeLoaded && fadvisePossible) {
try {
posix_fadvise(fd, offset, len, flags);
} catch (UnsupportedOperationException uoe) {
fadvisePossible = false;
} catch (UnsatisfiedLinkError ule) {
fadvisePossible = false;
}
}
}
/**
* Call sync_file_range on the given file descriptor. See the manpage
* for this syscall for more information. On systems where this
* call is not available, does nothing.
*
* @throws NativeIOException if there is an error with the syscall
*/
public static void syncFileRangeIfPossible(
FileDescriptor fd, long offset, long nbytes, int flags)
throws NativeIOException {
if (nativeLoaded && syncFileRangePossible) {
try {
sync_file_range(fd, offset, nbytes, flags);
} catch (UnsupportedOperationException uoe) {
syncFileRangePossible = false;
} catch (UnsatisfiedLinkError ule) {
syncFileRangePossible = false;
}
}
}
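Because both wrappers degrade to no-ops when the native library or the syscall is unavailable, callers can invoke them unconditionally. A hedged sketch of a common pattern, starting writeback for a range that was just written and then hinting that its pages will not be read again; the file name and sizes are illustrative:

import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.nativeio.NativeIO;

public class DropCacheAfterWrite {
  public static void writeAndDrop(String path, byte[] data) throws IOException {
    FileOutputStream out = new FileOutputStream(path);
    try {
      out.write(data);
      // ask the kernel to start writing back the dirty range...
      NativeIO.syncFileRangeIfPossible(out.getFD(), 0, data.length,
          NativeIO.SYNC_FILE_RANGE_WRITE);
      // ...and hint that these pages will not be needed again
      NativeIO.posixFadviseIfPossible(out.getFD(), 0, data.length,
          NativeIO.POSIX_FADV_DONTNEED);
    } finally {
      out.close();
    }
  }
}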
/**
* Result type of the fstat call

View File

@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,14 +15,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.nativeio;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
package org.apache.hadoop.mapred;
public class SimulatorThreadWakeUpEvent extends SimulatorEvent {
public SimulatorThreadWakeUpEvent(SimulatorEventListener listener,
long timestamp) {
super(listener, timestamp);
}
}

View File

@@ -0,0 +1,389 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.Closeable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetSocketAddress;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcExceptionProto;
import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;
import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcResponseProto;
import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcResponseProto.ResponseStatus;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
import com.google.protobuf.Descriptors.MethodDescriptor;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Message;
import com.google.protobuf.ServiceException;
/**
* RPC Engine for protobuf based RPCs.
*/
@InterfaceStability.Evolving
public class ProtobufRpcEngine implements RpcEngine {
private static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
private static final ClientCache CLIENTS = new ClientCache();
@Override
@SuppressWarnings("unchecked")
public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
SocketFactory factory, int rpcTimeout) throws IOException {
return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(protocol
.getClassLoader(), new Class[] { protocol }, new Invoker(protocol,
addr, ticket, conf, factory, rpcTimeout)), false);
}
private static class Invoker implements InvocationHandler, Closeable {
private Map<String, Message> returnTypes = new ConcurrentHashMap<String, Message>();
private boolean isClosed = false;
private Client.ConnectionId remoteId;
private Client client;
public Invoker(Class<?> protocol, InetSocketAddress addr,
UserGroupInformation ticket, Configuration conf, SocketFactory factory,
int rpcTimeout) throws IOException {
this.remoteId = Client.ConnectionId.getConnectionId(addr, protocol,
ticket, rpcTimeout, conf);
this.client = CLIENTS.getClient(conf, factory,
RpcResponseWritable.class);
}
private HadoopRpcRequestProto constructRpcRequest(Method method,
Object[] params) throws ServiceException {
HadoopRpcRequestProto rpcRequest;
HadoopRpcRequestProto.Builder builder = HadoopRpcRequestProto
.newBuilder();
builder.setMethodName(method.getName());
if (params.length != 2) { // RpcController + Message
throw new ServiceException("Too many parameters for request. Method: ["
+ method.getName() + "]" + ", Expected: 2, Actual: "
+ params.length);
}
if (params[1] == null) {
throw new ServiceException("null param while calling Method: ["
+ method.getName() + "]");
}
Message param = (Message) params[1];
builder.setRequest(param.toByteString());
rpcRequest = builder.build();
return rpcRequest;
}
/**
* This is the client side invoker of an RPC method. It only throws
* ServiceException, since the invocation proxy expects only
* ServiceException to be thrown by the method in the case of a protobuf service.
*
* ServiceException has the following causes:
* <ol>
* <li>Exceptions encountered in this method are thrown as
* RpcClientException, wrapped in RemoteException</li>
* <li>Remote exceptions are thrown wrapped in RemoteException</li>
* </ol>
*
* Note that a client calling protobuf RPC methods must handle
* ServiceException by getting the cause from the ServiceException. If the
* cause is RemoteException, then unwrap it to get the exception thrown by
* the server.
*/
@Override
public Object invoke(Object proxy, Method method, Object[] args)
throws ServiceException {
long startTime = 0;
if (LOG.isDebugEnabled()) {
startTime = System.currentTimeMillis();
}
HadoopRpcRequestProto rpcRequest = constructRpcRequest(method, args);
RpcResponseWritable val = null;
try {
val = (RpcResponseWritable) client.call(
new RpcRequestWritable(rpcRequest), remoteId);
} catch (Exception e) {
RpcClientException ce = new RpcClientException("Client exception", e);
throw new ServiceException(getRemoteException(ce));
}
HadoopRpcResponseProto response = val.message;
if (LOG.isDebugEnabled()) {
long callTime = System.currentTimeMillis() - startTime;
LOG.debug("Call: " + method.getName() + " " + callTime);
}
// Wrap the received message
ResponseStatus status = response.getStatus();
if (status != ResponseStatus.SUCCESS) {
RemoteException re = new RemoteException(response.getException()
.getExceptionName(), response.getException().getStackTrace());
re.fillInStackTrace();
throw new ServiceException(re);
}
Message prototype = null;
try {
prototype = getReturnProtoType(method);
} catch (Exception e) {
throw new ServiceException(e);
}
Message returnMessage;
try {
returnMessage = prototype.newBuilderForType()
.mergeFrom(response.getResponse()).build();
} catch (InvalidProtocolBufferException e) {
RpcClientException ce = new RpcClientException("Client exception", e);
throw new ServiceException(getRemoteException(ce));
}
return returnMessage;
}
public void close() throws IOException {
if (!isClosed) {
isClosed = true;
CLIENTS.stopClient(client);
}
}
private Message getReturnProtoType(Method method) throws Exception {
if (returnTypes.containsKey(method.getName())) {
return returnTypes.get(method.getName());
}
Class<?> returnType = method.getReturnType();
Method newInstMethod = returnType.getMethod("getDefaultInstance");
newInstMethod.setAccessible(true);
Message prototype = (Message) newInstMethod.invoke(null, (Object[]) null);
returnTypes.put(method.getName(), prototype);
return prototype;
}
}
@Override
public Object[] call(Method method, Object[][] params,
InetSocketAddress[] addrs, UserGroupInformation ticket, Configuration conf) {
throw new UnsupportedOperationException();
}
/**
* Writable Wrapper for Protocol Buffer Requests
*/
private static class RpcRequestWritable implements Writable {
HadoopRpcRequestProto message;
@SuppressWarnings("unused")
public RpcRequestWritable() {
}
RpcRequestWritable(HadoopRpcRequestProto message) {
this.message = message;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(message.toByteArray().length);
out.write(message.toByteArray());
}
@Override
public void readFields(DataInput in) throws IOException {
int length = in.readInt();
byte[] bytes = new byte[length];
in.readFully(bytes);
message = HadoopRpcRequestProto.parseFrom(bytes);
}
}
/**
* Writable Wrapper for Protocol Buffer Responses
*/
private static class RpcResponseWritable implements Writable {
HadoopRpcResponseProto message;
@SuppressWarnings("unused")
public RpcResponseWritable() {
}
public RpcResponseWritable(HadoopRpcResponseProto message) {
this.message = message;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(message.toByteArray().length);
out.write(message.toByteArray());
}
@Override
public void readFields(DataInput in) throws IOException {
int length = in.readInt();
byte[] bytes = new byte[length];
in.readFully(bytes);
message = HadoopRpcResponseProto.parseFrom(bytes);
}
}
@VisibleForTesting
@InterfaceAudience.Private
@InterfaceStability.Unstable
static Client getClient(Configuration conf) {
return CLIENTS.getClient(conf, SocketFactory.getDefault(),
RpcResponseWritable.class);
}
@Override
public RPC.Server getServer(Class<?> protocol, Object instance,
String bindAddress, int port, int numHandlers, int numReaders,
int queueSizePerHandler, boolean verbose, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager)
throws IOException {
return new Server(instance, conf, bindAddress, port, numHandlers,
numReaders, queueSizePerHandler, verbose, secretManager);
}
private static RemoteException getRemoteException(Exception e) {
return new RemoteException(e.getClass().getName(),
StringUtils.stringifyException(e));
}
public static class Server extends RPC.Server {
private BlockingService service;
private boolean verbose;
private static String classNameBase(String className) {
String[] names = className.split("\\.", -1);
if (names == null || names.length == 0) {
return className;
}
return names[names.length - 1];
}
/**
* Construct an RPC server.
*
* @param instance the instance whose methods will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
* @param numHandlers the number of method handler threads to run
* @param verbose whether each call should be logged
*/
public Server(Object instance, Configuration conf, String bindAddress,
int port, int numHandlers, int numReaders, int queueSizePerHandler,
boolean verbose, SecretManager<? extends TokenIdentifier> secretManager)
throws IOException {
super(bindAddress, port, RpcRequestWritable.class, numHandlers,
numReaders, queueSizePerHandler, conf, classNameBase(instance
.getClass().getName()), secretManager);
this.service = (BlockingService) instance;
this.verbose = verbose;
}
/**
* This is a server side method, which is invoked over RPC. On success
* the response contains the protobuf response payload. On failure, the
* exception name and the stack trace are returned in the response. See {@link HadoopRpcResponseProto}
*
* In this method three types of exceptions are possible, and they are
* returned in the response as follows.
* <ol>
* <li> Exceptions encountered in this method are returned as {@link RpcServerException} </li>
* <li> Exceptions thrown by the service, wrapped in ServiceException. In that
* case this method returns in the response the exception thrown by the service.</li>
* <li> Other exceptions thrown by the service. They are returned as
* is.</li>
* </ol>
*/
@Override
public Writable call(String protocol, Writable writableRequest,
long receiveTime) throws IOException {
RpcRequestWritable request = (RpcRequestWritable) writableRequest;
HadoopRpcRequestProto rpcRequest = request.message;
String methodName = rpcRequest.getMethodName();
if (verbose)
LOG.info("Call: protocol=" + protocol + ", method=" + methodName);
MethodDescriptor methodDescriptor = service.getDescriptorForType()
.findMethodByName(methodName);
if (methodDescriptor == null) {
String msg = "Unknown method " + methodName + " called on " + protocol
+ " protocol.";
LOG.warn(msg);
return handleException(new RpcServerException(msg));
}
Message prototype = service.getRequestPrototype(methodDescriptor);
Message param = prototype.newBuilderForType()
.mergeFrom(rpcRequest.getRequest()).build();
Message result;
try {
result = service.callBlockingMethod(methodDescriptor, null, param);
} catch (ServiceException e) {
Throwable cause = e.getCause();
return handleException(cause != null ? cause : e);
} catch (Exception e) {
return handleException(e);
}
HadoopRpcResponseProto response = constructProtoSpecificRpcSuccessResponse(result);
return new RpcResponseWritable(response);
}
private RpcResponseWritable handleException(Throwable e) {
HadoopRpcExceptionProto exception = HadoopRpcExceptionProto.newBuilder()
.setExceptionName(e.getClass().getName())
.setStackTrace(StringUtils.stringifyException(e)).build();
HadoopRpcResponseProto response = HadoopRpcResponseProto.newBuilder()
.setStatus(ResponseStatus.ERRROR).setException(exception).build();
return new RpcResponseWritable(response);
}
private HadoopRpcResponseProto constructProtoSpecificRpcSuccessResponse(
Message message) {
HadoopRpcResponseProto res = HadoopRpcResponseProto.newBuilder()
.setResponse(message.toByteString())
.setStatus(ResponseStatus.SUCCESS)
.build();
return res;
}
}
}
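
A minimal client-side sketch of the ServiceException handling that the Invoker javadoc above prescribes. FooPB, EchoRequestProto and EchoResponseProto are hypothetical stand-ins for a generated protobuf blocking interface and its messages; only the catch-and-unwrap pattern comes from the code above.

import java.io.IOException;

import com.google.protobuf.ServiceException;

import org.apache.hadoop.ipc.RemoteException;

public class ProtobufClientSketch {
  // FooPB, EchoRequestProto and EchoResponseProto are assumed generated types.
  public static EchoResponseProto echo(FooPB proxy, EchoRequestProto request)
      throws IOException {
    try {
      // RpcController is unused by this engine, so null is passed
      return proxy.echo(null, request);
    } catch (ServiceException e) {
      Throwable cause = e.getCause();
      if (cause instanceof RemoteException) {
        // thrown by the server: unwrap to recover the original exception
        throw ((RemoteException) cause).unwrapRemoteException();
      }
      // client-side failure (connection, marshalling, ...)
      throw new IOException("RPC to server failed", cause);
    }
  }
}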


@ -489,7 +489,8 @@ public static void stopProxy(Object proxy) {
}
} else {
LOG.error("Could not get invocation handler " + invocationHandler +
" for proxy " + proxy + ", or invocation handler is not closeable.");
" for proxy class " + (proxy == null ? null : proxy.getClass()) +
", or invocation handler is not closeable.");
}
}


@ -25,10 +25,9 @@ public class RpcServerException extends RpcException {
/**
* Constructs exception with the specified detail message.
*
* @param messages detailed message.
* @param message detailed message.
*/
RpcServerException(final String message) {
public RpcServerException(final String message) {
super(message);
}
@ -36,12 +35,11 @@ public class RpcServerException extends RpcException {
* Constructs exception with the specified detail message and cause.
*
* @param message message.
* @param cause that cause this exception
* @param cause the cause (can be retried by the {@link #getCause()} method).
* (A <tt>null</tt> value is permitted, and indicates that the cause
* is nonexistent or unknown.)
*/
RpcServerException(final String message, final Throwable cause) {
public RpcServerException(final String message, final Throwable cause) {
super(message, cause);
}
}


@ -102,6 +102,23 @@ public abstract class Server {
*/
public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes());
/**
* If the user accidentally sends an HTTP GET to an IPC port, we detect this
* and send back a nicer response.
*/
private static final ByteBuffer HTTP_GET_BYTES = ByteBuffer.wrap(
"GET ".getBytes());
/**
* An HTTP response to send back if we detect an HTTP request to our IPC
* port.
*/
static final String RECEIVED_HTTP_REQ_RESPONSE =
"HTTP/1.1 404 Not Found\r\n" +
"Content-type: text/plain\r\n\r\n" +
"It looks like you are making an HTTP request to a Hadoop IPC port. " +
"This is not the correct port for the web interface on this daemon.\r\n";
// 1 : Introduce ping and server does not throw away RPCs
// 3 : Introduce the protocol into the RPC connection header
// 4 : Introduced SASL security layer
@ -910,6 +927,7 @@ public class Connection {
private ByteArrayOutputStream authFailedResponse = new ByteArrayOutputStream();
// Fake 'call' for SASL context setup
private static final int SASL_CALLID = -33;
private final Call saslCall = new Call(SASL_CALLID, null, this);
private final ByteArrayOutputStream saslResponse = new ByteArrayOutputStream();
@ -1157,6 +1175,15 @@ public int readAndProcess() throws IOException, InterruptedException {
authMethod = AuthMethod.read(new DataInputStream(
new ByteArrayInputStream(method)));
dataLengthBuffer.flip();
// Check if it looks like the user is hitting an IPC port
// with an HTTP GET - this is a common error, so we can
// send back a simple string indicating as much.
if (HTTP_GET_BYTES.equals(dataLengthBuffer)) {
setupHttpRequestOnIpcPortResponse();
return -1;
}
if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) {
//Warning is ok since this is not supposed to happen.
LOG.warn("Incorrect header or version mismatch from " +
@ -1171,8 +1198,12 @@ public int readAndProcess() throws IOException, InterruptedException {
throw new IOException("Unable to read authentication method");
}
if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) {
AccessControlException ae = new AccessControlException(
"Authentication is required");
AccessControlException ae = new AccessControlException("Authorization ("
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
+ ") is enabled but authentication ("
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
+ ") is configured as simple. Please configure another method "
+ "like kerberos or digest.");
setupResponse(authFailedResponse, authFailedCall, Status.FATAL,
null, ae.getClass().getName(), ae.getMessage());
responder.doRespond(authFailedCall);
@ -1272,6 +1303,13 @@ private void setupBadVersionResponse(int clientVersion) throws IOException {
}
}
private void setupHttpRequestOnIpcPortResponse() throws IOException {
Call fakeCall = new Call(0, null, this);
fakeCall.setResponse(ByteBuffer.wrap(
RECEIVED_HTTP_REQ_RESPONSE.getBytes()));
responder.doRespond(fakeCall);
}
/// Reads the connection header following version
private void processHeader(byte[] buf) throws IOException {
DataInputStream in =
@ -1772,6 +1810,16 @@ public void authorize(UserGroupInformation user,
}
}
/**
* Get the port on which the IPC Server is listening for incoming connections.
* This could be an ephemeral port too, in which case we return the real
* port on which the Server has bound.
* @return port on which IPC Server is listening
*/
public int getPort() {
return port;
}
/**
* The number of open RPC connections
* @return the number of open rpc connections


@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.ipc.protobuf;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;


@ -168,6 +168,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) {
if (splitStrings.length != 2) {
jg.writeStringField("result", "ERROR");
jg.writeStringField("message", "query format is not as expected.");
jg.flush();
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
return;
}


@ -0,0 +1,262 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
import org.apache.log4j.Layout;
import org.apache.log4j.helpers.ISO8601DateFormat;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.log4j.spi.ThrowableInformation;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.MappingJsonFactory;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ContainerNode;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.text.DateFormat;
import java.util.Date;
/**
* This offers a log layout for JSON, with some test entry points. Its purpose is
* to allow Log4J to generate events that are easy for other programs to parse, but which are somewhat
* human-readable.
*
* Some features.
*
* <ol>
* <li>Every event is a standalone JSON clause</li>
* <li>Time is published as a time_t event since 1/1/1970
* -this is the fastest to generate.</li>
* <li>An ISO date is generated, but this is cached and will only be accurate to within a second</li>
* <li>the stack trace is included as an array</li>
* </ol>
*
* A simple log event will resemble the following
* <pre>
* {"name":"test","time":1318429136789,"date":"2011-10-12 15:18:56,789","level":"INFO","thread":"main","message":"test message"}
* </pre>
*
* An event with an error will contain data similar to that below (which has been reformatted to be multi-line).
*
* <pre>
* {
* "name":"testException",
* "time":1318429136789,
* "date":"2011-10-12 15:18:56,789",
* "level":"INFO",
* "thread":"quoted\"",
* "message":"new line\n and {}",
* "exceptionclass":"java.net.NoRouteToHostException",
* "stack":[
* "java.net.NoRouteToHostException: that box caught fire 3 years ago",
* "\tat org.apache.hadoop.log.TestLog4Json.testException(TestLog4Json.java:49)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)",
* "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)",
* "\tat java.lang.reflect.Method.invoke(Method.java:597)",
* "\tat junit.framework.TestCase.runTest(TestCase.java:168)",
* "\tat junit.framework.TestCase.runBare(TestCase.java:134)",
* "\tat junit.framework.TestResult$1.protect(TestResult.java:110)",
* "\tat junit.framework.TestResult.runProtected(TestResult.java:128)",
* "\tat junit.framework.TestResult.run(TestResult.java:113)",
* "\tat junit.framework.TestCase.run(TestCase.java:124)",
* "\tat junit.framework.TestSuite.runTest(TestSuite.java:232)",
* "\tat junit.framework.TestSuite.run(TestSuite.java:227)",
* "\tat org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)",
* "\tat org.apache.maven.surefire.junit4.JUnit4TestSet.execute(JUnit4TestSet.java:59)",
* "\tat org.apache.maven.surefire.suite.AbstractDirectoryTestSuite.executeTestSet(AbstractDirectoryTestSuite.java:120)",
* "\tat org.apache.maven.surefire.suite.AbstractDirectoryTestSuite.execute(AbstractDirectoryTestSuite.java:145)",
* "\tat org.apache.maven.surefire.Surefire.run(Surefire.java:104)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)",
* "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)",
* "\tat java.lang.reflect.Method.invoke(Method.java:597)",
* "\tat org.apache.maven.surefire.booter.SurefireBooter.runSuitesInProcess(SurefireBooter.java:290)",
* "\tat org.apache.maven.surefire.booter.SurefireBooter.main(SurefireBooter.java:1017)"
* ]
* }
* </pre>
*/
public class Log4Json extends Layout {
/**
* Jackson factories are thread safe when constructing parsers and generators.
* They are not thread safe in configure methods; if there is to be any
* configuration it must be done in a static initializer block.
*/
private static final JsonFactory factory = new MappingJsonFactory();
public static final String DATE = "date";
public static final String EXCEPTION_CLASS = "exceptionclass";
public static final String LEVEL = "level";
public static final String MESSAGE = "message";
public static final String NAME = "name";
public static final String STACK = "stack";
public static final String THREAD = "thread";
public static final String TIME = "time";
public static final String JSON_TYPE = "application/json";
private final DateFormat dateFormat;
public Log4Json() {
dateFormat = new ISO8601DateFormat();
}
/**
* @return the mime type of JSON
*/
@Override
public String getContentType() {
return JSON_TYPE;
}
@Override
public String format(LoggingEvent event) {
try {
return toJson(event);
} catch (IOException e) {
//this really should not happen, and rather than throw an exception
//which may hide the real problem, the log class is printed
//in JSON format. The classname is used to ensure valid JSON is
//returned without playing escaping games
return "{ \"logfailure\":\"" + e.getClass().toString() + "\"}";
}
}
/**
* Convert an event to JSON
*
* @param event the event -must not be null
* @return a string value
* @throws IOException on problems generating the JSON
*/
public String toJson(LoggingEvent event) throws IOException {
StringWriter writer = new StringWriter();
toJson(writer, event);
return writer.toString();
}
/**
* Convert an event to JSON
*
* @param writer the destination writer
* @param event the event -must not be null
* @return the writer
* @throws IOException on problems generating the JSON
*/
public Writer toJson(final Writer writer, final LoggingEvent event)
throws IOException {
ThrowableInformation ti = event.getThrowableInformation();
toJson(writer,
event.getLoggerName(),
event.getTimeStamp(),
event.getLevel().toString(),
event.getThreadName(),
event.getRenderedMessage(),
ti);
return writer;
}
/**
* Build a JSON entry from the parameters. This is public for testing.
*
* @param writer destination
* @param loggerName logger name
* @param timeStamp time_t value
* @param level level string
* @param threadName name of the thread
* @param message rendered message
* @param ti nullable thrown information
* @return the writer
* @throws IOException on any problem
*/
public Writer toJson(final Writer writer,
final String loggerName,
final long timeStamp,
final String level,
final String threadName,
final String message,
final ThrowableInformation ti) throws IOException {
JsonGenerator json = factory.createJsonGenerator(writer);
json.writeStartObject();
json.writeStringField(NAME, loggerName);
json.writeNumberField(TIME, timeStamp);
Date date = new Date(timeStamp);
json.writeStringField(DATE, dateFormat.format(date));
json.writeStringField(LEVEL, level);
json.writeStringField(THREAD, threadName);
json.writeStringField(MESSAGE, message);
if (ti != null) {
//there is some throwable info, but if the log event has been sent over the wire,
//there may not be a throwable inside it, just a summary.
Throwable thrown = ti.getThrowable();
String eclass = (thrown != null) ?
thrown.getClass().getName()
: "";
json.writeStringField(EXCEPTION_CLASS, eclass);
String[] stackTrace = ti.getThrowableStrRep();
json.writeArrayFieldStart(STACK);
for (String row : stackTrace) {
json.writeString(row);
}
json.writeEndArray();
}
json.writeEndObject();
json.flush();
json.close();
return writer;
}
/**
* This layout does not ignore throwables
*
* @return false, always
*/
@Override
public boolean ignoresThrowable() {
return false;
}
/**
* Do nothing
*/
@Override
public void activateOptions() {
}
/**
* For use in tests
*
* @param json incoming JSON to parse
* @return a node tree
* @throws IOException on any parsing problems
*/
public static ContainerNode parse(String json) throws IOException {
ObjectMapper mapper = new ObjectMapper(factory);
JsonNode jsonNode = mapper.readTree(json);
if (!(jsonNode instanceof ContainerNode)) {
throw new IOException("Wrong JSON data: " + json);
}
return (ContainerNode) jsonNode;
}
}
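
A short sketch of how the layout's entry points above can be exercised directly; this is an illustration, not part of the patch, and uses only the toJson and parse methods shown in the class.

import java.io.IOException;
import java.io.StringWriter;

import org.apache.hadoop.log.Log4Json;
import org.codehaus.jackson.node.ContainerNode;

public class Log4JsonSketch {
  public static void main(String[] args) throws IOException {
    Log4Json layout = new Log4Json();
    StringWriter out = new StringWriter();
    // build one JSON event from the low-level entry point (no throwable)
    layout.toJson(out, "test", System.currentTimeMillis(), "INFO",
        Thread.currentThread().getName(), "test message", null);
    String json = out.toString();
    // round-trip through the test helper to confirm it parses
    ContainerNode node = Log4Json.parse(json);
    System.out.println(node.get("message"));
  }
}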


@ -38,13 +38,17 @@ public class CachedDNSToSwitchMapping implements DNSToSwitchMapping {
private Map<String, String> cache = new ConcurrentHashMap<String, String>();
protected DNSToSwitchMapping rawMapping;
/**
* cache a raw DNS mapping
* @param rawMapping the raw mapping to cache
*/
public CachedDNSToSwitchMapping(DNSToSwitchMapping rawMapping) {
this.rawMapping = rawMapping;
}
/**
* Returns the hosts from 'names' that have not been cached previously
* @param names a list of hostnames to probe for being cached
* @return the hosts from 'names' that have not been cached previously
*/
private List<String> getUncachedHosts(List<String> names) {
// find out all names without cached resolved location
@ -58,7 +62,12 @@ private List<String> getUncachedHosts(List<String> names) {
}
/**
* Caches the resolved hosts
* Caches the resolved host:rack mappings. The two list
* parameters must be of equal size.
*
* @param uncachedHosts a list of hosts that were uncached
* @param resolvedHosts a list of resolved host entries where the element
* at index(i) is the resolved value for the entry in uncachedHosts[i]
*/
private void cacheResolvedHosts(List<String> uncachedHosts,
List<String> resolvedHosts) {
@ -71,8 +80,9 @@ private void cacheResolvedHosts(List<String> uncachedHosts,
}
/**
* Returns the cached resolution of the list of hostnames/addresses.
* Returns null if any of the names are not currently in the cache
* @param names a list of hostnames to look up (can be empty)
* @return the cached resolution of the list of hostnames/addresses.
* or null if any of the names are not currently in the cache
*/
private List<String> getCachedHosts(List<String> names) {
List<String> result = new ArrayList<String>(names.size());
@ -88,6 +98,7 @@ private List<String> getCachedHosts(List<String> names) {
return result;
}
@Override
public List<String> resolve(List<String> names) {
// normalize all input names to be in the form of IP addresses
names = NetUtils.normalizeHostNames(names);
@ -97,12 +108,14 @@ public List<String> resolve(List<String> names) {
return result;
}
List<String> uncachedHosts = this.getUncachedHosts(names);
List<String> uncachedHosts = getUncachedHosts(names);
// Resolve the uncached hosts
List<String> resolvedHosts = rawMapping.resolve(uncachedHosts);
this.cacheResolvedHosts(uncachedHosts, resolvedHosts);
return this.getCachedHosts(names);
//cache them
cacheResolvedHosts(uncachedHosts, resolvedHosts);
//now look up the entire list in the cache
return getCachedHosts(names);
}
}


@ -23,7 +23,7 @@
import org.apache.hadoop.classification.InterfaceStability;
/**
* An interface that should be implemented to allow pluggable
* An interface that must be implemented to allow pluggable
* DNS-name/IP-address to RackID resolvers.
*
*/
@ -40,8 +40,9 @@ public interface DNSToSwitchMapping {
* Note the hostname/ip-address is not part of the returned path.
* The network topology of the cluster would determine the number of
* components in the network path.
* @param names
* @return list of resolved network paths
* @param names the list of hosts to resolve (can be empty)
* @return list of resolved network paths.
* If <i>names</i> is empty, the returned list is also empty
*/
public List<String> resolve(List<String> names);
}
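
For illustration only, a minimal implementation of the interface above that places every host in one invented rack; real resolvers such as ScriptBasedMapping (further down in this commit) consult a script and a cache.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.net.DNSToSwitchMapping;

public class SingleRackMapping implements DNSToSwitchMapping {
  @Override
  public List<String> resolve(List<String> names) {
    // one rack path per input name; the hostname itself is not part of the path
    List<String> paths = new ArrayList<String>(names.size());
    for (int i = 0; i < names.size(); i++) {
      paths.add("/default-rack");
    }
    return paths;
  }
}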


@ -150,12 +150,38 @@ public static InetSocketAddress createSocketAddr(String target) {
*/
public static InetSocketAddress createSocketAddr(String target,
int defaultPort) {
return createSocketAddr(target, defaultPort, null);
}
/**
* Create an InetSocketAddress from the given target string and
* default port. If the string cannot be parsed correctly, the
* <code>configName</code> parameter is used as part of the
* exception message, allowing the user to better diagnose
* the misconfiguration.
*
* @param target a string of either "host" or "host:port"
* @param defaultPort the default port if <code>target</code> does not
* include a port number
* @param configName the name of the configuration from which
* <code>target</code> was loaded. This is used in the
* exception message in the case that parsing fails.
*/
public static InetSocketAddress createSocketAddr(String target,
int defaultPort,
String configName) {
String helpText = "";
if (configName != null) {
helpText = " (configuration property '" + configName + "')";
}
if (target == null) {
throw new IllegalArgumentException("Target address cannot be null.");
throw new IllegalArgumentException("Target address cannot be null." +
helpText);
}
int colonIndex = target.indexOf(':');
if (colonIndex < 0 && defaultPort == -1) {
throw new RuntimeException("Not a host:port pair: " + target);
throw new RuntimeException("Not a host:port pair: " + target +
helpText);
}
String hostname;
int port = -1;
@ -165,7 +191,14 @@ public static InetSocketAddress createSocketAddr(String target,
} else {
// must be the old style <host>:<port>
hostname = target.substring(0, colonIndex);
port = Integer.parseInt(target.substring(colonIndex + 1));
String portStr = target.substring(colonIndex + 1);
try {
port = Integer.parseInt(portStr);
} catch (NumberFormatException nfe) {
throw new IllegalArgumentException(
"Can't parse port '" + portStr + "'"
+ helpText);
}
}
} else {
// a new uri
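
The excerpt above breaks off inside the method, but the new three-argument overload is complete enough to sketch its intended use; the host name and configuration key below are illustrative only.

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class SocketAddrSketch {
  public static void main(String[] args) {
    // If "nn.example.com:8020" were malformed, the exception message would now
    // name the configuration property it came from.
    InetSocketAddress addr =
        NetUtils.createSocketAddr("nn.example.com:8020", 8020, "fs.defaultFS");
    System.out.println(addr.getHostName() + ":" + addr.getPort());
  }
}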


@ -45,8 +45,8 @@ public class NetworkTopology {
public static final Log LOG =
LogFactory.getLog(NetworkTopology.class);
/* Inner Node represent a switch/router of a data center or rack.
* Different from a leave node, it has non-null children.
/** InnerNode represents a switch/router of a data center or rack.
* Different from a leaf node, it has non-null children.
*/
private class InnerNode extends NodeBase {
private ArrayList<Node> children=new ArrayList<Node>();
@ -68,16 +68,16 @@ private class InnerNode extends NodeBase {
super(name, location, parent, level);
}
/** Get its children */
/** @return its children */
Collection<Node> getChildren() {return children;}
/** Return the number of children this node has */
/** @return the number of children this node has */
int getNumOfChildren() {
return children.size();
}
/** Judge if this node represents a rack
* Return true if it has no child or its children are not InnerNodes
* @return true if it has no child or its children are not InnerNodes
*/
boolean isRack() {
if (children.isEmpty()) {
@ -225,7 +225,11 @@ boolean remove(Node n) {
}
} // end of remove
/** Given a node's string representation, return a reference to the node */
/** Given a node's string representation, return a reference to the node
* @param loc string location of the form /rack/node
* @return null if the node is not found or the child node is there but
* not an instance of {@link InnerNode}
*/
private Node getLoc(String loc) {
if (loc == null || loc.length() == 0) return this;
@ -246,7 +250,12 @@ private Node getLoc(String loc) {
}
/** get <i>leafIndex</i> leaf of this subtree
* if it is not in the <i>excludedNode</i>*/
* if it is not in the <i>excludedNode</i>
*
* @param leafIndex an indexed leaf of the node
* @param excludedNode an excluded node (can be null)
* @return the <i>leafIndex</i>'th leaf under this subtree, or null if it is not found
*/
private Node getLeaf(int leafIndex, Node excludedNode) {
int count=0;
// check if the excluded node a leaf
@ -298,8 +307,13 @@ int getNumOfLeaves() {
}
} // end of InnerNode
InnerNode clusterMap = new InnerNode(InnerNode.ROOT); // the root
private int numOfRacks = 0; // rack counter
/**
* the root cluster map
*/
InnerNode clusterMap = new InnerNode(InnerNode.ROOT);
/** rack counter */
private int numOfRacks = 0;
/** the lock used to manage access */
private ReadWriteLock netlock;
public NetworkTopology() {
@ -308,8 +322,7 @@ public NetworkTopology() {
/** Add a leaf node
* Update node counter & rack counter if necessary
* @param node
* node to be added
* @param node node to be added; can be null
* @exception IllegalArgumentException if adding a node to a leaf node,
* or if the node to be added is not a leaf
*/
@ -342,9 +355,8 @@ public void add(Node node) {
}
/** Remove a node
* Update node counter & rack counter if necessary
* @param node
* node to be removed
* Update node counter and rack counter if necessary
* @param node node to be removed; can be null
*/
public void remove(Node node) {
if (node==null) return;
@ -371,8 +383,7 @@ public void remove(Node node) {
/** Check if the tree contains node <i>node</i>
*
* @param node
* a node
* @param node a node
* @return true if <i>node</i> is already in the tree; false otherwise
*/
public boolean contains(Node node) {
@ -382,9 +393,10 @@ public boolean contains(Node node) {
Node parent = node.getParent();
for (int level = node.getLevel(); parent != null && level > 0;
parent = parent.getParent(), level--) {
if (parent == clusterMap)
if (parent == clusterMap) {
return true;
}
}
} finally {
netlock.readLock().unlock();
}
@ -409,7 +421,7 @@ public Node getNode(String loc) {
}
}
/** Return the total number of racks */
/** @return the total number of racks */
public int getNumOfRacks() {
netlock.readLock().lock();
try {
@ -419,7 +431,7 @@ public int getNumOfRacks() {
}
}
/** Return the total number of nodes */
/** @return the total number of leaf nodes */
public int getNumOfLeaves() {
netlock.readLock().lock();
try {
@ -435,8 +447,8 @@ public int getNumOfLeaves() {
* to their closest common ancestor.
* @param node1 one node
* @param node2 another node
* @return the distance between node1 and node2
* node1 or node2 do not belong to the cluster
* @return the distance between node1 and node2 which is zero if they are the same
* or {@link Integer#MAX_VALUE} if node1 or node2 do not belong to the cluster
*/
public int getDistance(Node node1, Node node2) {
if (node1 == node2) {
@ -477,8 +489,8 @@ public int getDistance(Node node1, Node node2) {
}
/** Check if two nodes are on the same rack
* @param node1 one node
* @param node2 another node
* @param node1 one node (can be null)
* @param node2 another node (can be null)
* @return true if node1 and node2 are on the same rack; false otherwise
* @exception IllegalArgumentException when either node1 or node2 is null, or
* node1 or node2 do not belong to the cluster
@ -622,6 +634,8 @@ static private void swap(Node[] nodes, int i, int j) {
* If neither the local node nor a local-rack node is found, put a random replica
* location at position 0.
* It leaves the rest of the nodes untouched.
* @param reader the node that wishes to read a block from one of the nodes
* @param nodes the list of nodes containing data for the reader
*/
public void pseudoSortByDistance( Node reader, Node[] nodes ) {
int tempIndex = 0;
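
The file excerpt ends here. As a sketch of how the topology API above composes (node names and rack locations are invented), assuming the NodeBase constructor shown in the next file:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;

public class TopologySketch {
  public static void main(String[] args) {
    NetworkTopology topo = new NetworkTopology();
    NodeBase d1 = new NodeBase("h1:50010", "/dc1/rack1");
    NodeBase d2 = new NodeBase("h2:50010", "/dc1/rack2");
    topo.add(d1);
    topo.add(d2);
    System.out.println(topo.getNumOfRacks());     // 2
    System.out.println(topo.getDistance(d1, d1)); // 0: same node
    System.out.println(topo.getDistance(d1, d2)); // 4: both leaves are two hops from /dc1
  }
}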


@ -33,20 +33,31 @@
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public interface Node {
/** Return the string representation of this node's network location */
/** @return the string representation of this node's network location */
public String getNetworkLocation();
/** Set the node's network location */
/** Set this node's network location
* @param location the location
*/
public void setNetworkLocation(String location);
/** Return this node's name */
/** @return this node's name */
public String getName();
/** Return this node's parent */
/** @return this node's parent */
public Node getParent();
/** Set this node's parent */
/** Set this node's parent
* @param parent the parent
*/
public void setParent(Node parent);
/** Return this node's level in the tree.
/** @return this node's level in the tree.
* E.g. the root of a tree returns 0 and its children return 1
*/
public int getLevel();
/** Set this node's level in the tree.*/
/** Set this node's level in the tree
* @param i the level
*/
public void setLevel(int i);
}


@ -27,9 +27,12 @@
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class NodeBase implements Node {
/** Path separator {@value} */
public final static char PATH_SEPARATOR = '/';
/** Path separator as a string {@value} */
public final static String PATH_SEPARATOR_STR = "/";
public final static String ROOT = ""; // string representation of root
/** string representation of root {@value} */
public final static String ROOT = "";
protected String name; //host:port#
protected String location; //string representation of this node's location
@ -55,7 +58,7 @@ public NodeBase(String path) {
}
/** Construct a node from its name and its location
* @param name this node's name
* @param name this node's name (can be null, must not contain {@link #PATH_SEPARATOR})
* @param location this node's location
*/
public NodeBase(String name, String location) {
@ -63,7 +66,7 @@ public NodeBase(String name, String location) {
}
/** Construct a node from its name and its location
* @param name this node's name
* @param name this node's name (can be null, must not contain {@link #PATH_SEPARATOR})
* @param location this node's location
* @param parent this node's parent node
* @param level this node's level in the tree
@ -74,7 +77,11 @@ public NodeBase(String name, String location, Node parent, int level) {
this.level = level;
}
/* set this node's name and location */
/**
* set this node's name and location
* @param name the (nullable) name -which cannot contain the {@link #PATH_SEPARATOR}
* @param location the location
*/
private void set(String name, String location) {
if (name != null && name.contains(PATH_SEPARATOR_STR))
throw new IllegalArgumentException(
@ -83,27 +90,43 @@ private void set(String name, String location) {
this.location = location;
}
/** Return this node's name */
/** @return this node's name */
@Override
public String getName() { return name; }
/** Return this node's network location */
/** @return this node's network location */
@Override
public String getNetworkLocation() { return location; }
/** Set this node's network location */
/** Set this node's network location
* @param location the location
*/
@Override
public void setNetworkLocation(String location) { this.location = location; }
/** Return this node's path */
/**
* Get the path of a node
* @param node a non-null node
* @return the path of a node
*/
public static String getPath(Node node) {
return node.getNetworkLocation()+PATH_SEPARATOR_STR+node.getName();
}
/** Return this node's string representation */
/** @return this node's path as its string representation */
@Override
public String toString() {
return getPath(this);
}
/** Normalize a path */
static public String normalize(String path) {
/** Normalize a path by stripping off any trailing {@link #PATH_SEPARATOR}
* @param path path to normalize.
* @return the normalised path
* If <i>path</i> is null or empty {@link #ROOT} is returned
* @throws IllegalArgumentException if the first character of a non-empty path
* is not {@link #PATH_SEPARATOR}
*/
public static String normalize(String path) {
if (path == null || path.length() == 0) return ROOT;
if (path.charAt(0) != PATH_SEPARATOR) {
@ -119,20 +142,28 @@ static public String normalize(String path) {
return path;
}
/** Return this node's parent */
/** @return this node's parent */
@Override
public Node getParent() { return parent; }
/** Set this node's parent */
/** Set this node's parent
* @param parent the parent
*/
@Override
public void setParent(Node parent) {
this.parent = parent;
}
/** Return this node's level in the tree.
/** @return this node's level in the tree.
* E.g. the root of a tree returns 0 and its children return 1
*/
@Override
public int getLevel() { return level; }
/** Set this node's level in the tree */
/** Set this node's level in the tree
* @param level the level
*/
@Override
public void setLevel(int level) {
this.level = level;
}
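
A small sketch of the path helpers documented above (names and locations are invented):

import org.apache.hadoop.net.NodeBase;

public class NodeBaseSketch {
  public static void main(String[] args) {
    NodeBase node = new NodeBase("h1:50010", "/dc1/rack1");
    System.out.println(NodeBase.getPath(node));             // /dc1/rack1/h1:50010
    System.out.println(node);                               // toString() prints the same path
    System.out.println(NodeBase.normalize("/dc1/rack1/"));  // trailing separator stripped
    System.out.println(NodeBase.normalize(""));             // empty input maps to ROOT ("")
  }
}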


@ -23,16 +23,16 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.*;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
/**
* This class implements the {@link DNSToSwitchMapping} interface using a
* script configured via net.topology.script.file.name .
* script configured via the {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY}
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@ -43,30 +43,51 @@ public ScriptBasedMapping() {
super(new RawScriptBasedMapping());
}
// script must accept at least this many args
/**
* Minimum number of arguments: {@value}
*/
static final int MIN_ALLOWABLE_ARGS = 1;
/**
* Default number of arguments: {@value}
*/
static final int DEFAULT_ARG_COUNT =
CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_DEFAULT;
/**
* key to the script filename {@value}
*/
static final String SCRIPT_FILENAME_KEY =
CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY ;
/**
* key to the argument count that the script supports
*/
static final String SCRIPT_ARG_COUNT_KEY =
CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY ;
/**
* Create an instance from the given configuration
* @param conf configuration
*/
public ScriptBasedMapping(Configuration conf) {
this();
setConf(conf);
}
@Override
public Configuration getConf() {
return ((RawScriptBasedMapping)rawMapping).getConf();
}
@Override
public void setConf(Configuration conf) {
((RawScriptBasedMapping)rawMapping).setConf(conf);
}
/**
* This is the uncached script mapping that is fed into the cache managed
* by the superclass {@link CachedDNSToSwitchMapping}
*/
private static final class RawScriptBasedMapping
implements DNSToSwitchMapping {
private String scriptName;
@ -74,17 +95,32 @@ private static final class RawScriptBasedMapping
private int maxArgs; //max hostnames per call of the script
private static Log LOG =
LogFactory.getLog(ScriptBasedMapping.class);
/**
* Set the configuration and extract the parameters of interest
* @param conf the configuration to read the script name and argument count from
*/
public void setConf (Configuration conf) {
this.scriptName = conf.get(SCRIPT_FILENAME_KEY);
this.maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
this.conf = conf;
}
/**
* Get the configuration
* @return the configuration
*/
public Configuration getConf () {
return conf;
}
/**
* Constructor. The mapping is not ready to use until
* {@link #setConf(Configuration)} has been called
*/
public RawScriptBasedMapping() {}
@Override
public List<String> resolve(List<String> names) {
List <String> m = new ArrayList<String>(names.size());
@ -109,7 +145,7 @@ public List<String> resolve(List<String> names) {
if (m.size() != names.size()) {
// invalid number of entries returned by the script
LOG.warn("Script " + scriptName + " returned "
LOG.error("Script " + scriptName + " returned "
+ Integer.toString(m.size()) + " values when "
+ Integer.toString(names.size()) + " were expected.");
return null;
@ -123,6 +159,14 @@ public List<String> resolve(List<String> names) {
return m;
}
/**
* Build and execute the resolution command. The command is
* executed in the directory specified by the system property
* "user.dir" if set; otherwise the current working directory is used
* @param args a list of arguments
* @return null if the number of arguments is out of range,
* or the output of the command.
*/
private String runResolveCommand(List<String> args) {
int loopCount = 0;
if (args.size() == 0) {


@ -18,6 +18,7 @@
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.net.UnknownHostException;
@ -34,7 +35,9 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
import sun.security.jgss.krb5.Krb5Util;
@ -313,19 +316,25 @@ public static void setSecurityInfoProviders(SecurityInfo... providers) {
* @param conf configuration object
* @return the KerberosInfo or null if it has no KerberosInfo defined
*/
public static KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
public static KerberosInfo
getKerberosInfo(Class<?> protocol, Configuration conf) {
synchronized (testProviders) {
for(SecurityInfo provider: testProviders) {
KerberosInfo result = provider.getKerberosInfo(protocol, conf);
if (result != null) {
return result;
}
}
}
synchronized (securityInfoProviders) {
for(SecurityInfo provider: securityInfoProviders) {
KerberosInfo result = provider.getKerberosInfo(protocol, conf);
if (result != null) {
return result;
}
}
}
return null;
}
@ -337,19 +346,43 @@ public static KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf
* @return the TokenInfo or null if it has no KerberosInfo defined
*/
public static TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
synchronized (testProviders) {
for(SecurityInfo provider: testProviders) {
TokenInfo result = provider.getTokenInfo(protocol, conf);
if (result != null) {
return result;
}
}
}
synchronized (securityInfoProviders) {
for(SecurityInfo provider: securityInfoProviders) {
TokenInfo result = provider.getTokenInfo(protocol, conf);
if (result != null) {
return result;
}
}
}
return null;
}
/**
* Set the given token's service to the format expected by the RPC client
* @param token a delegation token
* @param addr the socket for the rpc connection
*/
public static void setTokenService(Token<?> token, InetSocketAddress addr) {
token.setService(buildTokenService(addr));
}
/**
* Construct the service key for a token
* @param addr InetSocketAddress of remote connection with a token
* @return "ip:port"
*/
public static Text buildTokenService(InetSocketAddress addr) {
String host = addr.getAddress().getHostAddress();
return new Text(host + ":" + addr.getPort());
}
}
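
A tiny illustration of the new token-service helpers above; the address is made up.

import java.net.InetSocketAddress;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;

public class TokenServiceSketch {
  public static void main(String[] args) {
    InetSocketAddress addr = new InetSocketAddress("10.0.0.1", 8020);
    Text service = SecurityUtil.buildTokenService(addr);
    System.out.println(service); // 10.0.0.1:8020
  }
}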


@ -634,6 +634,23 @@ static void loginUserFromKeytab(String user,
+ " using keytab file " + keytabFile);
}
/**
* Re-login a user from keytab if TGT is expired or is close to expiry.
*
* @throws IOException
*/
public synchronized void checkTGTAndReloginFromKeytab() throws IOException {
if (!isSecurityEnabled()
|| user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
|| !isKeytab)
return;
KerberosTicket tgt = getTGT();
if (tgt != null && System.currentTimeMillis() < getRefreshTime(tgt)) {
return;
}
reloginFromKeytab();
}
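// Usage sketch (an assumption, not part of this patch): a long-running client or
// daemon that logged in from a keytab would typically call
//   UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
// before issuing RPCs, so that an expiring TGT is refreshed transparently.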
/**
* Re-Login a user in from a keytab file. Loads a user identity from a keytab
* file and logs them in. They become the currently logged-in user. This


@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.security.authorize;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;


@ -22,11 +22,15 @@
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.ServiceLoader;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
@ -40,10 +44,12 @@
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class Token<T extends TokenIdentifier> implements Writable {
public static final Log LOG = LogFactory.getLog(Token.class);
private byte[] identifier;
private byte[] password;
private Text kind;
private Text service;
private TokenRenewer renewer;
/**
* Construct a token given a token identifier and a secret manager for the
@ -82,6 +88,17 @@ public Token() {
service = new Text();
}
/**
* Clone a token.
* @param other the token to clone
*/
public Token(Token<T> other) {
this.identifier = other.identifier;
this.password = other.password;
this.kind = other.kind;
this.service = other.service;
}
/**
* Get the token identifier
* @return the token identifier
@ -102,10 +119,21 @@ public byte[] getPassword() {
* Get the token kind
* @return the kind of the token
*/
public Text getKind() {
public synchronized Text getKind() {
return kind;
}
/**
* Set the token kind. This is only intended to be used by services that
* wrap another service's token, such as HFTP wrapping HDFS.
* @param newKind the new kind of the token
*/
@InterfaceAudience.Private
public synchronized void setKind(Text newKind) {
kind = newKind;
renewer = null;
}
/**
* Get the service on which the token is supposed to be used
* @return the service name
@ -244,4 +272,92 @@ public String toString() {
buffer.append(service.toString());
return buffer.toString();
}
private static ServiceLoader<TokenRenewer> renewers =
ServiceLoader.load(TokenRenewer.class);
private synchronized TokenRenewer getRenewer() throws IOException {
if (renewer != null) {
return renewer;
}
renewer = TRIVIAL_RENEWER;
synchronized (renewers) {
for (TokenRenewer candidate : renewers) {
if (candidate.handleKind(this.kind)) {
renewer = candidate;
return renewer;
}
}
}
LOG.warn("No TokenRenewer defined for token kind " + this.kind);
return renewer;
}
/**
* Is this token managed so that it can be renewed or cancelled?
* @return true, if it can be renewed and cancelled.
*/
public boolean isManaged() throws IOException {
return getRenewer().isManaged(this);
}
/**
* Renew this delegation token
* @return the new expiration time
* @throws IOException
* @throws InterruptedException
*/
public long renew(Configuration conf
) throws IOException, InterruptedException {
return getRenewer().renew(this, conf);
}
/**
* Cancel this delegation token
* @throws IOException
* @throws InterruptedException
*/
public void cancel(Configuration conf
) throws IOException, InterruptedException {
getRenewer().cancel(this, conf);
}
/**
* A trivial renewer for token kinds that aren't managed. Sub-classes need
* to implement getKind for their token kind.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public static class TrivialRenewer extends TokenRenewer {
// define the kind for this renewer
protected Text getKind() {
return null;
}
@Override
public boolean handleKind(Text kind) {
return kind.equals(getKind());
}
@Override
public boolean isManaged(Token<?> token) {
return false;
}
@Override
public long renew(Token<?> token, Configuration conf) {
throw new UnsupportedOperationException("Token renewal is not supported "+
" for " + token.kind + " tokens");
}
@Override
public void cancel(Token<?> token, Configuration conf) throws IOException,
InterruptedException {
throw new UnsupportedOperationException("Token cancel is not supported " +
" for " + token.kind + " tokens");
}
}
private static final TokenRenewer TRIVIAL_RENEWER = new TrivialRenewer();
}


@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
/**
* This is the interface for plugins that handle tokens.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public abstract class TokenRenewer {
/**
* Does this renewer handle this kind of token?
* @param kind the kind of the token
* @return true if this renewer can renew it
*/
public abstract boolean handleKind(Text kind);
/**
* Is the given token managed? Only managed tokens may be renewed or
* cancelled.
* @param token the token being checked
* @return true if the token may be renewed or cancelled
* @throws IOException
*/
public abstract boolean isManaged(Token<?> token) throws IOException;
/**
* Renew the given token.
* @return the new expiration time
* @throws IOException
* @throws InterruptedException
*/
public abstract long renew(Token<?> token,
Configuration conf
) throws IOException, InterruptedException;
/**
* Cancel the given token
* @throws IOException
* @throws InterruptedException
*/
public abstract void cancel(Token<?> token,
Configuration conf
) throws IOException, InterruptedException;
}
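
Since Token discovers renewers through ServiceLoader (see the renewers field earlier in this commit), a plugin would subclass the abstract class above and register itself under META-INF/services/org.apache.hadoop.security.token.TokenRenewer. A sketch for a hypothetical token kind:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;

public class MyTokenRenewer extends TokenRenewer {
  private static final Text KIND = new Text("MY_TOKEN");

  @Override
  public boolean handleKind(Text kind) {
    return KIND.equals(kind);
  }

  @Override
  public boolean isManaged(Token<?> token) throws IOException {
    return true; // tokens of this kind may be renewed and cancelled
  }

  @Override
  public long renew(Token<?> token, Configuration conf)
      throws IOException, InterruptedException {
    // a real renewer would contact its token service; here, extend by 24 hours
    return System.currentTimeMillis() + 24L * 60 * 60 * 1000;
  }

  @Override
  public void cancel(Token<?> token, Configuration conf)
      throws IOException, InterruptedException {
    // a real renewer would ask its token service to revoke the token
  }
}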


@ -209,6 +209,21 @@ public synchronized byte[] retrievePassword(TokenIdent identifier)
return info.getPassword();
}
/**
* Verifies that the given identifier and password are valid and match.
* @param identifier Token identifier.
* @param password Password in the token.
* @throws InvalidToken
*/
public synchronized void verifyToken(TokenIdent identifier, byte[] password)
throws InvalidToken {
byte[] storedPassword = retrievePassword(identifier);
if (!Arrays.equals(password, storedPassword)) {
throw new InvalidToken("token (" + identifier
+ ") is invalid, password doesn't match");
}
}
/**
* Renew a delegation token.
* @param token the token to renew


@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.security.token.delegation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;


@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.security.token;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,12 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.tools;
import org.apache.hadoop.classification.InterfaceAudience;
package org.apache.hadoop.mapred;
/**
* Internal scheduling modes for pools.
*/
public enum SchedulingMode {
FAIR, FIFO
}

View File

@ -44,6 +44,10 @@ public class DataChecksum implements Checksum {
public static final int CHECKSUM_CRC32 = 1;
public static final int CHECKSUM_CRC32C = 2;
private static String[] NAMES = new String[] {
"NULL", "CRC32", "CRC32C"
};
private static final int CHECKSUM_NULL_SIZE = 0;
private static final int CHECKSUM_CRC32_SIZE = 4;
private static final int CHECKSUM_CRC32C_SIZE = 4;
@ -395,6 +399,32 @@ private void calculateChunkedSums(
}
}
@Override
public boolean equals(Object other) {
if (!(other instanceof DataChecksum)) {
return false;
}
DataChecksum o = (DataChecksum)other;
return o.bytesPerChecksum == this.bytesPerChecksum &&
o.type == this.type;
}
@Override
public int hashCode() {
return (this.type + 31) * this.bytesPerChecksum;
}
@Override
public String toString() {
String strType;
if (type < NAMES.length && type > 0) {
strType = NAMES[type];
} else {
strType = String.valueOf(type);
}
return "DataChecksum(type=" + strType +
", chunkSize=" + bytesPerChecksum + ")";
}
/**
* This just provides a dummy implementation for the Checksum class

View File

@ -40,6 +40,7 @@ AC_CONFIG_AUX_DIR([config])
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_HEADER([config.h])
AC_SYS_LARGEFILE
AC_GNU_SOURCE
AM_INIT_AUTOMAKE(hadoop,1.0.0)
@ -57,10 +58,8 @@ if test $JAVA_HOME != ""
then
JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH/server"
fi
ldflags_bak=$LDFLAGS
LDFLAGS="$LDFLAGS $JNI_LDFLAGS"
AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs])
LDFLAGS=$ldflags_bak
AC_SUBST([JNI_LDFLAGS])
# Checks for header files.
@ -94,6 +93,12 @@ AC_CHECK_HEADERS([snappy-c.h], AC_COMPUTE_NEEDED_DSO(snappy,HADOOP_SNAPPY_LIBRAR
dnl Check for headers needed by the native Group resolution implementation
AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
dnl check for posix_fadvise
AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(posix_fadvise)])
dnl check for sync_file_range
AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(sync_file_range)])
# Checks for typedefs, structures, and compiler characteristics.
AC_C_CONST

View File

@ -29,6 +29,7 @@
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>
#include "org_apache_hadoop.h"
@ -234,6 +235,81 @@ cleanup:
}
/**
* public static native void posix_fadvise(
* FileDescriptor fd, long offset, long len, int flags);
*/
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_posix_1fadvise(
JNIEnv *env, jclass clazz,
jobject fd_object, jlong offset, jlong len, jint flags)
{
#ifndef HAVE_POSIX_FADVISE
THROW(env, "java/lang/UnsupportedOperationException",
"fadvise support not available");
#else
int fd = fd_get(env, fd_object);
PASS_EXCEPTIONS(env);
int err = 0;
if ((err = posix_fadvise(fd, (off_t)offset, (off_t)len, flags))) {
throw_ioe(env, err);
}
#endif
}
#if defined(HAVE_SYNC_FILE_RANGE)
# define my_sync_file_range sync_file_range
#elif defined(SYS_sync_file_range)
// RHEL 5 kernels have sync_file_range support, but the glibc
// included does not have the library function. We can
// still call it directly, and if it's not supported by the
// kernel, we'd get ENOSYS. See RedHat Bugzilla #518581
static int manual_sync_file_range (int fd, __off64_t from, __off64_t to, unsigned int flags)
{
#ifdef __x86_64__
return syscall( SYS_sync_file_range, fd, from, to, flags);
#else
return syscall (SYS_sync_file_range, fd,
__LONG_LONG_PAIR ((long) (from >> 32), (long) from),
__LONG_LONG_PAIR ((long) (to >> 32), (long) to),
flags);
#endif
}
#define my_sync_file_range manual_sync_file_range
#endif
/**
* public static native void sync_file_range(
* FileDescriptor fd, long offset, long len, int flags);
*/
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_sync_1file_1range(
JNIEnv *env, jclass clazz,
jobject fd_object, jlong offset, jlong len, jint flags)
{
#ifndef my_sync_file_range
THROW(env, "java/lang/UnsupportedOperationException",
"sync_file_range support not available");
#else
int fd = fd_get(env, fd_object);
PASS_EXCEPTIONS(env);
if (my_sync_file_range(fd, (off_t)offset, (off_t)len, flags)) {
if (errno == ENOSYS) {
// we know the syscall number, but it's not compiled
// into the running kernel
THROW(env, "java/lang/UnsupportedOperationException",
"sync_file_range kernel support not available");
return;
} else {
throw_ioe(env, errno);
}
}
#endif
}
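
For reference, a minimal Java-side sketch of calling one of these natives. The class and method names follow from the JNI symbols above; the POSIX_FADV_SEQUENTIAL constant and the sample file handling are assumptions made only for illustration.

import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.io.nativeio.NativeIO;

public class FadviseSketch {
  public static void main(String[] args) throws IOException {
    FileInputStream in = new FileInputStream(args[0]);
    try {
      // Hint that the file will be read sequentially so the kernel can read ahead.
      // POSIX_FADV_SEQUENTIAL is assumed to be exposed as a constant on NativeIO,
      // mirroring the flag of the same name in <fcntl.h>.
      NativeIO.posix_fadvise(in.getFD(), 0, 0, NativeIO.POSIX_FADV_SEQUENTIAL);
    } catch (UnsupportedOperationException e) {
      // Thrown by the native method when fadvise support was not compiled in.
      System.err.println("fadvise not available: " + e.getMessage());
    } finally {
      in.close();
    }
  }
}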
/*
* public static native FileDescriptor open(String path, int flags, int mode);
*/

View File

@ -54,6 +54,11 @@ void fd_deinit(JNIEnv *env) {
* underlying fd, or throw if unavailable
*/
int fd_get(JNIEnv* env, jobject obj) {
if (obj == NULL) {
THROW(env, "java/lang/NullPointerException",
"FileDescriptor object is null");
return -1;
}
return (*env)->GetIntField(env, obj, fd_descriptor);
}

View File

@ -124,6 +124,11 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeVerifyChunk
"bad offsets or lengths");
return;
}
if (unlikely(bytes_per_checksum <= 0)) {
THROW(env, "java/lang/IllegalArgumentException",
"invalid bytes_per_checksum");
return;
}
uint32_t *sums = (uint32_t *)(sums_addr + sums_offset);
uint8_t *data = data_addr + data_offset;

View File

@ -21,6 +21,7 @@
* All rights reserved. Use of this source code is governed by a
* BSD-style license that can be found in the LICENSE file.
*/
#include <assert.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <unistd.h>
@ -30,47 +31,124 @@
#include "bulk_crc32.h"
#include "gcc_optimizations.h"
#define USE_PIPELINED
typedef uint32_t (*crc_update_func_t)(uint32_t, const uint8_t *, size_t);
static uint32_t crc_init();
static uint32_t crc_val(uint32_t crc);
static uint32_t crc32_zlib_sb8(uint32_t crc, const uint8_t *buf, size_t length);
static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length);
#ifdef USE_PIPELINED
static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, const uint8_t *p_buf, size_t block_size, int num_blocks);
#endif /* USE_PIPELINED */
static int cached_cpu_supports_crc32; // initialized by constructor below
static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* data, size_t length);
int bulk_verify_crc(const uint8_t *data, size_t data_len,
const uint32_t *sums, int checksum_type,
int bytes_per_checksum,
crc32_error_t *error_info) {
#ifdef USE_PIPELINED
uint32_t crc1, crc2, crc3;
int n_blocks = data_len / bytes_per_checksum;
int remainder = data_len % bytes_per_checksum;
int do_pipelined = 0;
#endif
uint32_t crc;
crc_update_func_t crc_update_func;
switch (checksum_type) {
case CRC32_ZLIB_POLYNOMIAL:
crc_update_func = crc32_zlib_sb8;
break;
case CRC32C_POLYNOMIAL:
if (likely(cached_cpu_supports_crc32)) {
crc_update_func = crc32c_hardware;
#ifdef USE_PIPELINED
do_pipelined = 1;
#endif
} else {
crc_update_func = crc32c_sb8;
}
break;
default:
return INVALID_CHECKSUM_TYPE;
}
#ifdef USE_PIPELINED
if (do_pipelined) {
/* Process three blocks at a time */
while (likely(n_blocks >= 3)) {
crc1 = crc2 = crc3 = crc_init();
pipelined_crc32c(&crc1, &crc2, &crc3, data, bytes_per_checksum, 3);
if ((crc = ntohl(crc_val(crc1))) != *sums)
goto return_crc_error;
sums++;
data += bytes_per_checksum;
if ((crc = ntohl(crc_val(crc2))) != *sums)
goto return_crc_error;
sums++;
data += bytes_per_checksum;
if ((crc = ntohl(crc_val(crc3))) != *sums)
goto return_crc_error;
sums++;
data += bytes_per_checksum;
n_blocks -= 3;
}
/* One or two blocks */
if (n_blocks) {
crc1 = crc2 = crc_init();
pipelined_crc32c(&crc1, &crc2, &crc3, data, bytes_per_checksum, n_blocks);
if ((crc = ntohl(crc_val(crc1))) != *sums)
goto return_crc_error;
data += bytes_per_checksum;
sums++;
if (n_blocks == 2) {
if ((crc = ntohl(crc_val(crc2))) != *sums)
goto return_crc_error;
sums++;
data += bytes_per_checksum;
}
}
/* For something smaller than a block */
if (remainder) {
crc1 = crc_init();
pipelined_crc32c(&crc1, &crc2, &crc3, data, remainder, 1);
if ((crc = ntohl(crc_val(crc1))) != *sums)
goto return_crc_error;
}
return CHECKSUMS_VALID;
}
#endif
while (likely(data_len > 0)) {
int len = likely(data_len >= bytes_per_checksum) ? bytes_per_checksum : data_len;
uint32_t crc = crc_init();
crc = crc_init();
crc = crc_update_func(crc, data, len);
crc = ntohl(crc_val(crc));
if (unlikely(crc != *sums)) {
if (error_info != NULL) {
error_info->got_crc = crc;
error_info->expected_crc = *sums;
error_info->bad_data = data;
}
return INVALID_CHECKSUM_DETECTED;
goto return_crc_error;
}
data += len;
data_len -= len;
sums++;
}
return CHECKSUMS_VALID;
return_crc_error:
if (error_info != NULL) {
error_info->got_crc = crc;
error_info->expected_crc = *sums;
error_info->bad_data = data;
}
return INVALID_CHECKSUM_DETECTED;
}
@ -154,3 +232,417 @@ static uint32_t crc32_zlib_sb8(
}
return crc;
}
///////////////////////////////////////////////////////////////////////////
// Begin code for SSE4.2 specific hardware support of CRC32C
///////////////////////////////////////////////////////////////////////////
#if (defined(__amd64__) || defined(__i386)) && defined(__GNUC__)
# define SSE42_FEATURE_BIT (1 << 20)
# define CPUID_FEATURES 1
/**
* Call the cpuid instruction to determine CPU feature flags.
*/
static uint32_t cpuid(uint32_t eax_in) {
uint32_t eax, ebx, ecx, edx;
# if defined(__PIC__) && !defined(__LP64__)
// 32-bit PIC code uses the ebx register for the base offset --
// have to save and restore it on the stack
asm("pushl %%ebx\n\t"
"cpuid\n\t"
"movl %%ebx, %[ebx]\n\t"
"popl %%ebx" : "=a" (eax), [ebx] "=r"(ebx), "=c"(ecx), "=d"(edx) : "a" (eax_in)
: "cc");
# else
asm("cpuid" : "=a" (eax), "=b"(ebx), "=c"(ecx), "=d"(edx) : "a"(eax_in)
: "cc");
# endif
return ecx;
}
/**
* On library load, initialize the cached value above for
* whether the cpu supports SSE4.2's crc32 instruction.
*/
void __attribute__ ((constructor)) init_cpu_support_flag(void) {
uint32_t ecx = cpuid(CPUID_FEATURES);
cached_cpu_supports_crc32 = ecx & SSE42_FEATURE_BIT;
}
//
// Definitions of the SSE4.2 crc32 operations. Using these instead of
// the GCC __builtin_* intrinsics allows this code to compile without
// -msse4.2, since we do dynamic CPU detection at runtime.
//
# ifdef __LP64__
inline uint64_t _mm_crc32_u64(uint64_t crc, uint64_t value) {
asm("crc32q %[value], %[crc]\n" : [crc] "+r" (crc) : [value] "rm" (value));
return crc;
}
# endif
inline uint32_t _mm_crc32_u32(uint32_t crc, uint32_t value) {
asm("crc32l %[value], %[crc]\n" : [crc] "+r" (crc) : [value] "rm" (value));
return crc;
}
inline uint32_t _mm_crc32_u16(uint32_t crc, uint16_t value) {
asm("crc32w %[value], %[crc]\n" : [crc] "+r" (crc) : [value] "rm" (value));
return crc;
}
inline uint32_t _mm_crc32_u8(uint32_t crc, uint8_t value) {
asm("crc32b %[value], %[crc]\n" : [crc] "+r" (crc) : [value] "rm" (value));
return crc;
}
# ifdef __LP64__
/**
* Hardware-accelerated CRC32C calculation using the 64-bit instructions.
*/
static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* p_buf, size_t length) {
// start directly at p_buf, even if it's an unaligned address. According
// to the original author of this code, doing a small run of single bytes
// to word-align the 64-bit instructions doesn't seem to help, but
// we haven't reconfirmed those benchmarks ourselves.
uint64_t crc64bit = crc;
size_t i;
for (i = 0; i < length / sizeof(uint64_t); i++) {
crc64bit = _mm_crc32_u64(crc64bit, *(uint64_t*) p_buf);
p_buf += sizeof(uint64_t);
}
// This ugly switch is slightly faster for short strings than the straightforward loop
uint32_t crc32bit = (uint32_t) crc64bit;
length &= sizeof(uint64_t) - 1;
switch (length) {
case 7:
crc32bit = _mm_crc32_u8(crc32bit, *p_buf++);
case 6:
crc32bit = _mm_crc32_u16(crc32bit, *(uint16_t*) p_buf);
p_buf += 2;
// case 5 is below: 4 + 1
case 4:
crc32bit = _mm_crc32_u32(crc32bit, *(uint32_t*) p_buf);
break;
case 3:
crc32bit = _mm_crc32_u8(crc32bit, *p_buf++);
case 2:
crc32bit = _mm_crc32_u16(crc32bit, *(uint16_t*) p_buf);
break;
case 5:
crc32bit = _mm_crc32_u32(crc32bit, *(uint32_t*) p_buf);
p_buf += 4;
case 1:
crc32bit = _mm_crc32_u8(crc32bit, *p_buf);
break;
case 0:
break;
default:
// This should never happen; enable in debug code
assert(0 && "ended up with 8 or more bytes at tail of calculation");
}
return crc32bit;
}
#ifdef USE_PIPELINED
/**
* Pipelined version of hardware-accelerated CRC32C calculation using
* the 64 bit crc32q instruction.
* One crc32c instruction takes three cycles, but two more with no data
* dependency can be in the pipeline to achieve something close to single
* instruction/cycle. Here we feed three blocks in RR.
*
* crc1, crc2, crc3 : Store initial checksum for each block before
* calling. When it returns, updated checksums are stored.
* p_buf : The base address of the data buffer. The buffer should be
* at least as big as block_size * num_blocks.
* block_size : The size of each block in bytes.
* num_blocks : The number of blocks to work on. Min = 1, Max = 3
*/
static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, const uint8_t *p_buf, size_t block_size, int num_blocks) {
uint64_t c1 = *crc1;
uint64_t c2 = *crc2;
uint64_t c3 = *crc3;
uint64_t *data = (uint64_t*)p_buf;
int counter = block_size / sizeof(uint64_t);
int remainder = block_size % sizeof(uint64_t);
uint8_t *bdata;
/* We do switch here because the loop has to be tight in order
* to fill the pipeline. Any other statement inside the loop
* or in between crc32 instructions can slow things down. Calling
* individual crc32 instructions three times from C also causes
* gcc to insert other instructions in between.
*
* Do not rearrange the following code unless you have verified
* the generated machine code is as efficient as before.
*/
switch (num_blocks) {
case 3:
/* Do three blocks */
while (likely(counter)) {
__asm__ __volatile__(
"crc32q (%7), %0;\n\t"
"crc32q (%7,%6,1), %1;\n\t"
"crc32q (%7,%6,2), %2;\n\t"
: "=r"(c1), "=r"(c2), "=r"(c3)
: "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(data)
);
data++;
counter--;
}
/* Take care of the remainder. It is only up to seven bytes,
* so performing byte-level crc32 won't take much time.
*/
bdata = (uint8_t*)data;
while (likely(remainder)) {
__asm__ __volatile__(
"crc32b (%7), %0;\n\t"
"crc32b (%7,%6,1), %1;\n\t"
"crc32b (%7,%6,2), %2;\n\t"
: "=r"(c1), "=r"(c2), "=r"(c3)
: "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(bdata)
);
bdata++;
remainder--;
}
break;
case 2:
/* Do two blocks */
while (likely(counter)) {
__asm__ __volatile__(
"crc32q (%5), %0;\n\t"
"crc32q (%5,%4,1), %1;\n\t"
: "=r"(c1), "=r"(c2)
: "r"(c1), "r"(c2), "r"(block_size), "r"(data)
);
data++;
counter--;
}
bdata = (uint8_t*)data;
while (likely(remainder)) {
__asm__ __volatile__(
"crc32b (%5), %0;\n\t"
"crc32b (%5,%4,1), %1;\n\t"
: "=r"(c1), "=r"(c2)
: "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(bdata)
);
bdata++;
remainder--;
}
break;
case 1:
/* single block */
while (likely(counter)) {
__asm__ __volatile__(
"crc32q (%2), %0;\n\t"
: "=r"(c1)
: "r"(c1), "r"(data)
);
data++;
counter--;
}
bdata = (uint8_t*)data;
while (likely(remainder)) {
__asm__ __volatile__(
"crc32b (%2), %0;\n\t"
: "=r"(c1)
: "r"(c1), "r"(bdata)
);
bdata++;
remainder--;
}
break;
case 0:
return;
default:
assert(0 && "BUG: Invalid number of checksum blocks");
}
*crc1 = c1;
*crc2 = c2;
*crc3 = c3;
return;
}
#endif /* USE_PIPELINED */
# else // 32-bit
/**
* Hardware-accelerated CRC32C calculation using the 32-bit instructions.
*/
static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* p_buf, size_t length) {
// start directly at p_buf, even if it's an unaligned address. According
// to the original author of this code, doing a small run of single bytes
// to word-align the 64-bit instructions doesn't seem to help, but
// we haven't reconfirmed those benchmarks ourselves.
size_t i;
for (i = 0; i < length / sizeof(uint32_t); i++) {
crc = _mm_crc32_u32(crc, *(uint32_t*) p_buf);
p_buf += sizeof(uint32_t);
}
// This ugly switch is slightly faster for short strings than the straightforward loop
length &= sizeof(uint32_t) - 1;
switch (length) {
case 3:
crc = _mm_crc32_u8(crc, *p_buf++);
case 2:
crc = _mm_crc32_u16(crc, *(uint16_t*) p_buf);
break;
case 1:
crc = _mm_crc32_u8(crc, *p_buf);
break;
case 0:
break;
default:
// This should never happen; enable in debug code
assert(0 && "ended up with 4 or more bytes at tail of calculation");
}
return crc;
}
#ifdef USE_PIPELINED
/**
* Pipelined version of hardware-accelerated CRC32C calculation using
* the 32 bit crc32l instruction.
* One crc32c instruction takes three cycles, but two more with no data
* dependency can be in the pipeline to achieve something close to single
* instruction/cycle. Here we feed three blocks in RR.
*
* crc1, crc2, crc3 : Store initial checksum for each block before
* calling. When it returns, updated checksums are stored.
* data : The base address of the data buffer. The buffer should be
* at least as big as block_size * num_blocks.
* block_size : The size of each block in bytes.
* num_blocks : The number of blocks to work on. Min = 1, Max = 3
*/
static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, const uint8_t *p_buf, size_t block_size, int num_blocks) {
uint32_t c1 = *crc1;
uint32_t c2 = *crc2;
uint32_t c3 = *crc3;
int counter = block_size / sizeof(uint32_t);
int remainder = block_size % sizeof(uint32_t);
uint32_t *data = (uint32_t*)p_buf;
uint8_t *bdata;
/* We do switch here because the loop has to be tight in order
* to fill the pipeline. Any other statement inside the loop
* or in between crc32 instructions can slow things down. Calling
* individual crc32 instructions three times from C also causes
* gcc to insert other instructions in between.
*
* Do not rearrange the following code unless you have verified
* the generated machine code is as efficient as before.
*/
switch (num_blocks) {
case 3:
/* Do three blocks */
while (likely(counter)) {
__asm__ __volatile__(
"crc32l (%7), %0;\n\t"
"crc32l (%7,%6,1), %1;\n\t"
"crc32l (%7,%6,2), %2;\n\t"
: "=r"(c1), "=r"(c2), "=r"(c3)
: "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(data)
);
data++;
counter--;
}
/* Take care of the remainder. They are only up to three bytes,
* so performing byte-level crc32 won't take much time.
*/
bdata = (uint8_t*)data;
while (likely(remainder)) {
__asm__ __volatile__(
"crc32b (%7), %0;\n\t"
"crc32b (%7,%6,1), %1;\n\t"
"crc32b (%7,%6,2), %2;\n\t"
: "=r"(c1), "=r"(c2), "=r"(c3)
: "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(bdata)
);
bdata++;
remainder--;
}
break;
case 2:
/* Do two blocks */
while (likely(counter)) {
__asm__ __volatile__(
"crc32l (%5), %0;\n\t"
"crc32l (%5,%4,1), %1;\n\t"
: "=r"(c1), "=r"(c2)
: "r"(c1), "r"(c2), "r"(block_size), "r"(data)
);
data++;
counter--;
}
bdata = (uint8_t*)data;
while (likely(remainder)) {
__asm__ __volatile__(
"crc32b (%5), %0;\n\t"
"crc32b (%5,%4,1), %1;\n\t"
: "=r"(c1), "=r"(c2)
: "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(bdata)
);
bdata++;
remainder--;
}
break;
case 1:
/* single block */
while (likely(counter)) {
__asm__ __volatile__(
"crc32l (%2), %0;\n\t"
: "=r"(c1)
: "r"(c1), "r"(data)
);
data++;
counter--;
}
bdata = (uint8_t*)data;
while (likely(remainder)) {
__asm__ __volatile__(
"crc32b (%2), %0;\n\t"
: "=r"(c1)
: "r"(c1), "r"(bdata)
);
bdata++;
remainder--;
}
break;
case 0:
return;
default:
assert(0 && "BUG: Invalid number of checksum blocks");
}
*crc1 = c1;
*crc2 = c2;
*crc3 = c3;
return;
}
#endif /* USE_PIPELINED */
# endif // 64-bit vs 32-bit
#else // end x86 architecture
static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* data, size_t length) {
// never called!
assert(0 && "hardware crc called on an unsupported platform");
return 0;
}
#endif

View File

@ -55,6 +55,16 @@ usage: $0 <parameters>
--dfs-support-append=false|true Enable append
--hadoop-proxy-users='user1:groups:hosts;user2:groups:hosts' Setup proxy users for hadoop
--hbase-user=hbase User which hbase is running as. Defaults to hbase
--mapreduce-cluster-mapmemory-mb=memory Virtual memory of a map slot for the MR framework. Defaults to -1
--mapreduce-cluster-reducememory-mb=memory Virtual memory of a reduce slot for the MR framework. Defaults to -1
--mapreduce-jobtracker-maxmapmemory-mb=memory Maximum virtual memory of a single map task. Defaults to -1
This value should be set to (mapreduce.cluster.mapmemory.mb * mapreduce.tasktracker.map.tasks.maximum)
--mapreduce-jobtracker-maxreducememory-mb=memory Maximum virtual memory of a single reduce task. Defaults to -1
This value should be set to (mapreduce.cluster.reducememory.mb * mapreduce.tasktracker.reduce.tasks.maximum)
--mapreduce-map-memory-mb=memory Virtual memory of a single map slot for a job. Defaults to -1
This value should be <= mapred.cluster.max.map.memory.mb
--mapreduce-reduce-memory-mb=memory Virtual memory of a single reduce slot for a job. Defaults to -1
This value should be <= mapred.cluster.max.reduce.memory.mb
"
exit 1
}
@ -139,6 +149,7 @@ function addPropertyToXMLConf
#########################################
function setupProxyUsers
{
local conf_file="${HADOOP_CONF_DIR}/core-site.xml"
#if hadoop proxy users are specified, set up the hadoop proxy users
if [ ! -z $HADOOP_PROXY_USERS ]
then
@ -156,10 +167,10 @@ function setupProxyUsers
#determine the property names and values
proxy_groups_property="hadoop.proxyuser.${user}.groups"
proxy_groups_val="$groups"
addPropertyToXMLConf "${HADOOP_CONF_DIR}/hdfs-site.xml" "$proxy_groups_property" "$proxy_groups_val"
addPropertyToXMLConf "$conf_file" "$proxy_groups_property" "$proxy_groups_val"
proxy_hosts_property="hadoop.proxyuser.${user}.hosts"
proxy_hosts_val="$hosts"
addPropertyToXMLConf "${HADOOP_CONF_DIR}/hdfs-site.xml" "$proxy_hosts_property" "$proxy_hosts_val"
addPropertyToXMLConf "$conf_file" "$proxy_hosts_property" "$proxy_hosts_val"
IFS=';'
done
IFS=$oldIFS
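# Illustrative example of what setupProxyUsers produces (user, group and host
# names are made up): passing
#   --hadoop-proxy-users='oozie:users:host1.example.com'
# adds the following properties to ${HADOOP_CONF_DIR}/core-site.xml:
#   hadoop.proxyuser.oozie.groups = users
#   hadoop.proxyuser.oozie.hosts  = host1.example.com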
@ -198,6 +209,12 @@ OPTS=$(getopt \
-l 'hadoop-proxy-users:' \
-l 'dfs-support-append:' \
-l 'hbase-user:' \
-l 'mapreduce-cluster-mapmemory-mb:' \
-l 'mapreduce-cluster-reducememory-mb:' \
-l 'mapreduce-jobtracker-maxmapmemory-mb:' \
-l 'mapreduce-jobtracker-maxreducememory-mb:' \
-l 'mapreduce-map-memory-mb:' \
-l 'mapreduce-reduce-memory-mb:' \
-o 'h' \
-- "$@")
@ -333,6 +350,30 @@ while true ; do
HBASE_USER=$2; shift 2
AUTOMATED=1
;;
--mapreduce-cluster-mapmemory-mb)
MAPREDUCE_CLUSTER_MAPMEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-cluster-reducememory-mb)
MAPREDUCE_CLUSTER_REDUCEMEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-jobtracker-maxmapmemory-mb)
MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-jobtracker-maxreducememory-mb)
MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-map-memory-mb)
MAPREDUCE_MAP_MEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-reduce-memory-mb)
MAPREDUCE_REDUCE_MEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--)
shift ; break
;;
@ -364,6 +405,12 @@ HADOOP_MR_USER=${HADOOP_MR_USER:-mr}
DFS_WEBHDFS_ENABLED=${DFS_WEBHDFS_ENABLED:-false}
DFS_SUPPORT_APPEND=${DFS_SUPPORT_APPEND:-false}
HBASE_USER=${HBASE_USER:-hbase}
MAPREDUCE_CLUSTER_MAPMEMORY_MB=${MAPREDUCE_CLUSTER_MAPMEMORY_MB:--1}
MAPREDUCE_CLUSTER_REDUCEMEMORY_MB=${MAPREDUCE_CLUSTER_REDUCEMEMORY_MB:--1}
MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB=${MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB:--1}
MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB=${MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB:--1}
MAPREDUCE_MAP_MEMORY_MB=${MAPREDUCE_MAP_MEMORY_MB:--1}
MAPREDUCE_REDUCE_MEMORY_MB=${MAPREDUCE_REDUCE_MEMORY_MB:--1}
KEYTAB_DIR=${KEYTAB_DIR:-/etc/security/keytabs}
HDFS_KEYTAB=${HDFS_KEYTAB:-/home/hdfs/hdfs.keytab}
MR_KEYTAB=${MR_KEYTAB:-/home/mr/mr.keytab}

View File

@ -70,6 +70,10 @@ while true ; do
HADOOP_MR_USER=$2; shift 2
AUTOMATED=1
;;
--yarn-user)
HADOOP_YARN_USER=$2; shift 2
AUTOMATED=1
;;
--hdfs-user-keytab)
HDFS_KEYTAB=$2; shift 2
AUTOMATED=1
@ -91,6 +95,7 @@ done
HADOOP_GROUP=${HADOOP_GROUP:-hadoop}
HADOOP_HDFS_USER=${HADOOP_HDFS_USER:-hdfs}
HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
HADOOP_MAPREDUCE_USER=${HADOOP_MR_USER:-mapred}
if [ "${KERBEROS_REALM}" != "" ]; then

View File

@ -0,0 +1,181 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Run the following jobs to validate a hadoop cluster
## teragen
## terasort
## teravalidate
# If they all pass, 0 is returned; otherwise 1.
# The test works for both secure and insecure deploys. If the kerberos-realm
# is passed, we assume the deploy is secure and do a kinit before
# running the validation jobs.
################################################################################
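# Example invocation (the script name and paths are illustrative; passing the
# realm makes the script kinit first, as described above):
#   ./validate-cluster.sh --user=hdfs --user-keytab=/home/hdfs/hdfs.keytab \
#       --kerberos-realm=KERBEROS.EXAMPLE.COM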
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/../libexec/hadoop-config.sh
usage() {
echo "
usage: $0 <parameters>
Optional parameters:
-h Display this message
--user=hdfs
--user-keytab=/home/hdfs/hdfs.keytab
--kerberos-realm=KERBEROS.EXAMPLE.COM Set Kerberos realm
"
exit 1
}
OPTS=$(getopt \
-n $0 \
-o '' \
-l 'user:' \
-l 'user-keytab:' \
-l 'kerberos-realm:' \
-o 'h' \
-- "$@")
if [ $? != 0 ] ; then
usage
fi
eval set -- "${OPTS}"
while true ; do
case "$1" in
--user)
TEST_USER=$2; shift 2
AUTOMATED=1
;;
--user-keytab)
USER_KEYTAB_FILE=$2; shift 2
AUTOMATED=1
;;
--kerberos-realm)
KERBEROS_REALM=$2; shift 2
AUTOMATED=1
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
#set the hadoop command and the path to the hadoop examples jar
HADOOP_CMD="${HADOOP_PREFIX}/bin/hadoop --config $HADOOP_CONF_DIR"
#find the hadoop examples jar
HADOOP_EXAMPLES_JAR=''
#find under HADOOP_PREFIX (tar ball install)
HADOOP_EXAMPLES_JAR=`find ${HADOOP_PREFIX} -name 'hadoop-examples-*.jar' | head -n1`
#if it's not found, look under /usr/share/hadoop (rpm/deb installs)
if [ "$HADOOP_EXAMPLES_JAR" == '' ]
then
HADOOP_EXAMPLES_JAR=`find /usr/share/hadoop -name 'hadoop-examples-*.jar' | head -n1`
fi
#if it is still empty then don't run the tests
if [ "$HADOOP_EXAMPLES_JAR" == '' ]
then
echo "Did not find hadoop-examples-*.jar under '${HADOOP_PREFIX} or '/usr/share/hadoop'"
exit 1
fi
# do a kinit if secure
if [ "${KERBEROS_REALM}" != "" ]; then
# Determine kerberos location based on Linux distro.
if [ -e /etc/lsb-release ]; then
KERBEROS_BIN=/usr/bin
else
KERBEROS_BIN=/usr/kerberos/bin
fi
kinit_cmd="su -c '${KERBEROS_BIN}/kinit -kt ${USER_KEYTAB_FILE} ${TEST_USER}' ${TEST_USER}"
echo $kinit_cmd
eval $kinit_cmd
if [ $? -ne 0 ]
then
echo "kinit command did not run successfully."
exit 1
fi
fi
#dir where to store the data on hdfs. The data is relative to the user's home dir on hdfs.
PARENT_DIR="validate_deploy_`date +%s`"
TERA_GEN_OUTPUT_DIR="${PARENT_DIR}/tera_gen_data"
TERA_SORT_OUTPUT_DIR="${PARENT_DIR}/tera_sort_data"
TERA_VALIDATE_OUTPUT_DIR="${PARENT_DIR}/tera_validate_data"
#tera gen cmd
TERA_GEN_CMD="su -c '$HADOOP_CMD jar $HADOOP_EXAMPLES_JAR teragen 10000 $TERA_GEN_OUTPUT_DIR' $TEST_USER"
#tera sort cmd
TERA_SORT_CMD="su -c '$HADOOP_CMD jar $HADOOP_EXAMPLES_JAR terasort $TERA_GEN_OUTPUT_DIR $TERA_SORT_OUTPUT_DIR' $TEST_USER"
#tera validate cmd
TERA_VALIDATE_CMD="su -c '$HADOOP_CMD jar $HADOOP_EXAMPLES_JAR teravalidate $TERA_SORT_OUTPUT_DIR $TERA_VALIDATE_OUTPUT_DIR' $TEST_USER"
echo "Starting teragen...."
#run tera gen
echo $TERA_GEN_CMD
eval $TERA_GEN_CMD
if [ $? -ne 0 ]; then
echo "tera gen failed."
exit 1
fi
echo "Teragen passed starting terasort...."
#run tera sort
echo $TERA_SORT_CMD
eval $TERA_SORT_CMD
if [ $? -ne 0 ]; then
echo "tera sort failed."
exit 1
fi
echo "Terasort passed starting teravalidate...."
#run tera validate
echo $TERA_VALIDATE_CMD
eval $TERA_VALIDATE_CMD
if [ $? -ne 0 ]; then
echo "tera validate failed."
exit 1
fi
echo "teragen, terasort, teravalidate passed."
echo "Cleaning the data created by tests: $PARENT_DIR"
CLEANUP_CMD="su -c '$HADOOP_CMD dfs -rmr -skipTrash $PARENT_DIR' $TEST_USER"
echo $CLEANUP_CMD
eval $CLEANUP_CMD
exit 0

View File

@ -44,12 +44,12 @@ done
export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
HADOOP_JOBTRACKER_OPTS="-Dsecurity.audit.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dmapred.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"
HADOOP_TASKTRACKER_OPTS="-Dsecurity.audit.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"
HADOOP_DATANODE_OPTS="-Dsecurity.audit.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
HADOOP_JOBTRACKER_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dmapred.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"
HADOOP_TASKTRACKER_OPTS="-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"
HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dsecurity.audit.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"

View File

@ -85,6 +85,7 @@
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.submission.protocol.acl</name>
<value>*</value>
@ -135,5 +136,85 @@
</property>
<!-- YARN Protocols -->
<property>
<name>security.resourcetracker.protocol.acl</name>
<value>${HADOOP_YARN_USER}</value>
<description>ACL for ResourceTracker protocol, used by the
ResourceManager and NodeManager to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.admin.protocol.acl</name>
<value>${HADOOP_YARN_USER}</value>
<description>ACL for RMAdminProtocol, for admin commands.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.client.resourcemanager.protocol.acl</name>
<value>*</value>
<description>ACL for ClientRMProtocol, used by the ResourceManager
and applications submission clients to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.applicationmaster.resourcemanager.protocol.acl</name>
<value>*</value>
<description>ACL for AMRMProtocol, used by the ResourceManager
and ApplicationMasters to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.containermanager.protocol.acl</name>
<value>*</value>
<description>ACL for ContainerManager protocol, used by the NodeManager
and ApplicationMasters to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.resourcelocalizer.protocol.acl</name>
<value>*</value>
<description>ACL for ResourceLocalizer protocol, used by the NodeManager
and ResourceLocalizer to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.task.protocol.acl</name>
<value>*</value>
<description>ACL for TaskUmbilicalProtocol, used by the map and reduce
tasks to communicate with the parent tasktracker.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.client.protocol.acl</name>
<value>*</value>
<description>ACL for MRClientProtocol, used by job clients to
communicate with the MR ApplicationMaster to query job status etc.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
</configuration>

View File

@ -81,7 +81,8 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
#Security appender
#
security.audit.logger=INFO,console
hadoop.security.logger=INFO,console
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth.audit
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
@ -89,9 +90,6 @@ log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
#new logger
# Define some default values that can be overridden by system properties
hadoop.security.logger=INFO,console
#
# hdfs audit logging

View File

@ -280,4 +280,34 @@
<name>mapred.jobtracker.retirejob.interval</name>
<value>0</value>
</property>
<property>
<name>mapreduce.cluster.mapmemory.mb</name>
<value>${MAPREDUCE_CLUSTER_MAPMEMORY_MB}</value>
</property>
<property>
<name>mapreduce.cluster.reducememory.mb</name>
<value>${MAPREDUCE_CLUSTER_REDUCEMEMORY_MB}</value>
</property>
<property>
<name>mapreduce.jobtracker.maxmapmemory.mb</name>
<value>${MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB}</value>
</property>
<property>
<name>mapreduce.jobtracker.maxreducememory.mb</name>
<value>${MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB}</value>
</property>
<property>
<name>mapreduce.map.memory.mb</name>
<value>${MAPREDUCE_MAP_MEMORY_MB}</value>
</property>
<property>
<name>mapreduce.reduce.memory.mb</name>
<value>${MAPREDUCE_REDUCE_MEMORY_MB}</value>
</property>
</configuration>

View File

@ -0,0 +1,73 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* These are the messages used by Hadoop RPC to marshal the
* request and response in the RPC layer.
*/
option java_package = "org.apache.hadoop.ipc.protobuf";
option java_outer_classname = "HadoopRpcProtos";
option java_generate_equals_and_hash = true;
/**
* Message used to marshal the client request
* from the RPC client to the RPC server.
*/
message HadoopRpcRequestProto {
/** Name of the RPC method */
required string methodName = 1;
/** Bytes corresponding to the client protobuf request */
optional bytes request = 2;
}
/**
* At the RPC layer, this message is used to indicate
* the server-side exception to the RPC client.
*
* Hadoop RPC client throws an exception indicated
* by exceptionName with the stackTrace.
*/
message HadoopRpcExceptionProto {
/** Class name of the exception thrown from the server */
optional string exceptionName = 1;
/** Exception stack trace from the server side */
optional string stackTrace = 2;
}
/**
* This message is used to marshal the response from
* RPC server to the client.
*/
message HadoopRpcResponseProto {
/** Status of IPC call */
enum ResponseStatus {
SUCCESS = 1;
ERROR = 2;
}
required ResponseStatus status = 1;
// Protobuf response payload from the server, when status is SUCCESS.
optional bytes response = 2;
// Exception when status is ERROR or FATAL
optional HadoopRpcExceptionProto exception = 3;
}
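
A short sketch of how a caller might wrap a protocol-specific payload in this envelope, assuming the standard Java classes that protoc generates from the definitions above; the method name "ping" and the payload bytes are made up.

import com.google.protobuf.ByteString;
import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;

public class RpcEnvelopeSketch {
  public static void main(String[] args) throws Exception {
    // Payload already serialized by the protocol-specific layer (contents made up).
    byte[] protocolRequest = "ping-args".getBytes("UTF-8");

    // Wrap it in the generic RPC envelope defined above.
    HadoopRpcRequestProto request = HadoopRpcRequestProto.newBuilder()
        .setMethodName("ping")                        // RPC method being invoked
        .setRequest(ByteString.copyFrom(protocolRequest))
        .build();

    byte[] onTheWire = request.toByteArray();         // what actually crosses the network

    // The server side reverses the process.
    HadoopRpcRequestProto decoded = HadoopRpcRequestProto.parseFrom(onTheWire);
    System.out.println(decoded.getMethodName() + " / " + decoded.getRequest().size() + " bytes");
  }
}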

View File

@ -0,0 +1,544 @@
~~ Licensed under the Apache License, Version 2.0 (the "License");
~~ you may not use this file except in compliance with the License.
~~ You may obtain a copy of the License at
~~
~~ http://www.apache.org/licenses/LICENSE-2.0
~~
~~ Unless required by applicable law or agreed to in writing, software
~~ distributed under the License is distributed on an "AS IS" BASIS,
~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~~ See the License for the specific language governing permissions and
~~ limitations under the License. See accompanying LICENSE file.
---
Hadoop ${project.version}
---
---
${maven.build.timestamp}
Deprecated Properties
The following table lists the configuration property names that are
deprecated in this version of Hadoop, and their replacements.
*-------------------------------+-----------------------+
|| <<Deprecated property name>> || <<New property name>>|
*-------------------------------+-----------------------+
|StorageId | dfs.datanode.StorageId
*---+---+
|create.empty.dir.if.nonexist | mapreduce.jobcontrol.createdir.ifnotexist
*---+---+
|dfs.access.time.precision | dfs.namenode.accesstime.precision
*---+---+
|dfs.backup.address | dfs.namenode.backup.address
*---+---+
|dfs.backup.http.address | dfs.namenode.backup.http-address
*---+---+
|dfs.balance.bandwidthPerSec | dfs.datanode.balance.bandwidthPerSec
*---+---+
|dfs.block.size | dfs.blocksize
*---+---+
|dfs.client.buffer.dir | fs.client.buffer.dir
*---+---+
|dfs.data.dir | dfs.datanode.data.dir
*---+---+
|dfs.datanode.max.xcievers | dfs.datanode.max.transfer.threads
*---+---+
|dfs.df.interval | fs.df.interval
*---+---+
|dfs.http.address | dfs.namenode.http-address
*---+---+
|dfs.https.address | dfs.namenode.https-address
*---+---+
|dfs.https.client.keystore.resource | dfs.client.https.keystore.resource
*---+---+
|dfs.https.need.client.auth | dfs.client.https.need-auth
*---+---+
|dfs.max-repl-streams | dfs.namenode.replication.max-streams
*---+---+
|dfs.max.objects | dfs.namenode.max.objects
*---+---+
|dfs.name.dir | dfs.namenode.name.dir
*---+---+
|dfs.name.dir.restore | dfs.namenode.name.dir.restore
*---+---+
|dfs.name.edits.dir | dfs.namenode.edits.dir
*---+---+
|dfs.permissions | dfs.permissions.enabled
*---+---+
|dfs.permissions.supergroup | dfs.permissions.superusergroup
*---+---+
|dfs.read.prefetch.size | dfs.client.read.prefetch.size
*---+---+
|dfs.replication.considerLoad | dfs.namenode.replication.considerLoad
*---+---+
|dfs.replication.interval | dfs.namenode.replication.interval
*---+---+
|dfs.replication.min | dfs.namenode.replication.min
*---+---+
|dfs.replication.pending.timeout.sec | dfs.namenode.replication.pending.timeout-sec
*---+---+
|dfs.safemode.extension | dfs.namenode.safemode.extension
*---+---+
|dfs.safemode.threshold.pct | dfs.namenode.safemode.threshold-pct
*---+---+
|dfs.secondary.http.address | dfs.namenode.secondary.http-address
*---+---+
|dfs.socket.timeout | dfs.client.socket-timeout
*---+---+
|dfs.upgrade.permission | dfs.namenode.upgrade.permission
*---+---+
|dfs.write.packet.size | dfs.client-write-packet-size
*---+---+
|fs.checkpoint.dir | dfs.namenode.checkpoint.dir
*---+---+
|fs.checkpoint.edits.dir | dfs.namenode.checkpoint.edits.dir
*---+---+
|fs.checkpoint.period | dfs.namenode.checkpoint.period
*---+---+
|fs.default.name | fs.defaultFS
*---+---+
|hadoop.configured.node.mapping | net.topology.configured.node.mapping
*---+---+
|hadoop.job.history.location | mapreduce.jobtracker.jobhistory.location
*---+---+
|hadoop.native.lib | io.native.lib.available
*---+---+
|hadoop.net.static.resolutions | mapreduce.tasktracker.net.static.resolutions
*---+---+
|hadoop.pipes.command-file.keep | mapreduce.pipes.commandfile.preserve
*---+---+
|hadoop.pipes.executable | mapreduce.pipes.executable
*---+---+
|hadoop.pipes.executable.interpretor | mapreduce.pipes.executable.interpretor
*---+---+
|hadoop.pipes.java.mapper | mapreduce.pipes.isjavamapper
*---+---+
|hadoop.pipes.java.recordreader | mapreduce.pipes.isjavarecordreader
*---+---+
|hadoop.pipes.java.recordwriter | mapreduce.pipes.isjavarecordwriter
*---+---+
|hadoop.pipes.java.reducer | mapreduce.pipes.isjavareducer
*---+---+
|hadoop.pipes.partitioner | mapreduce.pipes.partitioner
*---+---+
|heartbeat.recheck.interval | dfs.namenode.heartbeat.recheck-interval
*---+---+
|io.bytes.per.checksum | dfs.bytes-per-checksum
*---+---+
|io.sort.factor | mapreduce.task.io.sort.factor
*---+---+
|io.sort.mb | mapreduce.task.io.sort.mb
*---+---+
|io.sort.spill.percent | mapreduce.map.sort.spill.percent
*---+---+
|job.end.notification.url | mapreduce.job.end-notification.url
*---+---+
|job.end.retry.attempts | mapreduce.job.end-notification.retry.attempts
*---+---+
|job.end.retry.interval | mapreduce.job.end-notification.retry.interval
*---+---+
|job.local.dir | mapreduce.job.local.dir
*---+---+
|jobclient.completion.poll.interval | mapreduce.client.completion.pollinterval
*---+---+
|jobclient.output.filter | mapreduce.client.output.filter
*---+---+
|jobclient.progress.monitor.poll.interval | mapreduce.client.progressmonitor.pollinterval
*---+---+
|keep.failed.task.files | mapreduce.task.files.preserve.failedtasks
*---+---+
|keep.task.files.pattern | mapreduce.task.files.preserve.filepattern
*---+---+
|key.value.separator.in.input.line | mapreduce.input.keyvaluelinerecordreader.key.value.separator
*---+---+
|local.cache.size | mapreduce.tasktracker.cache.local.size
*---+---+
|map.input.file | mapreduce.map.input.file
*---+---+
|map.input.length | mapreduce.map.input.length
*---+---+
|map.input.start | mapreduce.map.input.start
*---+---+
|map.output.key.field.separator | mapreduce.map.output.key.field.separator
*---+---+
|map.output.key.value.fields.spec | mapreduce.fieldsel.map.output.key.value.fields.spec
*---+---+
|mapred.acls.enabled | mapreduce.cluster.acls.enabled
*---+---+
|mapred.binary.partitioner.left.offset | mapreduce.partition.binarypartitioner.left.offset
*---+---+
|mapred.binary.partitioner.right.offset | mapreduce.partition.binarypartitioner.right.offset
*---+---+
|mapred.cache.archives | mapreduce.job.cache.archives
*---+---+
|mapred.cache.archives.timestamps | mapreduce.job.cache.archives.timestamps
*---+---+
|mapred.cache.files | mapreduce.job.cache.files
*---+---+
|mapred.cache.files.timestamps | mapreduce.job.cache.files.timestamps
*---+---+
|mapred.cache.localArchives | mapreduce.job.cache.local.archives
*---+---+
|mapred.cache.localFiles | mapreduce.job.cache.local.files
*---+---+
|mapred.child.tmp | mapreduce.task.tmp.dir
*---+---+
|mapred.cluster.average.blacklist.threshold | mapreduce.jobtracker.blacklist.average.threshold
*---+---+
|mapred.cluster.map.memory.mb | mapreduce.cluster.mapmemory.mb
*---+---+
|mapred.cluster.max.map.memory.mb | mapreduce.jobtracker.maxmapmemory.mb
*---+---+
|mapred.cluster.max.reduce.memory.mb | mapreduce.jobtracker.maxreducememory.mb
*---+---+
|mapred.cluster.reduce.memory.mb | mapreduce.cluster.reducememory.mb
*---+---+
|mapred.committer.job.setup.cleanup.needed | mapreduce.job.committer.setup.cleanup.needed
*---+---+
|mapred.compress.map.output | mapreduce.map.output.compress
*---+---+
|mapred.create.symlink | mapreduce.job.cache.symlink.create
*---+---+
|mapred.data.field.separator | mapreduce.fieldsel.data.field.separator
*---+---+
|mapred.debug.out.lines | mapreduce.task.debugout.lines
*---+---+
|mapred.healthChecker.interval | mapreduce.tasktracker.healthchecker.interval
*---+---+
|mapred.healthChecker.script.args | mapreduce.tasktracker.healthchecker.script.args
*---+---+
|mapred.healthChecker.script.path | mapreduce.tasktracker.healthchecker.script.path
*---+---+
|mapred.healthChecker.script.timeout | mapreduce.tasktracker.healthchecker.script.timeout
*---+---+
|mapred.heartbeats.in.second | mapreduce.jobtracker.heartbeats.in.second
*---+---+
|mapred.hosts | mapreduce.jobtracker.hosts.filename
*---+---+
|mapred.hosts.exclude | mapreduce.jobtracker.hosts.exclude.filename
*---+---+
|mapred.inmem.merge.threshold | mapreduce.reduce.merge.inmem.threshold
*---+---+
|mapred.input.dir | mapreduce.input.fileinputformat.inputdir
*---+---+
|mapred.input.dir.formats | mapreduce.input.multipleinputs.dir.formats
*---+---+
|mapred.input.dir.mappers | mapreduce.input.multipleinputs.dir.mappers
*---+---+
|mapred.input.pathFilter.class | mapreduce.input.pathFilter.class
*---+---+
|mapred.jar | mapreduce.job.jar
*---+---+
|mapred.job.classpath.archives | mapreduce.job.classpath.archives
*---+---+
|mapred.job.classpath.files | mapreduce.job.classpath.files
*---+---+
|mapred.job.id | mapreduce.job.id
*---+---+
|mapred.job.map.memory.mb | mapreduce.map.memory.mb
*---+---+
|mapred.job.name | mapreduce.job.name
*---+---+
|mapred.job.priority | mapreduce.job.priority
*---+---+
|mapred.job.queue.name | mapreduce.job.queuename
*---+---+
|mapred.job.reduce.input.buffer.percent | mapreduce.reduce.input.buffer.percent
*---+---+
|mapred.job.reduce.markreset.buffer.percent | mapreduce.reduce.markreset.buffer.percent
*---+---+
|mapred.job.reduce.memory.mb | mapreduce.reduce.memory.mb
*---+---+
|mapred.job.reduce.total.mem.bytes | mapreduce.reduce.memory.totalbytes
*---+---+
|mapred.job.reuse.jvm.num.tasks | mapreduce.job.jvm.numtasks
*---+---+
|mapred.job.shuffle.input.buffer.percent | mapreduce.reduce.shuffle.input.buffer.percent
*---+---+
|mapred.job.shuffle.merge.percent | mapreduce.reduce.shuffle.merge.percent
*---+---+
|mapred.job.tracker | mapreduce.jobtracker.address
*---+---+
|mapred.job.tracker.handler.count | mapreduce.jobtracker.handler.count
*---+---+
|mapred.job.tracker.history.completed.location | mapreduce.jobtracker.jobhistory.completed.location
*---+---+
|mapred.job.tracker.http.address | mapreduce.jobtracker.http.address
*---+---+
|mapred.job.tracker.jobhistory.lru.cache.size | mapreduce.jobtracker.jobhistory.lru.cache.size
*---+---+
|mapred.job.tracker.persist.jobstatus.active | mapreduce.jobtracker.persist.jobstatus.active
*---+---+
|mapred.job.tracker.persist.jobstatus.dir | mapreduce.jobtracker.persist.jobstatus.dir
*---+---+
|mapred.job.tracker.persist.jobstatus.hours | mapreduce.jobtracker.persist.jobstatus.hours
*---+---+
|mapred.job.tracker.retire.jobs | mapreduce.jobtracker.retirejobs
*---+---+
|mapred.job.tracker.retiredjobs.cache.size | mapreduce.jobtracker.retiredjobs.cache.size
*---+---+
|mapred.jobinit.threads | mapreduce.jobtracker.jobinit.threads
*---+---+
|mapred.jobtracker.instrumentation | mapreduce.jobtracker.instrumentation
*---+---+
|mapred.jobtracker.job.history.block.size | mapreduce.jobtracker.jobhistory.block.size
*---+---+
|mapred.jobtracker.maxtasks.per.job | mapreduce.jobtracker.maxtasks.perjob
*---+---+
|mapred.jobtracker.restart.recover | mapreduce.jobtracker.restart.recover
*---+---+
|mapred.jobtracker.taskScheduler | mapreduce.jobtracker.taskscheduler
*---+---+
|mapred.jobtracker.taskScheduler.maxRunningTasksPerJob | mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob
*---+---+
|mapred.jobtracker.taskalloc.capacitypad | mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad
*---+---+
|mapred.join.expr | mapreduce.join.expr
*---+---+
|mapred.join.keycomparator | mapreduce.join.keycomparator
*---+---+
|mapred.lazy.output.format | mapreduce.output.lazyoutputformat.outputformat
*---+---+
|mapred.line.input.format.linespermap | mapreduce.input.lineinputformat.linespermap
*---+---+
|mapred.linerecordreader.maxlength | mapreduce.input.linerecordreader.line.maxlength
*---+---+
|mapred.local.dir | mapreduce.cluster.local.dir
*---+---+
|mapred.local.dir.minspacekill | mapreduce.tasktracker.local.dir.minspacekill
*---+---+
|mapred.local.dir.minspacestart | mapreduce.tasktracker.local.dir.minspacestart
*---+---+
|mapred.map.child.env | mapreduce.map.env
*---+---+
|mapred.map.child.java.opts | mapreduce.map.java.opts
*---+---+
|mapred.map.child.log.level | mapreduce.map.log.level
*---+---+
|mapred.map.child.ulimit | mapreduce.map.ulimit
*---+---+
|mapred.map.max.attempts | mapreduce.map.maxattempts
*---+---+
|mapred.map.output.compression.codec | mapreduce.map.output.compress.codec
*---+---+
|mapred.map.task.debug.script | mapreduce.map.debug.script
*---+---+
|mapred.map.tasks | mapreduce.job.maps
*---+---+
|mapred.map.tasks.speculative.execution | mapreduce.map.speculative
*---+---+
|mapred.mapoutput.key.class | mapreduce.map.output.key.class
*---+---+
|mapred.mapoutput.value.class | mapreduce.map.output.value.class
*---+---+
|mapred.mapper.regex | mapreduce.mapper.regex
*---+---+
|mapred.mapper.regex.group | mapreduce.mapper.regexmapper..group
*---+---+
|mapred.max.map.failures.percent | mapreduce.map.failures.maxpercent
*---+---+
|mapred.max.reduce.failures.percent | mapreduce.reduce.failures.maxpercent
*---+---+
|mapred.max.split.size | mapreduce.input.fileinputformat.split.maxsize
*---+---+
|mapred.max.tracker.blacklists | mapreduce.jobtracker.tasktracker.maxblacklists
*---+---+
|mapred.max.tracker.failures | mapreduce.job.maxtaskfailures.per.tracker
*---+---+
|mapred.merge.recordsBeforeProgress | mapreduce.task.merge.progress.records
*---+---+
|mapred.min.split.size | mapreduce.input.fileinputformat.split.minsize
*---+---+
|mapred.min.split.size.per.node | mapreduce.input.fileinputformat.split.minsize.per.node
*---+---+
|mapred.min.split.size.per.rack | mapreduce.input.fileinputformat.split.minsize.per.rack
*---+---+
|mapred.output.compress | mapreduce.output.fileoutputformat.compress
*---+---+
|mapred.output.compression.codec | mapreduce.output.fileoutputformat.compress.codec
*---+---+
|mapred.output.compression.type | mapreduce.output.fileoutputformat.compress.type
*---+---+
|mapred.output.dir | mapreduce.output.fileoutputformat.outputdir
*---+---+
|mapred.output.key.class | mapreduce.job.output.key.class
*---+---+
|mapred.output.key.comparator.class | mapreduce.job.output.key.comparator.class
*---+---+
|mapred.output.value.class | mapreduce.job.output.value.class
*---+---+
|mapred.output.value.groupfn.class | mapreduce.job.output.group.comparator.class
*---+---+
|mapred.permissions.supergroup | mapreduce.cluster.permissions.supergroup
*---+---+
|mapred.pipes.user.inputformat | mapreduce.pipes.inputformat
*---+---+
|mapred.reduce.child.env | mapreduce.reduce.env
*---+---+
|mapred.reduce.child.java.opts | mapreduce.reduce.java.opts
*---+---+
|mapred.reduce.child.log.level | mapreduce.reduce.log.level
*---+---+
|mapred.reduce.child.ulimit | mapreduce.reduce.ulimit
*---+---+
|mapred.reduce.max.attempts | mapreduce.reduce.maxattempts
*---+---+
|mapred.reduce.parallel.copies | mapreduce.reduce.shuffle.parallelcopies
*---+---+
|mapred.reduce.slowstart.completed.maps | mapreduce.job.reduce.slowstart.completedmaps
*---+---+
|mapred.reduce.task.debug.script | mapreduce.reduce.debug.script
*---+---+
|mapred.reduce.tasks | mapreduce.job.reduces
*---+---+
|mapred.reduce.tasks.speculative.execution | mapreduce.reduce.speculative
*---+---+
|mapred.seqbinary.output.key.class | mapreduce.output.seqbinaryoutputformat.key.class
*---+---+
|mapred.seqbinary.output.value.class | mapreduce.output.seqbinaryoutputformat.value.class
*---+---+
|mapred.shuffle.connect.timeout | mapreduce.reduce.shuffle.connect.timeout
*---+---+
|mapred.shuffle.read.timeout | mapreduce.reduce.shuffle.read.timeout
*---+---+
|mapred.skip.attempts.to.start.skipping | mapreduce.task.skip.start.attempts
*---+---+
|mapred.skip.map.auto.incr.proc.count | mapreduce.map.skip.proc-count.auto-incr
*---+---+
|mapred.skip.map.max.skip.records | mapreduce.map.skip.maxrecords
*---+---+
|mapred.skip.on | mapreduce.job.skiprecords
*---+---+
|mapred.skip.out.dir | mapreduce.job.skip.outdir
*---+---+
|mapred.skip.reduce.auto.incr.proc.count | mapreduce.reduce.skip.proc-count.auto-incr
*---+---+
|mapred.skip.reduce.max.skip.groups | mapreduce.reduce.skip.maxgroups
*---+---+
|mapred.speculative.execution.slowNodeThreshold | mapreduce.job.speculative.slownodethreshold
*---+---+
|mapred.speculative.execution.slowTaskThreshold | mapreduce.job.speculative.slowtaskthreshold
*---+---+
|mapred.speculative.execution.speculativeCap | mapreduce.job.speculative.speculativecap
*---+---+
|mapred.submit.replication | mapreduce.client.submit.file.replication
*---+---+
|mapred.system.dir | mapreduce.jobtracker.system.dir
*---+---+
|mapred.task.cache.levels | mapreduce.jobtracker.taskcache.levels
*---+---+
|mapred.task.id | mapreduce.task.attempt.id
*---+---+
|mapred.task.is.map | mapreduce.task.ismap
*---+---+
|mapred.task.partition | mapreduce.task.partition
*---+---+
|mapred.task.profile | mapreduce.task.profile
*---+---+
|mapred.task.profile.maps | mapreduce.task.profile.maps
*---+---+
|mapred.task.profile.params | mapreduce.task.profile.params
*---+---+
|mapred.task.profile.reduces | mapreduce.task.profile.reduces
*---+---+
|mapred.task.timeout | mapreduce.task.timeout
*---+---+
|mapred.task.tracker.http.address | mapreduce.tasktracker.http.address
*---+---+
|mapred.task.tracker.report.address | mapreduce.tasktracker.report.address
*---+---+
|mapred.task.tracker.task-controller | mapreduce.tasktracker.taskcontroller
*---+---+
|mapred.tasktracker.dns.interface | mapreduce.tasktracker.dns.interface
*---+---+
|mapred.tasktracker.dns.nameserver | mapreduce.tasktracker.dns.nameserver
*---+---+
|mapred.tasktracker.events.batchsize | mapreduce.tasktracker.events.batchsize
*---+---+
|mapred.tasktracker.expiry.interval | mapreduce.jobtracker.expire.trackers.interval
*---+---+
|mapred.tasktracker.indexcache.mb | mapreduce.tasktracker.indexcache.mb
*---+---+
|mapred.tasktracker.instrumentation | mapreduce.tasktracker.instrumentation
*---+---+
|mapred.tasktracker.map.tasks.maximum | mapreduce.tasktracker.map.tasks.maximum
*---+---+
|mapred.tasktracker.memory_calculator_plugin | mapreduce.tasktracker.resourcecalculatorplugin
*---+---+
|mapred.tasktracker.memorycalculatorplugin | mapreduce.tasktracker.resourcecalculatorplugin
*---+---+
|mapred.tasktracker.reduce.tasks.maximum | mapreduce.tasktracker.reduce.tasks.maximum
*---+---+
|mapred.tasktracker.taskmemorymanager.monitoring-interval | mapreduce.tasktracker.taskmemorymanager.monitoringinterval
*---+---+
|mapred.tasktracker.tasks.sleeptime-before-sigkill | mapreduce.tasktracker.tasks.sleeptimebeforesigkill
*---+---+
|mapred.temp.dir | mapreduce.cluster.temp.dir
*---+---+
|mapred.text.key.comparator.options | mapreduce.partition.keycomparator.options
*---+---+
|mapred.text.key.partitioner.options | mapreduce.partition.keypartitioner.options
*---+---+
|mapred.textoutputformat.separator | mapreduce.output.textoutputformat.separator
*---+---+
|mapred.tip.id | mapreduce.task.id
*---+---+
|mapred.used.genericoptionsparser | mapreduce.client.genericoptionsparser.used
*---+---+
|mapred.userlog.limit.kb | mapreduce.task.userlog.limit.kb
*---+---+
|mapred.userlog.retain.hours | mapreduce.job.userlog.retain.hours
*---+---+
|mapred.work.output.dir | mapreduce.task.output.dir
*---+---+
|mapred.working.dir | mapreduce.job.working.dir
*---+---+
|mapreduce.combine.class | mapreduce.job.combine.class
*---+---+
|mapreduce.inputformat.class | mapreduce.job.inputformat.class
*---+---+
|mapreduce.jobtracker.permissions.supergroup | mapreduce.cluster.permissions.supergroup
*---+---+
|mapreduce.map.class | mapreduce.job.map.class
*---+---+
|mapreduce.outputformat.class | mapreduce.job.outputformat.class
*---+---+
|mapreduce.partitioner.class | mapreduce.job.partitioner.class
*---+---+
|mapreduce.reduce.class | mapreduce.job.reduce.class
*---+---+
|min.num.spills.for.combine | mapreduce.map.combine.minspills
*---+---+
|reduce.output.key.value.fields.spec | mapreduce.fieldsel.reduce.output.key.value.fields.spec
*---+---+
|security.job.submission.protocol.acl | security.job.client.protocol.acl
*---+---+
|security.task.umbilical.protocol.acl | security.job.task.protocol.acl
*---+---+
|sequencefile.filter.class | mapreduce.input.sequencefileinputfilter.class
*---+---+
|sequencefile.filter.frequency | mapreduce.input.sequencefileinputfilter.frequency
*---+---+
|sequencefile.filter.regex | mapreduce.input.sequencefileinputfilter.regex
*---+---+
|session.id | dfs.metrics.session-id
*---+---+
|slave.host.name | dfs.datanode.hostname
*---+---+
|slave.host.name | mapreduce.tasktracker.host.name
*---+---+
|tasktracker.contention.tracking | mapreduce.tasktracker.contention.tracking
*---+---+
|tasktracker.http.threads | mapreduce.tasktracker.http.threads
*---+---+
|topology.node.switch.mapping.impl | net.topology.node.switch.mapping.impl
*---+---+
|topology.script.file.name | net.topology.script.file.name
*---+---+
|topology.script.number.args | net.topology.script.number.args
*---+---+
|user.name | mapreduce.job.user.name
*---+---+
|webinterface.private.actions | mapreduce.jobtracker.webinterface.trusted
*---+---+
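The table above only maps deprecated property names to their replacements. As a purely illustrative aid (not part of this commit), the short sketch below shows a new-style key from the table being set and read through org.apache.hadoop.conf.Configuration; the class name and the chosen value of 4 are assumptions made for the example.

    // Illustrative sketch only: set and read a property by its new name.
    import org.apache.hadoop.conf.Configuration;

    public class NewKeyExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "mapreduce.job.reduces" replaces the deprecated "mapred.reduce.tasks".
        conf.setInt("mapreduce.job.reduces", 4);
        // The second argument is only a default used when the key is unset.
        System.out.println(conf.getInt("mapreduce.job.reduces", 1));
      }
    }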

View File

@ -180,7 +180,7 @@ private void displayResults() {
LOG.info(" Comparision result: [" +
(resultBoolean ? "pass" : "fail") + "]");
LOG.info(" Expected output: [" +
cd.getExpectedOutput() + "]");
expandCommand(cd.getExpectedOutput()) + "]");
LOG.info(" Actual output: [" +
cd.getActualOutput() + "]");
}
@ -290,7 +290,7 @@ private boolean compareTestOutput(ComparatorData compdata, Result cmdResult) {
comparatorType);
ComparatorBase comp = (ComparatorBase) comparatorClass.newInstance();
compareOutput = comp.compare(cmdResult.getCommandOutput(),
compdata.getExpectedOutput());
expandCommand(compdata.getExpectedOutput()));
} catch (Exception e) {
LOG.info("Error in instantiating the comparator" + e);
}

View File

@ -34,6 +34,7 @@
import org.apache.ftpserver.command.impl.STAT;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import org.junit.BeforeClass;
import org.junit.Test;
@ -301,7 +302,7 @@ public void testInvalidDefaultFS() throws Exception {
// arguments is valid - fsshell should work
FsShell shell = new FsShell();
Configuration conf = new Configuration();
FsConfig.setDefaultFS(conf, "hhhh://doesnotexist/");
conf.set(FS_DEFAULT_NAME_KEY, "hhhh://doesnotexist/");
shell.setConf(conf);
String [] args = new String[2];
args[0] = "-ls";

View File

@ -17,80 +17,180 @@
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestPathData {
protected static Configuration conf;
protected static FileSystem fs;
protected static String dirString;
protected static Path dir;
protected static Path testDir;
protected static PathData item;
protected static String[] d1Paths =
new String[] { "d1/f1", "d1/f1.1", "d1/f2" };
protected static String[] d2Paths =
new String[] { "d2/f3" };
@BeforeClass
public static void initialize() throws Exception {
conf = new Configuration();
fs = FileSystem.getLocal(conf);
testDir = new Path(
System.getProperty("test.build.data", "build/test/data") + "/testPD"
);
// don't want scheme on the path, just an absolute path
testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
FileSystem.setDefaultUri(conf, fs.getUri());
fs.setWorkingDirectory(testDir);
fs.mkdirs(new Path("d1"));
fs.createNewFile(new Path("d1", "f1"));
fs.createNewFile(new Path("d1", "f1.1"));
fs.createNewFile(new Path("d1", "f2"));
fs.mkdirs(new Path("d2"));
fs.create(new Path("d2","f3"));
}
@Test
public void testWithFsAndPath() throws Exception {
dirString = "/tmp";
dir = new Path(dirString);
item = new PathData(fs, dir);
public void testWithDirStringAndConf() throws Exception {
dirString = "d1";
item = new PathData(dirString, conf);
checkPathData();
}
@Test
public void testWithStringAndConf() throws Exception {
dirString = "/tmp";
dir = new Path(dirString);
// properly implementing symlink support in various commands will require
// trailing slashes to be retained
dirString = "d1/";
item = new PathData(dirString, conf);
checkPathData();
}
@Test
public void testUnqualifiedUriContents() throws Exception {
dirString = "/tmp";
dirString = "d1";
item = new PathData(dirString, conf);
PathData[] items = item.getDirectoryContents();
for (PathData item : items) {
assertTrue(item.toString().startsWith(dirString));
}
assertEquals(
sortedString("d1/f1", "d1/f1.1", "d1/f2"),
sortedString(items)
);
}
@Test
public void testQualifiedUriContents() throws Exception {
dirString = "file:/tmp";
dirString = fs.makeQualified(new Path("d1")).toString();
item = new PathData(dirString, conf);
PathData[] items = item.getDirectoryContents();
for (PathData item : items) {
assertTrue(item.toString().startsWith(dirString));
assertEquals(
sortedString(dirString+"/f1", dirString+"/f1.1", dirString+"/f2"),
sortedString(items)
);
}
@Test
public void testCwdContents() throws Exception {
dirString = Path.CUR_DIR;
item = new PathData(dirString, conf);
PathData[] items = item.getDirectoryContents();
assertEquals(
sortedString("d1", "d2"),
sortedString(items)
);
}
@Test
public void testToFile() throws Exception {
item = new PathData(".", conf);
assertEquals(new File(testDir.toString()), item.toFile());
item = new PathData("d1/f1", conf);
assertEquals(new File(testDir+"/d1/f1"), item.toFile());
item = new PathData(testDir+"/d1/f1", conf);
assertEquals(new File(testDir+"/d1/f1"), item.toFile());
}
@Test
public void testAbsoluteGlob() throws Exception {
PathData[] items = PathData.expandAsGlob(testDir+"/d1/f1*", conf);
assertEquals(
sortedString(testDir+"/d1/f1", testDir+"/d1/f1.1"),
sortedString(items)
);
}
@Test
public void testRelativeGlob() throws Exception {
PathData[] items = PathData.expandAsGlob("d1/f1*", conf);
assertEquals(
sortedString("d1/f1", "d1/f1.1"),
sortedString(items)
);
}
@Test
public void testRelativeGlobBack() throws Exception {
fs.setWorkingDirectory(new Path("d1"));
PathData[] items = PathData.expandAsGlob("../d2/*", conf);
assertEquals(
sortedString("../d2/f3"),
sortedString(items)
);
}
@Test
public void testWithStringAndConfForBuggyPath() throws Exception {
dirString = "file:///tmp";
dir = new Path(dirString);
testDir = new Path(dirString);
item = new PathData(dirString, conf);
// this may fail some day if Path is fixed to not crunch the uri
// if the authority is null; however, we need to test that the PathData
// toString() returns the given string, while Path toString() does
// the crunching
assertEquals("file:/tmp", dir.toString());
assertEquals("file:/tmp", testDir.toString());
checkPathData();
}
public void checkPathData() throws Exception {
assertEquals(fs, item.fs);
assertEquals(dirString, item.toString());
assertEquals(dir, item.path);
assertTrue(item.stat != null);
assertTrue(item.stat.isDirectory());
assertEquals("checking fs", fs, item.fs);
assertEquals("checking string", dirString, item.toString());
assertEquals("checking path",
fs.makeQualified(new Path(item.toString())), item.path
);
assertTrue("checking exist", item.stat != null);
assertTrue("checking isDir", item.stat.isDirectory());
}
/* JUnit does a lousy job of comparing arrays:
* if the array lengths differ, it just says so without showing the contents.
* This sorts the paths and builds a string of "i:<value>, ..." suitable
* for a string compare.
*/
private static String sortedString(Object ... list) {
String[] strings = new String[list.length];
for (int i=0; i < list.length; i++) {
strings[i] = String.valueOf(list[i]);
}
Arrays.sort(strings);
StringBuilder result = new StringBuilder();
for (int i=0; i < strings.length; i++) {
if (result.length() > 0) {
result.append(", ");
}
result.append(i+":<"+strings[i]+">");
}
return result.toString();
}
private static String sortedString(PathData ... items) {
return sortedString((Object[])items);
}
}

View File

@ -23,6 +23,7 @@
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@ -33,6 +34,8 @@
import org.junit.AfterClass;
import org.junit.Test;
import org.mockito.Mockito;
import static org.junit.Assert.*;
/**
@ -81,6 +84,27 @@ public void testFileStatusSerialziation()
assertEquals(content.length, deSer.getLen());
}
// Tests that ViewFileSystem.getFileChecksum calls res.targetFileSystem
// .getFileChecksum with res.remainingPath and not with f
@Test
public void testGetFileChecksum() throws IOException {
FileSystem mockFS = Mockito.mock(FileSystem.class);
InodeTree.ResolveResult<FileSystem> res =
new InodeTree.ResolveResult<FileSystem>(null, mockFS , null,
new Path("someFile"));
@SuppressWarnings("unchecked")
InodeTree<FileSystem> fsState = Mockito.mock(InodeTree.class);
Mockito.when(fsState.resolve("/tmp/someFile", true)).thenReturn(res);
ViewFileSystem vfs = Mockito.mock(ViewFileSystem.class);
vfs.fsState = fsState;
Mockito.when(vfs.getFileChecksum(new Path("/tmp/someFile")))
.thenCallRealMethod();
vfs.getFileChecksum(new Path("/tmp/someFile"));
Mockito.verify(mockFS).getFileChecksum(new Path("someFile"));
}
@AfterClass
public static void cleanup() throws IOException {
FileUtil.fullyDelete(TEST_DIR);

View File

@ -29,13 +29,15 @@
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FileContextTestHelper.fileType;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileContextTestHelper.fileType;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.Token;
@ -43,6 +45,7 @@
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
/**
@ -408,6 +411,27 @@ public void testFileStatusOnMountLink() throws IOException {
}
}
@Test
public void testGetFileChecksum() throws AccessControlException
, UnresolvedLinkException, IOException {
AbstractFileSystem mockAFS = Mockito.mock(AbstractFileSystem.class);
InodeTree.ResolveResult<AbstractFileSystem> res =
new InodeTree.ResolveResult<AbstractFileSystem>(null, mockAFS , null,
new Path("someFile"));
@SuppressWarnings("unchecked")
InodeTree<AbstractFileSystem> fsState = Mockito.mock(InodeTree.class);
Mockito.when(fsState.resolve(Mockito.anyString()
, Mockito.anyBoolean())).thenReturn(res);
ViewFs vfs = Mockito.mock(ViewFs.class);
vfs.fsState = fsState;
Mockito.when(vfs.getFileChecksum(new Path("/tmp/someFile")))
.thenCallRealMethod();
vfs.getFileChecksum(new Path("/tmp/someFile"));
Mockito.verify(mockAFS).getFileChecksum(new Path("someFile"));
}
@Test(expected=FileNotFoundException.class)
public void testgetFSonDanglingLink() throws IOException {
fcView.getFileStatus(new Path("/danglingLink"));

View File

@ -19,6 +19,7 @@
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;
@ -210,6 +211,66 @@ public void testChmod() throws Exception {
assertPermissions(toChmod, 0644);
}
@Test
public void testPosixFadvise() throws Exception {
FileInputStream fis = new FileInputStream("/dev/zero");
try {
NativeIO.posix_fadvise(fis.getFD(), 0, 0,
NativeIO.POSIX_FADV_SEQUENTIAL);
} catch (UnsupportedOperationException uoe) {
// we should just skip the unit test on machines where we don't
// have fadvise support
assumeTrue(false);
} finally {
fis.close();
}
try {
NativeIO.posix_fadvise(fis.getFD(), 0, 1024,
NativeIO.POSIX_FADV_SEQUENTIAL);
fail("Did not throw on bad file");
} catch (NativeIOException nioe) {
assertEquals(Errno.EBADF, nioe.getErrno());
}
try {
NativeIO.posix_fadvise(null, 0, 1024,
NativeIO.POSIX_FADV_SEQUENTIAL);
fail("Did not throw on null file");
} catch (NullPointerException npe) {
// expected
}
}
@Test
public void testSyncFileRange() throws Exception {
FileOutputStream fos = new FileOutputStream(
new File(TEST_DIR, "testSyncFileRange"));
try {
fos.write("foo".getBytes());
NativeIO.sync_file_range(fos.getFD(), 0, 1024,
NativeIO.SYNC_FILE_RANGE_WRITE);
// no way to verify that this actually has synced,
// but if it doesn't throw, we can assume it worked
} catch (UnsupportedOperationException uoe) {
// we should just skip the unit test on machines where we don't
// have sync_file_range support
assumeTrue(false);
} finally {
fos.close();
}
try {
NativeIO.sync_file_range(fos.getFD(), 0, 1024,
NativeIO.SYNC_FILE_RANGE_WRITE);
fail("Did not throw on bad file");
} catch (NativeIOException nioe) {
assertEquals(Errno.EBADF, nioe.getErrno());
}
}
private void assertPermissions(File f, int expected) throws IOException {
FileSystem localfs = FileSystem.getLocal(new Configuration());
FsPermission perms = localfs.getFileStatus(

View File

@ -583,6 +583,12 @@ public void testIpcFromHadoop0_21_0() throws Exception {
NetworkTraces.RESPONSE_TO_HADOOP_0_21_0_RPC);
}
@Test
public void testHttpGetResponse() throws Exception {
doIpcVersionTest("GET / HTTP/1.0\r\n\r\n".getBytes(),
Server.RECEIVED_HTTP_REQ_RESPONSE.getBytes());
}
private void doIpcVersionTest(
byte[] requestData,
byte[] expectedResponse) throws Exception {

View File

@ -0,0 +1,123 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface;
import org.junit.Assert;
import org.junit.Test;
import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* Test for the protocol buffer based RPC mechanism.
* This test depends on test.proto definition of types in src/test/proto
* and protobuf service definition from src/test/test_rpc_service.proto
*/
public class TestProtoBufRpc {
public final static String ADDRESS = "0.0.0.0";
public final static int PORT = 0;
public static class ServerImpl implements BlockingInterface {
@Override
public EmptyResponseProto ping(RpcController unused,
EmptyRequestProto request) throws ServiceException {
return EmptyResponseProto.newBuilder().build();
}
@Override
public EchoResponseProto echo(RpcController unused, EchoRequestProto request)
throws ServiceException {
return EchoResponseProto.newBuilder().setMessage(request.getMessage())
.build();
}
@Override
public EmptyResponseProto error(RpcController unused,
EmptyRequestProto request) throws ServiceException {
throw new ServiceException("error", new RpcServerException("error"));
}
}
private static RPC.Server startRPCServer(Configuration conf)
throws IOException {
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, BlockingService.class, ProtobufRpcEngine.class);
// Create server side implementation
ServerImpl serverImpl = new ServerImpl();
BlockingService service = TestProtobufRpcProto
.newReflectiveBlockingService(serverImpl);
// Get RPC server for server side implementation
RPC.Server server = RPC.getServer(BlockingService.class, service, ADDRESS,
PORT, conf);
server.start();
return server;
}
private static BlockingInterface getClient(Configuration conf,
InetSocketAddress addr) throws IOException {
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, BlockingInterface.class,
ProtobufRpcEngine.class);
BlockingInterface client = RPC.getProxy(BlockingInterface.class, 0, addr,
conf);
return client;
}
@Test
public void testProtoBufRpc() throws Exception {
Configuration conf = new Configuration();
RPC.Server server = startRPCServer(conf);
BlockingInterface client = getClient(conf, server.getListenerAddress());
// Test ping method
EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
client.ping(null, emptyRequest);
// Test echo method
EchoRequestProto echoRequest = EchoRequestProto.newBuilder()
.setMessage("hello").build();
EchoResponseProto echoResponse = client.echo(null, echoRequest);
Assert.assertEquals(echoResponse.getMessage(), "hello");
// Test error method - it should be thrown as RemoteException
try {
client.error(null, emptyRequest);
Assert.fail("Expected exception is not thrown");
} catch (ServiceException e) {
RemoteException re = (RemoteException)e.getCause();
re.printStackTrace();
RpcServerException rse = (RpcServerException) re
.unwrapRemoteException(RpcServerException.class);
rse.printStackTrace();
}
}
}

View File

@ -0,0 +1,395 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: test_rpc_service.proto
package org.apache.hadoop.ipc.protobuf;
public final class TestRpcServiceProtos {
private TestRpcServiceProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public static abstract class TestProtobufRpcProto
implements com.google.protobuf.Service {
protected TestProtobufRpcProto() {}
public interface Interface {
public abstract void ping(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done);
public abstract void echo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto> done);
public abstract void error(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done);
}
public static com.google.protobuf.Service newReflectiveService(
final Interface impl) {
return new TestProtobufRpcProto() {
@java.lang.Override
public void ping(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done) {
impl.ping(controller, request, done);
}
@java.lang.Override
public void echo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto> done) {
impl.echo(controller, request, done);
}
@java.lang.Override
public void error(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done) {
impl.error(controller, request, done);
}
};
}
public static com.google.protobuf.BlockingService
newReflectiveBlockingService(final BlockingInterface impl) {
return new com.google.protobuf.BlockingService() {
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final com.google.protobuf.Message callBlockingMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request)
throws com.google.protobuf.ServiceException {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callBlockingMethod() given method descriptor for " +
"wrong service type.");
}
switch(method.getIndex()) {
case 0:
return impl.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto)request);
case 1:
return impl.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto)request);
case 2:
return impl.error(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
};
}
public abstract void ping(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done);
public abstract void echo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto> done);
public abstract void error(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done);
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
return org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.getDescriptor().getServices().get(0);
}
public final com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptorForType() {
return getDescriptor();
}
public final void callMethod(
com.google.protobuf.Descriptors.MethodDescriptor method,
com.google.protobuf.RpcController controller,
com.google.protobuf.Message request,
com.google.protobuf.RpcCallback<
com.google.protobuf.Message> done) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.callMethod() given method descriptor for wrong " +
"service type.");
}
switch(method.getIndex()) {
case 0:
this.ping(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto>specializeCallback(
done));
return;
case 1:
this.echo(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto>specializeCallback(
done));
return;
case 2:
this.error(controller, (org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto>specializeCallback(
done));
return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getRequestPrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getRequestPrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto.getDefaultInstance();
case 2:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public final com.google.protobuf.Message
getResponsePrototype(
com.google.protobuf.Descriptors.MethodDescriptor method) {
if (method.getService() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"Service.getResponsePrototype() given method " +
"descriptor for wrong service type.");
}
switch(method.getIndex()) {
case 0:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance();
case 1:
return org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance();
case 2:
return org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
}
public static Stub newStub(
com.google.protobuf.RpcChannel channel) {
return new Stub(channel);
}
public static final class Stub extends org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto implements Interface {
private Stub(com.google.protobuf.RpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.RpcChannel channel;
public com.google.protobuf.RpcChannel getChannel() {
return channel;
}
public void ping(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.class,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance()));
}
public void echo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.class,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance()));
}
public void error(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request,
com.google.protobuf.RpcCallback<org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto> done) {
channel.callMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance(),
com.google.protobuf.RpcUtil.generalizeCallback(
done,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.class,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance()));
}
}
public static BlockingInterface newBlockingStub(
com.google.protobuf.BlockingRpcChannel channel) {
return new BlockingStub(channel);
}
public interface BlockingInterface {
public org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto ping(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto echo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request)
throws com.google.protobuf.ServiceException;
public org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto error(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request)
throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
this.channel = channel;
}
private final com.google.protobuf.BlockingRpcChannel channel;
public org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto ping(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(0),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance());
}
public org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto echo(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(1),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto.getDefaultInstance());
}
public org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto error(
com.google.protobuf.RpcController controller,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto) channel.callBlockingMethod(
getDescriptor().getMethods().get(2),
controller,
request,
org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto.getDefaultInstance());
}
}
}
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\026test_rpc_service.proto\032\ntest.proto2\250\001\n" +
"\024TestProtobufRpcProto\022/\n\004ping\022\022.EmptyReq" +
"uestProto\032\023.EmptyResponseProto\022-\n\004echo\022\021" +
".EchoRequestProto\032\022.EchoResponseProto\0220\n" +
"\005error\022\022.EmptyRequestProto\032\023.EmptyRespon" +
"seProtoB<\n\036org.apache.hadoop.ipc.protobu" +
"fB\024TestRpcServiceProtos\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.ipc.protobuf.TestProtos.getDescriptor(),
}, assigner);
}
// @@protoc_insertion_point(outer_class_scope)
}

View File

@ -51,7 +51,7 @@ public static void assertReFind(String re, String value) {
assertTrue("'"+p+"' does not match "+value, m.find());
}
@Test public void testQury() throws Exception {
@Test public void testQuery() throws Exception {
String result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Runtime"));
LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: "+result);
assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Runtime\"", result);

View File

@ -0,0 +1,270 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.log4j.Appender;
import org.apache.log4j.Category;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.WriterAppender;
import org.apache.log4j.spi.HierarchyEventListener;
import org.apache.log4j.spi.LoggerFactory;
import org.apache.log4j.spi.LoggerRepository;
import org.apache.log4j.spi.ThrowableInformation;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.MappingJsonFactory;
import org.codehaus.jackson.node.ContainerNode;
import org.junit.Test;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.net.NoRouteToHostException;
import java.util.Enumeration;
import java.util.Vector;
public class TestLog4Json extends TestCase {
private static final Log LOG = LogFactory.getLog(TestLog4Json.class);
private static final JsonFactory factory = new MappingJsonFactory();
@Test
public void testConstruction() throws Throwable {
Log4Json l4j = new Log4Json();
String outcome = l4j.toJson(new StringWriter(),
"name", 0, "DEBUG", "thread1",
"hello, world", null).toString();
println("testConstruction", outcome);
}
@Test
public void testException() throws Throwable {
Exception e =
new NoRouteToHostException("that box caught fire 3 years ago");
ThrowableInformation ti = new ThrowableInformation(e);
Log4Json l4j = new Log4Json();
long timeStamp = System.currentTimeMillis();
String outcome = l4j.toJson(new StringWriter(),
"testException",
timeStamp,
"INFO",
"quoted\"",
"new line\n and {}",
ti)
.toString();
println("testException", outcome);
}
@Test
public void testNestedException() throws Throwable {
Exception e =
new NoRouteToHostException("that box caught fire 3 years ago");
Exception ioe = new IOException("Datacenter problems", e);
ThrowableInformation ti = new ThrowableInformation(ioe);
Log4Json l4j = new Log4Json();
long timeStamp = System.currentTimeMillis();
String outcome = l4j.toJson(new StringWriter(),
"testNestedException",
timeStamp,
"INFO",
"quoted\"",
"new line\n and {}",
ti)
.toString();
println("testNestedException", outcome);
ContainerNode rootNode = Log4Json.parse(outcome);
assertEntryEquals(rootNode, Log4Json.LEVEL, "INFO");
assertEntryEquals(rootNode, Log4Json.NAME, "testNestedException");
assertEntryEquals(rootNode, Log4Json.TIME, timeStamp);
assertEntryEquals(rootNode, Log4Json.EXCEPTION_CLASS,
ioe.getClass().getName());
JsonNode node = assertNodeContains(rootNode, Log4Json.STACK);
assertTrue("Not an array: " + node, node.isArray());
node = assertNodeContains(rootNode, Log4Json.DATE);
assertTrue("Not a string: " + node, node.isTextual());
//rather than try to make assertions about the format of the text
//message equalling another ISO date, this test asserts that the hyphen
//and colon characters are in the string.
String dateText = node.getTextValue();
assertTrue("No '-' in " + dateText, dateText.contains("-"));
assertTrue("No ':' in " + dateText, dateText.contains(":"));
}
/**
* Create a log instance and log to it
* @throws Throwable if it all goes wrong
*/
@Test
public void testLog() throws Throwable {
String message = "test message";
Throwable throwable = null;
String json = logOut(message, throwable);
println("testLog", json);
}
/**
* Create a log instance and log to it
* @throws Throwable if it all goes wrong
*/
@Test
public void testLogExceptions() throws Throwable {
String message = "test message";
Throwable inner = new IOException("Directory / not found");
Throwable throwable = new IOException("startup failure", inner);
String json = logOut(message, throwable);
println("testLogExceptions", json);
}
void assertEntryEquals(ContainerNode rootNode, String key, String value) {
JsonNode node = assertNodeContains(rootNode, key);
assertEquals(value, node.getTextValue());
}
private JsonNode assertNodeContains(ContainerNode rootNode, String key) {
JsonNode node = rootNode.get(key);
if (node == null) {
fail("No entry of name \"" + key + "\" found in " + rootNode.toString());
}
return node;
}
void assertEntryEquals(ContainerNode rootNode, String key, long value) {
JsonNode node = assertNodeContains(rootNode, key);
assertEquals(value, node.getNumberValue());
}
/**
* Print out what's going on. The logging APIs aren't used here, and the text
* is delimited so the output is easier to inspect.
*
* @param name name of operation
* @param text text to print
*/
private void println(String name, String text) {
System.out.println(name + ": #" + text + "#");
}
private String logOut(String message, Throwable throwable) {
StringWriter writer = new StringWriter();
Logger logger = createLogger(writer);
logger.info(message, throwable);
//remove and close the appender
logger.removeAllAppenders();
return writer.toString();
}
public Logger createLogger(Writer writer) {
TestLoggerRepository repo = new TestLoggerRepository();
Logger logger = repo.getLogger("test");
Log4Json layout = new Log4Json();
WriterAppender appender = new WriterAppender(layout, writer);
logger.addAppender(appender);
return logger;
}
/**
* This test logger avoids integrating with the main runtime's Logger hierarchy
* in ways the reader does not want to know.
*/
private static class TestLogger extends Logger {
private TestLogger(String name, LoggerRepository repo) {
super(name);
repository = repo;
setLevel(Level.INFO);
}
}
public static class TestLoggerRepository implements LoggerRepository {
@Override
public void addHierarchyEventListener(HierarchyEventListener listener) {
}
@Override
public boolean isDisabled(int level) {
return false;
}
@Override
public void setThreshold(Level level) {
}
@Override
public void setThreshold(String val) {
}
@Override
public void emitNoAppenderWarning(Category cat) {
}
@Override
public Level getThreshold() {
return Level.ALL;
}
@Override
public Logger getLogger(String name) {
return new TestLogger(name, this);
}
@Override
public Logger getLogger(String name, LoggerFactory factory) {
return new TestLogger(name, this);
}
@Override
public Logger getRootLogger() {
return new TestLogger("root", this);
}
@Override
public Logger exists(String name) {
return null;
}
@Override
public void shutdown() {
}
@Override
public Enumeration getCurrentLoggers() {
return new Vector().elements();
}
@Override
public Enumeration getCurrentCategories() {
return new Vector().elements();
}
@Override
public void fireAddAppenderEvent(Category logger, Appender appender) {
}
@Override
public void resetConfiguration() {
}
}
}

View File

@ -164,6 +164,27 @@ public void testWrapUnknownHostException() throws Throwable {
assertInException(wrapped, "/UnknownHost");
}
@Test
public void testCreateSocketAddress() throws Throwable {
InetSocketAddress addr = NetUtils.createSocketAddr(
"127.0.0.1:12345", 1000, "myconfig");
assertEquals("127.0.0.1", addr.getAddress().getHostAddress());
assertEquals(12345, addr.getPort());
addr = NetUtils.createSocketAddr(
"127.0.0.1", 1000, "myconfig");
assertEquals("127.0.0.1", addr.getAddress().getHostAddress());
assertEquals(1000, addr.getPort());
try {
addr = NetUtils.createSocketAddr(
"127.0.0.1:blahblah", 1000, "myconfig");
fail("Should have failed to parse bad port");
} catch (IllegalArgumentException iae) {
assertInException(iae, "myconfig");
}
}
private void assertRemoteDetailsIncluded(IOException wrapped)
throws Throwable {
assertInException(wrapped, "desthost");

View File

@ -360,6 +360,8 @@ public void run() {
byte[] storedPassword = dtSecretManager.retrievePassword(id);
byte[] password = dtSecretManager.createPassword(id, key);
Assert.assertTrue(Arrays.equals(password, storedPassword));
//verify by secret manager api
dtSecretManager.verifyToken(id, password);
}
} finally {
dtSecretManager.stopThreads();

View File

@ -50,11 +50,14 @@ public CoreTestDriver(ProgramDriver pgd) {
}
public void run(String argv[]) {
int exitCode = -1;
try {
pgd.driver(argv);
exitCode = pgd.driver(argv);
} catch(Throwable e) {
e.printStackTrace();
}
System.exit(exitCode);
}
public static void main(String argv[]){

View File

@ -21,11 +21,14 @@
import static com.google.common.base.Preconditions.*;
import org.hamcrest.Description;
import org.junit.Assert;
import static org.mockito.Mockito.*;
import org.mockito.stubbing.Answer;
import org.mockito.internal.matchers.GreaterThan;
import org.mockito.invocation.InvocationOnMock;
import static org.mockito.AdditionalMatchers.*;
import org.mockito.ArgumentCaptor;
import org.mockito.ArgumentMatcher;
import org.apache.commons.logging.Log;
@ -44,6 +47,7 @@
public class MetricsAsserts {
final static Log LOG = LogFactory.getLog(MetricsAsserts.class);
private static final double EPSILON = 0.00001;
public static MetricsSystem mockMetricsSystem() {
MetricsSystem ms = mock(MetricsSystem.class);
@ -139,7 +143,15 @@ public static MetricsInfo anyInfo() {
*/
public static void assertGauge(String name, int expected,
MetricsRecordBuilder rb) {
verify(rb).addGauge(eqName(info(name, "")), eq(expected));
Assert.assertEquals("Bad value for metric " + name,
expected, getIntGauge(name, rb));
}
public static int getIntGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(Integer.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
@ -150,7 +162,16 @@ public static void assertGauge(String name, int expected,
*/
public static void assertCounter(String name, int expected,
MetricsRecordBuilder rb) {
verify(rb).addCounter(eqName(info(name, "")), eq(expected));
Assert.assertEquals("Bad value for metric " + name,
expected, getIntCounter(name, rb));
}
public static int getIntCounter(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(
Integer.class);
verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
@ -161,7 +182,15 @@ public static void assertCounter(String name, int expected,
*/
public static void assertGauge(String name, long expected,
MetricsRecordBuilder rb) {
verify(rb).addGauge(eqName(info(name, "")), eq(expected));
Assert.assertEquals("Bad value for metric " + name,
expected, getLongGauge(name, rb));
}
public static long getLongGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
@ -172,7 +201,15 @@ public static void assertGauge(String name, long expected,
*/
public static void assertGauge(String name, double expected,
MetricsRecordBuilder rb) {
verify(rb).addGauge(eqName(info(name, "")), eq(expected));
Assert.assertEquals("Bad value for metric " + name,
expected, getDoubleGauge(name, rb), EPSILON);
}
public static double getDoubleGauge(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Double> captor = ArgumentCaptor.forClass(Double.class);
verify(rb, atLeast(0)).addGauge(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
@ -183,7 +220,23 @@ public static void assertGauge(String name, double expected,
*/
public static void assertCounter(String name, long expected,
MetricsRecordBuilder rb) {
verify(rb).addCounter(eqName(info(name, "")), eq(expected));
Assert.assertEquals("Bad value for metric " + name,
expected, getLongCounter(name, rb));
}
public static long getLongCounter(String name, MetricsRecordBuilder rb) {
ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
verify(rb, atLeast(0)).addCounter(eqName(info(name, "")), captor.capture());
checkCaptured(captor, name);
return captor.getValue();
}
/**
* Check that this metric was captured exactly once.
*/
private static void checkCaptured(ArgumentCaptor<?> captor, String name) {
Assert.assertEquals("Expected exactly one metric for name " + name,
1, captor.getAllValues().size());
}
/**
@ -238,7 +291,8 @@ public static void assertCounter(String name, long expected,
*/
public static void assertCounterGt(String name, long greater,
MetricsRecordBuilder rb) {
verify(rb).addCounter(eqName(info(name, "")), gt(greater));
Assert.assertThat("Bad value for metric " + name, getLongCounter(name, rb),
new GreaterThan<Long>(greater));
}
/**
@ -260,7 +314,8 @@ public static void assertCounterGt(String name, long greater,
*/
public static void assertGaugeGt(String name, double greater,
MetricsRecordBuilder rb) {
verify(rb).addGauge(eqName(info(name, "")), gt(greater));
Assert.assertThat("Bad value for metric " + name, getDoubleGauge(name, rb),
new GreaterThan<Double>(greater));
}
/**

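The MetricsAsserts changes above swap strict Mockito verify() expectations for ArgumentCaptor-based helpers, so a test can both read a metric value back and assert on it. The sketch below is a hypothetical usage example rather than code from this commit: the metric name "FooCounter", the class name, and the mock-based setup are assumptions, and it presumes the helpers live in org.apache.hadoop.test.MetricsAsserts as on trunk.

    // Hypothetical usage sketch of the captor-based helpers (not from this diff).
    import static org.apache.hadoop.metrics2.lib.Interns.info;
    import static org.mockito.Mockito.mock;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;
    import org.apache.hadoop.test.MetricsAsserts;

    public class MetricsAssertsSketch {
      public static void main(String[] args) {
        MetricsRecordBuilder rb = mock(MetricsRecordBuilder.class);
        // Record a counter on the mocked builder; an empty description keeps
        // the call compatible with the eqName(info(name, "")) matcher above.
        rb.addCounter(info("FooCounter", ""), 42L);
        // getLongCounter() captures the value passed to addCounter() above...
        long value = MetricsAsserts.getLongCounter("FooCounter", rb);
        System.out.println(value);
        // ...and assertCounter() compares it against an expected value.
        MetricsAsserts.assertCounter("FooCounter", 42L, rb);
      }
    }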
View File

@ -115,6 +115,26 @@ private void doBulkTest(DataChecksum checksum, int dataLength,
}
}
@Test
public void testEquality() {
assertEquals(
DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512),
DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
assertFalse(
DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512).equals(
DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 1024)));
assertFalse(
DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512).equals(
DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32C, 512)));
}
@Test
public void testToString() {
assertEquals("DataChecksum(type=CRC32, chunkSize=512)",
DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512)
.toString());
}
private static void corruptBufferOffset(ByteBuffer buf, int offset) {
buf.put(offset, (byte)(buf.get(offset) + 1));
}

View File

@ -15,25 +15,21 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
/**
* A clock class - can be mocked out for testing.
*/
class SimulatorClock extends Clock {
option java_package = "org.apache.hadoop.ipc.protobuf";
option java_outer_classname = "TestProtos";
option java_generate_equals_and_hash = true;
long currentTime;
SimulatorClock (long now) {
super();
currentTime = now;
}
void setTime(long now) {
currentTime = now;
message EmptyRequestProto {
}
@Override
long getTime() {
return currentTime;
message EmptyResponseProto {
}
message EchoRequestProto {
required string message = 1;
}
message EchoResponseProto {
required string message = 1;
}

Some files were not shown because too many files have changed in this diff