diff --git a/BUILDING.txt b/BUILDING.txt
index c2e7901c11..0684619204 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -138,3 +138,70 @@ Create a local staging version of the website (in /tmp/hadoop-site)
$ mvn clean site; mvn site:stage -DstagingDirectory=/tmp/hadoop-site
----------------------------------------------------------------------------------
+
+Building on Windows
+
+----------------------------------------------------------------------------------
+Requirements:
+
+* Windows System
+* JDK 1.6
+* Maven 3.0
+* Findbugs 1.3.9 (if running findbugs)
+* ProtocolBuffer 2.4.1+ (for MapReduce and HDFS)
+* Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
+* Windows SDK or Visual Studio 2010 Professional
+* Internet connection for first build (to fetch all Maven and Hadoop dependencies)
+
+If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
+Do not use Visual Studio Express. It does not support compiling for 64-bit,
+which is problematic if running a 64-bit system. The Windows SDK is free to
+download here:
+
+http://www.microsoft.com/en-us/download/details.aspx?id=8279
+
+----------------------------------------------------------------------------------
+Building:
+
+Keep the source code tree in a short path (for example, C:\hdc) to avoid
+running into problems caused by the Windows maximum path length limitation.
+
+Run builds from a Windows SDK Command Prompt. (Start, All Programs,
+Microsoft Windows SDK v7.1, Windows SDK 7.1 Command Prompt.)
+
+JAVA_HOME must be set, and the path must not contain spaces. If the full path
+would contain spaces, then use the Windows short path instead.
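+
+For example, if the JDK is installed under a path that contains spaces, such
+as "C:\Program Files\Java\jdk1.6.0_31" (version shown only for illustration),
+point JAVA_HOME at the equivalent 8.3 short path instead ("dir /x" in the
+parent directory lists the short names), typically something like:
+
+  set JAVA_HOME=c:\Progra~1\Java\jdk1.6.0_31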
+
+You must set the Platform environment variable to either x64 or Win32 depending
+on whether you're running a 64-bit or 32-bit system. Note that this is
+case-sensitive. It must be "Platform", not "PLATFORM" or "platform".
+Environment variables on Windows are usually case-insensitive, but Maven treats
+them as case-sensitive. Failure to set this environment variable correctly will
+cause msbuild to fail while building the native code in hadoop-common.
+
+set Platform=x64 (when building on a 64-bit system)
+set Platform=Win32 (when building on a 32-bit system)
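+
+To verify that the variable was picked up with the correct casing, run
+"set Platform" in the same command prompt; it should echo back a single line
+such as Platform=x64.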
+
+Several tests require that the user running the build hold the Create Symbolic
+Links privilege.
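+
+You can check whether the current account holds this privilege by running
+"whoami /priv" and looking for SeCreateSymbolicLinkPrivilege in the output.
+If it is missing, it can be granted through the Local Security Policy console
+(secpol.msc) under User Rights Assignment, "Create symbolic links".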
+
+All Maven goals are the same as described above, with the addition of the
+-Pnative-win profile to trigger building the Windows native components. The
+native components are required (not optional) on Windows. For example:
+
+ * Run tests : mvn -Pnative-win test
+
+----------------------------------------------------------------------------------
+Building distributions:
+
+Create binary distribution with native code and with documentation:
+
+ $ mvn package -Pdist,native-win,docs -DskipTests -Dtar
+
+Create source distribution:
+
+ $ mvn package -Pnative-win,src -DskipTests
+
+Create source and binary distributions with native code and documentation:
+
+ $ mvn package -Pdist,native-win,docs,src -DskipTests -Dtar
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
index 4d93b11a04..7128c75268 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
@@ -26,6 +26,9 @@
       <outputDirectory>/bin</outputDirectory>
       <includes>
         <include>*.sh</include>
+        <include>*-config.cmd</include>
+        <include>start-*.cmd</include>
+        <include>stop-*.cmd</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -38,6 +41,7 @@
       <outputDirectory>/libexec</outputDirectory>
       <includes>
         <include>*-config.sh</include>
+        <include>*-config.cmd</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -46,9 +50,13 @@
       <outputDirectory>/sbin</outputDirectory>
       <includes>
         <include>*.sh</include>
+        <include>*.cmd</include>
       </includes>
       <excludes>
         <exclude>hadoop-config.sh</exclude>
+        <exclude>hadoop.cmd</exclude>
+        <exclude>hdfs.cmd</exclude>
+        <exclude>hadoop-config.cmd</exclude>
       </excludes>
       <fileMode>0755</fileMode>
     </fileSet>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 20436abfee..4a22317965 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -33,6 +33,7 @@
       <outputDirectory>bin</outputDirectory>
       <includes>
         <include>yarn</include>
+        <include>yarn.cmd</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -41,6 +42,7 @@
       <outputDirectory>libexec</outputDirectory>
       <includes>
         <include>yarn-config.sh</include>
+        <include>yarn-config.cmd</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -52,6 +54,8 @@
         <include>yarn-daemons.sh</include>
         <include>start-yarn.sh</include>
         <include>stop-yarn.sh</include>
+        <include>start-yarn.cmd</include>
+        <include>stop-yarn.cmd</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
diff --git a/hadoop-common-project/hadoop-common/CHANGES.branch-trunk-win.txt b/hadoop-common-project/hadoop-common/CHANGES.branch-trunk-win.txt
new file mode 100644
index 0000000000..965bad4a41
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/CHANGES.branch-trunk-win.txt
@@ -0,0 +1,111 @@
+branch-trunk-win changes - unreleased
+
+ HADOOP-8924. Hadoop Common creating package-info.java must not depend on sh.
+ (Chris Nauroth via suresh)
+
+ HADOOP-8945. Merge winutils from branch-1-win to branch-trunk-win.
+ (Bikas Saha, Chuan Liu, Giridharan Kesavan, Ivan Mitic, and Steve Maine
+ ported by Chris Nauroth via suresh)
+
+ HADOOP-8946. winutils: compile codebase during Maven build on
+ branch-trunk-win. (Chris Nauroth via suresh)
+
+ HADOOP-8947. Merge FileUtil and Shell changes from branch-1-win to
+  branch-trunk-win to enable initial test pass. (Raja Aluri, David Lao,
+ Sumadhur Reddy Bolli, Ahmed El Baz, Kanna Karanam, Chuan Liu,
+ Ivan Mitic, Chris Nauroth, and Bikas Saha via suresh)
+
+ HADOOP-8954. "stat" executable not found on Windows. (Bikas Saha, Ivan Mitic
+  ported by Chris Nauroth via suresh)
+
+ HADOOP-8959. TestUserGroupInformation fails on Windows due to "id" executable
+  not found. (Bikas Saha, Ivan Mitic, ported by Chris Nauroth via suresh)
+
+ HADOOP-8955. "chmod" executable not found on Windows.
+ (Chris Nauroth via suresh)
+
+ HADOOP-8960. TestMetricsServlet fails on Windows. (Ivan Mitic via suresh)
+
+ HADOOP-8961. GenericOptionsParser URI parsing failure on Windows.
+ (Ivan Mitic via suresh)
+
+ HADOOP-8949. Remove FileUtil.CygPathCommand dead code. (Chris Nauroth via
+ suresh)
+
+ HADOOP-8956. FileSystem.primitiveMkdir failures on Windows cause multiple
+ test suites to fail. (Chris Nauroth via suresh)
+
+ HADOOP-8978. TestTrash fails on Windows. (Chris Nauroth via suresh)
+
+ HADOOP-8979. TestHttpServer fails on Windows. (Chris Nauroth via suresh)
+
+ HADOOP-8953. Shell PathData parsing failures on Windows. (Arpit Agarwal via
+ suresh)
+
+ HADOOP-8975. TestFileContextResolveAfs fails on Windows. (Chris Nauroth via
+ suresh)
+
+ HADOOP-8977. Multiple FsShell test failures on Windows. (Chris Nauroth via
+ suresh)
+
+ HADOOP-9005. Merge hadoop cmd line scripts from branch-1-win. (David Lao,
+ Bikas Saha, Lauren Yang, Chuan Liu, Thejas M Nair and Ivan Mitic via suresh)
+
+ HADOOP-9008. Building hadoop tarball fails on Windows. (Chris Nauroth via
+ suresh)
+
+ HADOOP-9011. saveVersion.py does not include branch in version annotation.
+ (Chris Nauroth via suresh)
+
+ HADOOP-9110. winutils ls off-by-one error indexing MONTHS array can cause
+ access violation. (Chris Nauroth via suresh)
+
+ HADOOP-9056. Build native library on Windows. (Chuan Liu, Arpit Agarwal via
+ suresh)
+
+ HADOOP-9144. Fix findbugs warnings. (Chris Nauroth via suresh)
+
+ HADOOP-9081. Add TestWinUtils. (Chuan Liu, Ivan Mitic, Chris Nauroth,
+ and Bikas Saha via suresh)
+
+ HADOOP-9146. Fix sticky bit regression on branch-trunk-win.
+ (Chris Nauroth via suresh)
+
+ HADOOP-9266. Fix javac, findbugs, and release audit warnings on
+ branch-trunk-win. (Chris Nauroth via suresh)
+
+ HADOOP-9270. Remove a stale java comment from FileUtil. (Chris Nauroth via
+ szetszwo)
+
+ HADOOP-9271. Revert Python build scripts from branch-trunk-win.
+ (Chris Nauroth via suresh)
+
+ HADOOP-9313. Remove spurious mkdir from hadoop-config.cmd.
+ (Ivan Mitic via suresh)
+
+ HADOOP-9309. Test failures on Windows due to UnsatisfiedLinkError
+ in NativeCodeLoader#buildSupportsSnappy. (Arpit Agarwal via suresh)
+
+ HADOOP-9347. Add instructions to BUILDING.txt describing how to
+ build on Windows. (Chris Nauroth via suresh)
+
+ HADOOP-9348. Address TODO in winutils to add more command line usage
+ and examples. (Chris Nauroth via suresh)
+
+ HADOOP-9354. Windows native project files missing license headers.
+ (Chris Nauroth via suresh)
+
+ HADOOP-9356. Remove remaining references to cygwin/cygpath from scripts.
+ (Chris Nauroth via suresh)
+
+ HADOOP-9232. JniBasedUnixGroupsMappingWithFallback fails on Windows
+ with UnsatisfiedLinkError. (Ivan Mitic via suresh)
+
+ HADOOP-9368. Add timeouts to new tests in branch-trunk-win.
+ (Arpit Agarwal via suresh)
+
+Patch equivalent to trunk committed to branch-trunk-win
+
+ HADOOP-8924. Add maven plugin alternative to shell script to save
+ package-info.java. (Chris Nauroth via suresh)
+
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3f9b47b331..c42dc76bdc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -671,15 +671,15 @@ Release 2.0.3-alpha - 2013-02-06
HADOOP-9124. SortedMapWritable violates contract of Map interface for
equals() and hashCode(). (Surenkumar Nihalani via tomwhite)
+ HADOOP-9278. Fix the file handle leak in HarMetaData.parseMetaData() in
+ HarFileSystem. (Chris Nauroth via szetszwo)
+
HADOOP-9252. In StringUtils, humanReadableInt(..) has a race condition and
the synchronization of limitDecimalTo2(double) can be avoided. (szetszwo)
HADOOP-9260. Hadoop version may be not correct when starting name node or
data node. (Chris Nauroth via jlowe)
- HADOOP-9278. Fix the file handle leak in HarMetaData.parseMetaData() in
- HarFileSystem. (Chris Nauroth via szetszwo)
-
HADOOP-9289. FsShell rm -f fails for non-matching globs. (Daryn Sharp via
suresh)
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index ff2760b688..1b51f8a825 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -439,6 +439,7 @@
             <exclude>CHANGES.txt</exclude>
+            <exclude>CHANGES.branch-trunk-win.txt</exclude>
             <exclude>.idea/**</exclude>
             <exclude>src/main/conf/*</exclude>
             <exclude>src/main/docs/**</exclude>
@@ -471,6 +472,28 @@
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-enforcer-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>enforce-os</id>
+                <goals>
+                  <goal>enforce</goal>
+                </goals>
+                <configuration>
+                  <rules>
+                    <requireOS>
+                      <family>mac</family>
+                      <family>unix</family>
+                      <message>native build only supported on Mac or Unix</message>
+                    </requireOS>
+                  </rules>
+                  <fail>true</fail>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
           <plugin>
             <groupId>org.codehaus.mojo</groupId>
             <artifactId>native-maven-plugin</artifactId>
@@ -541,6 +564,104 @@
+    <profile>
+      <id>native-win</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-enforcer-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>enforce-os</id>
+                <goals>
+                  <goal>enforce</goal>
+                </goals>
+                <configuration>
+                  <rules>
+                    <requireOS>
+                      <family>windows</family>
+                      <message>native-win build only supported on Windows</message>
+                    </requireOS>
+                  </rules>
+                  <fail>true</fail>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>native-maven-plugin</artifactId>
+            <executions>
+              <execution>
+                <phase>compile</phase>
+                <goals>
+                  <goal>javah</goal>
+                </goals>
+                <configuration>
+                  <javahPath>${env.JAVA_HOME}/bin/javah</javahPath>
+                  <javahClassNames>
+                    <javahClassName>org.apache.hadoop.io.compress.zlib.ZlibCompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.compress.zlib.ZlibDecompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsMapping</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.nativeio.NativeIO</javahClassName>
+                    <javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyCompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
+                  </javahClassNames>
+                  <javahOutputDirectory>${project.build.directory}/native/javah</javahOutputDirectory>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>compile-ms-winutils</id>
+                <phase>compile</phase>
+                <goals>
+                  <goal>exec</goal>
+                </goals>
+                <configuration>
+                  <executable>msbuild</executable>
+                  <arguments>
+                    <argument>${basedir}/src/main/winutils/winutils.sln</argument>
+                    <argument>/nologo</argument>
+                    <argument>/p:Configuration=Release</argument>
+                    <argument>/p:OutDir=${project.build.directory}/bin/</argument>
+                  </arguments>
+                </configuration>
+              </execution>
+              <execution>
+                <id>compile-ms-native-dll</id>
+                <phase>compile</phase>
+                <goals>
+                  <goal>exec</goal>
+                </goals>
+                <configuration>
+                  <executable>msbuild</executable>
+                  <arguments>
+                    <argument>${basedir}/src/main/native/native.sln</argument>
+                    <argument>/nologo</argument>
+                    <argument>/p:Configuration=Release</argument>
+                    <argument>/p:OutDir=${project.build.directory}/bin/</argument>
+                  </arguments>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+
     <profile>
       <id>startKdc</id>
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index e87df9162e..27b41f1684 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -91,9 +91,6 @@ case $COMMAND in
;;
classpath)
- if $cygwin; then
- CLASSPATH=`cygpath -p -w "$CLASSPATH"`
- fi
echo $CLASSPATH
exit
;;
@@ -132,9 +129,6 @@ case $COMMAND in
#make sure security appender is turned off
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
- if $cygwin; then
- CLASSPATH=`cygpath -p -w "$CLASSPATH"`
- fi
export CLASSPATH=$CLASSPATH
exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
;;
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
new file mode 100644
index 0000000000..bcb72a2a19
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
@@ -0,0 +1,292 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem included in all the hadoop scripts with source command
+@rem should not be executable directly
+@rem also should not be passed any arguments, since we need original %*
+
+if not defined HADOOP_COMMON_DIR (
+ set HADOOP_COMMON_DIR=share\hadoop\common
+)
+if not defined HADOOP_COMMON_LIB_JARS_DIR (
+ set HADOOP_COMMON_LIB_JARS_DIR=share\hadoop\common\lib
+)
+if not defined HADOOP_COMMON_LIB_NATIVE_DIR (
+ set HADOOP_COMMON_LIB_NATIVE_DIR=lib\native
+)
+if not defined HDFS_DIR (
+ set HDFS_DIR=share\hadoop\hdfs
+)
+if not defined HDFS_LIB_JARS_DIR (
+ set HDFS_LIB_JARS_DIR=share\hadoop\hdfs\lib
+)
+if not defined YARN_DIR (
+ set YARN_DIR=share\hadoop\yarn
+)
+if not defined YARN_LIB_JARS_DIR (
+ set YARN_LIB_JARS_DIR=share\hadoop\yarn\lib
+)
+if not defined MAPRED_DIR (
+ set MAPRED_DIR=share\hadoop\mapreduce
+)
+if not defined MAPRED_LIB_JARS_DIR (
+ set MAPRED_LIB_JARS_DIR=share\hadoop\mapreduce\lib
+)
+
+@rem the root of the Hadoop installation
+set HADOOP_HOME=%~dp0
+for %%i in (%HADOOP_HOME%.) do (
+ set HADOOP_HOME=%%~dpi
+)
+if "%HADOOP_HOME:~-1%" == "\" (
+ set HADOOP_HOME=%HADOOP_HOME:~0,-1%
+)
+
+if not exist %HADOOP_HOME%\share\hadoop\common\hadoop-common-*.jar (
+ @echo +================================================================+
+ @echo ^| Error: HADOOP_HOME is not set correctly ^|
+ @echo +----------------------------------------------------------------+
+ @echo ^| Please set your HADOOP_HOME variable to the absolute path of ^|
+ @echo ^| the directory that contains the hadoop distribution ^|
+ @echo +================================================================+
+ exit /b 1
+)
+
+set HADOOP_CONF_DIR=%HADOOP_HOME%\etc\hadoop
+
+@rem
+@rem Allow alternate conf dir location.
+@rem
+
+if "%1" == "--config" (
+ set HADOOP_CONF_DIR=%2
+ shift
+ shift
+)
+
+@rem
+@rem check to see it is specified whether to use the slaves or the
+@rem masters file
+@rem
+
+if "%1" == "--hosts" (
+ set HADOOP_SLAVES=%HADOOP_CONF_DIR%\%2
+ shift
+ shift
+)
+
+if exist %HADOOP_CONF_DIR%\hadoop-env.cmd (
+ call %HADOOP_CONF_DIR%\hadoop-env.cmd
+)
+
+@rem
+@rem setup java environment variables
+@rem
+
+if not defined JAVA_HOME (
+ echo Error: JAVA_HOME is not set.
+ goto :eof
+)
+
+if not exist %JAVA_HOME%\bin\java.exe (
+ echo Error: JAVA_HOME is incorrectly set.
+ echo Please update %HADOOP_HOME%\conf\hadoop-env.cmd
+ goto :eof
+)
+
+set JAVA=%JAVA_HOME%\bin\java
+@rem some Java parameters
+set JAVA_HEAP_MAX=-Xmx1000m
+
+@rem
+@rem check envvars which might override default args
+@rem
+
+if defined HADOOP_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%HADOOP_HEAPSIZE%m
+)
+
+@rem
+@rem CLASSPATH initially contains %HADOOP_CONF_DIR%
+@rem
+
+set CLASSPATH=%HADOOP_CONF_DIR%
+
+if not defined HADOOP_COMMON_HOME (
+ if exist %HADOOP_HOME%\share\hadoop\common (
+ set HADOOP_COMMON_HOME=%HADOOP_HOME%
+ )
+)
+
+@rem
+@rem for releases, add core hadoop jar & webapps to CLASSPATH
+@rem
+
+if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\webapps (
+ set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%
+)
+
+if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR% (
+ set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR%\*
+)
+
+set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\*
+
+@rem
+@rem add user-specified CLASSPATH last
+@rem
+
+if defined HADOOP_CLASSPATH (
+ if defined HADOOP_USER_CLASSPATH_FIRST (
+ set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
+ )
+)
+
+@rem
+@rem default log directory % file
+@rem
+
+if not defined HADOOP_LOG_DIR (
+ set HADOOP_LOG_DIR=%HADOOP_HOME%\logs
+)
+
+if not defined HADOOP_LOGFILE (
+ set HADOOP_LOGFILE=hadoop.log
+)
+
+if not defined HADOOP_ROOT_LOGGER (
+ set HADOOP_ROOT_LOGGER=INFO,console
+)
+
+@rem
+@rem default policy file for service-level authorization
+@rem
+
+if not defined HADOOP_POLICYFILE (
+ set HADOOP_POLICYFILE=hadoop-policy.xml
+)
+
+@rem
+@rem Determine the JAVA_PLATFORM
+@rem
+
+for /f "delims=" %%A in ('%JAVA% -Xmx32m %HADOOP_JAVA_PLATFORM_OPTS% -classpath "%CLASSPATH%" org.apache.hadoop.util.PlatformName') do set JAVA_PLATFORM=%%A
+@rem replace space with underscore
+set JAVA_PLATFORM=%JAVA_PLATFORM: =_%
+
+@rem
+@rem setup 'java.library.path' for native hadoop code if necessary
+@rem
+
+@rem Check if we're running hadoop directly from the build
+set JAVA_LIBRARY_PATH=
+if exist %HADOOP_CORE_HOME%\target\bin (
+ set JAVA_LIBRARY_PATH=%HADOOP_CORE_HOME%\target\bin
+)
+
+@rem For the distro case, check the bin folder
+if exist %HADOOP_CORE_HOME%\bin (
+ set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_CORE_HOME%\bin
+)
+
+@rem
+@rem setup a default TOOL_PATH
+@rem
+set TOOL_PATH=%HADOOP_HOME%\share\hadoop\tools\lib\*
+
+set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.dir=%HADOOP_LOG_DIR%
+set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.file=%HADOOP_LOGFILE%
+set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.home.dir=%HADOOP_HOME%
+set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.id.str=%HADOOP_IDENT_STRING%
+set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.root.logger=%HADOOP_ROOT_LOGGER%
+
+if defined JAVA_LIBRARY_PATH (
+ set HADOOP_OPTS=%HADOOP_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
+)
+set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.policy.file=%HADOOP_POLICYFILE%
+
+@rem
+@rem Disable ipv6 as it can cause issues
+@rem
+
+set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
+
+@rem
+@rem put hdfs in classpath if present
+@rem
+
+if not defined HADOOP_HDFS_HOME (
+ if exist %HADOOP_HOME%\%HDFS_DIR% (
+ set HADOOP_HDFS_HOME=%HADOOP_HOME%
+ )
+)
+
+if exist %HADOOP_HDFS_HOME%\%HDFS_DIR%\webapps (
+ set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%
+)
+
+if exist %HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR% (
+ set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR%\*
+)
+
+set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%\*
+
+@rem
+@rem put yarn in classpath if present
+@rem
+
+if not defined HADOOP_YARN_HOME (
+ if exist %HADOOP_HOME%\%YARN_DIR% (
+ set HADOOP_YARN_HOME=%HADOOP_HOME%
+ )
+)
+
+if exist %HADOOP_YARN_HOME%\%YARN_DIR%\webapps (
+ set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%
+)
+
+if exist %HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR% (
+ set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
+)
+
+set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%\*
+
+@rem
+@rem put mapred in classpath if present AND different from YARN
+@rem
+
+if not defined HADOOP_MAPRED_HOME (
+ if exist %HADOOP_HOME%\%MAPRED_DIR% (
+ set HADOOP_MAPRED_HOME=%HADOOP_HOME%
+ )
+)
+
+if not "%HADOOP_MAPRED_HOME%\%MAPRED_DIR%" == "%HADOOP_YARN_HOME%\%YARN_DIR%" (
+
+ if exist %HADOOP_MAPRED_HOME%\%MAPRED_DIR%\webapps (
+ set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%
+ )
+
+ if exist %HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR% (
+ set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\*
+ )
+
+ set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%\*
+)
+
+:eof
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
index 4f83ffd8a5..117b996b9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
@@ -112,12 +112,6 @@ if [[ ( "$HADOOP_SLAVES" != '' ) && ( "$HADOOP_SLAVE_NAMES" != '' ) ]] ; then
exit 1
fi
-cygwin=false
-case "`uname`" in
-CYGWIN*) cygwin=true;;
-esac
-
-
# check if net.ipv6.bindv6only is set to 1
bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
if [ -n "$bindv6only" ] && [ "$bindv6only" -eq "1" ] && [ "$HADOOP_ALLOW_IPV6" != "yes" ]
@@ -209,13 +203,6 @@ fi
# restore ordinary behaviour
unset IFS
-# cygwin path translation
-if $cygwin; then
- HADOOP_PREFIX=`cygpath -w "$HADOOP_PREFIX"`
- HADOOP_LOG_DIR=`cygpath -w "$HADOOP_LOG_DIR"`
- JAVA_LIBRARY_PATH=`cygpath -w "$JAVA_LIBRARY_PATH"`
-fi
-
# setup 'java.library.path' for native-hadoop code if necessary
if [ -d "${HADOOP_PREFIX}/build/native" -o -d "${HADOOP_PREFIX}/$HADOOP_COMMON_LIB_NATIVE_DIR" ]; then
@@ -232,11 +219,6 @@ fi
# setup a default TOOL_PATH
TOOL_PATH="${TOOL_PATH:-$HADOOP_PREFIX/share/hadoop/tools/lib/*}"
-# cygwin path translation
-if $cygwin; then
- JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
-fi
-
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_PREFIX"
@@ -303,15 +285,3 @@ if [ "$HADOOP_MAPRED_HOME/$MAPRED_DIR" != "$HADOOP_YARN_HOME/$YARN_DIR" ] ; then
CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR'/*'
fi
-
-# cygwin path translation
-if $cygwin; then
- HADOOP_HDFS_HOME=`cygpath -w "$HADOOP_HDFS_HOME"`
-fi
-
-# cygwin path translation
-if $cygwin; then
- TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
-fi
-
-
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
new file mode 100644
index 0000000000..90699b1601
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
@@ -0,0 +1,235 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+
+@rem This script runs the hadoop core commands.
+
+@rem Environment Variables
+@rem
+@rem JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+@rem
+@rem HADOOP_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem HADOOP_USER_CLASSPATH_FIRST When defined, the HADOOP_CLASSPATH is
+@rem added in the beginning of the global
+@rem classpath. Can be defined, for example,
+@rem by doing
+@rem export HADOOP_USER_CLASSPATH_FIRST=true
+@rem
+@rem HADOOP_HEAPSIZE The maximum amount of heap to use, in MB.
+@rem Default is 1000.
+@rem
+@rem HADOOP_OPTS Extra Java runtime options.
+@rem
+@rem HADOOP_CLIENT_OPTS when the respective command is run.
+@rem HADOOP_{COMMAND}_OPTS etc HADOOP_JT_OPTS applies to JobTracker
+@rem for e.g. HADOOP_CLIENT_OPTS applies to
+@rem more than one command (fs, dfs, fsck,
+@rem dfsadmin etc)
+@rem
+@rem HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+@rem
+@rem HADOOP_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+call :updatepath %HADOOP_BIN_PATH%
+
+:main
+ setlocal enabledelayedexpansion
+
+ set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+ if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+ )
+
+ call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+
+ set hadoop-command=%1
+ if not defined hadoop-command (
+ goto print_usage
+ )
+
+ call :make_command_arguments %*
+
+ set hdfscommands=namenode secondarynamenode datanode dfs dfsadmin fsck balancer fetchdt oiv dfsgroups
+ for %%i in ( %hdfscommands% ) do (
+ if %hadoop-command% == %%i set hdfscommand=true
+ )
+ if defined hdfscommand (
+ @echo DEPRECATED: Use of this script to execute hdfs command is deprecated. 1>&2
+ @echo Instead use the hdfs command for it. 1>&2
+ if exist %HADOOP_HDFS_HOME%\bin\hdfs.cmd (
+ call %HADOOP_HDFS_HOME%\bin\hdfs.cmd %*
+ goto :eof
+ ) else if exist %HADOOP_HOME%\bin\hdfs.cmd (
+ call %HADOOP_HOME%\bin\hdfs.cmd %*
+ goto :eof
+ ) else (
+ echo HADOOP_HDFS_HOME not found!
+ goto :eof
+ )
+ )
+
+ set mapredcommands=pipes job queue mrgroups mradmin jobtracker tasktracker
+ for %%i in ( %mapredcommands% ) do (
+ if %hadoop-command% == %%i set mapredcommand=true
+ )
+ if defined mapredcommand (
+ @echo DEPRECATED: Use of this script to execute mapred command is deprecated. 1>&2
+ @echo Instead use the mapred command for it. 1>&2
+ if exist %HADOOP_MAPRED_HOME%\bin\mapred.cmd (
+ call %HADOOP_MAPRED_HOME%\bin\mapred.cmd %*
+ goto :eof
+ ) else if exist %HADOOP_HOME%\bin\mapred.cmd (
+ call %HADOOP_HOME%\bin\mapred.cmd %*
+ goto :eof
+ ) else (
+ echo HADOOP_MAPRED_HOME not found!
+ goto :eof
+ )
+ )
+
+ if %hadoop-command% == classpath (
+ @echo %CLASSPATH%
+ goto :eof
+ )
+
+ set corecommands=fs version jar distcp daemonlog archive
+ for %%i in ( %corecommands% ) do (
+ if %hadoop-command% == %%i set corecommand=true
+ )
+ if defined corecommand (
+ call :%hadoop-command%
+ ) else (
+ set CLASSPATH=%CLASSPATH%;%CD%
+ set CLASS=%hadoop-command%
+ )
+
+ set path=%PATH%;%HADOOP_BIN_PATH%
+
+ @rem Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+
+ @rem make sure security appender is turned off
+ if not defined HADOOP_SECURITY_LOGGER (
+ set HADOOP_SECURITY_LOGGER=INFO,NullAppender
+ )
+ set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER%
+
+ call %JAVA% %JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hadoop-command-arguments%
+
+ goto :eof
+
+:fs
+ set CLASS=org.apache.hadoop.fs.FsShell
+ goto :eof
+
+:version
+ set CLASS=org.apache.hadoop.util.VersionInfo
+ goto :eof
+
+:jar
+ set CLASS=org.apache.hadoop.util.RunJar
+ goto :eof
+
+:distcp
+ set CLASS=org.apache.hadoop.tools.DistCp
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ goto :eof
+
+:daemonlog
+ set CLASS=org.apache.hadoop.log.LogLevel
+ goto :eof
+
+:archive
+ set CLASS=org.apache.hadoop.tools.HadoopArchives
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ goto :eof
+
+:updatepath
+ set path_to_add=%*
+ set current_path_comparable=%path%
+ set current_path_comparable=%current_path_comparable: =_%
+ set current_path_comparable=%current_path_comparable:(=_%
+ set current_path_comparable=%current_path_comparable:)=_%
+ set path_to_add_comparable=%path_to_add%
+ set path_to_add_comparable=%path_to_add_comparable: =_%
+ set path_to_add_comparable=%path_to_add_comparable:(=_%
+ set path_to_add_comparable=%path_to_add_comparable:)=_%
+
+ for %%i in ( %current_path_comparable% ) do (
+ if /i "%%i" == "%path_to_add_comparable%" (
+ set path_to_add_exist=true
+ )
+ )
+ set system_path_comparable=
+ set path_to_add_comparable=
+ if not defined path_to_add_exist path=%path_to_add%;%path%
+ set path_to_add=
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _arguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _arguments (
+ set _arguments=%1
+ ) else (
+ set _arguments=!_arguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set hadoop-command-arguments=%_arguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: hadoop [--config confdir] COMMAND
+ @echo where COMMAND is one of:
+ @echo fs run a generic filesystem user client
+ @echo version print the version
+  @echo   jar ^<jar^>            run a jar file
+  @echo   distcp ^<srcurl^> ^<desturl^> copy file or directories recursively
+  @echo   archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive
+ @echo classpath prints the class path needed to get the
+ @echo Hadoop jar and the required libraries
+ @echo daemonlog get/set the log level for each daemon
+ @echo or
+ @echo CLASSNAME run the class named CLASSNAME
+ @echo.
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/rcc b/hadoop-common-project/hadoop-common/src/main/bin/rcc
index 5f75b7c950..22bffffbf2 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/rcc
+++ b/hadoop-common-project/hadoop-common/src/main/bin/rcc
@@ -57,10 +57,5 @@ unset IFS
CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
-# cygwin path translation
-if expr `uname` : 'CYGWIN*' > /dev/null; then
- CLASSPATH=`cygpath -p -w "$CLASSPATH"`
-fi
-
# run it
exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/start-all.cmd b/hadoop-common-project/hadoop-common/src/main/bin/start-all.cmd
new file mode 100644
index 0000000000..9f65b5dd1f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/bin/start-all.cmd
@@ -0,0 +1,52 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+@rem Start all hadoop daemons. Run this on master node.
+
+echo This script is Deprecated. Instead use start-dfs.cmd and start-yarn.cmd
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem start hdfs daemons if hdfs is present
+if exist %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd (
+ call %HADOOP_HDFS_HOME%\sbin\start-dfs.cmd --config %HADOOP_CONF_DIR%
+)
+
+@rem start yarn daemons if yarn is present
+if exist %HADOOP_YARN_HOME%\sbin\start-yarn.cmd (
+ call %HADOOP_YARN_HOME%\sbin\start-yarn.cmd --config %HADOOP_CONF_DIR%
+)
+
+endlocal
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/stop-all.cmd b/hadoop-common-project/hadoop-common/src/main/bin/stop-all.cmd
new file mode 100644
index 0000000000..1d22c79450
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/bin/stop-all.cmd
@@ -0,0 +1,52 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+setlocal enabledelayedexpansion
+
+@rem Stop all hadoop daemons. Run this on master node.
+
+echo This script is Deprecated. Instead use stop-dfs.cmd and stop-yarn.cmd
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem stop hdfs daemons if hdfs is present
+if exist %HADOOP_HDFS_HOME%\sbin\stop-dfs.cmd (
+ call %HADOOP_HDFS_HOME%\sbin\stop-dfs.cmd --config %HADOOP_CONF_DIR%
+)
+
+@rem stop yarn daemons if yarn is present
+if exist %HADOOP_YARN_HOME%\sbin\stop-yarn.cmd (
+ call %HADOOP_YARN_HOME%\sbin\stop-yarn.cmd --config %HADOOP_CONF_DIR%
+)
+
+endlocal
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.cmd b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.cmd
new file mode 100644
index 0000000000..6e34fe9198
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.cmd
@@ -0,0 +1,81 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Set Hadoop-specific environment variables here.
+
+@rem The only required environment variable is JAVA_HOME. All others are
+@rem optional. When running a distributed configuration it is best to
+@rem set JAVA_HOME in this file, so that it is correctly defined on
+@rem remote nodes.
+
+@rem The java implementation to use. Required.
+set JAVA_HOME=%JAVA_HOME%
+
+@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
+@rem set JSVC_HOME=%JSVC_HOME%
+
+@rem set HADOOP_CONF_DIR=
+
+@rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
+if exist %HADOOP_HOME%\contrib\capacity-scheduler (
+ if not defined HADOOP_CLASSPATH (
+ set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+ ) else (
+ set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+ )
+)
+
+@rem The maximum amount of heap to use, in MB. Default is 1000.
+@rem set HADOOP_HEAPSIZE=
+@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+@rem Extra Java runtime options. Empty by default.
+@rem set HADOOP_OPTS=-Djava.net.preferIPv4Stack=true %HADOOP_CLIENT_OPTS%
+
+@rem Command specific options appended to HADOOP_OPTS when specified
+if not defined HADOOP_SECURITY_LOGGER (
+ set HADOOP_SECURITY_LOGGER=INFO,RFAS
+)
+if not defined HDFS_AUDIT_LOGGER (
+ set HDFS_AUDIT_LOGGER=INFO,NullAppender
+)
+
+set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
+set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
+set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
+
+@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+set HADOOP_CLIENT_OPTS=-Xmx128m %HADOOP_CLIENT_OPTS%
+@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
+
+@rem On secure datanodes, user to run the datanode as after dropping privileges
+set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
+
+@rem Where log files are stored. %HADOOP_HOME%/logs by default.
+@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
+
+@rem Where log files are stored in the secure data environment.
+set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
+
+@rem The directory where pid files are stored. /tmp by default.
+@rem NOTE: this should be set to a directory that can only be written to by
+@rem the user that will run the hadoop daemons. Otherwise there is the
+@rem potential for a symlink attack.
+set HADOOP_PID_DIR=%HADOOP_PID_DIR%
+set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
+
+@rem A string representing this instance of hadoop. %USERNAME% by default.
+set HADOOP_IDENT_STRING=%USERNAME%
diff --git a/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/site.xml b/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/site.xml
index 9d8e4941bf..f9ce3becf0 100644
--- a/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/site.xml
+++ b/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/site.xml
@@ -87,7 +87,6 @@ See http://forrest.apache.org/docs/linking.html for more info.
-
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
index de1548e5ea..d2b54a888e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
@@ -35,7 +35,7 @@
/** Filesystem disk space usage statistics.
* Uses the unix 'df' program to get mount points, and java.io.File for
- * space utilization. Tested on Linux, FreeBSD, Cygwin. */
+ * space utilization. Tested on Linux, FreeBSD, Windows. */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class DF extends Shell {
@@ -163,11 +163,24 @@ public String toString() {
mount;
}
+ @Override
+ protected void run() throws IOException {
+ if (WINDOWS) {
+ try {
+ this.mount = dirFile.getCanonicalPath().substring(0,2);
+ } catch (IOException e) {
+ }
+ return;
+ }
+ super.run();
+ }
+
@Override
protected String[] getExecString() {
    // ignoring the error since the exit code is enough
- return new String[] {"bash","-c","exec 'df' '-k' '-P' '" + dirPath
- + "' 2>/dev/null"};
+ return (WINDOWS)? new String[]{"cmd", "/c", "df -k " + dirPath + " 2>nul"}:
+ new String[] {"bash","-c","exec 'df' '-k' '-P' '" + dirPath
+ + "' 2>/dev/null"};
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
index 9a9f1e3efc..db90cfa7aa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
@@ -145,6 +145,20 @@ public long getUsed() throws IOException {
public String getDirPath() {
return dirPath;
}
+
+
+ /**
+ * Override to hook in DUHelper class. Maybe this can be used more
+ * generally as well on Unix/Linux based systems
+ */
+ @Override
+ protected void run() throws IOException {
+ if (WINDOWS) {
+ used.set(DUHelper.getFolderUsage(dirPath));
+ return;
+ }
+ super.run();
+ }
/**
* Start the disk usage checking thread.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DUHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DUHelper.java
new file mode 100644
index 0000000000..ddecb456fa
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DUHelper.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.File;
+import org.apache.hadoop.util.Shell;
+
+public class DUHelper {
+
+ private int folderCount=0;
+ private int fileCount=0;
+ private double usage = 0;
+ private long folderSize = -1;
+
+ private DUHelper() {
+
+ }
+
+ public static long getFolderUsage(String folder) {
+ return new DUHelper().calculateFolderSize(folder);
+ }
+
+ private long calculateFolderSize(String folder) {
+ if (folder == null)
+ throw new IllegalArgumentException("folder");
+ File f = new File(folder);
+ return folderSize = getFileSize(f);
+ }
+
+ public String check(String folder) {
+ if (folder == null)
+ throw new IllegalArgumentException("folder");
+ File f = new File(folder);
+
+ folderSize = getFileSize(f);
+ usage = 1.0*(f.getTotalSpace() - f.getFreeSpace())/ f.getTotalSpace();
+ return String.format("used %d files %d disk in use %f", folderSize, fileCount, usage);
+ }
+
+ public long getFileCount() {
+ return fileCount;
+ }
+
+ public double getUsage() {
+ return usage;
+ }
+
+ private long getFileSize(File folder) {
+
+ folderCount++;
+ //Counting the total folders
+ long foldersize = 0;
+ if (folder.isFile())
+ return folder.length();
+ File[] filelist = folder.listFiles();
+ if (filelist == null) {
+ return 0;
+ }
+ for (int i = 0; i < filelist.length; i++) {
+ if (filelist[i].isDirectory()) {
+ foldersize += getFileSize(filelist[i]);
+ } else {
+ fileCount++; //Counting the total files
+ foldersize += filelist[i].length();
+ }
+ }
+ return foldersize;
+ }
+
+ public static void main(String[] args) {
+ if (Shell.WINDOWS)
+ System.out.println("Windows: "+ DUHelper.getFolderUsage(args[0]));
+ else
+ System.out.println("Other: " + DUHelper.getFolderUsage(args[0]));
+ }
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 19c19cd2b6..2cc834852e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -19,18 +19,29 @@
package org.apache.hadoop.fs;
import java.io.*;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
+import java.util.List;
+import java.util.Map;
+import java.util.jar.Attributes;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Manifest;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
+import org.apache.commons.collections.map.CaseInsensitiveMap;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
@@ -46,6 +57,13 @@ public class FileUtil {
private static final Log LOG = LogFactory.getLog(FileUtil.class);
+ /* The error code is defined in winutils to indicate insufficient
+   * privilege to create symbolic links. This value needs to be kept in
+ * sync with the constant of the same name in:
+ * "src\winutils\common.h"
+ * */
+ public static final int SYMLINK_NO_PRIVILEGE = 2;
+
/**
* convert an array of FileStatus to an array of Path
*
@@ -469,34 +487,6 @@ private static Path checkDest(String srcName, FileSystem dstFS, Path dst,
return dst;
}
- /**
- * This class is only used on windows to invoke the cygpath command.
- */
- private static class CygPathCommand extends Shell {
- String[] command;
- String result;
- CygPathCommand(String path) throws IOException {
- command = new String[]{"cygpath", "-u", path};
- run();
- }
- String getResult() throws IOException {
- return result;
- }
- @Override
- protected String[] getExecString() {
- return command;
- }
- @Override
- protected void parseExecResult(BufferedReader lines) throws IOException {
- String line = lines.readLine();
- if (line == null) {
- throw new IOException("Can't convert '" + command[2] +
- " to a cygwin path");
- }
- result = line;
- }
- }
-
/**
* Convert a os-native filename to a path that works for the shell.
* @param filename The filename to convert
@@ -504,11 +494,7 @@ protected void parseExecResult(BufferedReader lines) throws IOException {
* @throws IOException on windows, there can be problems with the subprocess
*/
public static String makeShellPath(String filename) throws IOException {
- if (Path.WINDOWS) {
- return new CygPathCommand(filename).getResult();
- } else {
- return filename;
- }
+ return filename;
}
/**
@@ -658,7 +644,7 @@ private static void unTarUsingTar(File inFile, File untarDir,
untarCommand.append(FileUtil.makeShellPath(untarDir));
untarCommand.append("' ; ");
untarCommand.append("tar -xf ");
-
+
if (gzipped) {
untarCommand.append(" -)");
} else {
@@ -731,7 +717,7 @@ private static void unpackEntries(TarArchiveInputStream tis,
/**
* Class for creating hardlinks.
- * Supports Unix, Cygwin, WindXP.
+   * Supports Unix, WinXP.
* @deprecated Use {@link org.apache.hadoop.fs.HardLink}
*/
@Deprecated
@@ -743,21 +729,67 @@ public static class HardLink extends org.apache.hadoop.fs.HardLink {
/**
* Create a soft link between a src and destination
- * only on a local disk. HDFS does not support this
+ * only on a local disk. HDFS does not support this.
+ * On Windows, when symlink creation fails due to security
+ * setting, we will log a warning. The return code in this
+ * case is 2.
* @param target the target for symlink
* @param linkname the symlink
* @return value returned by the command
*/
public static int symLink(String target, String linkname) throws IOException{
- String cmd = "ln -s " + target + " " + linkname;
- Process p = Runtime.getRuntime().exec(cmd, null);
- int returnVal = -1;
- try{
- returnVal = p.waitFor();
- } catch(InterruptedException e){
- //do nothing as of yet
+ // Run the input paths through Java's File so that they are converted to the
+ // native OS form
+ File targetFile = new File(target);
+ File linkFile = new File(linkname);
+
+ // If not on Java7+, copy a file instead of creating a symlink since
+ // Java6 has close to no support for symlinks on Windows. Specifically
+ // File#length and File#renameTo do not work as expected.
+ // (see HADOOP-9061 for additional details)
+ // We still create symlinks for directories, since the scenario in this
+ // case is different. The directory content could change in which
+ // case the symlink loses its purpose (for example task attempt log folder
+ // is symlinked under userlogs and userlogs are generated afterwards).
+ if (Shell.WINDOWS && !Shell.isJava7OrAbove() && targetFile.isFile()) {
+ try {
+ LOG.info("FileUtil#symlink: On Java6, copying file instead "
+ + linkname + " -> " + target);
+ org.apache.commons.io.FileUtils.copyFile(targetFile, linkFile);
+ } catch (IOException ex) {
+ LOG.warn("FileUtil#symlink failed to copy the file with error: "
+ + ex.getMessage());
+ // Exit with non-zero exit code
+ return 1;
+ }
+ return 0;
}
- return returnVal;
+
+ String[] cmd = Shell.getSymlinkCommand(targetFile.getPath(),
+ linkFile.getPath());
+ ShellCommandExecutor shExec = new ShellCommandExecutor(cmd);
+ try {
+ shExec.execute();
+ } catch (Shell.ExitCodeException ec) {
+ int returnVal = ec.getExitCode();
+ if (Shell.WINDOWS && returnVal == SYMLINK_NO_PRIVILEGE) {
+ LOG.warn("Fail to create symbolic links on Windows. "
+ + "The default security settings in Windows disallow non-elevated "
+ + "administrators and all non-administrators from creating symbolic links. "
+ + "This behavior can be changed in the Local Security Policy management console");
+ } else if (returnVal != 0) {
+ LOG.warn("Command '" + StringUtils.join(" ", cmd) + "' failed "
+ + returnVal + " with: " + ec.getMessage());
+ }
+ return returnVal;
+ } catch (IOException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Error while create symlink " + linkname + " to " + target
+ + "." + " Exception: " + StringUtils.stringifyException(e));
+ }
+ throw e;
+ }
+ return shExec.getExitCode();
}
/**
@@ -781,30 +813,120 @@ public static int chmod(String filename, String perm
* @param recursive true, if permissions should be changed recursively
* @return the exit code from the command.
* @throws IOException
- * @throws InterruptedException
*/
public static int chmod(String filename, String perm, boolean recursive)
- throws IOException, InterruptedException {
- StringBuilder cmdBuf = new StringBuilder();
- cmdBuf.append("chmod ");
- if (recursive) {
- cmdBuf.append("-R ");
- }
- cmdBuf.append(perm).append(" ");
- cmdBuf.append(filename);
- String[] shellCmd = {"bash", "-c" ,cmdBuf.toString()};
- ShellCommandExecutor shExec = new ShellCommandExecutor(shellCmd);
+ throws IOException {
+ String [] cmd = Shell.getSetPermissionCommand(perm, recursive);
+ String[] args = new String[cmd.length + 1];
+ System.arraycopy(cmd, 0, args, 0, cmd.length);
+ args[cmd.length] = new File(filename).getPath();
+ ShellCommandExecutor shExec = new ShellCommandExecutor(args);
try {
shExec.execute();
- }catch(Exception e) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Error while changing permission : " + filename
- + " Exception: ", e);
+ }catch(IOException e) {
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Error while changing permission : " + filename
+ +" Exception: " + StringUtils.stringifyException(e));
}
}
return shExec.getExitCode();
}
+
+ /**
+ * Set the ownership on a file / directory. User name and group name
+ * cannot both be null.
+ * @param file the file to change
+ * @param username the new user owner name
+ * @param groupname the new group owner name
+ * @throws IOException
+ */
+ public static void setOwner(File file, String username,
+ String groupname) throws IOException {
+ if (username == null && groupname == null) {
+ throw new IOException("username == null && groupname == null");
+ }
+ String arg = (username == null ? "" : username)
+ + (groupname == null ? "" : ":" + groupname);
+ String [] cmd = Shell.getSetOwnerCommand(arg);
+ execCommand(file, cmd);
+ }
+
+ /**
+ * Set permissions to the required value. Uses the java primitives instead
+ * of forking if group == other.
+ * @param f the file to change
+ * @param permission the new permissions
+ * @throws IOException
+ */
+ public static void setPermission(File f, FsPermission permission
+ ) throws IOException {
+ FsAction user = permission.getUserAction();
+ FsAction group = permission.getGroupAction();
+ FsAction other = permission.getOtherAction();
+
+ // use the native/fork if the group/other permissions are different
+ // or if the native is available or on Windows
+ if (group != other || NativeIO.isAvailable() || Shell.WINDOWS) {
+ execSetPermission(f, permission);
+ return;
+ }
+
+ boolean rv = true;
+
+ // read perms
+ rv = f.setReadable(group.implies(FsAction.READ), false);
+ checkReturnValue(rv, f, permission);
+ if (group.implies(FsAction.READ) != user.implies(FsAction.READ)) {
+ rv = f.setReadable(user.implies(FsAction.READ), true);
+ checkReturnValue(rv, f, permission);
+ }
+
+ // write perms
+ rv = f.setWritable(group.implies(FsAction.WRITE), false);
+ checkReturnValue(rv, f, permission);
+ if (group.implies(FsAction.WRITE) != user.implies(FsAction.WRITE)) {
+ rv = f.setWritable(user.implies(FsAction.WRITE), true);
+ checkReturnValue(rv, f, permission);
+ }
+
+ // exec perms
+ rv = f.setExecutable(group.implies(FsAction.EXECUTE), false);
+ checkReturnValue(rv, f, permission);
+ if (group.implies(FsAction.EXECUTE) != user.implies(FsAction.EXECUTE)) {
+ rv = f.setExecutable(user.implies(FsAction.EXECUTE), true);
+ checkReturnValue(rv, f, permission);
+ }
+ }
+
+ private static void checkReturnValue(boolean rv, File p,
+ FsPermission permission
+ ) throws IOException {
+ if (!rv) {
+ throw new IOException("Failed to set permissions of path: " + p +
+ " to " +
+ String.format("%04o", permission.toShort()));
+ }
+ }
+ private static void execSetPermission(File f,
+ FsPermission permission
+ ) throws IOException {
+ if (NativeIO.isAvailable()) {
+ NativeIO.POSIX.chmod(f.getCanonicalPath(), permission.toShort());
+ } else {
+ execCommand(f, Shell.getSetPermissionCommand(
+ String.format("%04o", permission.toShort()), false));
+ }
+ }
+
+ static String execCommand(File f, String... cmd) throws IOException {
+ String[] args = new String[cmd.length + 1];
+ System.arraycopy(cmd, 0, args, 0, cmd.length);
+ args[cmd.length] = f.getCanonicalPath();
+ String output = Shell.execCommand(args);
+ return output;
+ }
+
/**
* Create a tmp file for a base file.
* @param basefile the base file of the tmp
@@ -892,4 +1014,97 @@ public static String[] list(File dir) throws IOException {
}
return fileNames;
}
+
+ /**
+ * Create a jar file at the given path, containing a manifest with a classpath
+ * that references all specified entries.
+ *
+ * Some platforms may have an upper limit on command line length. For example,
+ * the maximum command line length on Windows is 8191 characters, but the
+ * length of the classpath may exceed this. To work around this limitation,
+ * use this method to create a small intermediate jar with a manifest that
+ * contains the full classpath. It returns the absolute path to the new jar,
+ * which the caller may set as the classpath for a new process.
+ *
+ * Environment variable evaluation is not supported within a jar manifest, so
+ * this method expands environment variables before inserting classpath entries
+ * to the manifest. The method parses environment variables according to
+ * platform-specific syntax (%VAR% on Windows, or $VAR otherwise). On Windows,
+ * environment variables are case-insensitive. For example, %VAR% and %var%
+ * evaluate to the same value.
+ *
+ * Specifying the classpath in a jar manifest does not support wildcards, so
+ * this method expands wildcards internally. Any classpath entry that ends
+ * with * is translated to all files at that path with extension .jar or .JAR.
+ *
+ * @param inputClassPath String input classpath to bundle into the jar manifest
+ * @param pwd Path to working directory to save jar
+ * @return String absolute path to new jar
+ * @throws IOException if there is an I/O error while writing the jar file
+ */
+ public static String createJarWithClassPath(String inputClassPath, Path pwd)
+ throws IOException {
+ // Replace environment variables, case-insensitive on Windows
+ @SuppressWarnings("unchecked")
+ Map env = Shell.WINDOWS ?
+ new CaseInsensitiveMap(System.getenv()) : System.getenv();
+ String[] classPathEntries = inputClassPath.split(File.pathSeparator);
+ for (int i = 0; i < classPathEntries.length; ++i) {
+ classPathEntries[i] = StringUtils.replaceTokens(classPathEntries[i],
+ StringUtils.ENV_VAR_PATTERN, env);
+ }
+ File workingDir = new File(pwd.toString());
+ if (!workingDir.mkdirs()) {
+ // If mkdirs returns false because the working directory already exists,
+ // then this is acceptable. If it returns false due to some other I/O
+ // error, then this method will fail later with an IOException while saving
+ // the jar.
+ LOG.debug("mkdirs false for " + workingDir + ", execution will continue");
+ }
+
+ // Append all entries
+ List<String> classPathEntryList = new ArrayList<String>(
+ classPathEntries.length);
+ for (String classPathEntry: classPathEntries) {
+ if (classPathEntry.endsWith("*")) {
+ // Append all jars that match the wildcard
+ Path globPath = new Path(classPathEntry).suffix("{.jar,.JAR}");
+ FileStatus[] wildcardJars = FileContext.getLocalFSFileContext().util()
+ .globStatus(globPath);
+ if (wildcardJars != null) {
+ for (FileStatus wildcardJar: wildcardJars) {
+ classPathEntryList.add(wildcardJar.getPath().toUri().toURL()
+ .toExternalForm());
+ }
+ }
+ } else {
+ // Append just this jar
+ classPathEntryList.add(new File(classPathEntry).toURI().toURL()
+ .toExternalForm());
+ }
+ }
+ String jarClassPath = StringUtils.join(" ", classPathEntryList);
+
+ // Create the manifest
+ Manifest jarManifest = new Manifest();
+ jarManifest.getMainAttributes().putValue(
+ Attributes.Name.MANIFEST_VERSION.toString(), "1.0");
+ jarManifest.getMainAttributes().putValue(
+ Attributes.Name.CLASS_PATH.toString(), jarClassPath);
+
+ // Write the manifest to output JAR file
+ File classPathJar = File.createTempFile("classpath-", ".jar", workingDir);
+ FileOutputStream fos = null;
+ BufferedOutputStream bos = null;
+ JarOutputStream jos = null;
+ try {
+ fos = new FileOutputStream(classPathJar);
+ bos = new BufferedOutputStream(fos);
+ jos = new JarOutputStream(bos, jarManifest);
+ } finally {
+ IOUtils.cleanup(LOG, jos, bos, fos);
+ }
+
+ return classPathJar.getCanonicalPath();
+ }
}
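
The createJarWithClassPath helper above is the workaround for the 8191-character
command line limit described in its javadoc. A minimal usage sketch (illustrative
only; the working directory C:/hdc/tmp and the way the jar is handed to a child
JVM are assumptions, not part of this patch):

    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class ClassPathJarExample {
      public static void main(String[] args) throws Exception {
        // An arbitrarily long classpath, e.g. everything the current JVM sees.
        String longClassPath = System.getProperty("java.class.path");
        // Hypothetical working directory for the intermediate jar.
        Path workDir = new Path("C:/hdc/tmp");
        // Bundle the classpath into a manifest-only jar ...
        String manifestJar = FileUtil.createJarWithClassPath(longClassPath, workDir);
        // ... and start the child process with "-classpath <manifestJar>" so the
        // command line stays far below the Windows limit.
        System.out.println("classpath jar: " + manifestJar);
      }
    }
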
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java
index 2ea115bbaa..5e462cdc44 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java
@@ -25,9 +25,11 @@
import java.io.InputStreamReader;
import java.util.Arrays;
+import org.apache.hadoop.util.Shell;
+
/**
* Class for creating hardlinks.
- * Supports Unix/Linux, WinXP/2003/Vista via Cygwin, and Mac OS X.
+ * Supports Unix/Linux, Windows via winutils, and Mac OS X.
*
* The HardLink class was formerly a static inner class of FSUtil,
* and the methods provided were blatantly non-thread-safe.
@@ -41,7 +43,7 @@ public class HardLink {
public enum OSType {
OS_TYPE_UNIX,
- OS_TYPE_WINXP,
+ OS_TYPE_WIN,
OS_TYPE_SOLARIS,
OS_TYPE_MAC,
OS_TYPE_FREEBSD
@@ -56,7 +58,7 @@ public enum OSType {
//methods without instantiating the HardLink object
static {
osType = getOSType();
- if (osType == OSType.OS_TYPE_WINXP) {
+ if (osType == OSType.OS_TYPE_WIN) {
// Windows
getHardLinkCommand = new HardLinkCGWin();
} else {
@@ -80,14 +82,8 @@ public HardLink() {
static private OSType getOSType() {
String osName = System.getProperty("os.name");
- if (osName.contains("Windows") &&
- (osName.contains("XP")
- || osName.contains("2003")
- || osName.contains("Vista")
- || osName.contains("Windows_7")
- || osName.contains("Windows 7")
- || osName.contains("Windows7"))) {
- return OSType.OS_TYPE_WINXP;
+ if (Shell.WINDOWS) {
+ return OSType.OS_TYPE_WIN;
}
else if (osName.contains("SunOS")
|| osName.contains("Solaris")) {
@@ -258,11 +254,6 @@ int getMaxAllowedCmdArgLength() {
/**
* Implementation of HardLinkCommandGetter class for Windows
- *
- * Note that the linkCount shell command for Windows is actually
- * a Cygwin shell command, and depends on ${cygwin}/bin
- * being in the Windows PATH environment variable, so
- * stat.exe can be found.
*/
static class HardLinkCGWin extends HardLinkCommandGetter {
//The Windows command getter impl class and its member fields are
@@ -270,14 +261,16 @@ static class HardLinkCGWin extends HardLinkCommandGetter {
//unit testing (sort of) on non-Win servers
static String[] hardLinkCommand = {
- "fsutil","hardlink","create", null, null};
+ Shell.WINUTILS,"hardlink","create", null, null};
static String[] hardLinkMultPrefix = {
"cmd","/q","/c","for", "%f", "in", "("};
static String hardLinkMultDir = "\\%f";
static String[] hardLinkMultSuffix = {
- ")", "do", "fsutil", "hardlink", "create", null,
+ ")", "do", Shell.WINUTILS, "hardlink", "create", null,
"%f", "1>NUL"};
- static String[] getLinkCountCommand = {"stat","-c%h", null};
+ static String[] getLinkCountCommand = {
+ Shell.WINUTILS, "hardlink",
+ "stat", null};
//Windows guarantees only 8K - 1 bytes cmd length.
//Subtract another 64b to allow for Java 'exec' overhead
static final int maxAllowedCmdArgLength = 8*1024 - 65;
@@ -328,12 +321,6 @@ String[] linkCount(File file)
String[] buf = new String[getLinkCountCommand.length];
System.arraycopy(getLinkCountCommand, 0, buf, 0,
getLinkCountCommand.length);
- //The linkCount command is actually a Cygwin shell command,
- //not a Windows shell command, so we should use "makeShellPath()"
- //instead of "getCanonicalPath()". However, that causes another
- //shell exec to "cygpath.exe", and "stat.exe" actually can handle
- //DOS-style paths (it just prints a couple hundred bytes of warning
- //to stderr), so we use the more efficient "getCanonicalPath()".
buf[getLinkCountCommand.length - 1] = file.getCanonicalPath();
return buf;
}
@@ -354,7 +341,7 @@ int getLinkMultArgLength(File fileDir, String[] fileBaseNames, File linkDir)
//add the fixed overhead of the hardLinkMult command
//(prefix, suffix, and Dir suffix)
sum += ("cmd.exe /q /c for %f in ( ) do "
- + "fsutil hardlink create \\%f %f 1>NUL ").length();
+ + Shell.WINUTILS + " hardlink create \\%f %f 1>NUL ").length();
return sum;
}
@@ -581,14 +568,10 @@ public static int getLinkCount(File fileName) throws IOException {
/* Create an IOException for failing to get link count. */
private static IOException createIOException(File f, String message,
String error, int exitvalue, Exception cause) {
-
- final String winErrMsg = "; Windows errors in getLinkCount are often due "
- + "to Cygwin misconfiguration";
final String s = "Failed to get link count on file " + f
+ ": message=" + message
+ "; error=" + error
- + ((osType == OSType.OS_TYPE_WINXP) ? winErrMsg : "")
+ "; exit value=" + exitvalue;
return (cause == null) ? new IOException(s) : new IOException(s, cause);
}
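
With fsutil and Cygwin's stat replaced by winutils, the public HardLink API is
unchanged; only the commands it shells out to on Windows differ. A short sketch,
assuming winutils.exe is on the PATH; the file names are hypothetical, and
createHardLink's signature is taken from the existing HardLink API rather than
from this hunk:

    import java.io.File;
    import org.apache.hadoop.fs.HardLink;

    public class HardLinkExample {
      public static void main(String[] args) throws Exception {
        File original = new File("C:/hdc/data/blk_001");
        File link     = new File("C:/hdc/data/blk_001.link");
        // On Windows this now runs "winutils hardlink create"; on Unix it uses ln.
        HardLink.createHardLink(original, link);
        // Backed by "winutils hardlink stat" on Windows.
        int count = HardLink.getLinkCount(original);
        System.out.println("link count = " + count);  // 2 after one extra link
      }
    }
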
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 0a2dfe7d39..feef1c7bab 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -21,6 +21,7 @@
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
+import java.util.regex.Pattern;
import org.apache.avro.reflect.Stringable;
import org.apache.commons.lang.StringUtils;
@@ -43,9 +44,17 @@ public class Path implements Comparable {
public static final String CUR_DIR = ".";
- static final boolean WINDOWS
+ public static final boolean WINDOWS
= System.getProperty("os.name").startsWith("Windows");
+ /**
+ * Pre-compiled regular expressions to detect path formats.
+ */
+ private static final Pattern hasUriScheme =
+ Pattern.compile("^[a-zA-Z][a-zA-Z0-9+-.]+:");
+ private static final Pattern hasDriveLetterSpecifier =
+ Pattern.compile("^/?[a-zA-Z]:");
+
private URI uri; // a hierarchical uri
/** Resolve a child path against a parent path. */
@@ -81,7 +90,7 @@ public Path(Path parent, Path child) {
resolved.getPath(), resolved.getFragment());
}
- private void checkPathArg( String path ) {
+ private void checkPathArg( String path ) throws IllegalArgumentException {
// disallow construction of a Path from an empty string
if ( path == null ) {
throw new IllegalArgumentException(
@@ -95,15 +104,16 @@ private void checkPathArg( String path ) {
/** Construct a path from a String. Path strings are URIs, but with
* unescaped elements and some additional normalization. */
- public Path(String pathString) {
+ public Path(String pathString) throws IllegalArgumentException {
checkPathArg( pathString );
// We can't use 'new URI(String)' directly, since it assumes things are
// escaped, which we don't require of Paths.
// add a slash in front of paths with Windows drive letters
- if (hasWindowsDrive(pathString, false))
- pathString = "/"+pathString;
+ if (hasWindowsDrive(pathString) && pathString.charAt(0) != '/') {
+ pathString = "/" + pathString;
+ }
// parse uri components
String scheme = null;
@@ -151,22 +161,54 @@ public Path(String scheme, String authority, String path) {
private void initialize(String scheme, String authority, String path,
String fragment) {
try {
- this.uri = new URI(scheme, authority, normalizePath(path), null, fragment)
+ this.uri = new URI(scheme, authority, normalizePath(scheme, path), null, fragment)
.normalize();
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
}
- private String normalizePath(String path) {
- // remove double slashes & backslashes
+ /**
+ * Merge 2 paths such that the second path is appended relative to the first.
+ * The returned path has the scheme and authority of the first path. On
+ * Windows, the drive specification in the second path is discarded.
+ *
+ * @param path1 Path first path
+ * @param path2 Path second path, to be appended relative to path1
+ * @return Path merged path
+ */
+ public static Path mergePaths(Path path1, Path path2) {
+ String path2Str = path2.toUri().getPath();
+ if(hasWindowsDrive(path2Str)) {
+ path2Str = path2Str.substring(path2Str.indexOf(':')+1);
+ }
+ return new Path(path1 + path2Str);
+ }
+
+ /**
+ * Normalize a path string to use non-duplicated forward slashes as
+ * the path separator and remove any trailing path separators.
+ * @param scheme Supplies the URI scheme. Used to deduce whether we
+ * should replace backslashes or not.
+ * @param path Supplies the scheme-specific part
+ * @return Normalized path string.
+ */
+ private static String normalizePath(String scheme, String path) {
+ // Remove double forward slashes.
path = StringUtils.replace(path, "//", "/");
- if (Path.WINDOWS) {
+
+ // Remove backslashes if this looks like a Windows path. Avoid
+ // the substitution if it looks like a non-local URI.
+ if (WINDOWS &&
+ (hasWindowsDrive(path) ||
+ (scheme == null) ||
+ (scheme.isEmpty()) ||
+ (scheme.equals("file")))) {
path = StringUtils.replace(path, "\\", "/");
}
// trim trailing slash from non-root path (ignoring windows drive)
- int minLength = hasWindowsDrive(path, true) ? 4 : 1;
+ int minLength = hasWindowsDrive(path) ? 4 : 1;
if (path.length() > minLength && path.endsWith("/")) {
path = path.substring(0, path.length()-1);
}
@@ -174,17 +216,29 @@ private String normalizePath(String path) {
return path;
}
- private boolean hasWindowsDrive(String path, boolean slashed) {
- if (!WINDOWS) return false;
- int start = slashed ? 1 : 0;
- return
- path.length() >= start+2 &&
- (slashed ? path.charAt(0) == '/' : true) &&
- path.charAt(start+1) == ':' &&
- ((path.charAt(start) >= 'A' && path.charAt(start) <= 'Z') ||
- (path.charAt(start) >= 'a' && path.charAt(start) <= 'z'));
+ private static boolean hasWindowsDrive(String path) {
+ return (WINDOWS && hasDriveLetterSpecifier.matcher(path).find());
}
+ /**
+ * Determine whether a given path string represents an absolute path on
+ * Windows. e.g. "C:/a/b" is an absolute path. "C:a/b" is not.
+ *
+ * @param pathString Supplies the path string to evaluate.
+ * @param slashed true if the given path is prefixed with "/".
+ * @return true if the supplied path looks like an absolute path with a Windows
+ * drive-specifier.
+ */
+ public static boolean isWindowsAbsolutePath(final String pathString,
+ final boolean slashed) {
+ int start = (slashed ? 1 : 0);
+
+ return
+ hasWindowsDrive(pathString) &&
+ pathString.length() >= (start + 3) &&
+ ((pathString.charAt(start + 2) == SEPARATOR_CHAR) ||
+ (pathString.charAt(start + 2) == '\\'));
+ }
/** Convert this to a URI. */
public URI toUri() { return uri; }
@@ -207,7 +261,7 @@ public boolean isAbsoluteAndSchemeAuthorityNull() {
* True if the path component (i.e. directory) of this URI is absolute.
*/
public boolean isUriPathAbsolute() {
- int start = hasWindowsDrive(uri.getPath(), true) ? 3 : 0;
+ int start = hasWindowsDrive(uri.getPath()) ? 3 : 0;
return uri.getPath().startsWith(SEPARATOR, start);
}
@@ -241,7 +295,7 @@ public String getName() {
public Path getParent() {
String path = uri.getPath();
int lastSlash = path.lastIndexOf('/');
- int start = hasWindowsDrive(path, true) ? 3 : 0;
+ int start = hasWindowsDrive(path) ? 3 : 0;
if ((path.length() == start) || // empty path
(lastSlash == start && path.length() == start+1)) { // at root
return null;
@@ -250,7 +304,7 @@ public Path getParent() {
if (lastSlash==-1) {
parent = CUR_DIR;
} else {
- int end = hasWindowsDrive(path, true) ? 3 : 0;
+ int end = hasWindowsDrive(path) ? 3 : 0;
parent = path.substring(0, lastSlash==end?end+1:lastSlash);
}
return new Path(uri.getScheme(), uri.getAuthority(), parent);
@@ -277,7 +331,7 @@ public String toString() {
if (uri.getPath() != null) {
String path = uri.getPath();
if (path.indexOf('/')==0 &&
- hasWindowsDrive(path, true) && // has windows drive
+ hasWindowsDrive(path) && // has windows drive
uri.getScheme() == null && // but no scheme
uri.getAuthority() == null) // or authority
path = path.substring(1); // remove slash before drive
@@ -364,7 +418,7 @@ public Path makeQualified(URI defaultUri, Path workingDir ) {
URI newUri = null;
try {
newUri = new URI(scheme, authority ,
- normalizePath(pathUri.getPath()), null, fragment);
+ normalizePath(scheme, pathUri.getPath()), null, fragment);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
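
The behaviour of the new Path.mergePaths and Path.isWindowsAbsolutePath helpers
is easiest to see with concrete values. A sketch of the expected results, assuming
it runs on Windows (Path.WINDOWS is true); on other platforms drive letters are
not detected, so the drive specifier would not be stripped:

    import org.apache.hadoop.fs.Path;

    public class PathWindowsExample {
      public static void main(String[] args) {
        // The drive specifier of the second path is discarded and the remainder is
        // appended to the first path; this is what TrashPolicyDefault now uses.
        Path trashRoot = new Path("/user/alice/.Trash/Current");
        Path deleted   = new Path("C:/work/report.txt");
        System.out.println(Path.mergePaths(trashRoot, deleted));
        // -> /user/alice/.Trash/Current/work/report.txt

        // "C:/a/b" is absolute, "C:a/b" (drive-relative) is not.
        System.out.println(Path.isWindowsAbsolutePath("C:/a/b", false));   // true
        System.out.println(Path.isWindowsAbsolutePath("C:a/b", false));    // false
        System.out.println(Path.isWindowsAbsolutePath("/C:/a/b", true));   // true
      }
    }
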
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 88b877d146..7fe1e8bc60 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -508,9 +508,10 @@ private boolean isPermissionLoaded() {
return !super.getOwner().isEmpty();
}
- RawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) {
+ RawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) {
super(f.length(), f.isDirectory(), 1, defaultBlockSize,
- f.lastModified(), fs.makeQualified(new Path(f.getPath())));
+ f.lastModified(), new Path(f.getPath()).makeQualified(fs.getUri(),
+ fs.getWorkingDirectory()));
}
@Override
@@ -541,9 +542,10 @@ public String getGroup() {
private void loadPermissionInfo() {
IOException e = null;
try {
- StringTokenizer t = new StringTokenizer(
- execCommand(new File(getPath().toUri()),
- Shell.getGET_PERMISSION_COMMAND()));
+ String output = FileUtil.execCommand(new File(getPath().toUri()),
+ Shell.getGetPermissionCommand());
+ StringTokenizer t =
+ new StringTokenizer(output, Shell.TOKEN_SEPARATOR_REGEX);
//expected format
//-rw------- 1 username groupname ...
String permission = t.nextToken();
@@ -552,7 +554,17 @@ private void loadPermissionInfo() {
}
setPermission(FsPermission.valueOf(permission));
t.nextToken();
- setOwner(t.nextToken());
+
+ String owner = t.nextToken();
+ // If on a Windows domain, the token format is DOMAIN\\user and we want to
+ // extract only the user name
+ if (Shell.WINDOWS) {
+ int i = owner.indexOf('\\');
+ if (i != -1)
+ owner = owner.substring(i + 1);
+ }
+ setOwner(owner);
+
setGroup(t.nextToken());
} catch (Shell.ExitCodeException ioe) {
if (ioe.getExitCode() != 1) {
@@ -588,17 +600,7 @@ public void write(DataOutput out) throws IOException {
@Override
public void setOwner(Path p, String username, String groupname)
throws IOException {
- if (username == null && groupname == null) {
- throw new IOException("username == null && groupname == null");
- }
-
- if (username == null) {
- execCommand(pathToFile(p), Shell.SET_GROUP_COMMAND, groupname);
- } else {
- //OWNER[:[GROUP]]
- String s = username + (groupname == null? "": ":" + groupname);
- execCommand(pathToFile(p), Shell.SET_OWNER_COMMAND, s);
- }
+ FileUtil.setOwner(pathToFile(p), username, groupname);
}
/**
@@ -608,20 +610,12 @@ public void setOwner(Path p, String username, String groupname)
public void setPermission(Path p, FsPermission permission)
throws IOException {
if (NativeIO.isAvailable()) {
- NativeIO.chmod(pathToFile(p).getCanonicalPath(),
+ NativeIO.POSIX.chmod(pathToFile(p).getCanonicalPath(),
permission.toShort());
} else {
- execCommand(pathToFile(p), Shell.SET_PERMISSION_COMMAND,
- String.format("%05o", permission.toShort()));
+ String perm = String.format("%04o", permission.toShort());
+ Shell.execCommand(Shell.getSetPermissionCommand(perm, false,
+ FileUtil.makeShellPath(pathToFile(p), true)));
}
}
-
- private static String execCommand(File f, String... cmd) throws IOException {
- String[] args = new String[cmd.length + 1];
- System.arraycopy(cmd, 0, args, 0, cmd.length);
- args[cmd.length] = FileUtil.makeShellPath(f, true);
- String output = Shell.execCommand(args);
- return output;
- }
-
-}
\ No newline at end of file
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 70db687f0c..c35c8dff66 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -92,7 +92,7 @@ public void initialize(Configuration conf, FileSystem fs, Path home) {
}
private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
- return new Path(basePath + rmFilePath.toUri().getPath());
+ return Path.mergePaths(basePath, rmFilePath);
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java
index 85178f42d0..f28d4f09ca 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java
@@ -89,11 +89,9 @@ public void createSymlink(Path target, Path link, boolean createParent)
}
// NB: Use createSymbolicLink in java.nio.file.Path once available
try {
- Shell.execCommand(Shell.LINK_COMMAND, "-s",
- new URI(target.toString()).getPath(),
- new URI(link.toString()).getPath());
- } catch (URISyntaxException x) {
- throw new IOException("Invalid symlink path: "+x.getMessage());
+ Shell.execCommand(Shell.getSymlinkCommand(
+ getPathWithoutSchemeAndAuthority(target),
+ getPathWithoutSchemeAndAuthority(link)));
} catch (IOException x) {
throw new IOException("Unable to create symlink: "+x.getMessage());
}
@@ -176,4 +174,13 @@ public Path getLinkTarget(Path f) throws IOException {
*/
throw new AssertionError();
}
+
+ private static String getPathWithoutSchemeAndAuthority(Path path) {
+ // This code depends on Path.toString() to remove the leading slash before
+ // the drive specification on Windows.
+ Path newPath = path.isUriPathAbsolute() ?
+ new Path(null, null, path.toUri().getPath()) :
+ path;
+ return newPath.toString();
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index f689930735..7540163f35 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -21,6 +21,8 @@
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
import java.util.LinkedList;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -72,8 +74,12 @@ protected void setWriteChecksum(boolean flag) {
*/
protected void getLocalDestination(LinkedList<String> args)
throws IOException {
- String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
- dst = new PathData(new File(pathString), getConf());
+ try {
+ String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
+ dst = new PathData(new URI(pathString), getConf());
+ } catch (URISyntaxException e) {
+ throw new IOException("unexpected URISyntaxException", e);
+ }
}
/**
@@ -295,4 +301,4 @@ public void close() {
processDeleteOnExit();
}
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index 4e3aab0816..ab4fc99cec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -20,6 +20,8 @@
import java.io.File;
import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
import java.util.LinkedList;
import java.util.List;
@@ -60,16 +62,20 @@ public static class Merge extends FsCommand {
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
- CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "nl");
- cf.parse(args);
+ try {
+ CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "nl");
+ cf.parse(args);
- delimiter = cf.getOpt("nl") ? "\n" : null;
+ delimiter = cf.getOpt("nl") ? "\n" : null;
- dst = new PathData(new File(args.removeLast()), getConf());
- if (dst.exists && dst.stat.isDirectory()) {
- throw new PathIsDirectoryException(dst.toString());
+ dst = new PathData(new URI(args.removeLast()), getConf());
+ if (dst.exists && dst.stat.isDirectory()) {
+ throw new PathIsDirectoryException(dst.toString());
+ }
+ srcs = new LinkedList<PathData>();
+ } catch (URISyntaxException e) {
+ throw new IOException("unexpected URISyntaxException", e);
}
- srcs = new LinkedList<PathData>();
}
@Override
@@ -188,9 +194,13 @@ protected void processOptions(LinkedList args) throws IOException {
// commands operating on local paths have no need for glob expansion
@Override
protected List<PathData> expandArgument(String arg) throws IOException {
- List<PathData> items = new LinkedList<PathData>();
- items.add(new PathData(new File(arg), getConf()));
- return items;
+ try {
+ List<PathData> items = new LinkedList<PathData>();
+ items.add(new PathData(new URI(arg), getConf()));
+ return items;
+ } catch (URISyntaxException e) {
+ throw new IOException("unexpected URISyntaxException", e);
+ }
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
index 6e86af7cc1..ae719f5796 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
@@ -24,6 +24,7 @@
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
+import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -39,6 +40,9 @@
/**
* Encapsulates a Path (path), its FileStatus (stat), and its FileSystem (fs).
+ * PathData ensures that the returned path string will be the same as the
+ * one passed in during initialization (unlike Path objects which can
+ * modify the path string).
* The stat field will be null if the path does not exist.
*/
@InterfaceAudience.Private
@@ -51,6 +55,20 @@ public class PathData implements Comparable {
public FileStatus stat;
public boolean exists;
+ /* True if the URI scheme was not present in the pathString but inferred.
+ */
+ private boolean inferredSchemeFromPath = false;
+
+ /**
+ * Pre-compiled regular expressions to detect path formats.
+ */
+ private static final Pattern potentialUri =
+ Pattern.compile("^[a-zA-Z][a-zA-Z0-9+-.]+:");
+ private static final Pattern windowsNonUriAbsolutePath1 =
+ Pattern.compile("^/?[a-zA-Z]:\\\\");
+ private static final Pattern windowsNonUriAbsolutePath2 =
+ Pattern.compile("^/?[a-zA-Z]:/");
+
/**
* Creates an object to wrap the given parameters as fields. The string
* used to create the path will be recorded since the Path object does not
@@ -67,12 +85,12 @@ public PathData(String pathString, Configuration conf) throws IOException {
* Creates an object to wrap the given parameters as fields. The string
* used to create the path will be recorded since the Path object does not
* return exactly the same string used to initialize it
- * @param localPath a local File
+ * @param localPath a local URI
* @param conf the configuration file
* @throws IOException if anything goes wrong...
*/
- public PathData(File localPath, Configuration conf) throws IOException {
- this(FileSystem.getLocal(conf), localPath.toString());
+ public PathData(URI localPath, Configuration conf) throws IOException {
+ this(FileSystem.getLocal(conf), localPath.getPath());
}
/**
@@ -86,6 +104,39 @@ private PathData(FileSystem fs, String pathString) throws IOException {
this(fs, pathString, lookupStat(fs, pathString, true));
}
+ /**
+ * Validates the given Windows path.
+ * Throws IOException on failure.
+ * @param pathString a String of the path supplied by the user.
+ */
+ private void ValidateWindowsPath(String pathString)
+ throws IOException
+ {
+ if (windowsNonUriAbsolutePath1.matcher(pathString).find()) {
+ // Forward slashes disallowed in a backslash-separated path.
+ if (pathString.indexOf('/') != -1) {
+ throw new IOException("Invalid path string " + pathString);
+ }
+
+ inferredSchemeFromPath = true;
+ return;
+ }
+
+ // Is it a forward slash-separated absolute path?
+ if (windowsNonUriAbsolutePath2.matcher(pathString).find()) {
+ inferredSchemeFromPath = true;
+ return;
+ }
+
+ // Does it look like a URI? If so then just leave it alone.
+ if (potentialUri.matcher(pathString).find()) {
+ return;
+ }
+
+ // Looks like a relative path on Windows.
+ return;
+ }
+
/**
* Creates an object to wrap the given parameters as fields. The string
* used to create the path will be recorded since the Path object does not
@@ -100,6 +151,10 @@ private PathData(FileSystem fs, String pathString, FileStatus stat)
this.uri = stringToUri(pathString);
this.path = fs.makeQualified(new Path(uri));
setStat(stat);
+
+ if (Path.WINDOWS) {
+ ValidateWindowsPath(pathString);
+ }
}
// need a static method for the ctor above
@@ -236,7 +291,7 @@ public PathData getPathDataForChild(PathData child) throws IOException {
* Given a child of this directory, use the directory's path and the child's
* basename to construct the string to the child. This preserves relative
* paths since Path will fully qualify.
- * @param child a path contained within this directory
+ * @param childPath a path contained within this directory
* @return String of the path relative to this directory
*/
private String getStringForChildPath(Path childPath) {
@@ -386,7 +441,14 @@ public String toString() {
// No interpretation of symbols. Just decode % escaped chars.
String decodedRemainder = uri.getSchemeSpecificPart();
- if (scheme == null) {
+ // Drop the scheme if it was inferred to ensure fidelity between
+ // the input and output path strings.
+ if ((scheme == null) || (inferredSchemeFromPath)) {
+ if (Path.isWindowsAbsolutePath(decodedRemainder, true)) {
+ // Strip the leading '/' added in stringToUri so users see a valid
+ // Windows path.
+ decodedRemainder = decodedRemainder.substring(1);
+ }
return decodedRemainder;
} else {
StringBuilder buffer = new StringBuilder();
@@ -409,13 +471,56 @@ public File toFile() {
return ((LocalFileSystem)fs).pathToFile(path);
}
+ /** Normalize the given Windows path string. This does the following:
+ * 1. Adds "file:" scheme for absolute paths.
+ * 2. Ensures the scheme-specific part starts with '/' per RFC2396.
+ * 3. Replaces backslash path separators with forward slashes.
+ * @param pathString Path string supplied by the user.
+ * @return normalized absolute path string. Returns the input string
+ * if it is not a Windows absolute path.
+ */
+ private static String normalizeWindowsPath(String pathString)
+ throws IOException
+ {
+ if (!Path.WINDOWS) {
+ return pathString;
+ }
+
+ boolean slashed =
+ ((pathString.length() >= 1) && (pathString.charAt(0) == '/'));
+
+ // Is it a backslash-separated absolute path?
+ if (windowsNonUriAbsolutePath1.matcher(pathString).find()) {
+ // Forward slashes disallowed in a backslash-separated path.
+ if (pathString.indexOf('/') != -1) {
+ throw new IOException("Invalid path string " + pathString);
+ }
+
+ pathString = pathString.replace('\\', '/');
+ return "file:" + (slashed ? "" : "/") + pathString;
+ }
+
+ // Is it a forward slash-separated absolute path?
+ if (windowsNonUriAbsolutePath2.matcher(pathString).find()) {
+ return "file:" + (slashed ? "" : "/") + pathString;
+ }
+
+ // Is it a backslash-separated relative file path (no scheme and
+ // no drive-letter specifier)?
+ if ((pathString.indexOf(':') == -1) && (pathString.indexOf('\\') != -1)) {
+ pathString = pathString.replace('\\', '/');
+ }
+
+ return pathString;
+ }
+
/** Construct a URI from a String with unescaped special characters
- * that have non-standard sematics. e.g. /, ?, #. A custom parsing
- * is needed to prevent misbihaviors.
+ * that have non-standard semantics. e.g. /, ?, #. A custom parsing
+ * is needed to prevent misbehavior.
* @param pathString The input path in string form
* @return URI
*/
- private static URI stringToUri(String pathString) {
+ private static URI stringToUri(String pathString) throws IOException {
// We can't use 'new URI(String)' directly. Since it doesn't do quoting
// internally, the internal parser may fail or break the string at wrong
// places. Use of multi-argument ctors will quote those chars for us,
@@ -424,9 +529,10 @@ private static URI stringToUri(String pathString) {
// parse uri components
String scheme = null;
String authority = null;
-
int start = 0;
+ pathString = normalizeWindowsPath(pathString);
+
// parse uri scheme, if any
int colon = pathString.indexOf(':');
int slash = pathString.indexOf('/');
@@ -445,8 +551,7 @@ private static URI stringToUri(String pathString) {
authority = pathString.substring(start, authEnd);
start = authEnd;
}
-
- // uri path is the rest of the string. ? or # are not interpreated,
+ // uri path is the rest of the string. ? or # are not interpreted,
// but any occurrence of them will be quoted by the URI ctor.
String path = pathString.substring(start, pathString.length());
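
Taken together, ValidateWindowsPath and normalizeWindowsPath let the FsShell
accept native Windows paths while toString() hides the scheme it inferred, so the
user still sees a plain Windows path. A sketch of the intended round trip on
Windows (the file names are hypothetical and the Configuration setup is ordinary
boilerplate, not part of this change):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.shell.PathData;

    public class PathDataWindowsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Backslash-separated absolute path: internally normalized to the URI
        // file:/C:/hdc/input/data.txt, but the inferred scheme and the added
        // leading '/' are dropped again when printing.
        PathData pd = new PathData("C:\\hdc\\input\\data.txt", conf);
        System.out.println(pd);  // C:/hdc/input/data.txt
        // Mixing forward slashes into a backslash-separated absolute path is
        // rejected as an invalid path string.
        new PathData("C:\\hdc/input\\x.txt", conf);  // throws IOException
      }
    }
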
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index fc1d7c4717..b3d6d4ac68 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -61,6 +61,7 @@
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Shell;
import org.mortbay.io.Buffer;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Handler;
@@ -305,6 +306,13 @@ public static Connector createDefaultChannelConnector() {
ret.setAcceptQueueSize(128);
ret.setResolveNames(false);
ret.setUseDirectBuffers(false);
+ if(Shell.WINDOWS) {
+ // result of setting the SO_REUSEADDR flag is different on Windows
+ // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
+ // without this 2 NN's can start on the same machine and listen on
+ // the same port with indeterminate routing of incoming requests to them
+ ret.setReuseAddress(false);
+ }
ret.setHeaderBufferSize(1024*64);
return ret;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
index f1545b69c9..7e3693a9f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
@@ -203,8 +203,8 @@ public void run() {
// It's also possible that we'll end up requesting readahead on some
// other FD, which may be wasted work, but won't cause a problem.
try {
- NativeIO.posixFadviseIfPossible(fd, off, len,
- NativeIO.POSIX_FADV_WILLNEED);
+ NativeIO.POSIX.posixFadviseIfPossible(fd, off, len,
+ NativeIO.POSIX.POSIX_FADV_WILLNEED);
} catch (IOException ioe) {
if (canceled) {
// no big deal - the reader canceled the request and closed
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
index 3d01810e71..70c29b810a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
@@ -22,6 +22,7 @@
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
+import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -30,7 +31,7 @@
import org.apache.hadoop.io.nativeio.Errno;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIOException;
-import org.apache.hadoop.io.nativeio.NativeIO.Stat;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.Stat;
import org.apache.hadoop.security.UserGroupInformation;
/**
@@ -120,7 +121,7 @@ static FileInputStream forceSecureOpenForRead(File f, String expectedOwner,
FileInputStream fis = new FileInputStream(f);
boolean success = false;
try {
- Stat stat = NativeIO.getFstat(fis.getFD());
+ Stat stat = NativeIO.POSIX.getFstat(fis.getFD());
checkStat(f, stat.getOwner(), stat.getGroup(), expectedOwner,
expectedGroup);
success = true;
@@ -166,35 +167,30 @@ public static FileOutputStream createForWrite(File f, int permissions)
if (skipSecurity) {
return insecureCreateForWrite(f, permissions);
} else {
- // Use the native wrapper around open(2)
- try {
- FileDescriptor fd = NativeIO.open(f.getAbsolutePath(),
- NativeIO.O_WRONLY | NativeIO.O_CREAT | NativeIO.O_EXCL,
- permissions);
- return new FileOutputStream(fd);
- } catch (NativeIOException nioe) {
- if (nioe.getErrno() == Errno.EEXIST) {
- throw new AlreadyExistsException(nioe);
- }
- throw nioe;
- }
+ return NativeIO.getCreateForWriteFileOutputStream(f, permissions);
}
}
private static void checkStat(File f, String owner, String group,
String expectedOwner,
String expectedGroup) throws IOException {
+ boolean success = true;
if (expectedOwner != null &&
!expectedOwner.equals(owner)) {
- throw new IOException(
- "Owner '" + owner + "' for path " + f + " did not match " +
- "expected owner '" + expectedOwner + "'");
+ if (Path.WINDOWS) {
+ UserGroupInformation ugi =
+ UserGroupInformation.createRemoteUser(expectedOwner);
+ final String adminsGroupString = "Administrators";
+ success = owner.equals(adminsGroupString)
+ && Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString);
+ } else {
+ success = false;
+ }
}
- if (expectedGroup != null &&
- !expectedGroup.equals(group)) {
+ if (!success) {
throw new IOException(
- "Group '" + group + "' for path " + f + " did not match " +
- "expected group '" + expectedGroup + "'");
+ "Owner '" + owner + "' for path " + f + " did not match " +
+ "expected owner '" + expectedOwner + "'");
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index c8649c6a2e..a79e5cd34c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -19,7 +19,10 @@
import java.io.File;
import java.io.FileDescriptor;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
import java.io.IOException;
+import java.io.RandomAccessFile;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
@@ -27,10 +30,13 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.Shell;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+
/**
* JNI wrappers for various native IO-related calls not available in Java.
* These functions should generally be used alongside a fallback to another
@@ -39,81 +45,341 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class NativeIO {
- // Flags for open() call from bits/fcntl.h
- public static final int O_RDONLY = 00;
- public static final int O_WRONLY = 01;
- public static final int O_RDWR = 02;
- public static final int O_CREAT = 0100;
- public static final int O_EXCL = 0200;
- public static final int O_NOCTTY = 0400;
- public static final int O_TRUNC = 01000;
- public static final int O_APPEND = 02000;
- public static final int O_NONBLOCK = 04000;
- public static final int O_SYNC = 010000;
- public static final int O_ASYNC = 020000;
- public static final int O_FSYNC = O_SYNC;
- public static final int O_NDELAY = O_NONBLOCK;
+ public static class POSIX {
+ // Flags for open() call from bits/fcntl.h
+ public static final int O_RDONLY = 00;
+ public static final int O_WRONLY = 01;
+ public static final int O_RDWR = 02;
+ public static final int O_CREAT = 0100;
+ public static final int O_EXCL = 0200;
+ public static final int O_NOCTTY = 0400;
+ public static final int O_TRUNC = 01000;
+ public static final int O_APPEND = 02000;
+ public static final int O_NONBLOCK = 04000;
+ public static final int O_SYNC = 010000;
+ public static final int O_ASYNC = 020000;
+ public static final int O_FSYNC = O_SYNC;
+ public static final int O_NDELAY = O_NONBLOCK;
- // Flags for posix_fadvise() from bits/fcntl.h
- /* No further special treatment. */
- public static final int POSIX_FADV_NORMAL = 0;
- /* Expect random page references. */
- public static final int POSIX_FADV_RANDOM = 1;
- /* Expect sequential page references. */
- public static final int POSIX_FADV_SEQUENTIAL = 2;
- /* Will need these pages. */
- public static final int POSIX_FADV_WILLNEED = 3;
- /* Don't need these pages. */
- public static final int POSIX_FADV_DONTNEED = 4;
- /* Data will be accessed once. */
- public static final int POSIX_FADV_NOREUSE = 5;
+ // Flags for posix_fadvise() from bits/fcntl.h
+ /* No further special treatment. */
+ public static final int POSIX_FADV_NORMAL = 0;
+ /* Expect random page references. */
+ public static final int POSIX_FADV_RANDOM = 1;
+ /* Expect sequential page references. */
+ public static final int POSIX_FADV_SEQUENTIAL = 2;
+ /* Will need these pages. */
+ public static final int POSIX_FADV_WILLNEED = 3;
+ /* Don't need these pages. */
+ public static final int POSIX_FADV_DONTNEED = 4;
+ /* Data will be accessed once. */
+ public static final int POSIX_FADV_NOREUSE = 5;
- /* Wait upon writeout of all pages
- in the range before performing the
- write. */
- public static final int SYNC_FILE_RANGE_WAIT_BEFORE = 1;
- /* Initiate writeout of all those
- dirty pages in the range which are
- not presently under writeback. */
- public static final int SYNC_FILE_RANGE_WRITE = 2;
+ /* Wait upon writeout of all pages
+ in the range before performing the
+ write. */
+ public static final int SYNC_FILE_RANGE_WAIT_BEFORE = 1;
+ /* Initiate writeout of all those
+ dirty pages in the range which are
+ not presently under writeback. */
+ public static final int SYNC_FILE_RANGE_WRITE = 2;
- /* Wait upon writeout of all pages in
- the range after performing the
- write. */
- public static final int SYNC_FILE_RANGE_WAIT_AFTER = 4;
+ /* Wait upon writeout of all pages in
+ the range after performing the
+ write. */
+ public static final int SYNC_FILE_RANGE_WAIT_AFTER = 4;
+
+ private static final Log LOG = LogFactory.getLog(NativeIO.class);
+
+ private static boolean nativeLoaded = false;
+ private static boolean fadvisePossible = true;
+ private static boolean syncFileRangePossible = true;
+
+ static final String WORKAROUND_NON_THREADSAFE_CALLS_KEY =
+ "hadoop.workaround.non.threadsafe.getpwuid";
+ static final boolean WORKAROUND_NON_THREADSAFE_CALLS_DEFAULT = false;
+
+ private static long cacheTimeout = -1;
+
+ static {
+ if (NativeCodeLoader.isNativeCodeLoaded()) {
+ try {
+ Configuration conf = new Configuration();
+ workaroundNonThreadSafePasswdCalls = conf.getBoolean(
+ WORKAROUND_NON_THREADSAFE_CALLS_KEY,
+ WORKAROUND_NON_THREADSAFE_CALLS_DEFAULT);
+
+ initNative();
+ nativeLoaded = true;
+
+ cacheTimeout = conf.getLong(
+ CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_KEY,
+ CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT) *
+ 1000;
+ LOG.debug("Initialized cache for IDs to User/Group mapping with a " +
+ " cache timeout of " + cacheTimeout/1000 + " seconds.");
+
+ } catch (Throwable t) {
+ // This can happen if the user has an older version of libhadoop.so
+ // installed - in this case we can continue without native IO
+ // after warning
+ LOG.error("Unable to initialize NativeIO libraries", t);
+ }
+ }
+ }
+
+ /**
+ * Return true if the JNI-based native IO extensions are available.
+ */
+ public static boolean isAvailable() {
+ return NativeCodeLoader.isNativeCodeLoaded() && nativeLoaded;
+ }
+
+ /** Wrapper around open(2) */
+ public static native FileDescriptor open(String path, int flags, int mode) throws IOException;
+ /** Wrapper around fstat(2) */
+ private static native Stat fstat(FileDescriptor fd) throws IOException;
+
+ /** Native chmod implementation. On UNIX, it is a wrapper around chmod(2) */
+ private static native void chmodImpl(String path, int mode) throws IOException;
+
+ public static void chmod(String path, int mode) throws IOException {
+ if (!Shell.WINDOWS) {
+ chmodImpl(path, mode);
+ } else {
+ try {
+ chmodImpl(path, mode);
+ } catch (NativeIOException nioe) {
+ if (nioe.getErrorCode() == 3) {
+ throw new NativeIOException("No such file or directory",
+ Errno.ENOENT);
+ } else {
+ LOG.warn(String.format("NativeIO.chmod error (%d): %s",
+ nioe.getErrorCode(), nioe.getMessage()));
+ throw new NativeIOException("Unknown error", Errno.UNKNOWN);
+ }
+ }
+ }
+ }
+
+ /** Wrapper around posix_fadvise(2) */
+ static native void posix_fadvise(
+ FileDescriptor fd, long offset, long len, int flags) throws NativeIOException;
+
+ /** Wrapper around sync_file_range(2) */
+ static native void sync_file_range(
+ FileDescriptor fd, long offset, long nbytes, int flags) throws NativeIOException;
+
+ /**
+ * Call posix_fadvise on the given file descriptor. See the manpage
+ * for this syscall for more information. On systems where this
+ * call is not available, does nothing.
+ *
+ * @throws NativeIOException if there is an error with the syscall
+ */
+ public static void posixFadviseIfPossible(
+ FileDescriptor fd, long offset, long len, int flags)
+ throws NativeIOException {
+ if (nativeLoaded && fadvisePossible) {
+ try {
+ posix_fadvise(fd, offset, len, flags);
+ } catch (UnsupportedOperationException uoe) {
+ fadvisePossible = false;
+ } catch (UnsatisfiedLinkError ule) {
+ fadvisePossible = false;
+ }
+ }
+ }
+
+ /**
+ * Call sync_file_range on the given file descriptor. See the manpage
+ * for this syscall for more information. On systems where this
+ * call is not available, does nothing.
+ *
+ * @throws NativeIOException if there is an error with the syscall
+ */
+ public static void syncFileRangeIfPossible(
+ FileDescriptor fd, long offset, long nbytes, int flags)
+ throws NativeIOException {
+ if (nativeLoaded && syncFileRangePossible) {
+ try {
+ sync_file_range(fd, offset, nbytes, flags);
+ } catch (UnsupportedOperationException uoe) {
+ syncFileRangePossible = false;
+ } catch (UnsatisfiedLinkError ule) {
+ syncFileRangePossible = false;
+ }
+ }
+ }
+
+ /** Linux only methods used for getOwner() implementation */
+ private static native long getUIDforFDOwnerforOwner(FileDescriptor fd) throws IOException;
+ private static native String getUserName(long uid) throws IOException;
+
+ /**
+ * Result type of the fstat call
+ */
+ public static class Stat {
+ private int ownerId, groupId;
+ private String owner, group;
+ private int mode;
+
+ // Mode constants
+ public static final int S_IFMT = 0170000; /* type of file */
+ public static final int S_IFIFO = 0010000; /* named pipe (fifo) */
+ public static final int S_IFCHR = 0020000; /* character special */
+ public static final int S_IFDIR = 0040000; /* directory */
+ public static final int S_IFBLK = 0060000; /* block special */
+ public static final int S_IFREG = 0100000; /* regular */
+ public static final int S_IFLNK = 0120000; /* symbolic link */
+ public static final int S_IFSOCK = 0140000; /* socket */
+ public static final int S_IFWHT = 0160000; /* whiteout */
+ public static final int S_ISUID = 0004000; /* set user id on execution */
+ public static final int S_ISGID = 0002000; /* set group id on execution */
+ public static final int S_ISVTX = 0001000; /* save swapped text even after use */
+ public static final int S_IRUSR = 0000400; /* read permission, owner */
+ public static final int S_IWUSR = 0000200; /* write permission, owner */
+ public static final int S_IXUSR = 0000100; /* execute/search permission, owner */
+
+ Stat(int ownerId, int groupId, int mode) {
+ this.ownerId = ownerId;
+ this.groupId = groupId;
+ this.mode = mode;
+ }
+
+ @Override
+ public String toString() {
+ return "Stat(owner='" + owner + "', group='" + group + "'" +
+ ", mode=" + mode + ")";
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+ public String getGroup() {
+ return group;
+ }
+ public int getMode() {
+ return mode;
+ }
+ }
+
+ /**
+ * Returns the file stat for a file descriptor.
+ *
+ * @param fd file descriptor.
+ * @return the file descriptor file stat.
+ * @throws IOException thrown if there was an IO error while obtaining the file stat.
+ */
+ public static Stat getFstat(FileDescriptor fd) throws IOException {
+ Stat stat = fstat(fd);
+ stat.owner = getName(IdCache.USER, stat.ownerId);
+ stat.group = getName(IdCache.GROUP, stat.groupId);
+ return stat;
+ }
+
+ private static String getName(IdCache domain, int id) throws IOException {
+ Map<Integer, CachedName> idNameCache = (domain == IdCache.USER)
+ ? USER_ID_NAME_CACHE : GROUP_ID_NAME_CACHE;
+ String name;
+ CachedName cachedName = idNameCache.get(id);
+ long now = System.currentTimeMillis();
+ if (cachedName != null && (cachedName.timestamp + cacheTimeout) > now) {
+ name = cachedName.name;
+ } else {
+ name = (domain == IdCache.USER) ? getUserName(id) : getGroupName(id);
+ if (LOG.isDebugEnabled()) {
+ String type = (domain == IdCache.USER) ? "UserName" : "GroupName";
+ LOG.debug("Got " + type + " " + name + " for ID " + id +
+ " from the native implementation");
+ }
+ cachedName = new CachedName(name, now);
+ idNameCache.put(id, cachedName);
+ }
+ return name;
+ }
+
+ static native String getUserName(int uid) throws IOException;
+ static native String getGroupName(int uid) throws IOException;
+
+ private static class CachedName {
+ final long timestamp;
+ final String name;
+
+ public CachedName(String name, long timestamp) {
+ this.name = name;
+ this.timestamp = timestamp;
+ }
+ }
+
+ private static final Map<Integer, CachedName> USER_ID_NAME_CACHE =
+ new ConcurrentHashMap<Integer, CachedName>();
+
+ private static final Map<Integer, CachedName> GROUP_ID_NAME_CACHE =
+ new ConcurrentHashMap<Integer, CachedName>();
+
+ private enum IdCache { USER, GROUP }
+ }
+
+ private static boolean workaroundNonThreadSafePasswdCalls = false;
+
+
+ public static class Windows {
+ // Flags for CreateFile() call on Windows
+ public static final long GENERIC_READ = 0x80000000L;
+ public static final long GENERIC_WRITE = 0x40000000L;
+
+ public static final long FILE_SHARE_READ = 0x00000001L;
+ public static final long FILE_SHARE_WRITE = 0x00000002L;
+ public static final long FILE_SHARE_DELETE = 0x00000004L;
+
+ public static final long CREATE_NEW = 1;
+ public static final long CREATE_ALWAYS = 2;
+ public static final long OPEN_EXISTING = 3;
+ public static final long OPEN_ALWAYS = 4;
+ public static final long TRUNCATE_EXISTING = 5;
+
+ public static final long FILE_BEGIN = 0;
+ public static final long FILE_CURRENT = 1;
+ public static final long FILE_END = 2;
+
+ /** Wrapper around CreateFile() on Windows */
+ public static native FileDescriptor createFile(String path,
+ long desiredAccess, long shareMode, long creationDisposition)
+ throws IOException;
+
+ /** Wrapper around SetFilePointer() on Windows */
+ public static native long setFilePointer(FileDescriptor fd,
+ long distanceToMove, long moveMethod) throws IOException;
+
+ /** Windows only methods used for getOwner() implementation */
+ private static native String getOwner(FileDescriptor fd) throws IOException;
+
+ static {
+ if (NativeCodeLoader.isNativeCodeLoaded()) {
+ try {
+ initNative();
+ nativeLoaded = true;
+ } catch (Throwable t) {
+ // This can happen if the user has an older version of libhadoop.so
+ // installed - in this case we can continue without native IO
+ // after warning
+ LOG.error("Unable to initialize NativeIO libraries", t);
+ }
+ }
+ }
+ }
private static final Log LOG = LogFactory.getLog(NativeIO.class);
private static boolean nativeLoaded = false;
- private static boolean workaroundNonThreadSafePasswdCalls = false;
- private static boolean fadvisePossible = true;
- private static boolean syncFileRangePossible = true;
-
- static final String WORKAROUND_NON_THREADSAFE_CALLS_KEY =
- "hadoop.workaround.non.threadsafe.getpwuid";
- static final boolean WORKAROUND_NON_THREADSAFE_CALLS_DEFAULT = false;
-
- private static long cacheTimeout = -1;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
- Configuration conf = new Configuration();
- workaroundNonThreadSafePasswdCalls = conf.getBoolean(
- WORKAROUND_NON_THREADSAFE_CALLS_KEY,
- WORKAROUND_NON_THREADSAFE_CALLS_DEFAULT);
-
initNative();
nativeLoaded = true;
-
- cacheTimeout = conf.getLong(
- CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_KEY,
- CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT) *
- 1000;
- LOG.debug("Initialized cache for IDs to User/Group mapping with a" +
- " cache timeout of " + cacheTimeout/1000 + " seconds.");
-
} catch (Throwable t) {
// This can happen if the user has an older version of libhadoop.so
// installed - in this case we can continue without native IO
@@ -130,169 +396,161 @@ public static boolean isAvailable() {
return NativeCodeLoader.isNativeCodeLoaded() && nativeLoaded;
}
- /** Wrapper around open(2) */
- public static native FileDescriptor open(String path, int flags, int mode) throws IOException;
- /** Wrapper around fstat(2) */
- private static native Stat fstat(FileDescriptor fd) throws IOException;
- /** Wrapper around chmod(2) */
- public static native void chmod(String path, int mode) throws IOException;
-
- /** Wrapper around posix_fadvise(2) */
- static native void posix_fadvise(
- FileDescriptor fd, long offset, long len, int flags) throws NativeIOException;
-
- /** Wrapper around sync_file_range(2) */
- static native void sync_file_range(
- FileDescriptor fd, long offset, long nbytes, int flags) throws NativeIOException;
-
/** Initialize the JNI method ID and class ID cache */
private static native void initNative();
- /**
- * Call posix_fadvise on the given file descriptor. See the manpage
- * for this syscall for more information. On systems where this
- * call is not available, does nothing.
- *
- * @throws NativeIOException if there is an error with the syscall
- */
- public static void posixFadviseIfPossible(
- FileDescriptor fd, long offset, long len, int flags)
- throws NativeIOException {
- if (nativeLoaded && fadvisePossible) {
- try {
- posix_fadvise(fd, offset, len, flags);
- } catch (UnsupportedOperationException uoe) {
- fadvisePossible = false;
- } catch (UnsatisfiedLinkError ule) {
- fadvisePossible = false;
- }
- }
- }
-
- /**
- * Call sync_file_range on the given file descriptor. See the manpage
- * for this syscall for more information. On systems where this
- * call is not available, does nothing.
- *
- * @throws NativeIOException if there is an error with the syscall
- */
- public static void syncFileRangeIfPossible(
- FileDescriptor fd, long offset, long nbytes, int flags)
- throws NativeIOException {
- if (nativeLoaded && syncFileRangePossible) {
- try {
- sync_file_range(fd, offset, nbytes, flags);
- } catch (UnsupportedOperationException uoe) {
- syncFileRangePossible = false;
- } catch (UnsatisfiedLinkError ule) {
- syncFileRangePossible = false;
- }
- }
- }
-
- /**
- * Result type of the fstat call
- */
- public static class Stat {
- private int ownerId, groupId;
- private String owner, group;
- private int mode;
-
- // Mode constants
- public static final int S_IFMT = 0170000; /* type of file */
- public static final int S_IFIFO = 0010000; /* named pipe (fifo) */
- public static final int S_IFCHR = 0020000; /* character special */
- public static final int S_IFDIR = 0040000; /* directory */
- public static final int S_IFBLK = 0060000; /* block special */
- public static final int S_IFREG = 0100000; /* regular */
- public static final int S_IFLNK = 0120000; /* symbolic link */
- public static final int S_IFSOCK = 0140000; /* socket */
- public static final int S_IFWHT = 0160000; /* whiteout */
- public static final int S_ISUID = 0004000; /* set user id on execution */
- public static final int S_ISGID = 0002000; /* set group id on execution */
- public static final int S_ISVTX = 0001000; /* save swapped text even after use */
- public static final int S_IRUSR = 0000400; /* read permission, owner */
- public static final int S_IWUSR = 0000200; /* write permission, owner */
- public static final int S_IXUSR = 0000100; /* execute/search permission, owner */
-
- Stat(int ownerId, int groupId, int mode) {
- this.ownerId = ownerId;
- this.groupId = groupId;
- this.mode = mode;
- }
-
- @Override
- public String toString() {
- return "Stat(owner='" + owner + "', group='" + group + "'" +
- ", mode=" + mode + ")";
- }
-
- public String getOwner() {
- return owner;
- }
- public String getGroup() {
- return group;
- }
- public int getMode() {
- return mode;
- }
- }
-
- static native String getUserName(int uid) throws IOException;
-
- static native String getGroupName(int uid) throws IOException;
-
- private static class CachedName {
+ private static class CachedUid {
final long timestamp;
- final String name;
-
- public CachedName(String name, long timestamp) {
- this.name = name;
+ final String username;
+ public CachedUid(String username, long timestamp) {
this.timestamp = timestamp;
+ this.username = username;
}
}
+ private static final Map<Long, CachedUid> uidCache =
+ new ConcurrentHashMap<Long, CachedUid>();
+ private static long cacheTimeout;
+ private static boolean initialized = false;
- private static final Map<Integer, CachedName> USER_ID_NAME_CACHE =
- new ConcurrentHashMap<Integer, CachedName>();
-
- private static final Map<Integer, CachedName> GROUP_ID_NAME_CACHE =
- new ConcurrentHashMap<Integer, CachedName>();
-
- private enum IdCache { USER, GROUP }
-
- private static String getName(IdCache domain, int id) throws IOException {
- Map idNameCache = (domain == IdCache.USER)
- ? USER_ID_NAME_CACHE : GROUP_ID_NAME_CACHE;
- String name;
- CachedName cachedName = idNameCache.get(id);
- long now = System.currentTimeMillis();
- if (cachedName != null && (cachedName.timestamp + cacheTimeout) > now) {
- name = cachedName.name;
+ public static String getOwner(FileDescriptor fd) throws IOException {
+ ensureInitialized();
+ if (Shell.WINDOWS) {
+ String owner = Windows.getOwner(fd);
+ int i = owner.indexOf('\\');
+ if (i != -1)
+ owner = owner.substring(i + 1);
+ return owner;
} else {
- name = (domain == IdCache.USER) ? getUserName(id) : getGroupName(id);
- if (LOG.isDebugEnabled()) {
- String type = (domain == IdCache.USER) ? "UserName" : "GroupName";
- LOG.debug("Got " + type + " " + name + " for ID " + id +
- " from the native implementation");
+ long uid = POSIX.getUIDforFDOwnerforOwner(fd);
+ CachedUid cUid = uidCache.get(uid);
+ long now = System.currentTimeMillis();
+ if (cUid != null && (cUid.timestamp + cacheTimeout) > now) {
+ return cUid.username;
}
- cachedName = new CachedName(name, now);
- idNameCache.put(id, cachedName);
+ String user = POSIX.getUserName(uid);
+ LOG.info("Got UserName " + user + " for UID " + uid
+ + " from the native implementation");
+ cUid = new CachedUid(user, now);
+ uidCache.put(uid, cUid);
+ return user;
}
- return name;
}
/**
- * Returns the file stat for a file descriptor.
- *
- * @param fd file descriptor.
- * @return the file descriptor file stat.
- * @throws IOException thrown if there was an IO error while obtaining the file stat.
+ * Create a FileInputStream that shares delete permission on the
+   * file opened, i.e. another process can delete the file while the
+   * FileInputStream is reading it. Only the Windows implementation uses
+   * the native interface.
*/
- public static Stat getFstat(FileDescriptor fd) throws IOException {
- Stat stat = fstat(fd);
- stat.owner = getName(IdCache.USER, stat.ownerId);
- stat.group = getName(IdCache.GROUP, stat.groupId);
- return stat;
+ public static FileInputStream getShareDeleteFileInputStream(File f)
+ throws IOException {
+ if (!Shell.WINDOWS) {
+ // On Linux the default FileInputStream shares delete permission
+ // on the file opened.
+ //
+ return new FileInputStream(f);
+ } else {
+ // Use Windows native interface to create a FileInputStream that
+ // shares delete permission on the file opened.
+ //
+ FileDescriptor fd = Windows.createFile(
+ f.getAbsolutePath(),
+ Windows.GENERIC_READ,
+ Windows.FILE_SHARE_READ |
+ Windows.FILE_SHARE_WRITE |
+ Windows.FILE_SHARE_DELETE,
+ Windows.OPEN_EXISTING);
+ return new FileInputStream(fd);
+ }
+ }
+
+ /**
+ * Create a FileInputStream that shares delete permission on the
+   * file opened at a given offset, i.e. another process can delete
+   * the file while the FileInputStream is reading it. Only the Windows
+   * implementation uses the native interface.
+ */
+ public static FileInputStream getShareDeleteFileInputStream(File f, long seekOffset)
+ throws IOException {
+ if (!Shell.WINDOWS) {
+ RandomAccessFile rf = new RandomAccessFile(f, "r");
+ if (seekOffset > 0) {
+ rf.seek(seekOffset);
+ }
+ return new FileInputStream(rf.getFD());
+ } else {
+ // Use Windows native interface to create a FileInputStream that
+ // shares delete permission on the file opened, and set it to the
+ // given offset.
+ //
+ FileDescriptor fd = NativeIO.Windows.createFile(
+ f.getAbsolutePath(),
+ NativeIO.Windows.GENERIC_READ,
+ NativeIO.Windows.FILE_SHARE_READ |
+ NativeIO.Windows.FILE_SHARE_WRITE |
+ NativeIO.Windows.FILE_SHARE_DELETE,
+ NativeIO.Windows.OPEN_EXISTING);
+ if (seekOffset > 0)
+ NativeIO.Windows.setFilePointer(fd, seekOffset, NativeIO.Windows.FILE_BEGIN);
+ return new FileInputStream(fd);
+ }
+ }
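As an illustration of the share-delete semantics documented above, a reader could be opened as follows (a sketch, not part of the patch; the log file name is hypothetical):

    File log = new File("app.log");  // hypothetical file
    // On Windows the handle is opened with FILE_SHARE_DELETE, so another
    // process may delete or rename the file while it is still being read;
    // on other platforms a plain FileInputStream already behaves this way.
    FileInputStream in = NativeIO.getShareDeleteFileInputStream(log, 0L);
    try {
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        // consume n bytes
      }
    } finally {
      in.close();
    }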
+
+ /**
+ * Create the specified File for write access, ensuring that it does not exist.
+ * @param f the file that we want to create
+ * @param permissions we want to have on the file (if security is enabled)
+ *
+ * @throws AlreadyExistsException if the file already exists
+ * @throws IOException if any other error occurred
+ */
+ public static FileOutputStream getCreateForWriteFileOutputStream(File f, int permissions)
+ throws IOException {
+ if (!Shell.WINDOWS) {
+ // Use the native wrapper around open(2)
+ try {
+ FileDescriptor fd = NativeIO.POSIX.open(f.getAbsolutePath(),
+ NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT
+ | NativeIO.POSIX.O_EXCL, permissions);
+ return new FileOutputStream(fd);
+ } catch (NativeIOException nioe) {
+ if (nioe.getErrno() == Errno.EEXIST) {
+ throw new AlreadyExistsException(nioe);
+ }
+ throw nioe;
+ }
+ } else {
+ // Use the Windows native APIs to create equivalent FileOutputStream
+ try {
+ FileDescriptor fd = NativeIO.Windows.createFile(f.getCanonicalPath(),
+ NativeIO.Windows.GENERIC_WRITE,
+ NativeIO.Windows.FILE_SHARE_DELETE
+ | NativeIO.Windows.FILE_SHARE_READ
+ | NativeIO.Windows.FILE_SHARE_WRITE,
+ NativeIO.Windows.CREATE_NEW);
+ NativeIO.POSIX.chmod(f.getCanonicalPath(), permissions);
+ return new FileOutputStream(fd);
+ } catch (NativeIOException nioe) {
+ if (nioe.getErrorCode() == 80) {
+ // ERROR_FILE_EXISTS
+ // 80 (0x50)
+ // The file exists
+ throw new AlreadyExistsException(nioe);
+ }
+ throw nioe;
+ }
+ }
+ }
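A sketch of the create-if-absent contract described above (illustrative only; the file name and permission bits are hypothetical, and AlreadyExistsException is the exception type named in the method's javadoc):

    try {
      FileOutputStream out =
          NativeIO.getCreateForWriteFileOutputStream(new File("job.lock"), 0644);
      try {
        out.write("owner-pid".getBytes("UTF-8"));
      } finally {
        out.close();
      }
    } catch (AlreadyExistsException aee) {
      // Another writer won the race: O_EXCL failed on Linux, CREATE_NEW on Windows.
    }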
+
+ private synchronized static void ensureInitialized() {
+ if (!initialized) {
+ cacheTimeout =
+ new Configuration().getLong("hadoop.security.uid.cache.secs",
+ 4*60*60) * 1000;
+ LOG.info("Initialized cache for UID to User mapping with a cache" +
+ " timeout of " + cacheTimeout/1000 + " seconds.");
+ initialized = true;
+ }
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java
index db653b23f4..8d6558ab1d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIOException.java
@@ -18,20 +18,40 @@
package org.apache.hadoop.io.nativeio;
import java.io.IOException;
+import org.apache.hadoop.util.Shell;
+
/**
* An exception generated by a call to the native IO code.
*
- * These exceptions simply wrap errno result codes.
+ * These exceptions simply wrap errno result codes on Linux,
+ * or the System Error Code on Windows.
*/
public class NativeIOException extends IOException {
private static final long serialVersionUID = 1L;
private Errno errno;
+  // Java has no unsigned primitive type. Use a signed 32-bit
+  // integer to hold the unsigned 32-bit Windows error code.
+ private int errorCode;
+
public NativeIOException(String msg, Errno errno) {
super(msg);
this.errno = errno;
+ // Windows error code is always set to ERROR_SUCCESS on Linux,
+ // i.e. no failure on Windows
+ this.errorCode = 0;
+ }
+
+ public NativeIOException(String msg, int errorCode) {
+ super(msg);
+ this.errorCode = errorCode;
+ this.errno = Errno.UNKNOWN;
+ }
+
+ public long getErrorCode() {
+ return errorCode;
}
public Errno getErrno() {
@@ -40,8 +60,10 @@ public Errno getErrno() {
@Override
public String toString() {
- return errno.toString() + ": " + super.getMessage();
+ if (Shell.WINDOWS)
+ return errorCode + ": " + super.getMessage();
+ else
+ return errno.toString() + ": " + super.getMessage();
}
}
-
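Callers can branch on the platform to decide which wrapped code to inspect; a minimal sketch (not part of the patch, file name hypothetical), assuming the NativeIO and Shell classes shown elsewhere in this change:

    try {
      FileDescriptor fd = NativeIO.POSIX.open("scratch.dat",
          NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT | NativeIO.POSIX.O_EXCL, 0644);
      new FileOutputStream(fd).close();
    } catch (NativeIOException nioe) {
      if (Shell.WINDOWS) {
        // Windows System Error Code, e.g. 80 (ERROR_FILE_EXISTS)
        System.err.println("error code " + nioe.getErrorCode() + ": " + nioe.getMessage());
      } else {
        // POSIX errno, e.g. EEXIST
        System.err.println("errno " + nioe.getErrno() + ": " + nioe.getMessage());
      }
    }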
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
index 6cb641d7b5..edfdc10c7c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
@@ -140,10 +140,12 @@ public void doGet(HttpServletRequest request, HttpServletResponse response)
*/
  void printMap(PrintWriter out, Map<String, Map<String, List<TagsMetricsPair>>> map) {
    for (Map.Entry<String, Map<String, List<TagsMetricsPair>>> context : map.entrySet()) {
- out.println(context.getKey());
+ out.print(context.getKey());
+ out.print("\n");
      for (Map.Entry<String, List<TagsMetricsPair>> record : context.getValue().entrySet()) {
indent(out, 1);
- out.println(record.getKey());
+ out.print(record.getKey());
+ out.print("\n");
for (TagsMetricsPair pair : record.getValue()) {
indent(out, 2);
// Prints tag values in the form "{key=value,key=value}:"
@@ -159,7 +161,7 @@ void printMap(PrintWriter out, Map>> m
out.print("=");
out.print(tagValue.getValue().toString());
}
- out.println("}:");
+ out.print("}:\n");
// Now print metric values, one per line
          for (Map.Entry<String, Number> metricValue :
@@ -167,7 +169,8 @@ void printMap(PrintWriter out, Map>> m
indent(out, 3);
out.print(metricValue.getKey());
out.print("=");
- out.println(metricValue.getValue().toString());
+ out.print(metricValue.getValue().toString());
+ out.print("\n");
}
}
}
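The println-to-print change above matters because PrintWriter.println appends the platform line separator ("\r\n" on Windows), while an explicit "\n" keeps the servlet's plain-text output byte-identical on all platforms, presumably what its consumers and tests expect. A tiny illustration (not part of the patch):

    StringWriter sw = new StringWriter();
    PrintWriter out = new PrintWriter(sw);
    out.println("jvm");                 // ends with System.getProperty("line.separator")
    out.print("jvm"); out.print("\n");  // always ends with a single '\n'
    out.flush();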
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index 6335fc7146..3689ebaa06 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -86,7 +86,8 @@ private static List getUnixGroups(final String user) throws IOException
LOG.warn("got exception trying to get groups for user " + user, e);
}
- StringTokenizer tokenizer = new StringTokenizer(result);
+ StringTokenizer tokenizer =
+ new StringTokenizer(result, Shell.TOKEN_SEPARATOR_REGEX);
     List<String> groups = new LinkedList<String>();
while (tokenizer.hasMoreTokens()) {
groups.add(tokenizer.nextToken());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
index 97919ad92a..13b2c9a581 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
@@ -60,7 +60,7 @@ public static void verifyChunkedSums(int bytesPerSum, int checksumType,
fileName, basePos);
}
- private static native void nativeVerifyChunkedSums(
+ private static native void nativeVerifyChunkedSums(
int bytesPerSum, int checksumType,
ByteBuffer sums, int sumsOffset,
ByteBuffer data, int dataOffset, int dataLength,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java
index 5e3522c99e..43a5e8970a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java
@@ -32,9 +32,10 @@ public class PlatformName {
* The complete platform 'name' to identify the platform as
* per the java-vm.
*/
- private static final String platformName = System.getProperty("os.name") + "-" +
- System.getProperty("os.arch") + "-" +
- System.getProperty("sun.arch.data.model");
+ private static final String platformName =
+ (Shell.WINDOWS ? System.getenv("os") : System.getProperty("os.name"))
+ + "-" + System.getProperty("os.arch")
+ + "-" + System.getProperty("sun.arch.data.model");
/**
* Get the complete platform as per the java-vm.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index b8c16f214d..eacc0bfdbf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -21,6 +21,7 @@
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
+import java.util.Arrays;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
@@ -44,46 +45,208 @@ abstract public class Shell {
public static final Log LOG = LogFactory.getLog(Shell.class);
+ private static boolean IS_JAVA7_OR_ABOVE =
+ System.getProperty("java.version").substring(0, 3).compareTo("1.7") >= 0;
+
+ public static boolean isJava7OrAbove() {
+ return IS_JAVA7_OR_ABOVE;
+ }
+
/** a Unix command to get the current user's name */
public final static String USER_NAME_COMMAND = "whoami";
+
+ /** Windows CreateProcess synchronization object */
+ public static final Object WindowsProcessLaunchLock = new Object();
+
/** a Unix command to get the current user's groups list */
public static String[] getGroupsCommand() {
- return new String[]{"bash", "-c", "groups"};
+ return (WINDOWS)? new String[]{"cmd", "/c", "groups"}
+ : new String[]{"bash", "-c", "groups"};
}
+
/** a Unix command to get a given user's groups list */
public static String[] getGroupsForUserCommand(final String user) {
//'groups username' command return is non-consistent across different unixes
- return new String [] {"bash", "-c", "id -Gn " + user};
+ return (WINDOWS)? new String[] { WINUTILS, "groups", "-F", "\"" + user + "\""}
+ : new String [] {"bash", "-c", "id -Gn " + user};
}
+
/** a Unix command to get a given netgroup's user list */
public static String[] getUsersForNetgroupCommand(final String netgroup) {
//'groups username' command return is non-consistent across different unixes
- return new String [] {"bash", "-c", "getent netgroup " + netgroup};
+ return (WINDOWS)? new String [] {"cmd", "/c", "getent netgroup " + netgroup}
+ : new String [] {"bash", "-c", "getent netgroup " + netgroup};
}
+
+ /** Return a command to get permission information. */
+ public static String[] getGetPermissionCommand() {
+ return (WINDOWS) ? new String[] { WINUTILS, "ls", "-F" }
+ : new String[] { "/bin/ls", "-ld" };
+ }
+
+ /** Return a command to set permission */
+ public static String[] getSetPermissionCommand(String perm, boolean recursive) {
+ if (recursive) {
+ return (WINDOWS) ? new String[] { WINUTILS, "chmod", "-R", perm }
+ : new String[] { "chmod", "-R", perm };
+ } else {
+ return (WINDOWS) ? new String[] { WINUTILS, "chmod", perm }
+ : new String[] { "chmod", perm };
+ }
+ }
+
+ /**
+ * Return a command to set permission for specific file.
+ *
+ * @param perm String permission to set
+ * @param recursive boolean true to apply to all sub-directories recursively
+ * @param file String file to set
+ * @return String[] containing command and arguments
+ */
+ public static String[] getSetPermissionCommand(String perm, boolean recursive,
+ String file) {
+ String[] baseCmd = getSetPermissionCommand(perm, recursive);
+ String[] cmdWithFile = Arrays.copyOf(baseCmd, baseCmd.length + 1);
+ cmdWithFile[cmdWithFile.length - 1] = file;
+ return cmdWithFile;
+ }
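For example, the composed command differs per platform (illustrative values; the file name is hypothetical):

    String[] cmd = Shell.getSetPermissionCommand("644", false, "part-00000");
    // On Windows: { <HADOOP_HOME>\bin\winutils.exe, "chmod", "644", "part-00000" }
    // On Unix:    { "chmod", "644", "part-00000" }
    // The array can then be handed to a process runner such as
    // Shell.ShellCommandExecutor.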
+
+ /** Return a command to set owner */
+ public static String[] getSetOwnerCommand(String owner) {
+ return (WINDOWS) ? new String[] { WINUTILS, "chown", "\"" + owner + "\"" }
+ : new String[] { "chown", owner };
+ }
+
+ /** Return a command to create symbolic links */
+ public static String[] getSymlinkCommand(String target, String link) {
+ return WINDOWS ? new String[] { WINUTILS, "symlink", link, target }
+ : new String[] { "ln", "-s", target, link };
+ }
+
/** a Unix command to set permission */
public static final String SET_PERMISSION_COMMAND = "chmod";
/** a Unix command to set owner */
public static final String SET_OWNER_COMMAND = "chown";
+
+  /** a Unix command to change the group ownership of a file */
public static final String SET_GROUP_COMMAND = "chgrp";
/** a Unix command to create a link */
public static final String LINK_COMMAND = "ln";
/** a Unix command to get a link target */
public static final String READ_LINK_COMMAND = "readlink";
- /** Return a Unix command to get permission information. */
- public static String[] getGET_PERMISSION_COMMAND() {
- //force /bin/ls, except on windows.
- return new String[] {(WINDOWS ? "ls" : "/bin/ls"), "-ld"};
- }
/**Time after which the executing script would be timedout*/
protected long timeOutInterval = 0L;
/** If or not script timed out*/
private AtomicBoolean timedOut;
+
+ /** Centralized logic to discover and validate the sanity of the Hadoop
+ * home directory. Returns either NULL or a directory that exists and
+ * was specified via either -Dhadoop.home.dir or the HADOOP_HOME ENV
+ * variable. This does a lot of work so it should only be called
+ * privately for initialization once per process.
+ **/
+ private static String checkHadoopHome() {
+
+ // first check the Dflag hadoop.home.dir with JVM scope
+ String home = System.getProperty("hadoop.home.dir");
+
+ // fall back to the system/user-global env variable
+ if (home == null) {
+ home = System.getenv("HADOOP_HOME");
+ }
+
+ try {
+ // couldn't find either setting for hadoop's home directory
+ if (home == null) {
+ throw new IOException("HADOOP_HOME or hadoop.home.dir are not set.");
+ }
+
+ if (home.startsWith("\"") && home.endsWith("\"")) {
+ home = home.substring(1, home.length()-1);
+ }
+
+ // check that the home setting is actually a directory that exists
+ File homedir = new File(home);
+ if (!homedir.isAbsolute() || !homedir.exists() || !homedir.isDirectory()) {
+ throw new IOException("Hadoop home directory " + homedir
+ + " does not exist, is not a directory, or is not an absolute path.");
+ }
+
+ home = homedir.getCanonicalPath();
+
+ } catch (IOException ioe) {
+ LOG.error("Failed to detect a valid hadoop home directory", ioe);
+ home = null;
+ }
+
+ return home;
+ }
+ private static String HADOOP_HOME_DIR = checkHadoopHome();
+
+ // Public getter, throws an exception if HADOOP_HOME failed validation
+ // checks and is being referenced downstream.
+ public static final String getHadoopHome() throws IOException {
+ if (HADOOP_HOME_DIR == null) {
+ throw new IOException("Misconfigured HADOOP_HOME cannot be referenced.");
+ }
+
+ return HADOOP_HOME_DIR;
+ }
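The home directory can therefore be supplied in either of two ways, with the JVM property taking precedence; a sketch with hypothetical paths:

    // Either:  java -Dhadoop.home.dir=C:\hdp ...
    // or:      set HADOOP_HOME=C:\hdp        (before launching the JVM)
    String home = Shell.getHadoopHome();  // throws IOException if neither points
                                          // to an existing absolute directory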
+
+ /** fully qualify the path to a binary that should be in a known hadoop
+ * bin location. This is primarily useful for disambiguating call-outs
+ * to executable sub-components of Hadoop to avoid clashes with other
+ * executables that may be in the path. Caveat: this call doesn't
+ * just format the path to the bin directory. It also checks for file
+ * existence of the composed path. The output of this call should be
+ * cached by callers.
+ * */
+ public static final String getQualifiedBinPath(String executable)
+ throws IOException {
+ // construct hadoop bin path to the specified executable
+ String fullExeName = HADOOP_HOME_DIR + File.separator + "bin"
+ + File.separator + executable;
+
+ File exeFile = new File(fullExeName);
+ if (!exeFile.exists()) {
+ throw new IOException("Could not locate executable " + fullExeName
+ + " in the Hadoop binaries.");
+ }
+
+ return exeFile.getCanonicalPath();
+ }
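This is how WINUTILS is resolved just below; for instance (hypothetical install location):

    String winutils = Shell.getQualifiedBinPath("winutils.exe");
    // e.g. C:\hdp\bin\winutils.exe, later used to build commands such as
    // new String[] { Shell.WINUTILS, "ls", "-F", "D:\\data" }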
+
/** Set to true on Windows platforms */
public static final boolean WINDOWS /* borrowed from Path.WINDOWS */
= System.getProperty("os.name").startsWith("Windows");
+
+ public static final boolean LINUX
+ = System.getProperty("os.name").startsWith("Linux");
+ /** a Windows utility to emulate Unix commands */
+ public static final String WINUTILS = getWinUtilsPath();
+
+ public static final String getWinUtilsPath() {
+ String winUtilsPath = null;
+
+ try {
+ if (WINDOWS) {
+ winUtilsPath = getQualifiedBinPath("winutils.exe");
+ }
+ } catch (IOException ioe) {
+ LOG.error("Failed to locate the winutils binary in the hadoop binary path",
+ ioe);
+ }
+
+ return winUtilsPath;
+ }
+
+ /** Token separator regex used to parse Shell tool outputs */
+ public static final String TOKEN_SEPARATOR_REGEX
+ = WINDOWS ? "[|\n\r]" : "[ \t\n\r\f]";
+
private long interval; // refresh interval in msec
private long lastTime; // last time the command was performed
  private Map<String, String> environment; // env for the command execution
@@ -144,7 +307,19 @@ private void runCommand() throws IOException {
builder.directory(this.dir);
}
- process = builder.start();
+ if (Shell.WINDOWS) {
+ synchronized (WindowsProcessLaunchLock) {
+ // To workaround the race condition issue with child processes
+ // inheriting unintended handles during process launch that can
+ // lead to hangs on reading output and error streams, we
+ // serialize process creation. More info available at:
+ // http://support.microsoft.com/kb/315939
+ process = builder.start();
+ }
+ } else {
+ process = builder.start();
+ }
+
if (timeOutInterval > 0) {
timeOutTimer = new Timer("Shell command timeout");
timeoutTimerTask = new ShellTimeoutTimerTask(
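The shape of the serialized launch used in runCommand() above, as a stand-alone sketch (assuming a ProcessBuilder prepared by the caller; this is not the patch's code):

    ProcessBuilder builder = new ProcessBuilder("cmd", "/c", "dir");
    Process process;
    if (Shell.WINDOWS) {
      synchronized (Shell.WindowsProcessLaunchLock) {
        // Serialize CreateProcess calls so concurrently launched children do
        // not inherit each other's pipe handles (see the KB 315939 reference).
        process = builder.start();
      }
    } else {
      process = builder.start();
    }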
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 898901e505..f2591f8104 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -30,12 +30,16 @@
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
+import java.util.Map;
import java.util.StringTokenizer;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.Shell;
import com.google.common.net.InetAddresses;
@@ -51,6 +55,27 @@ public class StringUtils {
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 0;
+ /**
+ * Shell environment variables: $ followed by one letter or _ followed by
+ * multiple letters, numbers, or underscores. The group captures the
+ * environment variable name without the leading $.
+ */
+ public static final Pattern SHELL_ENV_VAR_PATTERN =
+ Pattern.compile("\\$([A-Za-z_]{1}[A-Za-z0-9_]*)");
+
+ /**
+ * Windows environment variables: surrounded by %. The group captures the
+ * environment variable name without the leading and trailing %.
+ */
+ public static final Pattern WIN_ENV_VAR_PATTERN = Pattern.compile("%(.*?)%");
+
+ /**
+ * Regular expression that matches and captures environment variable names
+ * according to platform-specific rules.
+ */
+ public static final Pattern ENV_VAR_PATTERN = Shell.WINDOWS ?
+ WIN_ENV_VAR_PATTERN : SHELL_ENV_VAR_PATTERN;
+
/**
* Make a string representation of the exception.
* @param e The exception to stringify
@@ -791,6 +816,28 @@ public static String join(CharSequence separator, Iterable> strings) {
return sb.toString();
}
+ /**
+ * Concatenates strings, using a separator.
+ *
+ * @param separator to join with
+ * @param strings to join
+ * @return the joined string
+ */
+ public static String join(CharSequence separator, String[] strings) {
+ // Ideally we don't have to duplicate the code here if array is iterable.
+ StringBuilder sb = new StringBuilder();
+ boolean first = true;
+ for (String s : strings) {
+ if (first) {
+ first = false;
+ } else {
+ sb.append(separator);
+ }
+ sb.append(s);
+ }
+ return sb.toString();
+ }
+
/**
* Convert SOME_STUFF to SomeStuff
*
@@ -806,4 +853,37 @@ public static String camelize(String s) {
return sb.toString();
}
+
+ /**
+ * Matches a template string against a pattern, replaces matched tokens with
+ * the supplied replacements, and returns the result. The regular expression
+ * must use a capturing group. The value of the first capturing group is used
+ * to look up the replacement. If no replacement is found for the token, then
+ * it is replaced with the empty string.
+ *
+ * For example, assume template is "%foo%_%bar%_%baz%", pattern is "%(.*?)%",
+ * and replacements contains 2 entries, mapping "foo" to "zoo" and "baz" to
+ * "zaz". The result returned would be "zoo__zaz".
+ *
+ * @param template String template to receive replacements
+ * @param pattern Pattern to match for identifying tokens, must use a capturing
+ * group
+   * @param replacements Map<String, String> mapping tokens identified by the
+ * capturing group to their replacement values
+ * @return String template with replacements
+ */
+ public static String replaceTokens(String template, Pattern pattern,
+      Map<String, String> replacements) {
+ StringBuffer sb = new StringBuffer();
+ Matcher matcher = pattern.matcher(template);
+ while (matcher.find()) {
+ String replacement = replacements.get(matcher.group(1));
+ if (replacement == null) {
+ replacement = "";
+ }
+ matcher.appendReplacement(sb, Matcher.quoteReplacement(replacement));
+ }
+ matcher.appendTail(sb);
+ return sb.toString();
+ }
}
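Reproducing the javadoc example above with the Windows pattern (illustrative):

    Map<String, String> repl = new HashMap<String, String>();
    repl.put("foo", "zoo");
    repl.put("baz", "zaz");
    String s = StringUtils.replaceTokens("%foo%_%bar%_%baz%",
        StringUtils.WIN_ENV_VAR_PATTERN, repl);
    // s is "zoo__zaz" -- "bar" has no mapping, so it is replaced with ""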
diff --git a/hadoop-common-project/hadoop-common/src/main/java/overview.html b/hadoop-common-project/hadoop-common/src/main/java/overview.html
index c0cafc6408..759c093aa5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/overview.html
+++ b/hadoop-common-project/hadoop-common/src/main/java/overview.html
@@ -60,9 +60,7 @@
Platforms
 Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
- Win32 is supported as a development platform. Distributed operation
- has not been well tested on Win32, so this is not a production
- platform.
+ Windows is also a supported platform.
@@ -84,15 +82,6 @@
Requisite Software
-
Additional requirements for Windows
-
-
-
- Cygwin - Required for shell support in
- addition to the required software above.
-
-
-
Installing Required Software
If your platform does not have the required software listed above, you
@@ -104,13 +93,6 @@
Installing Required Software
$ sudo apt-get install rsync
-
On Windows, if you did not install the required software when you
-installed cygwin, start the cygwin installer and select the packages:
-
-
openssh - the "Net" category
-
rsync - the "Net" category
-
-
Getting Started
First, you need to get a copy of the Hadoop code.
diff --git a/hadoop-common-project/hadoop-common/src/main/native/native.sln b/hadoop-common-project/hadoop-common/src/main/native/native.sln
new file mode 100644
index 0000000000..40a78215d7
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/native/native.sln
@@ -0,0 +1,48 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "native", "native.vcxproj", "{4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Mixed Platforms = Debug|Mixed Platforms
+ Debug|Win32 = Debug|Win32
+ Debug|x64 = Debug|x64
+ Release|Mixed Platforms = Release|Mixed Platforms
+ Release|Win32 = Release|Win32
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Mixed Platforms.ActiveCfg = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Mixed Platforms.Build.0 = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Win32.ActiveCfg = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Win32.Build.0 = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|x64.ActiveCfg = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|x64.Build.0 = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Mixed Platforms.ActiveCfg = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Mixed Platforms.Build.0 = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Win32.ActiveCfg = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Win32.Build.0 = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|x64.ActiveCfg = Release|x64
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|x64.Build.0 = Release|x64
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
new file mode 100644
index 0000000000..73b6cb82a3
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
@@ -0,0 +1,96 @@
+
+
+
+
+
+
+
+ Release
+ x64
+
+
+
+ {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}
+ Win32Proj
+ native
+
+
+
+ DynamicLibrary
+ false
+ true
+ Unicode
+
+
+
+
+
+
+
+
+
+ false
+ ..\..\..\target\bin\
+ ..\..\..\target\native\$(Configuration)\
+ hadoop
+
+
+
+ Level3
+ NotUsing
+ MaxSpeed
+ true
+ true
+ WIN32;NDEBUG;_WINDOWS;_USRDLL;NATIVE_EXPORTS;%(PreprocessorDefinitions)
+ ..\winutils\include;..\..\..\target\native\javah;%JAVA_HOME%\include;%JAVA_HOME%\include\win32;.\src;%(AdditionalIncludeDirectories)
+ CompileAsC
+ 4244
+
+
+ Windows
+ true
+ true
+ true
+ Ws2_32.lib;libwinutils.lib;%(AdditionalDependencies)
+ ..\..\..\target\bin;%(AdditionalLibraryDirectories)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters
new file mode 100644
index 0000000000..0ef3a17bcd
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj.filters
@@ -0,0 +1,87 @@
+
+
+
+
+
+
+
+ {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
+ cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
+
+
+ {93995380-89BD-4b04-88EB-625FBE52EBFB}
+ h;hpp;hxx;hm;inl;inc;xsd
+
+
+ {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
+ rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
+
+
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Header Files
+
+
+ Header Files
+
+
+ Header Files
+
+
+ Header Files
+
+
+
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
index a52e490b0f..b421aa0f54 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
@@ -16,10 +16,14 @@
* limitations under the License.
*/
-#include "config.h"
+
#include "org_apache_hadoop.h"
#include "org_apache_hadoop_io_compress_lz4_Lz4Compressor.h"
+#ifdef UNIX
+#include "config.h"
+#endif // UNIX
+
//****************************
// Simple Functions
//****************************
@@ -61,6 +65,9 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_init
JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_compressBytesDirect
(JNIEnv *env, jobject thisj){
+ const char* uncompressed_bytes;
+ char *compressed_bytes;
+
// Get members of Lz4Compressor
jobject clazz = (*env)->GetStaticObjectField(env, thisj, Lz4Compressor_clazz);
jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, Lz4Compressor_uncompressedDirectBuf);
@@ -70,7 +77,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_comp
// Get the input direct buffer
LOCK_CLASS(env, clazz, "Lz4Compressor");
- const char* uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+ uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
UNLOCK_CLASS(env, clazz, "Lz4Compressor");
if (uncompressed_bytes == 0) {
@@ -79,7 +86,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Compressor_comp
// Get the output direct buffer
LOCK_CLASS(env, clazz, "Lz4Compressor");
- char* compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+ compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
UNLOCK_CLASS(env, clazz, "Lz4Compressor");
if (compressed_bytes == 0) {
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
index ef351bba7d..08d1b606f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
@@ -16,10 +16,13 @@
* limitations under the License.
*/
-#include "config.h"
#include "org_apache_hadoop.h"
#include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h"
+#ifdef UNIX
+#include "config.h"
+#endif // UNIX
+
int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize, int maxOutputSize);
/*
@@ -58,6 +61,9 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_in
JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_decompressBytesDirect
(JNIEnv *env, jobject thisj){
+ const char *compressed_bytes;
+ char *uncompressed_bytes;
+
// Get members of Lz4Decompressor
jobject clazz = (*env)->GetStaticObjectField(env,thisj, Lz4Decompressor_clazz);
jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, Lz4Decompressor_compressedDirectBuf);
@@ -67,7 +73,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_de
// Get the input direct buffer
LOCK_CLASS(env, clazz, "Lz4Decompressor");
- const char* compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+ compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
UNLOCK_CLASS(env, clazz, "Lz4Decompressor");
if (compressed_bytes == 0) {
@@ -76,7 +82,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_de
// Get the output direct buffer
LOCK_CLASS(env, clazz, "Lz4Decompressor");
- char* uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+ uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
UNLOCK_CLASS(env, clazz, "Lz4Decompressor");
if (uncompressed_bytes == 0) {
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
index 96b3c275bf..07c1620a08 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
@@ -16,12 +16,18 @@
* limitations under the License.
*/
-#include
+
+#if defined HADOOP_SNAPPY_LIBRARY
+
#include
#include
#include
+#ifdef UNIX
+#include
#include "config.h"
+#endif // UNIX
+
#include "org_apache_hadoop_io_compress_snappy.h"
#include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
@@ -81,7 +87,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
UNLOCK_CLASS(env, clazz, "SnappyCompressor");
if (uncompressed_bytes == 0) {
- return 0;
+ return (jint)0;
}
// Get the output direct buffer
@@ -90,7 +96,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
UNLOCK_CLASS(env, clazz, "SnappyCompressor");
if (compressed_bytes == 0) {
- return 0;
+ return (jint)0;
}
/* size_t should always be 4 bytes or larger. */
@@ -109,3 +115,5 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
(*env)->SetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen, 0);
return (jint)buf_len;
}
+
+#endif //define HADOOP_SNAPPY_LIBRARY
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
index d7602e144d..9180384c5a 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
@@ -16,12 +16,18 @@
* limitations under the License.
*/
-#include
+
+#if defined HADOOP_SNAPPY_LIBRARY
+
#include
#include
#include
+#ifdef UNIX
#include "config.h"
+#include
+#endif
+
#include "org_apache_hadoop_io_compress_snappy.h"
#include "org_apache_hadoop_io_compress_snappy_SnappyDecompressor.h"
@@ -103,3 +109,5 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompres
return (jint)uncompressed_direct_buf_len;
}
+
+#endif //define HADOOP_SNAPPY_LIBRARY
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
index 689c783ef7..7298892c1c 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
@@ -16,12 +16,15 @@
* limitations under the License.
*/
-#include
#include
#include
#include
+#ifdef UNIX
+#include
#include "config.h"
+#endif
+
#include "org_apache_hadoop_io_compress_zlib.h"
#include "org_apache_hadoop_io_compress_zlib_ZlibCompressor.h"
@@ -35,48 +38,124 @@ static jfieldID ZlibCompressor_directBufferSize;
static jfieldID ZlibCompressor_finish;
static jfieldID ZlibCompressor_finished;
+#ifdef UNIX
static int (*dlsym_deflateInit2_)(z_streamp, int, int, int, int, int, const char *, int);
static int (*dlsym_deflate)(z_streamp, int);
static int (*dlsym_deflateSetDictionary)(z_streamp, const Bytef *, uInt);
static int (*dlsym_deflateReset)(z_streamp);
static int (*dlsym_deflateEnd)(z_streamp);
+#endif
+
+#ifdef WINDOWS
+#include
+typedef int (__cdecl *__dlsym_deflateInit2_) (z_streamp, int, int, int, int, int, const char *, int);
+typedef int (__cdecl *__dlsym_deflate) (z_streamp, int);
+typedef int (__cdecl *__dlsym_deflateSetDictionary) (z_streamp, const Bytef *, uInt);
+typedef int (__cdecl *__dlsym_deflateReset) (z_streamp);
+typedef int (__cdecl *__dlsym_deflateEnd) (z_streamp);
+static __dlsym_deflateInit2_ dlsym_deflateInit2_;
+static __dlsym_deflate dlsym_deflate;
+static __dlsym_deflateSetDictionary dlsym_deflateSetDictionary;
+static __dlsym_deflateReset dlsym_deflateReset;
+static __dlsym_deflateEnd dlsym_deflateEnd;
+
+// Try to load zlib.dll from the dir where hadoop.dll is located.
+HANDLE LoadZlibTryHadoopNativeDir() {
+ HMODULE libz = NULL;
+ PCWSTR HADOOP_DLL = L"hadoop.dll";
+ size_t HADOOP_DLL_LEN = 10;
+ WCHAR path[MAX_PATH] = { 0 };
+ BOOL isPathValid = FALSE;
+
+ // Get hadoop.dll full path
+ HMODULE hModule = GetModuleHandle(HADOOP_DLL);
+ if (hModule != NULL) {
+ if (GetModuleFileName(hModule, path, MAX_PATH) > 0) {
+ size_t size = 0;
+ if (StringCchLength(path, MAX_PATH, &size) == S_OK) {
+
+ // Update path variable to have the full path to the zlib.dll
+ size = size - HADOOP_DLL_LEN;
+ if (size >= 0) {
+ path[size] = L'\0';
+ if (StringCchCat(path, MAX_PATH, HADOOP_ZLIB_LIBRARY) == S_OK) {
+ isPathValid = TRUE;
+ }
+ }
+ }
+ }
+ }
+
+ if (isPathValid) {
+ libz = LoadLibrary(path);
+ }
+
+ // fallback to system paths
+ if (!libz) {
+ libz = LoadLibrary(HADOOP_ZLIB_LIBRARY);
+ }
+
+ return libz;
+}
+#endif
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_initIDs(
JNIEnv *env, jclass class
) {
+#ifdef UNIX
// Load libz.so
void *libz = dlopen(HADOOP_ZLIB_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
- if (!libz) {
+ if (!libz) {
THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load libz.so");
return;
}
+#endif
+#ifdef WINDOWS
+ HMODULE libz = LoadZlibTryHadoopNativeDir();
+
+ if (!libz) {
+ THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load zlib1.dll");
+ return;
+ }
+#endif
+
+#ifdef UNIX
// Locate the requisite symbols from libz.so
dlerror(); // Clear any existing error
- LOAD_DYNAMIC_SYMBOL(dlsym_deflateInit2_, env, libz, "deflateInit2_");
- LOAD_DYNAMIC_SYMBOL(dlsym_deflate, env, libz, "deflate");
- LOAD_DYNAMIC_SYMBOL(dlsym_deflateSetDictionary, env, libz, "deflateSetDictionary");
- LOAD_DYNAMIC_SYMBOL(dlsym_deflateReset, env, libz, "deflateReset");
- LOAD_DYNAMIC_SYMBOL(dlsym_deflateEnd, env, libz, "deflateEnd");
+ LOAD_DYNAMIC_SYMBOL(dlsym_deflateInit2_, env, libz, "deflateInit2_");
+ LOAD_DYNAMIC_SYMBOL(dlsym_deflate, env, libz, "deflate");
+ LOAD_DYNAMIC_SYMBOL(dlsym_deflateSetDictionary, env, libz, "deflateSetDictionary");
+ LOAD_DYNAMIC_SYMBOL(dlsym_deflateReset, env, libz, "deflateReset");
+ LOAD_DYNAMIC_SYMBOL(dlsym_deflateEnd, env, libz, "deflateEnd");
+#endif
+
+#ifdef WINDOWS
+ LOAD_DYNAMIC_SYMBOL(__dlsym_deflateInit2_, dlsym_deflateInit2_, env, libz, "deflateInit2_");
+ LOAD_DYNAMIC_SYMBOL(__dlsym_deflate, dlsym_deflate, env, libz, "deflate");
+ LOAD_DYNAMIC_SYMBOL(__dlsym_deflateSetDictionary, dlsym_deflateSetDictionary, env, libz, "deflateSetDictionary");
+ LOAD_DYNAMIC_SYMBOL(__dlsym_deflateReset, dlsym_deflateReset, env, libz, "deflateReset");
+ LOAD_DYNAMIC_SYMBOL(__dlsym_deflateEnd, dlsym_deflateEnd, env, libz, "deflateEnd");
+#endif
// Initialize the requisite fieldIds
- ZlibCompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz",
+ ZlibCompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz",
"Ljava/lang/Class;");
ZlibCompressor_stream = (*env)->GetFieldID(env, class, "stream", "J");
ZlibCompressor_finish = (*env)->GetFieldID(env, class, "finish", "Z");
ZlibCompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z");
- ZlibCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class,
- "uncompressedDirectBuf",
+ ZlibCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class,
+ "uncompressedDirectBuf",
"Ljava/nio/Buffer;");
- ZlibCompressor_uncompressedDirectBufOff = (*env)->GetFieldID(env, class,
+ ZlibCompressor_uncompressedDirectBufOff = (*env)->GetFieldID(env, class,
"uncompressedDirectBufOff", "I");
- ZlibCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, class,
+ ZlibCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, class,
"uncompressedDirectBufLen", "I");
- ZlibCompressor_compressedDirectBuf = (*env)->GetFieldID(env, class,
- "compressedDirectBuf",
+ ZlibCompressor_compressedDirectBuf = (*env)->GetFieldID(env, class,
+ "compressedDirectBuf",
"Ljava/nio/Buffer;");
- ZlibCompressor_directBufferSize = (*env)->GetFieldID(env, class,
+ ZlibCompressor_directBufferSize = (*env)->GetFieldID(env, class,
"directBufferSize", "I");
}
@@ -84,7 +163,9 @@ JNIEXPORT jlong JNICALL
Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_init(
JNIEnv *env, jclass class, jint level, jint strategy, jint windowBits
) {
- // Create a z_stream
+ int rv = 0;
+ static const int memLevel = 8; // See zconf.h
+ // Create a z_stream
z_stream *stream = malloc(sizeof(z_stream));
if (!stream) {
THROW(env, "java/lang/OutOfMemoryError", NULL);
@@ -93,17 +174,16 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_init(
memset((void*)stream, 0, sizeof(z_stream));
// Initialize stream
- static const int memLevel = 8; // See zconf.h
- int rv = (*dlsym_deflateInit2_)(stream, level, Z_DEFLATED, windowBits,
+ rv = (*dlsym_deflateInit2_)(stream, level, Z_DEFLATED, windowBits,
memLevel, strategy, ZLIB_VERSION, sizeof(z_stream));
-
+
if (rv != Z_OK) {
// Contingency - Report error by throwing appropriate exceptions
free(stream);
stream = NULL;
-
+
switch (rv) {
- case Z_MEM_ERROR:
+ case Z_MEM_ERROR:
{
THROW(env, "java/lang/OutOfMemoryError", NULL);
}
@@ -120,27 +200,28 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_init(
break;
}
}
-
+
return JLONG(stream);
}
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_setDictionary(
- JNIEnv *env, jclass class, jlong stream,
+ JNIEnv *env, jclass class, jlong stream,
jarray b, jint off, jint len
) {
+ int rv = 0;
Bytef *buf = (*env)->GetPrimitiveArrayCritical(env, b, 0);
if (!buf) {
return;
}
- int rv = dlsym_deflateSetDictionary(ZSTREAM(stream), buf + off, len);
+ rv = dlsym_deflateSetDictionary(ZSTREAM(stream), buf + off, len);
(*env)->ReleasePrimitiveArrayCritical(env, b, buf, 0);
-
+
if (rv != Z_OK) {
// Contingency - Report error by throwing appropriate exceptions
switch (rv) {
case Z_STREAM_ERROR:
- {
+ {
THROW(env, "java/lang/IllegalArgumentException", NULL);
}
break;
@@ -157,75 +238,85 @@ JNIEXPORT jint JNICALL
Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_deflateBytesDirect(
JNIEnv *env, jobject this
) {
+ jobject clazz = NULL;
+ jobject uncompressed_direct_buf = NULL;
+ jint uncompressed_direct_buf_off = 0;
+ jint uncompressed_direct_buf_len = 0;
+ jobject compressed_direct_buf = NULL;
+ jint compressed_direct_buf_len = 0;
+ jboolean finish;
+ Bytef* uncompressed_bytes = NULL;
+ Bytef* compressed_bytes = NULL;
+ int rv = 0;
+ jint no_compressed_bytes = 0;
// Get members of ZlibCompressor
z_stream *stream = ZSTREAM(
- (*env)->GetLongField(env, this,
+ (*env)->GetLongField(env, this,
ZlibCompressor_stream)
);
if (!stream) {
THROW(env, "java/lang/NullPointerException", NULL);
return (jint)0;
- }
+ }
// Get members of ZlibCompressor
- jobject clazz = (*env)->GetStaticObjectField(env, this,
+ clazz = (*env)->GetStaticObjectField(env, this,
ZlibCompressor_clazz);
- jobject uncompressed_direct_buf = (*env)->GetObjectField(env, this,
+ uncompressed_direct_buf = (*env)->GetObjectField(env, this,
ZlibCompressor_uncompressedDirectBuf);
- jint uncompressed_direct_buf_off = (*env)->GetIntField(env, this,
+ uncompressed_direct_buf_off = (*env)->GetIntField(env, this,
ZlibCompressor_uncompressedDirectBufOff);
- jint uncompressed_direct_buf_len = (*env)->GetIntField(env, this,
+ uncompressed_direct_buf_len = (*env)->GetIntField(env, this,
ZlibCompressor_uncompressedDirectBufLen);
- jobject compressed_direct_buf = (*env)->GetObjectField(env, this,
+ compressed_direct_buf = (*env)->GetObjectField(env, this,
ZlibCompressor_compressedDirectBuf);
- jint compressed_direct_buf_len = (*env)->GetIntField(env, this,
+ compressed_direct_buf_len = (*env)->GetIntField(env, this,
ZlibCompressor_directBufferSize);
- jboolean finish = (*env)->GetBooleanField(env, this, ZlibCompressor_finish);
+ finish = (*env)->GetBooleanField(env, this, ZlibCompressor_finish);
// Get the input direct buffer
LOCK_CLASS(env, clazz, "ZlibCompressor");
- Bytef* uncompressed_bytes = (*env)->GetDirectBufferAddress(env,
+ uncompressed_bytes = (*env)->GetDirectBufferAddress(env,
uncompressed_direct_buf);
UNLOCK_CLASS(env, clazz, "ZlibCompressor");
-
+
if (uncompressed_bytes == 0) {
return (jint)0;
}
-
+
// Get the output direct buffer
LOCK_CLASS(env, clazz, "ZlibCompressor");
- Bytef* compressed_bytes = (*env)->GetDirectBufferAddress(env,
+ compressed_bytes = (*env)->GetDirectBufferAddress(env,
compressed_direct_buf);
UNLOCK_CLASS(env, clazz, "ZlibCompressor");
if (compressed_bytes == 0) {
return (jint)0;
}
-
+
// Re-calibrate the z_stream
stream->next_in = uncompressed_bytes + uncompressed_direct_buf_off;
stream->next_out = compressed_bytes;
stream->avail_in = uncompressed_direct_buf_len;
- stream->avail_out = compressed_direct_buf_len;
-
- // Compress
- int rv = dlsym_deflate(stream, finish ? Z_FINISH : Z_NO_FLUSH);
+ stream->avail_out = compressed_direct_buf_len;
+
+ // Compress
+ rv = dlsym_deflate(stream, finish ? Z_FINISH : Z_NO_FLUSH);
- jint no_compressed_bytes = 0;
switch (rv) {
// Contingency? - Report error by throwing appropriate exceptions
case Z_STREAM_END:
{
(*env)->SetBooleanField(env, this, ZlibCompressor_finished, JNI_TRUE);
} // cascade
- case Z_OK:
+ case Z_OK:
{
uncompressed_direct_buf_off += uncompressed_direct_buf_len - stream->avail_in;
- (*env)->SetIntField(env, this,
+ (*env)->SetIntField(env, this,
ZlibCompressor_uncompressedDirectBufOff, uncompressed_direct_buf_off);
- (*env)->SetIntField(env, this,
+ (*env)->SetIntField(env, this,
ZlibCompressor_uncompressedDirectBufLen, stream->avail_in);
no_compressed_bytes = compressed_direct_buf_len - stream->avail_out;
}
@@ -238,7 +329,7 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibCompressor_deflateBytesDirect(
}
break;
}
-
+
return no_compressed_bytes;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
index 6abe36381f..8b78f41e1a 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
@@ -16,12 +16,15 @@
* limitations under the License.
*/
-#include
#include
#include
#include
+#ifdef UNIX
+#include
#include "config.h"
+#endif
+
#include "org_apache_hadoop_io_compress_zlib.h"
#include "org_apache_hadoop_io_compress_zlib_ZlibDecompressor.h"
@@ -35,48 +38,88 @@ static jfieldID ZlibDecompressor_directBufferSize;
static jfieldID ZlibDecompressor_needDict;
static jfieldID ZlibDecompressor_finished;
+#ifdef UNIX
static int (*dlsym_inflateInit2_)(z_streamp, int, const char *, int);
static int (*dlsym_inflate)(z_streamp, int);
static int (*dlsym_inflateSetDictionary)(z_streamp, const Bytef *, uInt);
static int (*dlsym_inflateReset)(z_streamp);
static int (*dlsym_inflateEnd)(z_streamp);
+#endif
+
+#ifdef WINDOWS
+#include
+typedef int (__cdecl *__dlsym_inflateInit2_)(z_streamp, int, const char *, int);
+typedef int (__cdecl *__dlsym_inflate)(z_streamp, int);
+typedef int (__cdecl *__dlsym_inflateSetDictionary)(z_streamp, const Bytef *, uInt);
+typedef int (__cdecl *__dlsym_inflateReset)(z_streamp);
+typedef int (__cdecl *__dlsym_inflateEnd)(z_streamp);
+static __dlsym_inflateInit2_ dlsym_inflateInit2_;
+static __dlsym_inflate dlsym_inflate;
+static __dlsym_inflateSetDictionary dlsym_inflateSetDictionary;
+static __dlsym_inflateReset dlsym_inflateReset;
+static __dlsym_inflateEnd dlsym_inflateEnd;
+extern HANDLE LoadZlibTryHadoopNativeDir();
+#endif
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_initIDs(
- JNIEnv *env, jclass class
+JNIEnv *env, jclass class
) {
// Load libz.so
- void *libz = dlopen(HADOOP_ZLIB_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
+#ifdef UNIX
+ void *libz = dlopen(HADOOP_ZLIB_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
if (!libz) {
THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load libz.so");
return;
- }
+ }
+#endif
+
+#ifdef WINDOWS
+ HMODULE libz = LoadZlibTryHadoopNativeDir();
+
+ if (!libz) {
+ THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load zlib1.dll");
+ return;
+ }
+#endif
+
// Locate the requisite symbols from libz.so
+#ifdef UNIX
dlerror(); // Clear any existing error
LOAD_DYNAMIC_SYMBOL(dlsym_inflateInit2_, env, libz, "inflateInit2_");
LOAD_DYNAMIC_SYMBOL(dlsym_inflate, env, libz, "inflate");
LOAD_DYNAMIC_SYMBOL(dlsym_inflateSetDictionary, env, libz, "inflateSetDictionary");
LOAD_DYNAMIC_SYMBOL(dlsym_inflateReset, env, libz, "inflateReset");
LOAD_DYNAMIC_SYMBOL(dlsym_inflateEnd, env, libz, "inflateEnd");
+#endif
- // Initialize the requisite fieldIds
- ZlibDecompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz",
+#ifdef WINDOWS
+ LOAD_DYNAMIC_SYMBOL(__dlsym_inflateInit2_, dlsym_inflateInit2_, env, libz, "inflateInit2_");
+ LOAD_DYNAMIC_SYMBOL(__dlsym_inflate, dlsym_inflate, env, libz, "inflate");
+ LOAD_DYNAMIC_SYMBOL(__dlsym_inflateSetDictionary, dlsym_inflateSetDictionary, env, libz, "inflateSetDictionary");
+ LOAD_DYNAMIC_SYMBOL(__dlsym_inflateReset, dlsym_inflateReset, env, libz, "inflateReset");
+ LOAD_DYNAMIC_SYMBOL(__dlsym_inflateEnd, dlsym_inflateEnd, env, libz, "inflateEnd");
+#endif
+
+
+ // Initialize the requisite fieldIds
+ ZlibDecompressor_clazz = (*env)->GetStaticFieldID(env, class, "clazz",
"Ljava/lang/Class;");
ZlibDecompressor_stream = (*env)->GetFieldID(env, class, "stream", "J");
ZlibDecompressor_needDict = (*env)->GetFieldID(env, class, "needDict", "Z");
ZlibDecompressor_finished = (*env)->GetFieldID(env, class, "finished", "Z");
- ZlibDecompressor_compressedDirectBuf = (*env)->GetFieldID(env, class,
- "compressedDirectBuf",
+ ZlibDecompressor_compressedDirectBuf = (*env)->GetFieldID(env, class,
+ "compressedDirectBuf",
"Ljava/nio/Buffer;");
- ZlibDecompressor_compressedDirectBufOff = (*env)->GetFieldID(env, class,
+ ZlibDecompressor_compressedDirectBufOff = (*env)->GetFieldID(env, class,
"compressedDirectBufOff", "I");
- ZlibDecompressor_compressedDirectBufLen = (*env)->GetFieldID(env, class,
+ ZlibDecompressor_compressedDirectBufLen = (*env)->GetFieldID(env, class,
"compressedDirectBufLen", "I");
- ZlibDecompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class,
- "uncompressedDirectBuf",
+ ZlibDecompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, class,
+ "uncompressedDirectBuf",
"Ljava/nio/Buffer;");
- ZlibDecompressor_directBufferSize = (*env)->GetFieldID(env, class,
+ ZlibDecompressor_directBufferSize = (*env)->GetFieldID(env, class,
"directBufferSize", "I");
}
@@ -84,21 +127,22 @@ JNIEXPORT jlong JNICALL
Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_init(
JNIEnv *env, jclass cls, jint windowBits
) {
+ int rv = 0;
z_stream *stream = malloc(sizeof(z_stream));
memset((void*)stream, 0, sizeof(z_stream));
if (stream == 0) {
THROW(env, "java/lang/OutOfMemoryError", NULL);
return (jlong)0;
- }
-
- int rv = dlsym_inflateInit2_(stream, windowBits, ZLIB_VERSION, sizeof(z_stream));
+ }
+
+ rv = dlsym_inflateInit2_(stream, windowBits, ZLIB_VERSION, sizeof(z_stream));
if (rv != Z_OK) {
// Contingency - Report error by throwing appropriate exceptions
free(stream);
stream = NULL;
-
+
switch (rv) {
case Z_MEM_ERROR:
{
@@ -112,7 +156,7 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_init(
break;
}
}
-
+
return JLONG(stream);
}
@@ -121,21 +165,22 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_setDictionary(
JNIEnv *env, jclass cls, jlong stream,
jarray b, jint off, jint len
) {
+ int rv = 0;
Bytef *buf = (*env)->GetPrimitiveArrayCritical(env, b, 0);
if (!buf) {
THROW(env, "java/lang/InternalError", NULL);
return;
}
- int rv = dlsym_inflateSetDictionary(ZSTREAM(stream), buf + off, len);
+ rv = dlsym_inflateSetDictionary(ZSTREAM(stream), buf + off, len);
(*env)->ReleasePrimitiveArrayCritical(env, b, buf, 0);
-
+
if (rv != Z_OK) {
// Contingency - Report error by throwing appropriate exceptions
switch (rv) {
case Z_STREAM_ERROR:
case Z_DATA_ERROR:
{
- THROW(env, "java/lang/IllegalArgumentException",
+ THROW(env, "java/lang/IllegalArgumentException",
(ZSTREAM(stream))->msg);
}
break;
@@ -152,62 +197,71 @@ JNIEXPORT jint JNICALL
Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_inflateBytesDirect(
JNIEnv *env, jobject this
) {
+ jobject clazz = NULL;
+ jarray compressed_direct_buf = NULL;
+ jint compressed_direct_buf_off = 0;
+ jint compressed_direct_buf_len = 0;
+ jarray uncompressed_direct_buf = NULL;
+ jint uncompressed_direct_buf_len = 0;
+ Bytef *compressed_bytes = NULL;
+ Bytef *uncompressed_bytes = NULL;
+ int rv = 0;
+ int no_decompressed_bytes = 0;
// Get members of ZlibDecompressor
z_stream *stream = ZSTREAM(
- (*env)->GetLongField(env, this,
+ (*env)->GetLongField(env, this,
ZlibDecompressor_stream)
);
if (!stream) {
THROW(env, "java/lang/NullPointerException", NULL);
return (jint)0;
- }
+ }
// Get members of ZlibDecompressor
- jobject clazz = (*env)->GetStaticObjectField(env, this,
+ clazz = (*env)->GetStaticObjectField(env, this,
ZlibDecompressor_clazz);
- jarray compressed_direct_buf = (jarray)(*env)->GetObjectField(env, this,
+ compressed_direct_buf = (jarray)(*env)->GetObjectField(env, this,
ZlibDecompressor_compressedDirectBuf);
- jint compressed_direct_buf_off = (*env)->GetIntField(env, this,
+ compressed_direct_buf_off = (*env)->GetIntField(env, this,
ZlibDecompressor_compressedDirectBufOff);
- jint compressed_direct_buf_len = (*env)->GetIntField(env, this,
+ compressed_direct_buf_len = (*env)->GetIntField(env, this,
ZlibDecompressor_compressedDirectBufLen);
- jarray uncompressed_direct_buf = (jarray)(*env)->GetObjectField(env, this,
+ uncompressed_direct_buf = (jarray)(*env)->GetObjectField(env, this,
ZlibDecompressor_uncompressedDirectBuf);
- jint uncompressed_direct_buf_len = (*env)->GetIntField(env, this,
+ uncompressed_direct_buf_len = (*env)->GetIntField(env, this,
ZlibDecompressor_directBufferSize);
// Get the input direct buffer
LOCK_CLASS(env, clazz, "ZlibDecompressor");
- Bytef *compressed_bytes = (*env)->GetDirectBufferAddress(env,
+ compressed_bytes = (*env)->GetDirectBufferAddress(env,
compressed_direct_buf);
UNLOCK_CLASS(env, clazz, "ZlibDecompressor");
-
+
if (!compressed_bytes) {
return (jint)0;
}
-
+
// Get the output direct buffer
LOCK_CLASS(env, clazz, "ZlibDecompressor");
- Bytef *uncompressed_bytes = (*env)->GetDirectBufferAddress(env,
+ uncompressed_bytes = (*env)->GetDirectBufferAddress(env,
uncompressed_direct_buf);
UNLOCK_CLASS(env, clazz, "ZlibDecompressor");
if (!uncompressed_bytes) {
return (jint)0;
}
-
+
// Re-calibrate the z_stream
stream->next_in = compressed_bytes + compressed_direct_buf_off;
stream->next_out = uncompressed_bytes;
stream->avail_in = compressed_direct_buf_len;
stream->avail_out = uncompressed_direct_buf_len;
-
+
// Decompress
- int rv = dlsym_inflate(stream, Z_PARTIAL_FLUSH);
+ rv = dlsym_inflate(stream, Z_PARTIAL_FLUSH);
// Contingency? - Report error by throwing appropriate exceptions
- int no_decompressed_bytes = 0;
switch (rv) {
case Z_STREAM_END:
{
@@ -216,9 +270,9 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_inflateBytesDirect(
case Z_OK:
{
compressed_direct_buf_off += compressed_direct_buf_len - stream->avail_in;
- (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff,
+ (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff,
compressed_direct_buf_off);
- (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen,
+ (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen,
stream->avail_in);
no_decompressed_bytes = uncompressed_direct_buf_len - stream->avail_out;
}
@@ -227,9 +281,9 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_inflateBytesDirect(
{
(*env)->SetBooleanField(env, this, ZlibDecompressor_needDict, JNI_TRUE);
compressed_direct_buf_off += compressed_direct_buf_len - stream->avail_in;
- (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff,
+ (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufOff,
compressed_direct_buf_off);
- (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen,
+ (*env)->SetIntField(env, this, ZlibDecompressor_compressedDirectBufLen,
stream->avail_in);
}
break;
@@ -251,7 +305,7 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_inflateBytesDirect(
}
break;
}
-
+
return no_decompressed_bytes;
}
@@ -299,4 +353,3 @@ Java_org_apache_hadoop_io_compress_zlib_ZlibDecompressor_end(
/**
* vim: sw=2: ts=2: et:
*/
-
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
index c53aa531c9..467e17921b 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
@@ -19,14 +19,23 @@
#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
#define ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
+#include "org_apache_hadoop.h"
+
+#ifdef UNIX
+#include
+#include
+#include
+#include
#include
#include
-#include
-#include
-#include
+#endif
-#include "config.h"
-#include "org_apache_hadoop.h"
+#ifdef WINDOWS
+#include
+#define HADOOP_ZLIB_LIBRARY L"zlib1.dll"
+#include
+#include
+#endif
/* A helper macro to convert the java 'stream-handle' to a z_stream pointer. */
#define ZSTREAM(stream) ((z_stream*)((ptrdiff_t)(stream)))
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
index be957b447a..47f8dc1c9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
@@ -18,6 +18,10 @@
#define _GNU_SOURCE
+#include "org_apache_hadoop.h"
+#include "org_apache_hadoop_io_nativeio_NativeIO.h"
+
+#ifdef UNIX
#include
#include
#include
@@ -31,14 +35,19 @@
#include
#include
#include
-
#include "config.h"
-#include "org_apache_hadoop.h"
-#include "org_apache_hadoop_io_nativeio_NativeIO.h"
+#endif
+
+#ifdef WINDOWS
+#include
+#include
+#include "winutils.h"
+#endif
+
#include "file_descriptor.h"
#include "errno_enum.h"
-// the NativeIO$Stat inner class and its constructor
+// the NativeIO$POSIX$Stat inner class and its constructor
static jclass stat_clazz;
static jmethodID stat_ctor;
@@ -53,26 +62,32 @@ static jobject pw_lock_object;
// Internal functions
static void throw_ioe(JNIEnv* env, int errnum);
+#ifdef UNIX
static ssize_t get_pw_buflen();
+#endif
/**
* Returns non-zero if the user has specified that the system
* has non-threadsafe implementations of getpwuid_r or getgrgid_r.
**/
static int workaround_non_threadsafe_calls(JNIEnv *env, jclass clazz) {
- jfieldID needs_workaround_field = (*env)->GetStaticFieldID(env, clazz,
- "workaroundNonThreadSafePasswdCalls", "Z");
+ jboolean result;
+ jfieldID needs_workaround_field = (*env)->GetStaticFieldID(
+ env, clazz,
+ "workaroundNonThreadSafePasswdCalls",
+ "Z");
PASS_EXCEPTIONS_RET(env, 0);
assert(needs_workaround_field);
- jboolean result = (*env)->GetStaticBooleanField(
+ result = (*env)->GetStaticBooleanField(
env, clazz, needs_workaround_field);
return result;
}
+#ifdef UNIX
static void stat_init(JNIEnv *env, jclass nativeio_class) {
// Init Stat
- jclass clazz = (*env)->FindClass(env, "org/apache/hadoop/io/nativeio/NativeIO$Stat");
+ jclass clazz = (*env)->FindClass(env, "org/apache/hadoop/io/nativeio/NativeIO$POSIX$Stat");
if (!clazz) {
return; // exception has been raised
}
@@ -85,6 +100,7 @@ static void stat_init(JNIEnv *env, jclass nativeio_class) {
if (!stat_ctor) {
return; // exception has been raised
}
+
jclass obj_class = (*env)->FindClass(env, "java/lang/Object");
if (!obj_class) {
return; // exception has been raised
@@ -99,6 +115,7 @@ static void stat_init(JNIEnv *env, jclass nativeio_class) {
pw_lock_object = (*env)->NewObject(env, obj_class, obj_ctor);
PASS_EXCEPTIONS(env);
pw_lock_object = (*env)->NewGlobalRef(env, pw_lock_object);
+
PASS_EXCEPTIONS(env);
}
}
@@ -113,6 +130,7 @@ static void stat_deinit(JNIEnv *env) {
pw_lock_object = NULL;
}
}
+#endif
static void nioe_init(JNIEnv *env) {
// Init NativeIOException
@@ -121,8 +139,15 @@ static void nioe_init(JNIEnv *env) {
PASS_EXCEPTIONS(env);
nioe_clazz = (*env)->NewGlobalRef(env, nioe_clazz);
+#ifdef UNIX
 nioe_ctor = (*env)->GetMethodID(env, nioe_clazz, "<init>",
"(Ljava/lang/String;Lorg/apache/hadoop/io/nativeio/Errno;)V");
+#endif
+
+#ifdef WINDOWS
+ nioe_ctor = (*env)->GetMethodID(env, nioe_clazz, "<init>",
+ "(Ljava/lang/String;I)V");
+#endif
}
static void nioe_deinit(JNIEnv *env) {
@@ -143,32 +168,46 @@ static void nioe_deinit(JNIEnv *env) {
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_initNative(
JNIEnv *env, jclass clazz) {
-
+#ifdef UNIX
stat_init(env, clazz);
PASS_EXCEPTIONS_GOTO(env, error);
+#endif
nioe_init(env);
PASS_EXCEPTIONS_GOTO(env, error);
fd_init(env);
PASS_EXCEPTIONS_GOTO(env, error);
+#ifdef UNIX
errno_enum_init(env);
PASS_EXCEPTIONS_GOTO(env, error);
+#endif
return;
error:
 // these are all idempotent and safe to call even if the
 // class wasn't initialized yet
+#ifdef UNIX
stat_deinit(env);
+#endif
nioe_deinit(env);
fd_deinit(env);
+#ifdef UNIX
errno_enum_deinit(env);
+#endif
}
/*
+ * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX
+ * Method: fstat
+ * Signature: (Ljava/io/FileDescriptor;)Lorg/apache/hadoop/io/nativeio/NativeIO$POSIX$Stat;
* public static native Stat fstat(FileDescriptor fd);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
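+ * For example, the Java method NativeIO$POSIX.fstat maps to the native
+ * symbol Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_fstat.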
*/
JNIEXPORT jobject JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_fstat(
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_fstat(
JNIEnv *env, jclass clazz, jobject fd_object)
{
+#ifdef UNIX
jobject ret = NULL;
int fd = fd_get(env, fd_object);
@@ -187,14 +226,26 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_fstat(
cleanup:
return ret;
+#endif
+
+#ifdef WINDOWS
+ THROW(env, "java/io/IOException",
+ "The function POSIX.fstat() is not supported on Windows");
+ return NULL;
+#endif
}
+
+
/**
* public static native void posix_fadvise(
* FileDescriptor fd, long offset, long len, int flags);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
*/
JNIEXPORT void JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_posix_1fadvise(
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_posix_1fadvise(
JNIEnv *env, jclass clazz,
jobject fd_object, jlong offset, jlong len, jint flags)
{
@@ -240,9 +291,12 @@ static int manual_sync_file_range (int fd, __off64_t from, __off64_t to, unsigne
/**
* public static native void sync_file_range(
* FileDescriptor fd, long offset, long len, int flags);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
*/
JNIEXPORT void JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_sync_1file_1range(
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_sync_1file_1range(
JNIEnv *env, jclass clazz,
jobject fd_object, jlong offset, jlong len, jint flags)
{
@@ -284,13 +338,20 @@ static int toFreeBSDFlags(int flags)
#endif
/*
+ * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX
+ * Method: open
+ * Signature: (Ljava/lang/String;II)Ljava/io/FileDescriptor;
* public static native FileDescriptor open(String path, int flags, int mode);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
*/
JNIEXPORT jobject JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_open(
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_open(
JNIEnv *env, jclass clazz, jstring j_path,
jint flags, jint mode)
{
+#ifdef UNIX
#ifdef __FreeBSD__
flags = toFreeBSDFlags(flags);
#endif
@@ -318,16 +379,90 @@ cleanup:
(*env)->ReleaseStringUTFChars(env, j_path, path);
}
return ret;
+#endif
+
+#ifdef WINDOWS
+ THROW(env, "java/io/IOException",
+ "The function POSIX.open() is not supported on Windows");
+ return NULL;
+#endif
}
-/**
- * public static native void chmod(String path, int mode) throws IOException;
+/*
+ * Class: org_apache_hadoop_io_nativeio_NativeIO_Windows
+ * Method: createFile
+ * Signature: (Ljava/lang/String;JJJ)Ljava/io/FileDescriptor;
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
*/
-JNIEXPORT void JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_chmod(
- JNIEnv *env, jclass clazz, jstring j_path,
- jint mode)
+JNIEXPORT jobject JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_createFile
+ (JNIEnv *env, jclass clazz, jstring j_path,
+ jlong desiredAccess, jlong shareMode, jlong creationDisposition)
{
+#ifdef UNIX
+ THROW(env, "java/io/IOException",
+ "The function Windows.createFile() is not supported on Unix");
+ return NULL;
+#endif
+
+#ifdef WINDOWS
+ DWORD dwRtnCode = ERROR_SUCCESS;
+ BOOL isSymlink = FALSE;
+ BOOL isJunction = FALSE;
+ DWORD dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS;
+ jobject ret = (jobject) NULL;
+ HANDLE hFile = INVALID_HANDLE_VALUE;
+ WCHAR *path = (WCHAR *) (*env)->GetStringChars(env, j_path, (jboolean*)NULL);
+ if (path == NULL) goto cleanup;
+
+ // Set the flag for a symbolic link or a junction point only when it exists.
+ // According to MSDN, if the call to the CreateFile() function creates a new
+ // file, there is no change in behavior, so we do not throw if no file is found.
+ //
+ dwRtnCode = SymbolicLinkCheck(path, &isSymlink);
+ if (dwRtnCode != ERROR_SUCCESS && dwRtnCode != ERROR_FILE_NOT_FOUND) {
+ throw_ioe(env, dwRtnCode);
+ goto cleanup;
+ }
+ dwRtnCode = JunctionPointCheck(path, &isJunction);
+ if (dwRtnCode != ERROR_SUCCESS && dwRtnCode != ERROR_FILE_NOT_FOUND) {
+ throw_ioe(env, dwRtnCode);
+ goto cleanup;
+ }
+ if (isSymlink || isJunction)
+ dwFlagsAndAttributes |= FILE_FLAG_OPEN_REPARSE_POINT;
+
+ hFile = CreateFile(path,
+ (DWORD) desiredAccess,
+ (DWORD) shareMode,
+ (LPSECURITY_ATTRIBUTES ) NULL,
+ (DWORD) creationDisposition,
+ dwFlagsAndAttributes,
+ NULL);
+ if (hFile == INVALID_HANDLE_VALUE) {
+ throw_ioe(env, GetLastError());
+ goto cleanup;
+ }
+
+ ret = fd_create(env, (long) hFile);
+cleanup:
+ if (path != NULL) {
+ (*env)->ReleaseStringChars(env, j_path, (const jchar*)path);
+ }
+ return (jobject) ret;
+#endif
+}
+
+/*
+ * Class: org_apache_hadoop_io_nativeio_NativeIO_POSIX
+ * Method: chmod
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_chmodImpl
+ (JNIEnv *env, jclass clazz, jstring j_path, jint mode)
+{
+#ifdef UNIX
const char *path = (*env)->GetStringUTFChars(env, j_path, NULL);
if (path == NULL) return; // JVM throws Exception for us
@@ -336,15 +471,30 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_chmod(
}
(*env)->ReleaseStringUTFChars(env, j_path, path);
+#endif
+
+#ifdef WINDOWS
+ DWORD dwRtnCode = ERROR_SUCCESS;
+ LPCWSTR path = (LPCWSTR) (*env)->GetStringChars(env, j_path, NULL);
+ if (path == NULL) return; // JVM throws Exception for us
+
+ if ((dwRtnCode = ChangeFileModeByMask((LPCWSTR) path, mode)) != ERROR_SUCCESS)
+ {
+ throw_ioe(env, dwRtnCode);
+ }
+
+ (*env)->ReleaseStringChars(env, j_path, (const jchar*) path);
+#endif
}
/*
* static native String getUserName(int uid);
*/
JNIEXPORT jstring JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_getUserName(JNIEnv *env,
-jclass clazz, jint uid)
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getUserName(
+ JNIEnv *env, jclass clazz, jint uid)
{
+#ifdef UNIX
int pw_lock_locked = 0;
if (pw_lock_object != NULL) {
if ((*env)->MonitorEnter(env, pw_lock_object) != JNI_OK) {
@@ -396,15 +546,26 @@ cleanup:
}
if (pw_buf != NULL) free(pw_buf);
return jstr_username;
+#endif // UNIX
+
+#ifdef WINDOWS
+ THROW(env, "java/io/IOException",
+ "The function POSIX.getUserName() is not supported on Windows");
+ return NULL;
+#endif
}
/*
* static native String getGroupName(int gid);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
*/
JNIEXPORT jstring JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_getGroupName(JNIEnv *env,
-jclass clazz, jint gid)
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getGroupName(
+ JNIEnv *env, jclass clazz, jint gid)
{
+#ifdef UNIX
int pw_lock_locked = 0;
if (pw_lock_object != NULL) {
@@ -458,14 +619,21 @@ cleanup:
}
if (pw_buf != NULL) free(pw_buf);
return jstr_groupname;
-}
+#endif // UNIX
+#ifdef WINDOWS
+ THROW(env, "java/io/IOException",
+ "The function POSIX.getUserName() is not supported on Windows");
+ return NULL;
+#endif
+}
/*
 * Throw a java.io.IOException, generating the message from errno.
*/
static void throw_ioe(JNIEnv* env, int errnum)
{
+#ifdef UNIX
char message[80];
jstring jstr_message;
@@ -490,9 +658,51 @@ static void throw_ioe(JNIEnv* env, int errnum)
err:
if (jstr_message != NULL)
(*env)->ReleaseStringUTFChars(env, jstr_message, message);
+#endif
+
+#ifdef WINDOWS
+ DWORD len = 0;
+ LPWSTR buffer = NULL;
+ const jchar* message = NULL;
+ jstring jstr_message = NULL;
+ jthrowable obj = NULL;
+
+ len = FormatMessageW(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, *(DWORD*) (&errnum), // reinterpret cast
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPWSTR) &buffer, 0, NULL);
+
+ if (len > 0)
+ {
+ message = (const jchar*) buffer;
+ }
+ else
+ {
+ message = (const jchar*) L"Unknown error.";
+ }
+
+ if ((jstr_message = (*env)->NewString(env, message, len)) == NULL)
+ goto err;
+ LocalFree(buffer);
+ buffer = NULL; // Set buffer to NULL to avoid double free
+
+ obj = (jthrowable)(*env)->NewObject(env, nioe_clazz, nioe_ctor,
+ jstr_message, errnum);
+ if (obj == NULL) goto err;
+
+ (*env)->Throw(env, obj);
+ return;
+
+err:
+ if (jstr_message != NULL)
+ (*env)->ReleaseStringChars(env, jstr_message, message);
+ LocalFree(buffer);
+ return;
+#endif
}
-
+#ifdef UNIX
/*
* Determine how big a buffer we need for reentrant getpwuid_r and getgrnam_r
*/
@@ -503,6 +713,104 @@ ssize_t get_pw_buflen() {
#endif
return (ret > 512) ? ret : 512;
}
+#endif
+
+
+/*
+ * Class: org_apache_hadoop_io_nativeio_NativeIO_Windows
+ * Method: getOwner
+ * Signature: (Ljava/io/FileDescriptor;)Ljava/lang/String;
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
+ */
+JNIEXPORT jstring JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_getOwner
+ (JNIEnv *env, jclass clazz, jobject fd_object)
+{
+#ifdef UNIX
+ THROW(env, "java/io/IOException",
+ "The function Windows.getOwner() is not supported on Unix");
+ return NULL;
+#endif
+
+#ifdef WINDOWS
+ PSID pSidOwner = NULL;
+ PSECURITY_DESCRIPTOR pSD = NULL;
+ LPWSTR ownerName = (LPWSTR)NULL;
+ DWORD dwRtnCode = ERROR_SUCCESS;
+ jstring jstr_username = NULL;
+ HANDLE hFile = (HANDLE) fd_get(env, fd_object);
+ PASS_EXCEPTIONS_GOTO(env, cleanup);
+
+ dwRtnCode = GetSecurityInfo(
+ hFile,
+ SE_FILE_OBJECT,
+ OWNER_SECURITY_INFORMATION,
+ &pSidOwner,
+ NULL,
+ NULL,
+ NULL,
+ &pSD);
+ if (dwRtnCode != ERROR_SUCCESS) {
+ throw_ioe(env, dwRtnCode);
+ goto cleanup;
+ }
+
+ dwRtnCode = GetAccntNameFromSid(pSidOwner, &ownerName);
+ if (dwRtnCode != ERROR_SUCCESS) {
+ throw_ioe(env, dwRtnCode);
+ goto cleanup;
+ }
+
+ jstr_username = (*env)->NewString(env, ownerName, (jsize) wcslen(ownerName));
+ if (jstr_username == NULL) goto cleanup;
+
+cleanup:
+ LocalFree(ownerName);
+ LocalFree(pSD);
+ return jstr_username;
+#endif
+}
+
+/*
+ * Class: org_apache_hadoop_io_nativeio_NativeIO_Windows
+ * Method: setFilePointer
+ * Signature: (Ljava/io/FileDescriptor;JJ)J
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
+ */
+JNIEXPORT jlong JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_setFilePointer
+ (JNIEnv *env, jclass clazz, jobject fd_object, jlong distanceToMove, jlong moveMethod)
+{
+#ifdef UNIX
+ THROW(env, "java/io/IOException",
+ "The function setFilePointer(FileDescriptor) is not supported on Unix");
+ return NULL;
+#endif
+
+#ifdef WINDOWS
+ DWORD distanceToMoveLow = (DWORD) distanceToMove;
+ LONG distanceToMoveHigh = (LONG) (distanceToMove >> 32);
+ DWORD distanceMovedLow = 0;
+ HANDLE hFile = (HANDLE) fd_get(env, fd_object);
+ PASS_EXCEPTIONS_GOTO(env, cleanup);
+
+ distanceMovedLow = SetFilePointer(hFile,
+ distanceToMoveLow, &distanceToMoveHigh, (DWORD) moveMethod);
+
+ if (distanceMovedLow == INVALID_SET_FILE_POINTER) {
+ throw_ioe(env, GetLastError());
+ return -1;
+ }
+
+cleanup:
+
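+ // SetFilePointer returns the low 32 bits of the new file pointer and writes
+ // the high 32 bits back into distanceToMoveHigh; recombine the two halves
+ // for the jlong result.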
+ return ((jlong) distanceToMoveHigh << 32) | (jlong) distanceMovedLow;
+#endif
+}
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_renameTo0(JNIEnv *env,
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c
index f2c5509d57..17a3b1e7c8 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c
@@ -14,7 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
+
#include
#include "file_descriptor.h"
#include "org_apache_hadoop.h"
@@ -26,6 +26,10 @@ static jfieldID fd_descriptor;
// the no-argument constructor
static jmethodID fd_constructor;
+#ifdef WINDOWS
+// the internal field for the long handle
+static jfieldID fd_handle;
+#endif
void fd_init(JNIEnv* env)
{
@@ -37,6 +41,12 @@ void fd_init(JNIEnv* env)
fd_descriptor = (*env)->GetFieldID(env, fd_class, "fd", "I");
PASS_EXCEPTIONS(env);
+
+#ifdef WINDOWS
+ fd_handle = (*env)->GetFieldID(env, fd_class, "handle", "J");
+ PASS_EXCEPTIONS(env);
+#endif
+
 fd_constructor = (*env)->GetMethodID(env, fd_class, "<init>", "()V");
}
@@ -46,9 +56,13 @@ void fd_deinit(JNIEnv *env) {
fd_class = NULL;
}
fd_descriptor = NULL;
+#ifdef WINDOWS
+ fd_handle = NULL;
+#endif
fd_constructor = NULL;
}
+#ifdef UNIX
/*
* Given an instance 'obj' of java.io.FileDescriptor, return the
* underlying fd, or throw if unavailable
@@ -71,4 +85,31 @@ jobject fd_create(JNIEnv *env, int fd) {
(*env)->SetIntField(env, obj, fd_descriptor, fd);
return obj;
-}
+}
+#endif
+
+#ifdef WINDOWS
+/*
+ * Given an instance 'obj' of java.io.FileDescriptor, return the
+ * underlying native file handle, or throw if unavailable
+ */
+long fd_get(JNIEnv* env, jobject obj) {
+ if (obj == NULL) {
+ THROW(env, "java/lang/NullPointerException",
+ "FileDescriptor object is null");
+ return -1;
+ }
+ return (long) (*env)->GetLongField(env, obj, fd_handle);
+}
+
+/*
+ * Create a FileDescriptor object corresponding to the given long handle
+ */
+jobject fd_create(JNIEnv *env, long fd) {
+ jobject obj = (*env)->NewObject(env, fd_class, fd_constructor);
+ PASS_EXCEPTIONS_RET(env, (jobject) NULL);
+
+ (*env)->SetLongField(env, obj, fd_handle, fd);
+ return obj;
+}
+#endif
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.h
index 3f689493bc..38fc09f652 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.h
@@ -18,11 +18,19 @@
#define FILE_DESCRIPTOR_H
#include
+#include "org_apache_hadoop.h"
void fd_init(JNIEnv *env);
void fd_deinit(JNIEnv *env);
+#ifdef UNIX
int fd_get(JNIEnv* env, jobject obj);
jobject fd_create(JNIEnv *env, int fd);
+#endif
+
+#ifdef WINDOWS
+long fd_get(JNIEnv* env, jobject obj);
+jobject fd_create(JNIEnv *env, long fd);
+#endif
#endif
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMappingWin.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMappingWin.c
new file mode 100644
index 0000000000..64d0fca6a9
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsMappingWin.c
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include
+#include "org_apache_hadoop.h"
+#include "org_apache_hadoop_security_JniBasedUnixGroupsMapping.h"
+
+#include
+#include
+#include "winutils.h"
+
+static jobjectArray emptyGroups = NULL;
+
+/*
+ * Throw a java.io.IOException, generating the message from the given Windows error code.
+ */
+static void throw_ioexception(JNIEnv* env, DWORD errnum)
+{
+ DWORD len = 0;
+ LPSTR buffer = NULL;
+ const char* message = NULL;
+
+ len = FormatMessageA(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, *(DWORD*) (&errnum), // reinterpret cast
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR*)&buffer, 0, NULL);
+
+ if (len > 0)
+ {
+ message = buffer;
+ }
+ else
+ {
+ message = "Unknown error.";
+ }
+
+ THROW(env, "java/io/IOException", message);
+
+ LocalFree(buffer);
+
+ return;
+}
+
+JNIEXPORT jobjectArray JNICALL
+Java_org_apache_hadoop_security_JniBasedUnixGroupsMapping_getGroupForUser
+(JNIEnv *env, jobject jobj, jstring juser) {
+ const WCHAR *user = NULL;
+ jobjectArray jgroups = NULL;
+ DWORD dwRtnCode = ERROR_SUCCESS;
+
+ LPLOCALGROUP_USERS_INFO_0 groups = NULL;
+ LPLOCALGROUP_USERS_INFO_0 tmpGroups = NULL;
+ DWORD ngroups = 0;
+
+ int i;
+
+ if (emptyGroups == NULL) {
+ jobjectArray lEmptyGroups = (jobjectArray)(*env)->NewObjectArray(env, 0,
+ (*env)->FindClass(env, "java/lang/String"), NULL);
+ if (lEmptyGroups == NULL) {
+ goto cleanup;
+ }
+ emptyGroups = (*env)->NewGlobalRef(env, lEmptyGroups);
+ if (emptyGroups == NULL) {
+ goto cleanup;
+ }
+ }
+ user = (*env)->GetStringChars(env, juser, NULL);
+ if (user == NULL) {
+ THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for user buffer");
+ goto cleanup;
+ }
+
+ dwRtnCode = GetLocalGroupsForUser(user, &groups, &ngroups);
+ if (dwRtnCode != ERROR_SUCCESS) {
+ throw_ioexception(env, dwRtnCode);
+ goto cleanup;
+ }
+
+ jgroups = (jobjectArray)(*env)->NewObjectArray(env, ngroups,
+ (*env)->FindClass(env, "java/lang/String"), NULL);
+ if (jgroups == NULL) {
+ THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for group buffer");
+ goto cleanup;
+ }
+
+ // use a tmp pointer to iterate over groups and keep the original pointer
+ // for memory deallocation
+ tmpGroups = groups;
+
+ // fill the output string array
+ for (i = 0; i < ngroups; i++) {
+ jsize groupStringLen = (jsize)wcslen(tmpGroups->lgrui0_name);
+ jstring jgrp = (*env)->NewString(env, tmpGroups->lgrui0_name, groupStringLen);
+ if (jgrp == NULL) {
+ THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for groups buffer");
+ goto cleanup;
+ }
+ (*env)->SetObjectArrayElement(env, jgroups, i, jgrp);
+ // move on to the next group
+ tmpGroups++;
+ }
+
+cleanup:
+ if (groups != NULL) NetApiBufferFree(groups);
+
+ if (user != NULL) {
+ (*env)->ReleaseStringChars(env, juser, user);
+ }
+
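+ // If the group lookup failed, throw_ioexception has already raised the
+ // error above; return the shared empty array rather than NULL so the
+ // caller still receives a valid (empty) array.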
+ if (dwRtnCode == ERROR_SUCCESS) {
+ return jgroups;
+ } else {
+ return emptyGroups;
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
index 4edb151630..738129e24b 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
@@ -16,7 +16,11 @@
* limitations under the License.
*/
+#include "org_apache_hadoop.h"
+
+#ifdef UNIX
#include "config.h"
+#endif // UNIX
#include
@@ -28,4 +32,4 @@ JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSup
#else
return JNI_FALSE;
#endif
-}
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
index 9934d4ff51..cba25fa304 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
@@ -16,18 +16,22 @@
* limitations under the License.
*/
-#include
+#include "org_apache_hadoop.h"
+#include "org_apache_hadoop_util_NativeCrc32.h"
+
#include
-#include
#include
#include
#include
-#include
+#ifdef UNIX
+#include
+#include
+#include
#include "config.h"
-#include "org_apache_hadoop.h"
-#include "org_apache_hadoop_util_NativeCrc32.h"
#include "gcc_optimizations.h"
+#endif // UNIX
+
#include "bulk_crc32.h"
static void throw_checksum_exception(JNIEnv *env,
@@ -36,6 +40,9 @@ static void throw_checksum_exception(JNIEnv *env,
char message[1024];
jstring jstr_message;
char *filename;
+ jclass checksum_exception_clazz;
+ jmethodID checksum_exception_ctor;
+ jthrowable obj;
// Get filename as C string, or "null" if not provided
if (j_filename == NULL) {
@@ -50,28 +57,38 @@ static void throw_checksum_exception(JNIEnv *env,
}
// Format error message
+#ifdef WINDOWS
+ _snprintf_s(
+ message,
+ sizeof(message),
+ _TRUNCATE,
+ "Checksum error: %s at %I64d exp: %d got: %d",
+ filename, pos, expected_crc, got_crc);
+#else
snprintf(message, sizeof(message),
"Checksum error: %s at %"PRId64" exp: %"PRId32" got: %"PRId32,
filename, pos, expected_crc, got_crc);
+#endif // WINDOWS
+
if ((jstr_message = (*env)->NewStringUTF(env, message)) == NULL) {
goto cleanup;
}
// Throw exception
- jclass checksum_exception_clazz = (*env)->FindClass(
+ checksum_exception_clazz = (*env)->FindClass(
env, "org/apache/hadoop/fs/ChecksumException");
if (checksum_exception_clazz == NULL) {
goto cleanup;
}
- jmethodID checksum_exception_ctor = (*env)->GetMethodID(env,
+ checksum_exception_ctor = (*env)->GetMethodID(env,
checksum_exception_clazz, "",
"(Ljava/lang/String;J)V");
if (checksum_exception_ctor == NULL) {
goto cleanup;
}
- jthrowable obj = (jthrowable)(*env)->NewObject(env, checksum_exception_clazz,
+ obj = (jthrowable)(*env)->NewObject(env, checksum_exception_clazz,
checksum_exception_ctor, jstr_message, pos);
if (obj == NULL) goto cleanup;
@@ -103,6 +120,14 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeVerifyChunk
jobject j_data, jint data_offset, jint data_len,
jstring j_filename, jlong base_pos)
{
+ uint8_t *sums_addr;
+ uint8_t *data_addr;
+ uint32_t *sums;
+ uint8_t *data;
+ int crc_type;
+ crc32_error_t error_data;
+ int ret;
+
if (unlikely(!j_sums || !j_data)) {
THROW(env, "java/lang/NullPointerException",
"input ByteBuffers must not be null");
@@ -110,8 +135,8 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeVerifyChunk
}
// Convert direct byte buffers to C pointers
- uint8_t *sums_addr = (*env)->GetDirectBufferAddress(env, j_sums);
- uint8_t *data_addr = (*env)->GetDirectBufferAddress(env, j_data);
+ sums_addr = (*env)->GetDirectBufferAddress(env, j_sums);
+ data_addr = (*env)->GetDirectBufferAddress(env, j_data);
if (unlikely(!sums_addr || !data_addr)) {
THROW(env, "java/lang/IllegalArgumentException",
@@ -129,16 +154,15 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeVerifyChunk
return;
}
- uint32_t *sums = (uint32_t *)(sums_addr + sums_offset);
- uint8_t *data = data_addr + data_offset;
+ sums = (uint32_t *)(sums_addr + sums_offset);
+ data = data_addr + data_offset;
// Convert to correct internal C constant for CRC type
- int crc_type = convert_java_crc_type(env, j_crc_type);
+ crc_type = convert_java_crc_type(env, j_crc_type);
if (crc_type == -1) return; // exception already thrown
// Setup complete. Actually verify checksums.
- crc32_error_t error_data;
- int ret = bulk_verify_crc(data, data_len, sums, crc_type,
+ ret = bulk_verify_crc(data, data_len, sums, crc_type,
bytes_per_checksum, &error_data);
if (likely(ret == CHECKSUMS_VALID)) {
return;
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
index 74f79dd35d..3e76b72155 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
@@ -21,25 +21,31 @@
* All rights reserved. Use of this source code is governed by a
* BSD-style license that can be found in the LICENSE file.
*/
+
+#include "org_apache_hadoop.h"
+
#include
-#include
#include
#include
+
+#ifdef UNIX
+#include
#include
+#endif // UNIX
#include "crc32_zlib_polynomial_tables.h"
#include "crc32c_tables.h"
#include "bulk_crc32.h"
#include "gcc_optimizations.h"
-#ifndef __FreeBSD__
+#if (!defined(__FreeBSD__) && !defined(WINDOWS))
#define USE_PIPELINED
#endif
#define CRC_INITIAL_VAL 0xffffffff
typedef uint32_t (*crc_update_func_t)(uint32_t, const uint8_t *, size_t);
-static inline uint32_t crc_val(uint32_t crc);
+static uint32_t crc_val(uint32_t crc);
static uint32_t crc32_zlib_sb8(uint32_t crc, const uint8_t *buf, size_t length);
static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length);
@@ -187,7 +193,7 @@ return_crc_error:
/**
* Extract the final result of a CRC
*/
-static inline uint32_t crc_val(uint32_t crc) {
+uint32_t crc_val(uint32_t crc) {
return ~crc;
}
@@ -200,11 +206,13 @@ static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length) {
uint32_t end_bytes = length - running_length;
int li;
for (li=0; li < running_length/8; li++) {
+ uint32_t term1;
+ uint32_t term2;
crc ^= *(uint32_t *)buf;
buf += 4;
- uint32_t term1 = CRC32C_T8_7[crc & 0x000000FF] ^
+ term1 = CRC32C_T8_7[crc & 0x000000FF] ^
CRC32C_T8_6[(crc >> 8) & 0x000000FF];
- uint32_t term2 = crc >> 16;
+ term2 = crc >> 16;
crc = term1 ^
CRC32C_T8_5[term2 & 0x000000FF] ^
CRC32C_T8_4[(term2 >> 8) & 0x000000FF];
@@ -234,11 +242,13 @@ static uint32_t crc32_zlib_sb8(
uint32_t end_bytes = length - running_length;
int li;
for (li=0; li < running_length/8; li++) {
+ uint32_t term1;
+ uint32_t term2;
crc ^= *(uint32_t *)buf;
buf += 4;
- uint32_t term1 = CRC32_T8_7[crc & 0x000000FF] ^
+ term1 = CRC32_T8_7[crc & 0x000000FF] ^
CRC32_T8_6[(crc >> 8) & 0x000000FF];
- uint32_t term2 = crc >> 16;
+ term2 = crc >> 16;
crc = term1 ^
CRC32_T8_5[term2 & 0x000000FF] ^
CRC32_T8_4[(term2 >> 8) & 0x000000FF];
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.h
index 44cf52eaec..fce5358d64 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.h
@@ -19,7 +19,10 @@
#define BULK_CRC32_H_INCLUDED
#include
+
+#ifdef UNIX
#include /* for size_t */
+#endif // UNIX
// Constants for different CRC algorithms
#define CRC32C_POLYNOMIAL 1
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h b/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
index a50c41dbbb..bc353e15cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
@@ -17,19 +17,22 @@
*/
/**
- * This file includes some common utilities
+ * This file includes some common utilities
* for all native code used in hadoop.
*/
-
+
#if !defined ORG_APACHE_HADOOP_H
#define ORG_APACHE_HADOOP_H
-#include
-#include
+#if defined(_WIN32)
+#undef UNIX
+#define WINDOWS
+#else
+#undef WINDOWS
+#define UNIX
+#endif
-#include "config.h"
-
-/* A helper macro to 'throw' a java exception. */
+/* A helper macro to 'throw' a java exception. */
#define THROW(env, exception_name, message) \
{ \
jclass ecls = (*env)->FindClass(env, exception_name); \
@@ -55,13 +58,21 @@
if ((*env)->ExceptionCheck(env)) return (ret); \
}
-/**
- * A helper function to dlsym a 'symbol' from a given library-handle.
- *
+/**
+ * Unix definitions
+ */
+#ifdef UNIX
+#include
+#include
+#include
+
+/**
+ * A helper function to dlsym a 'symbol' from a given library-handle.
+ *
* @param env jni handle to report contingencies.
* @param handle handle to the dlopen'ed library.
* @param symbol symbol to load.
- * @return returns the address where the symbol is loaded in memory,
+ * @return returns the address where the symbol is loaded in memory,
* NULL on error.
*/
static __attribute__ ((unused))
@@ -84,6 +95,76 @@ void *do_dlsym(JNIEnv *env, void *handle, const char *symbol) {
if ((func_ptr = do_dlsym(env, handle, symbol)) == NULL) { \
return; \
}
+#endif
+// Unix part end
+
+
+/**
+ * Windows definitions
+ */
+#ifdef WINDOWS
+
+/* Force using Unicode throughout the code */
+#ifndef UNICODE
+#define UNICODE
+#endif
+
+/* Microsoft C Compiler does not support the C99 inline keyword */
+#ifndef __cplusplus
+#define inline __inline
+#endif // __cplusplus
+
+/* Optimization macros supported by GCC but for which there is no
+ direct equivalent in the Microsoft C compiler */
+#define likely(_c) (_c)
+#define unlikely(_c) (_c)
+
+/* Disable certain warnings in the native CRC32 code. */
+#pragma warning(disable:4018) // Signed/unsigned mismatch.
+#pragma warning(disable:4244) // Possible loss of data in conversion.
+#pragma warning(disable:4267) // Possible loss of data.
+#pragma warning(disable:4996) // Use of deprecated function.
+
+#include
+#include
+#include
+
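+/* Note: this maps snprintf onto _snprintf_s for calls that pass exactly one
+   argument after the format string; it is not a general-purpose replacement. */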
+#define snprintf(a, b, c, d) _snprintf_s((a), (b), _TRUNCATE, (c), (d))
+
+/* A helper macro to dlsym the requisite dynamic symbol and bail-out on error. */
+#define LOAD_DYNAMIC_SYMBOL(func_type, func_ptr, env, handle, symbol) \
+ if ((func_ptr = (func_type) do_dlsym(env, handle, symbol)) == NULL) { \
+ return; \
+ }
+
+/**
+ * A helper function to dynamically load a 'symbol' from a given library-handle.
+ *
+ * @param env jni handle to report contingencies.
+ * @param handle handle to the dynamic library.
+ * @param symbol symbol to load.
+ * @return returns the address where the symbol is loaded in memory,
+ * NULL on error.
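+ *
+ * Illustrative use via the LOAD_DYNAMIC_SYMBOL macro above (the typedef,
+ * pointer, and handle names here are hypothetical):
+ *   LOAD_DYNAMIC_SYMBOL(__dlsym_inflate, dlsym_inflate, env, libz, "inflate");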
+ */
+static FARPROC WINAPI do_dlsym(JNIEnv *env, HMODULE handle, LPCSTR symbol) {
+ DWORD dwErrorCode = ERROR_SUCCESS;
+ FARPROC func_ptr = NULL;
+
+ if (!env || !handle || !symbol) {
+ THROW(env, "java/lang/InternalError", NULL);
+ return NULL;
+ }
+
+ func_ptr = GetProcAddress(handle, symbol);
+ if (func_ptr == NULL)
+ {
+ THROW(env, "java/lang/UnsatisfiedLinkError", symbol);
+ }
+ return func_ptr;
+}
+#endif
+// Windows part end
+
#define LOCK_CLASS(env, clazz, classname) \
if ((*env)->MonitorEnter(env, clazz) != 0) { \
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/test/org/apache/hadoop/util/test_bulk_crc32.c b/hadoop-common-project/hadoop-common/src/main/native/src/test/org/apache/hadoop/util/test_bulk_crc32.c
index ff7753718c..c962337830 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/test/org/apache/hadoop/util/test_bulk_crc32.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/test/org/apache/hadoop/util/test_bulk_crc32.c
@@ -16,6 +16,8 @@
* limitations under the License.
*/
+#include "org_apache_hadoop.h"
+
#include "bulk_crc32.h"
#include
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/chmod.c b/hadoop-common-project/hadoop-common/src/main/winutils/chmod.c
new file mode 100644
index 0000000000..98788bafde
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/chmod.c
@@ -0,0 +1,893 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements. See the NOTICE file distributed with this
+* work for additional information regarding copyright ownership. The ASF
+* licenses this file to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations under
+* the License.
+*/
+
+#include "winutils.h"
+#include
+
+enum CHMOD_WHO
+{
+ CHMOD_WHO_NONE = 0,
+ CHMOD_WHO_OTHER = 07,
+ CHMOD_WHO_GROUP = 070,
+ CHMOD_WHO_USER = 0700,
+ CHMOD_WHO_ALL = CHMOD_WHO_OTHER | CHMOD_WHO_GROUP | CHMOD_WHO_USER
+};
+
+enum CHMOD_OP
+{
+ CHMOD_OP_INVALID,
+ CHMOD_OP_PLUS,
+ CHMOD_OP_MINUS,
+ CHMOD_OP_EQUAL,
+};
+
+enum CHMOD_PERM
+{
+ CHMOD_PERM_NA = 00,
+ CHMOD_PERM_R = 01,
+ CHMOD_PERM_W = 02,
+ CHMOD_PERM_X = 04,
+ CHMOD_PERM_LX = 010,
+};
+
+/*
+ * We use the following struct to build a linked list of mode change actions.
+ * The mode is described by the following grammar:
+ * mode ::= clause [, clause ...]
+ * clause ::= [who ...] [action ...]
+ * action ::= op [perm ...] | op [ref]
+ * who ::= a | u | g | o
+ * op ::= + | - | =
+ * perm ::= r | w | x | X
+ * ref ::= u | g | o
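+ *
+ * For example (illustrative), the mode string "u+rwx,go=rx" consists of two
+ * clauses: "u+rwx" (who=u, op='+', perm=rwx) and "go=rx" (who=go, op='=',
+ * perm=rx).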
+ */
+typedef struct _MODE_CHANGE_ACTION
+{
+ USHORT who;
+ USHORT op;
+ USHORT perm;
+ USHORT ref;
+ struct _MODE_CHANGE_ACTION *next_action;
+} MODE_CHANGE_ACTION, *PMODE_CHANGE_ACTION;
+
+const MODE_CHANGE_ACTION INIT_MODE_CHANGE_ACTION = {
+ CHMOD_WHO_NONE, CHMOD_OP_INVALID, CHMOD_PERM_NA, CHMOD_WHO_NONE, NULL
+};
+
+static BOOL ParseOctalMode(LPCWSTR tsMask, INT *uMask);
+
+static BOOL ParseMode(LPCWSTR modeString, PMODE_CHANGE_ACTION *actions);
+
+static BOOL FreeActions(PMODE_CHANGE_ACTION actions);
+
+static BOOL ParseCommandLineArguments(__in int argc, __in wchar_t *argv[],
+ __out BOOL *rec, __out_opt INT *mask,
+ __out_opt PMODE_CHANGE_ACTION *actions, __out LPCWSTR *path);
+
+static BOOL ChangeFileModeByActions(__in LPCWSTR path,
+ PMODE_CHANGE_ACTION actions);
+
+static BOOL ChangeFileMode(__in LPCWSTR path, __in_opt INT mode,
+ __in_opt PMODE_CHANGE_ACTION actions);
+
+static BOOL ChangeFileModeRecursively(__in LPCWSTR path, __in_opt INT mode,
+ __in_opt PMODE_CHANGE_ACTION actions);
+
+
+//----------------------------------------------------------------------------
+// Function: Chmod
+//
+// Description:
+// The main method for chmod command
+//
+// Returns:
+// 0: on success
+//
+// Notes:
+//
+int Chmod(int argc, wchar_t *argv[])
+{
+ LPWSTR pathName = NULL;
+ LPWSTR longPathName = NULL;
+
+ BOOL recursive = FALSE;
+
+ PMODE_CHANGE_ACTION actions = NULL;
+
+ INT unixAccessMask = 0;
+
+ DWORD dwRtnCode = 0;
+
+ int ret = EXIT_FAILURE;
+
+ // Parsing chmod arguments
+ //
+ if (!ParseCommandLineArguments(argc, argv,
+ &recursive, &unixAccessMask, &actions, &pathName))
+ {
+ fwprintf(stderr, L"Incorrect command line arguments.\n\n");
+ ChmodUsage(argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ // Convert the path to the long path
+ //
+ dwRtnCode = ConvertToLongPath(pathName, &longPathName);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"ConvertToLongPath", dwRtnCode);
+ goto ChmodEnd;
+ }
+
+ if (!recursive)
+ {
+ if (ChangeFileMode(longPathName, unixAccessMask, actions))
+ {
+ ret = EXIT_SUCCESS;
+ }
+ }
+ else
+ {
+ if (ChangeFileModeRecursively(longPathName, unixAccessMask, actions))
+ {
+ ret = EXIT_SUCCESS;
+ }
+ }
+
+ChmodEnd:
+ FreeActions(actions);
+ LocalFree(longPathName);
+
+ return ret;
+}
+
+//----------------------------------------------------------------------------
+// Function: ChangeFileMode
+//
+// Description:
+// Wrapper function for change file mode. Choose either change by action or by
+// access mask.
+//
+// Returns:
+// TRUE: on success
+// FALSE: otherwise
+//
+// Notes:
+//
+static BOOL ChangeFileMode(__in LPCWSTR path, __in_opt INT unixAccessMask,
+ __in_opt PMODE_CHANGE_ACTION actions)
+{
+ if (actions != NULL)
+ return ChangeFileModeByActions(path, actions);
+ else
+ {
+ DWORD dwRtnCode = ChangeFileModeByMask(path, unixAccessMask);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"ChangeFileModeByMask", dwRtnCode);
+ return FALSE;
+ }
+ return TRUE;
+ }
+}
+
+//----------------------------------------------------------------------------
+// Function: ChangeFileModeRecursively
+//
+// Description:
+// Traverse the directory recursively to change the permissions.
+//
+// Returns:
+// TRUE: on success
+// FALSE: otherwise
+//
+// Notes:
+// The recursion works in the following way:
+// - If the path is not a directory, change its mode and return.
+// Symbolic links and junction points are not considered as directories.
+// - Otherwise, call the method on all its children, then change its mode.
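+//   For example (illustrative), for a directory D containing a file f and a
+//   subdirectory S, everything under D (f, S, and the contents of S) has its
+//   mode changed first, and D itself is changed last.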
+//
+static BOOL ChangeFileModeRecursively(__in LPCWSTR path, __in_opt INT mode,
+ __in_opt PMODE_CHANGE_ACTION actions)
+{
+ BOOL isDir = FALSE;
+ BOOL isSymlink = FALSE;
+ LPWSTR dir = NULL;
+
+ size_t pathSize = 0;
+ size_t dirSize = 0;
+
+ HANDLE hFind = INVALID_HANDLE_VALUE;
+ WIN32_FIND_DATA ffd;
+ DWORD dwRtnCode = ERROR_SUCCESS;
+ BOOL ret = FALSE;
+
+ if ((dwRtnCode = DirectoryCheck(path, &isDir)) != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"IsDirectory", dwRtnCode);
+ return FALSE;
+ }
+ if ((dwRtnCode = SymbolicLinkCheck(path, &isSymlink)) != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"IsSymbolicLink", dwRtnCode);
+ return FALSE;
+ }
+
+ if (isSymlink || !isDir)
+ {
+ if (ChangeFileMode(path, mode, actions))
+ return TRUE;
+ else
+ return FALSE;
+ }
+
+ if (FAILED(StringCchLengthW(path, STRSAFE_MAX_CCH - 3, &pathSize)))
+ {
+ return FALSE;
+ }
+ dirSize = pathSize + 3;
+ dir = (LPWSTR)LocalAlloc(LPTR, dirSize * sizeof(WCHAR));
+ if (dir == NULL)
+ {
+ ReportErrorCode(L"LocalAlloc", GetLastError());
+ goto ChangeFileModeRecursivelyEnd;
+ }
+
+ if (FAILED(StringCchCopyW(dir, dirSize, path)) ||
+ FAILED(StringCchCatW(dir, dirSize, L"\\*")))
+ {
+ goto ChangeFileModeRecursivelyEnd;
+ }
+
+ hFind = FindFirstFile(dir, &ffd);
+ if (hFind == INVALID_HANDLE_VALUE)
+ {
+ ReportErrorCode(L"FindFirstFile", GetLastError());
+ goto ChangeFileModeRecursivelyEnd;
+ }
+
+ do
+ {
+ LPWSTR filename = NULL;
+ LPWSTR longFilename = NULL;
+ size_t filenameSize = 0;
+
+ if (wcscmp(ffd.cFileName, L".") == 0 ||
+ wcscmp(ffd.cFileName, L"..") == 0)
+ continue;
+
+ filenameSize = pathSize + wcslen(ffd.cFileName) + 2;
+ filename = (LPWSTR)LocalAlloc(LPTR, filenameSize * sizeof(WCHAR));
+ if (filename == NULL)
+ {
+ ReportErrorCode(L"LocalAlloc", GetLastError());
+ goto ChangeFileModeRecursivelyEnd;
+ }
+
+ if (FAILED(StringCchCopyW(filename, filenameSize, path)) ||
+ FAILED(StringCchCatW(filename, filenameSize, L"\\")) ||
+ FAILED(StringCchCatW(filename, filenameSize, ffd.cFileName)))
+ {
+ LocalFree(filename);
+ goto ChangeFileModeRecursivelyEnd;
+ }
+
+ // The child filename is not prepended with the long path prefix.
+ // Convert the filename to long path format.
+ //
+ dwRtnCode = ConvertToLongPath(filename, &longFilename);
+ LocalFree(filename);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"ConvertToLongPath", dwRtnCode);
+ LocalFree(longFilename);
+ goto ChangeFileModeRecursivelyEnd;
+ }
+
+ if(!ChangeFileModeRecursively(longFilename, mode, actions))
+ {
+ LocalFree(longFilename);
+ goto ChangeFileModeRecursivelyEnd;
+ }
+
+ LocalFree(longFilename);
+
+ } while (FindNextFileW(hFind, &ffd));
+
+ if (!ChangeFileMode(path, mode, actions))
+ {
+ goto ChangeFileModeRecursivelyEnd;
+ }
+
+ ret = TRUE;
+
+ChangeFileModeRecursivelyEnd:
+ LocalFree(dir);
+
+ return ret;
+}
+
+//----------------------------------------------------------------------------
+// Function: ParseCommandLineArguments
+//
+// Description:
+// Parse command line arguments for chmod.
+//
+// Returns:
+// TRUE: on success
+// FALSE: otherwise
+//
+// Notes:
+// 1. Recursive is only set on directories
+// 2. 'actions' is NULL if the mode is octal
+//
+static BOOL ParseCommandLineArguments(__in int argc, __in wchar_t *argv[],
+ __out BOOL *rec,
+ __out_opt INT *mask,
+ __out_opt PMODE_CHANGE_ACTION *actions,
+ __out LPCWSTR *path)
+{
+ LPCWSTR maskString;
+ BY_HANDLE_FILE_INFORMATION fileInfo;
+ DWORD dwRtnCode = ERROR_SUCCESS;
+
+ assert(path != NULL);
+
+ if (argc != 3 && argc != 4)
+ return FALSE;
+
+ *rec = FALSE;
+ if (argc == 4)
+ {
+ maskString = argv[2];
+ *path = argv[3];
+
+ if (wcscmp(argv[1], L"-R") == 0)
+ {
+ // Check if the given path name is a file or directory
+ // Only set recursive flag if the given path is a directory
+ //
+ dwRtnCode = GetFileInformationByName(*path, FALSE, &fileInfo);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"GetFileInformationByName", dwRtnCode);
+ return FALSE;
+ }
+
+ if (IsDirFileInfo(&fileInfo))
+ {
+ *rec = TRUE;
+ }
+ }
+ else
+ return FALSE;
+ }
+ else
+ {
+ maskString = argv[1];
+ *path = argv[2];
+ }
+
+ if (ParseOctalMode(maskString, mask))
+ {
+ return TRUE;
+ }
+ else if (ParseMode(maskString, actions))
+ {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//----------------------------------------------------------------------------
+// Function: FreeActions
+//
+// Description:
+// Free a linked list of mode change actions given the head node.
+//
+// Returns:
+// TRUE: on success
+// FALSE: otherwise
+//
+// Notes:
+// none
+//
+static BOOL FreeActions(PMODE_CHANGE_ACTION actions)
+{
+ PMODE_CHANGE_ACTION curr = NULL;
+ PMODE_CHANGE_ACTION next = NULL;
+
+ // Nothing to free if NULL is passed in
+ //
+ if (actions == NULL)
+ {
+ return TRUE;
+ }
+
+ curr = actions;
+ while (curr != NULL)
+ {
+ next = curr->next_action;
+ LocalFree(curr);
+ curr = next;
+ }
+ actions = NULL;
+
+ return TRUE;
+}
+
+//----------------------------------------------------------------------------
+// Function: ComputeNewMode
+//
+// Description:
+// Compute a new mode based on the old mode and a mode change action.
+//
+// Returns:
+// The newly computed mode
+//
+// Notes:
+// Apply 'rwx' permission mask or reference permission mode according to the
+// '+', '-', or '=' operator.
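+//
+// For example (illustrative): with oldMode = 0640 and the action
+// {who=g, op='+', perm=w}, the computed mask is 0020 and the new mode is 0660.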
+//
+static INT ComputeNewMode(__in INT oldMode,
+ __in USHORT who, __in USHORT op,
+ __in USHORT perm, __in USHORT ref)
+{
+ static const INT readMask = 0444;
+ static const INT writeMask = 0222;
+ static const INT exeMask = 0111;
+
+ INT mask = 0;
+ INT mode = 0;
+
+ // Operations are exclusive, and cannot be invalid
+ //
+ assert(op == CHMOD_OP_EQUAL || op == CHMOD_OP_PLUS || op == CHMOD_OP_MINUS);
+
+ // Nothing needs to be changed if there is no permission or reference
+ //
+ if(perm == CHMOD_PERM_NA && ref == CHMOD_WHO_NONE)
+ {
+ return oldMode;
+ }
+
+ // We should have only permissions or a reference target, not both.
+ //
+ assert((perm != CHMOD_PERM_NA && ref == CHMOD_WHO_NONE) ||
+ (perm == CHMOD_PERM_NA && ref != CHMOD_WHO_NONE));
+
+ if (perm != CHMOD_PERM_NA)
+ {
+ if ((perm & CHMOD_PERM_R) == CHMOD_PERM_R)
+ mask |= readMask;
+ if ((perm & CHMOD_PERM_W) == CHMOD_PERM_W)
+ mask |= writeMask;
+ if ((perm & CHMOD_PERM_X) == CHMOD_PERM_X)
+ mask |= exeMask;
+ if (((perm & CHMOD_PERM_LX) == CHMOD_PERM_LX))
+ {
+ // It applies execute permissions to directories regardless of their
+ // current permissions and applies execute permissions to a file which
+ // already has at least one execute permission bit set (either user,
+ // group or other). It is only really useful when used with '+' and
+ // usually in combination with the -R option for giving group or other
+ // access to a big directory tree without setting execute permission on
+ // normal files (such as text files), which would normally happen if you
+ // just used "chmod -R a+rx .", whereas with 'X' you can do
+ // "chmod -R a+rX ." instead (Source: Wikipedia)
+ //
+ if ((oldMode & UX_DIRECTORY) == UX_DIRECTORY || (oldMode & exeMask))
+ mask |= exeMask;
+ }
+ }
+ else if (ref != CHMOD_WHO_NONE)
+ {
+ mask |= oldMode & ref;
+ switch(ref)
+ {
+ case CHMOD_WHO_GROUP:
+ mask |= mask >> 3;
+ mask |= mask << 3;
+ break;
+ case CHMOD_WHO_OTHER:
+ mask |= mask << 3;
+ mask |= mask << 6;
+ break;
+ case CHMOD_WHO_USER:
+ mask |= mask >> 3;
+ mask |= mask >> 6;
+ break;
+ default:
+ // Reference modes can only be U/G/O and are exclusive
+ assert(FALSE);
+ }
+ }
+
+ mask &= who;
+
+ if (op == CHMOD_OP_EQUAL)
+ {
+ mode = (oldMode & (~who)) | mask;
+ }
+ else if (op == CHMOD_OP_MINUS)
+ {
+ mode = oldMode & (~mask);
+ }
+ else if (op == CHMOD_OP_PLUS)
+ {
+ mode = oldMode | mask;
+ }
+
+ return mode;
+}
+
+//----------------------------------------------------------------------------
+// Function: ConvertActionsToMask
+//
+// Description:
+// Convert a linked list of mode change actions to the Unix permission mask
+// given the head node.
+//
+// Returns:
+// TRUE: on success
+// FALSE: otherwise
+//
+// Notes:
+// none
+//
+static BOOL ConvertActionsToMask(__in LPCWSTR path,
+ __in PMODE_CHANGE_ACTION actions, __out PINT puMask)
+{
+ PMODE_CHANGE_ACTION curr = NULL;
+
+ BY_HANDLE_FILE_INFORMATION fileInformation;
+ DWORD dwErrorCode = ERROR_SUCCESS;
+
+ INT mode = 0;
+
+ dwErrorCode = GetFileInformationByName(path, FALSE, &fileInformation);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"GetFileInformationByName", dwErrorCode);
+ return FALSE;
+ }
+ if (IsDirFileInfo(&fileInformation))
+ {
+ mode |= UX_DIRECTORY;
+ }
+ dwErrorCode = FindFileOwnerAndPermission(path, NULL, NULL, &mode);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"FindFileOwnerAndPermission", dwErrorCode);
+ return FALSE;
+ }
+ *puMask = mode;
+
+ // Nothing to change if NULL is passed in
+ //
+ if (actions == NULL)
+ {
+ return TRUE;
+ }
+
+ for (curr = actions; curr != NULL; curr = curr->next_action)
+ {
+ mode = ComputeNewMode(mode, curr->who, curr->op, curr->perm, curr->ref);
+ }
+
+ *puMask = mode;
+ return TRUE;
+}
+
+//----------------------------------------------------------------------------
+// Function: ChangeFileModeByActions
+//
+// Description:
+// Change a file mode through a list of actions.
+//
+// Returns:
+// TRUE: on success
+// FALSE: otherwise
+//
+// Notes:
+// none
+//
+static BOOL ChangeFileModeByActions(__in LPCWSTR path,
+ PMODE_CHANGE_ACTION actions)
+{
+ INT mask = 0;
+
+ if (ConvertActionsToMask(path, actions, &mask))
+ {
+ DWORD dwRtnCode = ChangeFileModeByMask(path, mask);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"ChangeFileModeByMask", dwRtnCode);
+ return FALSE;
+ }
+ return TRUE;
+ }
+ else
+ return FALSE;
+}
+
+//----------------------------------------------------------------------------
+// Function: ParseMode
+//
+// Description:
+// Convert a mode string into a linked list of actions
+//
+// Returns:
+// TRUE: on success
+// FALSE: otherwise
+//
+// Notes:
+// Take a state machine approach to parse the mode. Each mode change action
+// will be a node in the output linked list. The state machine has five states,
+// and each transitions only to the next; the end state can transition back to
+// the first state, thus forming a cycle. In each state, if we see a character
+// that does not belong to the state, we move to the next state. WHO, PERM,
+// and REF states are optional; OP and END states are required; and errors
+// will only be reported at the latter two states.
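+//
+// As an illustrative example, the mode string "g-w,o=r" parses into two
+// actions: {who=g, op='-', perm=w} and {who=o, op='=', perm=r}. A clause
+// with no who, such as "+x", defaults to who = a (all), as handled below.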
+//
+static BOOL ParseMode(LPCWSTR modeString, PMODE_CHANGE_ACTION *pActions)
+{
+ enum __PARSE_MODE_ACTION_STATE
+ {
+ PARSE_MODE_ACTION_WHO_STATE,
+ PARSE_MODE_ACTION_OP_STATE,
+ PARSE_MODE_ACTION_PERM_STATE,
+ PARSE_MODE_ACTION_REF_STATE,
+ PARSE_MODE_ACTION_END_STATE
+ } state = PARSE_MODE_ACTION_WHO_STATE;
+
+ MODE_CHANGE_ACTION action = INIT_MODE_CHANGE_ACTION;
+ PMODE_CHANGE_ACTION actionsEnd = NULL;
+ PMODE_CHANGE_ACTION actionsLast = NULL;
+ USHORT lastWho;
+ WCHAR c = 0;
+ size_t len = 0;
+ size_t i = 0;
+
+ assert(modeString != NULL && pActions != NULL);
+
+ if (FAILED(StringCchLengthW(modeString, STRSAFE_MAX_CCH, &len)))
+ {
+ return FALSE;
+ }
+
+ actionsEnd = *pActions;
+ while(i <= len)
+ {
+ c = modeString[i];
+ if (state == PARSE_MODE_ACTION_WHO_STATE)
+ {
+ switch (c)
+ {
+ case L'a':
+ action.who |= CHMOD_WHO_ALL;
+ i++;
+ break;
+ case L'u':
+ action.who |= CHMOD_WHO_USER;
+ i++;
+ break;
+ case L'g':
+ action.who |= CHMOD_WHO_GROUP;
+ i++;
+ break;
+ case L'o':
+ action.who |= CHMOD_WHO_OTHER;
+ i++;
+ break;
+ default:
+ state = PARSE_MODE_ACTION_OP_STATE;
+ } // WHO switch
+ }
+ else if (state == PARSE_MODE_ACTION_OP_STATE)
+ {
+ switch (c)
+ {
+ case L'+':
+ action.op = CHMOD_OP_PLUS;
+ break;
+ case L'-':
+ action.op = CHMOD_OP_MINUS;
+ break;
+ case L'=':
+ action.op = CHMOD_OP_EQUAL;
+ break;
+ default:
+ fwprintf(stderr, L"Invalid mode: '%s'\n", modeString);
+ FreeActions(*pActions);
+ return FALSE;
+ } // OP switch
+ i++;
+ state = PARSE_MODE_ACTION_PERM_STATE;
+ }
+ else if (state == PARSE_MODE_ACTION_PERM_STATE)
+ {
+ switch (c)
+ {
+ case L'r':
+ action.perm |= CHMOD_PERM_R;
+ i++;
+ break;
+ case L'w':
+ action.perm |= CHMOD_PERM_W;
+ i++;
+ break;
+ case L'x':
+ action.perm |= CHMOD_PERM_X;
+ i++;
+ break;
+ case L'X':
+ action.perm |= CHMOD_PERM_LX;
+ i++;
+ break;
+ default:
+ state = PARSE_MODE_ACTION_REF_STATE;
+ } // PERM switch
+ }
+ else if (state == PARSE_MODE_ACTION_REF_STATE)
+ {
+ switch (c)
+ {
+ case L'u':
+ action.ref = CHMOD_WHO_USER;
+ i++;
+ break;
+ case L'g':
+ action.ref = CHMOD_WHO_GROUP;
+ i++;
+ break;
+ case L'o':
+ action.ref = CHMOD_WHO_OTHER;
+ i++;
+ break;
+ default:
+ state = PARSE_MODE_ACTION_END_STATE;
+ } // REF switch
+ }
+ else if (state == PARSE_MODE_ACTION_END_STATE)
+ {
+ switch (c)
+ {
+ case NULL:
+ case L',':
+ i++;
+ case L'+':
+ case L'-':
+ case L'=':
+ state = PARSE_MODE_ACTION_WHO_STATE;
+
+ // Append the current action to the end of the linked list
+ //
+ assert(actionsEnd == NULL);
+ // Allocate memory
+ actionsEnd = (PMODE_CHANGE_ACTION) LocalAlloc(LPTR,
+ sizeof(MODE_CHANGE_ACTION));
+ if (actionsEnd == NULL)
+ {
+ ReportErrorCode(L"LocalAlloc", GetLastError());
+ FreeActions(*pActions);
+ return FALSE;
+ }
+ if (action.who == CHMOD_WHO_NONE) action.who = CHMOD_WHO_ALL;
+ // Copy the action to the new node
+ *actionsEnd = action;
+ // Append to the last node in the linked list
+ if (actionsLast != NULL) actionsLast->next_action = actionsEnd;
+ // pActions should point to the head of the linked list
+ if (*pActions == NULL) *pActions = actionsEnd;
+ // Update the two pointers to point to the last node and the tail
+ actionsLast = actionsEnd;
+ actionsEnd = actionsLast->next_action;
+
+ // Reset action
+ //
+ lastWho = action.who;
+ action = INIT_MODE_CHANGE_ACTION;
+ if (c != L',')
+ {
+ action.who = lastWho;
+ }
+
+ break;
+ default:
+ fwprintf(stderr, L"Invalid mode: '%s'\n", modeString);
+ FreeActions(*pActions);
+ return FALSE;
+ } // END switch
+ }
+ } // while
+ return TRUE;
+}
+
+//----------------------------------------------------------------------------
+// Function: ParseOctalMode
+//
+// Description:
+// Convert the 3- or 4-digit octal Unix mode string into the binary
+// representation of the Unix access mask, i.e. 9 bits, each an indicator of
+// the 'rwxrwxrwx' permissions, i.e. the user's, group's, and others' read,
+// write, and execute/search permissions.
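+// For example, both "644" and "0644" yield the mask 110 100 100 in binary
+// (owner: rw-, group: r--, others: r--).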
+//
+// Returns:
+// TRUE: on success
+// FALSE: otherwise
+//
+// Notes:
+// none
+//
+static BOOL ParseOctalMode(LPCWSTR tsMask, INT *uMask)
+{
+ size_t tsMaskLen = 0;
+ DWORD i;
+ LONG l;
+ WCHAR *end;
+
+ if (uMask == NULL)
+ return FALSE;
+
+ if (FAILED(StringCchLengthW(tsMask, STRSAFE_MAX_CCH, &tsMaskLen)))
+ return FALSE;
+
+ if (tsMaskLen == 0 || tsMaskLen > 4)
+ {
+ return FALSE;
+ }
+
+ for (i = 0; i < tsMaskLen; i++)
+ {
+ if (!(tsMask[tsMaskLen - i - 1] >= L'0' &&
+ tsMask[tsMaskLen - i - 1] <= L'7'))
+ return FALSE;
+ }
+
+ errno = 0;
+ if (tsMaskLen == 4)
+    // Windows does not have any equivalent of the setuid/setgid and sticky
+    // bits, so the first digit is ignored in the 4-digit octal mode case.
+ //
+ l = wcstol(tsMask + 1, &end, 8);
+ else
+ l = wcstol(tsMask, &end, 8);
+
+  if (errno || l > 0777 || l < 0 || *end != 0)
+ {
+ return FALSE;
+ }
+
+ *uMask = (INT) l;
+
+ return TRUE;
+}
+
+void ChmodUsage(LPCWSTR program)
+{
+ fwprintf(stdout, L"\
+Usage: %s [OPTION] OCTAL-MODE [FILE]\n\
+ or: %s [OPTION] MODE [FILE]\n\
+Change the mode of the FILE to MODE.\n\
+\n\
+ -R: change files and directories recursively\n\
+\n\
+Each MODE is of the form '[ugoa]*([-+=]([rwxX]*|[ugo]))+'.\n",
+program, program);
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/chown.c b/hadoop-common-project/hadoop-common/src/main/winutils/chown.c
new file mode 100644
index 0000000000..32ea77aa50
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/chown.c
@@ -0,0 +1,270 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+#include "winutils.h"
+
+//----------------------------------------------------------------------------
+// Function: ChangeFileOwnerBySid
+//
+// Description:
+// Change a file or directory ownership by giving new owner and group SIDs
+//
+// Returns:
+// ERROR_SUCCESS: on success
+// Error code: otherwise
+//
+// Notes:
+// This function is long path safe, i.e. the path will be converted to the
+// long path format if not already converted. So the caller does not need to
+// do the conversion before calling the method.
+//
+static DWORD ChangeFileOwnerBySid(__in LPCWSTR path,
+ __in_opt PSID pNewOwnerSid, __in_opt PSID pNewGroupSid)
+{
+ LPWSTR longPathName = NULL;
+ INT oldMode = 0;
+
+ SECURITY_INFORMATION securityInformation = 0;
+
+ DWORD dwRtnCode = ERROR_SUCCESS;
+
+  // Convert the path to the long path format
+ //
+ dwRtnCode = ConvertToLongPath(path, &longPathName);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ goto ChangeFileOwnerByNameEnd;
+ }
+
+ // Get a pointer to the existing owner information and DACL
+ //
+ dwRtnCode = FindFileOwnerAndPermission(longPathName, NULL, NULL, &oldMode);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ goto ChangeFileOwnerByNameEnd;
+ }
+
+  // We need SeTakeOwnershipPrivilege to set the owner if the caller does not
+  // have WRITE_OWNER access to the object; we need SeRestorePrivilege if the
+  // SID is not contained in the caller's token with the SE_GROUP_OWNER
+  // permission enabled.
+ //
+ if (!EnablePrivilege(L"SeTakeOwnershipPrivilege"))
+ {
+ fwprintf(stdout, L"INFO: The user does not have SeTakeOwnershipPrivilege.\n");
+ }
+ if (!EnablePrivilege(L"SeRestorePrivilege"))
+ {
+ fwprintf(stdout, L"INFO: The user does not have SeRestorePrivilege.\n");
+ }
+
+ assert(pNewOwnerSid != NULL || pNewGroupSid != NULL);
+
+ // Set the owners of the file.
+ //
+ if (pNewOwnerSid != NULL) securityInformation |= OWNER_SECURITY_INFORMATION;
+ if (pNewGroupSid != NULL) securityInformation |= GROUP_SECURITY_INFORMATION;
+ dwRtnCode = SetNamedSecurityInfoW(
+ longPathName,
+ SE_FILE_OBJECT,
+ securityInformation,
+ pNewOwnerSid,
+ pNewGroupSid,
+ NULL,
+ NULL);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ goto ChangeFileOwnerByNameEnd;
+ }
+
+ // Set the permission on the file for the new owner.
+ //
+ dwRtnCode = ChangeFileModeByMask(longPathName, oldMode);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ goto ChangeFileOwnerByNameEnd;
+ }
+
+ChangeFileOwnerByNameEnd:
+ LocalFree(longPathName);
+ return dwRtnCode;
+}
+
+//----------------------------------------------------------------------------
+// Function: Chown
+//
+// Description:
+// The main method for chown command
+//
+// Returns:
+// 0: on success
+//
+// Notes:
+//
+//
+int Chown(int argc, wchar_t *argv[])
+{
+ LPWSTR pathName = NULL;
+
+ LPWSTR ownerInfo = NULL;
+
+ LPWSTR colonPos = NULL;
+
+ LPWSTR userName = NULL;
+ size_t userNameLen = 0;
+
+ LPWSTR groupName = NULL;
+ size_t groupNameLen = 0;
+
+ PSID pNewOwnerSid = NULL;
+ PSID pNewGroupSid = NULL;
+
+ DWORD dwRtnCode = 0;
+
+ int ret = EXIT_FAILURE;
+
+ if (argc >= 3)
+ {
+ ownerInfo = argv[1];
+ pathName = argv[2];
+ }
+ else
+ {
+ fwprintf(stderr, L"Incorrect command line arguments.\n\n");
+ ChownUsage(argv[0]);
+ return ret;
+ }
+
+ // Parsing the owner name
+ //
+ if ((colonPos = wcschr(ownerInfo, L':')) != NULL)
+ {
+ if (colonPos - ownerInfo != 0)
+ {
+ // Length includes NULL terminator
+ userNameLen = colonPos - ownerInfo + 1;
+ userName = (LPTSTR)LocalAlloc(LPTR, userNameLen * sizeof(WCHAR));
+ if (userName == NULL)
+ {
+ ReportErrorCode(L"LocalAlloc", GetLastError());
+ goto ChownEnd;
+ }
+ if (FAILED(StringCchCopyNW(userName, userNameLen,
+ ownerInfo, userNameLen - 1)))
+ goto ChownEnd;
+ }
+
+ if (*(colonPos + 1) != 0)
+ {
+ // Length includes NULL terminator
+ groupNameLen = wcslen(ownerInfo) - (colonPos - ownerInfo) + 1;
+ groupName = (LPTSTR)LocalAlloc(LPTR, groupNameLen * sizeof(WCHAR));
+ if (groupName == NULL)
+ {
+ ReportErrorCode(L"LocalAlloc", GetLastError());
+ goto ChownEnd;
+ }
+ if (FAILED(StringCchCopyNW(groupName, groupNameLen,
+ colonPos + 1, groupNameLen)))
+ goto ChownEnd;
+ }
+ }
+ else
+ {
+ // Length includes NULL terminator
+ userNameLen = wcslen(ownerInfo) + 1;
+ userName = (LPWSTR)LocalAlloc(LPTR, userNameLen * sizeof(WCHAR));
+ if (userName == NULL)
+ {
+ ReportErrorCode(L"LocalAlloc", GetLastError());
+ goto ChownEnd;
+ }
+ if (FAILED(StringCchCopyNW(userName, userNameLen, ownerInfo, userNameLen)))
+ goto ChownEnd;
+ }
+
+  // Do not allow zero-length user or group names in the parsing results.
+ //
+ assert(userName == NULL || wcslen(userName) > 0);
+ assert(groupName == NULL || wcslen(groupName) > 0);
+
+ // Nothing to change if both names are empty
+ //
+ if ((userName == NULL) && (groupName == NULL))
+ {
+ ret = EXIT_SUCCESS;
+ goto ChownEnd;
+ }
+
+ if (userName != NULL)
+ {
+ dwRtnCode = GetSidFromAcctNameW(userName, &pNewOwnerSid);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"GetSidFromAcctName", dwRtnCode);
+ fwprintf(stderr, L"Invalid user name: %s\n", userName);
+ goto ChownEnd;
+ }
+ }
+
+ if (groupName != NULL)
+ {
+ dwRtnCode = GetSidFromAcctNameW(groupName, &pNewGroupSid);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"GetSidFromAcctName", dwRtnCode);
+ fwprintf(stderr, L"Invalid group name: %s\n", groupName);
+ goto ChownEnd;
+ }
+ }
+
+ if (wcslen(pathName) == 0 || wcsspn(pathName, L"/?|><:*\"") != 0)
+ {
+ fwprintf(stderr, L"Incorrect file name format: %s\n", pathName);
+ goto ChownEnd;
+ }
+
+ dwRtnCode = ChangeFileOwnerBySid(pathName, pNewOwnerSid, pNewGroupSid);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"ChangeFileOwnerBySid", dwRtnCode);
+ goto ChownEnd;
+ }
+
+ ret = EXIT_SUCCESS;
+
+ChownEnd:
+ LocalFree(userName);
+ LocalFree(groupName);
+ LocalFree(pNewOwnerSid);
+ LocalFree(pNewGroupSid);
+
+ return ret;
+}
+
+void ChownUsage(LPCWSTR program)
+{
+ fwprintf(stdout, L"\
+Usage: %s [OWNER][:[GROUP]] [FILE]\n\
+Change the owner and/or group of the FILE to OWNER and/or GROUP.\n\
+\n\
+Note:\n\
+On Linux, if a colon but no group name follows the user name, the group of\n\
+the files is changed to that user\'s login group. Windows has no concept of\n\
+a user's login group. So we do not change the group owner in this case.\n",
+program);
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/groups.c b/hadoop-common-project/hadoop-common/src/main/winutils/groups.c
new file mode 100644
index 0000000000..1608c40ce7
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/groups.c
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+#include "winutils.h"
+
+//----------------------------------------------------------------------------
+// Function: PrintGroups
+//
+// Description:
+// Print group names to the console standard output for the given user
+//
+// Returns:
+// TRUE: on success
+//
+// Notes:
+// This function could fail on the first pass when we fail to find groups for
+// a domain account; so we do not report Windows API errors in this function.
+// If formatOutput is TRUE, a pipe character is used as the separator between
+// groups; otherwise, a space is used.
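+// For example, a user in the local groups "Users" and "Administrators" is
+// printed as "Users|Administrators" with formatting, or as
+// "Users Administrators" without it.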
+//
+static BOOL PrintGroups(
+ LPLOCALGROUP_USERS_INFO_0 groups,
+ DWORD entries,
+ BOOL formatOutput)
+{
+ BOOL ret = TRUE;
+ LPLOCALGROUP_USERS_INFO_0 pTmpBuf = groups;
+ DWORD i;
+
+ for (i = 0; i < entries; i++)
+ {
+ if (pTmpBuf == NULL)
+ {
+ ret = FALSE;
+ break;
+ }
+
+ if (i != 0)
+ {
+ if (formatOutput)
+ {
+ wprintf(L"|");
+ }
+ else
+ {
+ wprintf(L" ");
+ }
+ }
+ wprintf(L"%s", pTmpBuf->lgrui0_name);
+
+ pTmpBuf++;
+ }
+
+ if (ret)
+ wprintf(L"\n");
+
+ return ret;
+}
+
+//----------------------------------------------------------------------------
+// Function: ParseCommandLine
+//
+// Description:
+// Parses the command line
+//
+// Returns:
+// TRUE on the valid command line, FALSE otherwise
+//
+static BOOL ParseCommandLine(
+ int argc, wchar_t *argv[], wchar_t **user, BOOL *formatOutput)
+{
+ *formatOutput = FALSE;
+
+ assert(argv != NULL);
+ assert(user != NULL);
+
+ if (argc == 1)
+ {
+ // implicitly use the current user
+ *user = NULL;
+ return TRUE;
+ }
+ else if (argc == 2)
+ {
+    // check if the second argument is the formatting option
+ if (wcscmp(argv[1], L"-F") == 0)
+ {
+ *user = NULL;
+ *formatOutput = TRUE;
+ return TRUE;
+ }
+ else
+ {
+ *user = argv[1];
+ return TRUE;
+ }
+ }
+ else if (argc == 3 && wcscmp(argv[1], L"-F") == 0)
+ {
+ // if 3 args, the second argument must be "-F"
+
+ *user = argv[2];
+ *formatOutput = TRUE;
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+//----------------------------------------------------------------------------
+// Function: Groups
+//
+// Description:
+// The main method for groups command
+//
+// Returns:
+// 0: on success
+//
+// Notes:
+//
+//
+int Groups(int argc, wchar_t *argv[])
+{
+ LPWSTR input = NULL;
+
+ LPWSTR currentUser = NULL;
+ DWORD cchCurrentUser = 0;
+
+ LPLOCALGROUP_USERS_INFO_0 groups = NULL;
+ DWORD entries = 0;
+
+ DWORD dwRtnCode = ERROR_SUCCESS;
+
+ int ret = EXIT_SUCCESS;
+ BOOL formatOutput = FALSE;
+
+ if (!ParseCommandLine(argc, argv, &input, &formatOutput))
+ {
+ fwprintf(stderr, L"Incorrect command line arguments.\n\n");
+ GroupsUsage(argv[0]);
+ return EXIT_FAILURE;
+ }
+
+  // if the username was not specified on the command line, fall back to the
+  // current user
+ if (input == NULL)
+ {
+ GetUserNameW(currentUser, &cchCurrentUser);
+ if (GetLastError() == ERROR_INSUFFICIENT_BUFFER)
+ {
+ currentUser = (LPWSTR) LocalAlloc(LPTR,
+ (cchCurrentUser + 1) * sizeof(wchar_t));
+ if (!currentUser)
+ {
+ ReportErrorCode(L"LocalAlloc", GetLastError());
+ ret = EXIT_FAILURE;
+ goto GroupsEnd;
+ }
+ if (GetUserNameW(currentUser, &cchCurrentUser))
+ input = currentUser;
+ else
+ {
+ ReportErrorCode(L"GetUserName", GetLastError());
+ ret = EXIT_FAILURE;
+ goto GroupsEnd;
+ }
+ }
+ else
+ {
+ ReportErrorCode(L"GetUserName", GetLastError());
+ ret = EXIT_FAILURE;
+ goto GroupsEnd;
+ }
+ }
+
+ if ((dwRtnCode = GetLocalGroupsForUser(input, &groups, &entries))
+ != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"GetLocalGroupsForUser", dwRtnCode);
+ ret = EXIT_FAILURE;
+ goto GroupsEnd;
+ }
+
+ if (!PrintGroups(groups, entries, formatOutput))
+ {
+ ret = EXIT_FAILURE;
+ }
+
+GroupsEnd:
+ LocalFree(currentUser);
+ if (groups != NULL) NetApiBufferFree(groups);
+ return ret;
+}
+
+void GroupsUsage(LPCWSTR program)
+{
+ fwprintf(stdout, L"\
+Usage: %s [OPTIONS] [USERNAME]\n\
+Print group information of the specified USERNAME \
+(the current user by default).\n\
+\n\
+OPTIONS: -F format the output by separating tokens with a pipe\n",
+program);
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/hardlink.c b/hadoop-common-project/hadoop-common/src/main/winutils/hardlink.c
new file mode 100644
index 0000000000..1be2541f04
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/hardlink.c
@@ -0,0 +1,230 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements. See the NOTICE file distributed with this
+* work for additional information regarding copyright ownership. The ASF
+* licenses this file to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations under
+* the License.
+*/
+
+#include "winutils.h"
+
+// List of different hardlink related command line options supported by
+// winutils.
+typedef enum HardLinkCommandOptionType
+{
+ HardLinkInvalid,
+ HardLinkCreate,
+ HardLinkStat
+} HardLinkCommandOption;
+
+//----------------------------------------------------------------------------
+// Function: ParseCommandLine
+//
+// Description:
+// Parses the given command line. On success, out param 'command' contains
+// the user specified command.
+//
+// Returns:
+// TRUE: If the command line is valid
+// FALSE: otherwise
+static BOOL ParseCommandLine(__in int argc,
+ __in wchar_t *argv[],
+ __out HardLinkCommandOption *command)
+{
+ *command = HardLinkInvalid;
+
+ if (argc != 3 && argc != 4) {
+ return FALSE;
+ }
+
+ if (argc == 3) {
+ if (wcscmp(argv[0], L"hardlink") != 0 || wcscmp(argv[1], L"stat") != 0)
+ {
+ return FALSE;
+ }
+
+ *command = HardLinkStat;
+ }
+
+ if (argc == 4) {
+ if (wcscmp(argv[0], L"hardlink") != 0 || wcscmp(argv[1], L"create") != 0)
+ {
+ return FALSE;
+ }
+
+ *command = HardLinkCreate;
+ }
+
+ assert(*command != HardLinkInvalid);
+
+ return TRUE;
+}
+
+//----------------------------------------------------------------------------
+// Function: HardlinkStat
+//
+// Description:
+// Computes the number of hard links for a given file.
+//
+// Returns:
+// ERROR_SUCCESS: On success
+// error code: otherwise
+static DWORD HardlinkStat(__in LPCWSTR fileName, __out DWORD *puHardLinkCount)
+{
+ BY_HANDLE_FILE_INFORMATION fileInformation;
+ DWORD dwErrorCode = ERROR_SUCCESS;
+ PWSTR longFileName = NULL;
+
+ // First convert input paths to long paths
+ //
+ dwErrorCode = ConvertToLongPath(fileName, &longFileName);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ goto HardlinkStatExit;
+ }
+
+ // Get file information which contains the hard link count
+ //
+ dwErrorCode = GetFileInformationByName(longFileName, FALSE, &fileInformation);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ goto HardlinkStatExit;
+ }
+
+ *puHardLinkCount = fileInformation.nNumberOfLinks;
+
+HardlinkStatExit:
+ LocalFree(longFileName);
+
+ return dwErrorCode;
+}
+
+//----------------------------------------------------------------------------
+// Function: HardlinkCreate
+//
+// Description:
+// Creates a hard link for a given file under the given name.
+//
+// Returns:
+// ERROR_SUCCESS: On success
+// error code: otherwise
+static DWORD HardlinkCreate(__in LPCWSTR linkName, __in LPCWSTR fileName)
+{
+ PWSTR longLinkName = NULL;
+ PWSTR longFileName = NULL;
+ DWORD dwErrorCode = ERROR_SUCCESS;
+
+ // First convert input paths to long paths
+ //
+ dwErrorCode = ConvertToLongPath(linkName, &longLinkName);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ goto HardlinkCreateExit;
+ }
+
+ dwErrorCode = ConvertToLongPath(fileName, &longFileName);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ goto HardlinkCreateExit;
+ }
+
+ // Create the hard link
+ //
+ if (!CreateHardLink(longLinkName, longFileName, NULL))
+ {
+ dwErrorCode = GetLastError();
+ }
+
+HardlinkCreateExit:
+ LocalFree(longLinkName);
+ LocalFree(longFileName);
+
+ return dwErrorCode;
+}
+
+//----------------------------------------------------------------------------
+// Function: Hardlink
+//
+// Description:
+// Creates a hard link for a given file under the given name. Outputs the
+// appropriate information to stdout on success, or stderr on failure.
+//
+// Returns:
+// EXIT_SUCCESS: On success
+// EXIT_FAILURE: otherwise
+int Hardlink(int argc, wchar_t *argv[])
+{
+ DWORD dwErrorCode = ERROR_SUCCESS;
+ int ret = EXIT_FAILURE;
+ HardLinkCommandOption command = HardLinkInvalid;
+
+ if (!ParseCommandLine(argc, argv, &command)) {
+ dwErrorCode = ERROR_INVALID_COMMAND_LINE;
+
+ fwprintf(stderr, L"Incorrect command line arguments.\n\n");
+ HardlinkUsage();
+ goto HardLinkExit;
+ }
+
+ if (command == HardLinkStat)
+ {
+ // Compute the number of hard links
+ //
+ DWORD uHardLinkCount = 0;
+ dwErrorCode = HardlinkStat(argv[2], &uHardLinkCount);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"HardlinkStat", dwErrorCode);
+ goto HardLinkExit;
+ }
+
+ // Output the result
+ //
+ fwprintf(stdout, L"%d\n", uHardLinkCount);
+
+ } else if (command == HardLinkCreate)
+ {
+ // Create the hard link
+ //
+ dwErrorCode = HardlinkCreate(argv[2], argv[3]);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"HardlinkCreate", dwErrorCode);
+ goto HardLinkExit;
+ }
+
+ // Output the success message
+ //
+ fwprintf(stdout, L"Hardlink created for %s <<===>> %s\n", argv[2], argv[3]);
+
+ } else
+ {
+ // Should not happen
+ //
+ assert(FALSE);
+ }
+
+ ret = EXIT_SUCCESS;
+
+HardLinkExit:
+
+ return ret;
+}
+
+void HardlinkUsage()
+{
+ fwprintf(stdout, L"\
+Usage: hardlink create [LINKNAME] [FILENAME] |\n\
+ hardlink stat [FILENAME]\n\
+Creates a new hardlink on the existing file or displays the number of links\n\
+for the given file\n");
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h b/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h
new file mode 100644
index 0000000000..34225fd8aa
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+#ifndef UNICODE
+#define UNICODE
+#endif
+
+#pragma once
+
+#include <stdio.h>
+#include <assert.h>
+#include <windows.h>
+#include <aclapi.h>
+#include <accctrl.h>
+#include <tchar.h>
+#include <strsafe.h>
+#include <lm.h>
+
+enum EXIT_CODE
+{
+ /* Common success exit code shared among all utilities */
+ SUCCESS = EXIT_SUCCESS,
+ /* Generic failure exit code share among all utilities */
+ FAILURE = EXIT_FAILURE,
+  /* Failure code indicating the user does not have the privilege to create symlinks */
+ SYMLINK_NO_PRIVILEGE = 2,
+};
+
+
+/*
+ * The array of 12 months' three-letter abbreviations
+ */
+extern const LPCWSTR MONTHS[];
+
+/*
+ * The Unix masks
+ * The Windows version of <sys/stat.h> does not contain all the POSIX flag/mask
+ * definitions. The following masks are used in 'winutils' to represent POSIX
+ * permission mode.
+ *
+ */
+enum UnixAclMask
+{
+ UX_O_EXECUTE = 00001, // S_IXOTH
+ UX_O_WRITE = 00002, // S_IWOTH
+ UX_O_READ = 00004, // S_IROTH
+ UX_G_EXECUTE = 00010, // S_IXGRP
+ UX_G_WRITE = 00020, // S_IWGRP
+ UX_G_READ = 00040, // S_IRGRP
+ UX_U_EXECUTE = 00100, // S_IXUSR
+ UX_U_WRITE = 00200, // S_IWUSR
+ UX_U_READ = 00400, // S_IRUSR
+ UX_DIRECTORY = 0040000, // S_IFDIR
+ UX_SYMLINK = 0120000, // S_IFLNK
+};
+
+
+/*
+ * The WindowsAclMask and WinMasks contain the definitions used to establish
+ * the mapping between Unix and Windows.
+ */
+enum WindowsAclMask
+{
+ WIN_READ, // The permission(s) that enable Unix read permission
+ WIN_WRITE, // The permission(s) that enable Unix write permission
+  WIN_EXECUTE, // The permission(s) that enable Unix execute permission
+ WIN_OWNER_SE, // The permissions that are always set for file owners
+ WIN_ALL, // The permissions that all files on Windows should have
+ WIN_MASKS_TOTAL
+};
+extern const ACCESS_MASK WinMasks[];
+
+
+int Ls(int argc, wchar_t *argv[]);
+void LsUsage(LPCWSTR program);
+
+int Chmod(int argc, wchar_t *argv[]);
+void ChmodUsage(LPCWSTR program);
+
+int Chown(int argc, wchar_t *argv[]);
+void ChownUsage(LPCWSTR program);
+
+int Groups(int argc, wchar_t *argv[]);
+void GroupsUsage(LPCWSTR program);
+
+int Hardlink(int argc, wchar_t *argv[]);
+void HardlinkUsage();
+
+int Task(int argc, wchar_t *argv[]);
+void TaskUsage();
+
+int Symlink(int argc, wchar_t *argv[]);
+void SymlinkUsage();
+
+int SystemInfo();
+void SystemInfoUsage();
+
+DWORD GetFileInformationByName(__in LPCWSTR pathName, __in BOOL followLink,
+ __out LPBY_HANDLE_FILE_INFORMATION lpFileInformation);
+
+DWORD ConvertToLongPath(__in PCWSTR path, __deref_out PWSTR *newPath);
+
+DWORD GetSidFromAcctNameW(LPCWSTR acctName, PSID* ppSid);
+
+DWORD GetAccntNameFromSid(PSID pSid, LPWSTR *ppAcctName);
+
+void ReportErrorCode(LPCWSTR func, DWORD err);
+
+BOOL IsDirFileInfo(const BY_HANDLE_FILE_INFORMATION *fileInformation);
+
+DWORD FindFileOwnerAndPermission(
+ __in LPCWSTR pathName,
+ __out_opt LPWSTR *pOwnerName,
+ __out_opt LPWSTR *pGroupName,
+ __out_opt PINT pMask);
+
+DWORD DirectoryCheck(__in LPCWSTR pathName, __out LPBOOL result);
+
+DWORD SymbolicLinkCheck(__in LPCWSTR pathName, __out LPBOOL result);
+
+DWORD JunctionPointCheck(__in LPCWSTR pathName, __out LPBOOL result);
+
+DWORD ChangeFileModeByMask(__in LPCWSTR path, INT mode);
+
+DWORD GetLocalGroupsForUser(__in LPCWSTR user,
+ __out LPLOCALGROUP_USERS_INFO_0 *groups, __out LPDWORD entries);
+
+BOOL EnablePrivilege(__in LPCWSTR privilegeName);
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c b/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c
new file mode 100644
index 0000000000..d21906638e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c
@@ -0,0 +1,1515 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+#pragma comment(lib, "authz.lib")
+#pragma comment(lib, "netapi32.lib")
+#include "winutils.h"
+#include <authz.h>
+#include <sddl.h>
+
+/*
+ * The array of 12 months' three-letter abbreviations
+ */
+const LPCWSTR MONTHS[] = { L"Jan", L"Feb", L"Mar", L"Apr", L"May", L"Jun",
+ L"Jul", L"Aug", L"Sep", L"Oct", L"Nov", L"Dec" };
+
+/*
+ * The WindowsAclMask and WinMasks contain the definitions used to establish
+ * the mapping between Unix and Windows.
+ * We set up the mapping with the following rules.
+ * 1. Everyone will have WIN_ALL permissions;
+ * 2. The owner will always have WIN_OWNER_SE permissions in addition;
+ * 3. When a Unix read/write/execute permission is set on the file, the
+ *    corresponding Windows allow ACE will be added to the file.
+ * More details and explanation can be found in the following white paper:
+ * http://technet.microsoft.com/en-us/library/bb463216.aspx
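+ * For example, with this mapping a Unix mode of 0750 grants the owner the
+ * WIN_ALL, WIN_OWNER_SE, read, write and execute masks; grants the group
+ * WIN_ALL plus read and execute; and grants Everyone only WIN_ALL.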
+ */
+const ACCESS_MASK WinMasks[WIN_MASKS_TOTAL] =
+{
+ /* WIN_READ */
+ FILE_READ_DATA,
+ /* WIN_WRITE */
+ FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_APPEND_DATA | FILE_WRITE_EA |
+ FILE_DELETE_CHILD,
+ /* WIN_EXECUTE */
+ FILE_EXECUTE,
+ /* WIN_OWNER_SE */
+ DELETE | WRITE_DAC | WRITE_OWNER | FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES,
+ /* WIN_ALL */
+ READ_CONTROL | FILE_READ_EA | FILE_READ_ATTRIBUTES | SYNCHRONIZE,
+};
+
+//----------------------------------------------------------------------------
+// Function: GetFileInformationByName
+//
+// Description:
+// To retrieve the by handle file information given the file name
+//
+// Returns:
+// ERROR_SUCCESS: on success
+// error code: otherwise
+//
+// Notes:
+// If followLink parameter is set to TRUE, we will follow the symbolic link
+// or junction point to get the target file information. Otherwise, the
+// information for the symbolic link or junction point is retrieved.
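+// For example, for a symbolic link, passing followLink == TRUE returns the
+// information of the link target, while FALSE opens the link itself with
+// FILE_FLAG_OPEN_REPARSE_POINT and returns the link's own information.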
+//
+DWORD GetFileInformationByName(
+ __in LPCWSTR pathName,
+ __in BOOL followLink,
+ __out LPBY_HANDLE_FILE_INFORMATION lpFileInformation)
+{
+ HANDLE fileHandle = INVALID_HANDLE_VALUE;
+ BOOL isSymlink = FALSE;
+ BOOL isJunction = FALSE;
+ DWORD dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS;
+ DWORD dwErrorCode = ERROR_SUCCESS;
+
+ assert(lpFileInformation != NULL);
+
+ if (!followLink)
+ {
+ if ((dwErrorCode = SymbolicLinkCheck(pathName, &isSymlink)) != ERROR_SUCCESS)
+ return dwErrorCode;
+ if ((dwErrorCode = JunctionPointCheck(pathName, &isJunction)) != ERROR_SUCCESS)
+ return dwErrorCode;
+ if (isSymlink || isJunction)
+ dwFlagsAndAttributes |= FILE_FLAG_OPEN_REPARSE_POINT;
+ }
+
+ fileHandle = CreateFileW(
+ pathName,
+ FILE_READ_ATTRIBUTES,
+ FILE_SHARE_READ,
+ NULL,
+ OPEN_EXISTING,
+ dwFlagsAndAttributes,
+ NULL);
+ if (fileHandle == INVALID_HANDLE_VALUE)
+ {
+ dwErrorCode = GetLastError();
+ return dwErrorCode;
+ }
+
+ if (!GetFileInformationByHandle(fileHandle, lpFileInformation))
+ {
+ dwErrorCode = GetLastError();
+ CloseHandle(fileHandle);
+ return dwErrorCode;
+ }
+
+ CloseHandle(fileHandle);
+
+ return dwErrorCode;
+}
+
+//----------------------------------------------------------------------------
+// Function: IsLongWindowsPath
+//
+// Description:
+// Checks if the path is longer than MAX_PATH in which case it needs to be
+// prepended with \\?\ for Windows OS to understand it.
+//
+// Returns:
+// TRUE long path
+// FALSE otherwise
+static BOOL IsLongWindowsPath(__in PCWSTR path)
+{
+ return (wcslen(path) + 1) > MAX_PATH;
+}
+
+//----------------------------------------------------------------------------
+// Function: IsPrefixedAlready
+//
+// Description:
+// Checks if the given path is already prepended with \\?\.
+//
+// Returns:
+// TRUE if yes
+// FALSE otherwise
+static BOOL IsPrefixedAlready(__in PCWSTR path)
+{
+ static const PCWSTR LongPathPrefix = L"\\\\?\\";
+ size_t Prefixlen = wcslen(LongPathPrefix);
+ size_t i = 0;
+
+ if (path == NULL || wcslen(path) < Prefixlen)
+ {
+ return FALSE;
+ }
+
+ for (i = 0; i < Prefixlen; ++i)
+ {
+ if (path[i] != LongPathPrefix[i])
+ {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+//----------------------------------------------------------------------------
+// Function: ConvertToLongPath
+//
+// Description:
+// Prepends the path with the \\?\ prefix if the path is longer than MAX_PATH.
+// On success, newPath should be freed with LocalFree(). Given that relative
+// paths cannot be longer than MAX_PATH, we will never prepend the prefix
+// to relative paths.
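+// For example, an absolute path longer than MAX_PATH such as
+// "c:\hdc\<a very deep directory tree>" is returned as
+// "\\?\c:\hdc\<a very deep directory tree>"; shorter and relative paths are
+// returned unchanged (as a copy).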
+//
+// Returns:
+// ERROR_SUCCESS on success
+// error code on failure
+DWORD ConvertToLongPath(__in PCWSTR path, __deref_out PWSTR *newPath)
+{
+ DWORD dwErrorCode = ERROR_SUCCESS;
+ static const PCWSTR LongPathPrefix = L"\\\\?\\";
+ BOOL bAppendPrefix = IsLongWindowsPath(path) && !IsPrefixedAlready(path);
+ HRESULT hr = S_OK;
+
+ size_t newPathLen = wcslen(path) + (bAppendPrefix ? wcslen(LongPathPrefix) : 0);
+
+ // Allocate the buffer for the output path (+1 for terminating NULL char)
+ //
+ PWSTR newPathValue = (PWSTR)LocalAlloc(LPTR, (newPathLen + 1) * sizeof(WCHAR));
+ if (newPathValue == NULL)
+ {
+ dwErrorCode = GetLastError();
+ goto ConvertToLongPathExit;
+ }
+
+ if (bAppendPrefix)
+ {
+ // Append the prefix to the path
+ //
+ hr = StringCchPrintfW(newPathValue, newPathLen + 1, L"%s%s",
+ LongPathPrefix, path);
+ if (FAILED(hr))
+ {
+ dwErrorCode = HRESULT_CODE(hr);
+ goto ConvertToLongPathExit;
+ }
+ }
+ else
+ {
+    // Just copy the original value into the output path. In this scenario
+    // we are doing an extra buffer copy. We decided to accept the small
+    // performance impact (extra allocation and copy) in exchange for code
+    // simplicity at the call site. As paths are short, the impact is small.
+ //
+ hr = StringCchPrintfW(newPathValue, newPathLen + 1, L"%s", path);
+ if (FAILED(hr))
+ {
+ dwErrorCode = HRESULT_CODE(hr);
+ goto ConvertToLongPathExit;
+ }
+ }
+
+ *newPath = newPathValue;
+
+ConvertToLongPathExit:
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ LocalFree(newPathValue);
+ *newPath = NULL;
+ }
+
+ return dwErrorCode;
+}
+
+//----------------------------------------------------------------------------
+// Function: IsDirFileInfo
+//
+// Description:
+// Test if the given file information is a directory
+//
+// Returns:
+// TRUE if it is a directory
+// FALSE otherwise
+//
+// Notes:
+//
+BOOL IsDirFileInfo(const BY_HANDLE_FILE_INFORMATION *fileInformation)
+{
+ if ((fileInformation->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
+ == FILE_ATTRIBUTE_DIRECTORY)
+ return TRUE;
+ return FALSE;
+}
+
+//----------------------------------------------------------------------------
+// Function: CheckFileAttributes
+//
+// Description:
+// Check if the given file has all the given attribute(s)
+//
+// Returns:
+// ERROR_SUCCESS on success
+// error code otherwise
+//
+// Notes:
+//
+static DWORD FileAttributesCheck(
+ __in LPCWSTR path, __in DWORD attr, __out PBOOL res)
+{
+ DWORD attrs = INVALID_FILE_ATTRIBUTES;
+ *res = FALSE;
+ if ((attrs = GetFileAttributes(path)) != INVALID_FILE_ATTRIBUTES)
+ *res = ((attrs & attr) == attr);
+ else
+ return GetLastError();
+ return ERROR_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+// Function: IsDirectory
+//
+// Description:
+// Check if the given file is a directory
+//
+// Returns:
+// ERROR_SUCCESS on success
+// error code otherwise
+//
+// Notes:
+//
+DWORD DirectoryCheck(__in LPCWSTR pathName, __out PBOOL res)
+{
+ return FileAttributesCheck(pathName, FILE_ATTRIBUTE_DIRECTORY, res);
+}
+
+//----------------------------------------------------------------------------
+// Function: IsReparsePoint
+//
+// Description:
+// Check if the given file is a reparse point
+//
+// Returns:
+// ERROR_SUCCESS on success
+// error code otherwise
+//
+// Notes:
+//
+static DWORD ReparsePointCheck(__in LPCWSTR pathName, __out PBOOL res)
+{
+ return FileAttributesCheck(pathName, FILE_ATTRIBUTE_REPARSE_POINT, res);
+}
+
+//----------------------------------------------------------------------------
+// Function: CheckReparseTag
+//
+// Description:
+// Check if the given file is a reparse point of the given tag.
+//
+// Returns:
+// ERROR_SUCCESS on success
+// error code otherwise
+//
+// Notes:
+//
+static DWORD ReparseTagCheck(__in LPCWSTR path, __in DWORD tag, __out PBOOL res)
+{
+ BOOL isReparsePoint = FALSE;
+ HANDLE hFind = INVALID_HANDLE_VALUE;
+ WIN32_FIND_DATA findData;
+ DWORD dwRtnCode;
+
+ if ((dwRtnCode = ReparsePointCheck(path, &isReparsePoint)) != ERROR_SUCCESS)
+ return dwRtnCode;
+
+ if (!isReparsePoint)
+ {
+ *res = FALSE;
+ }
+ else
+ {
+ if ((hFind = FindFirstFile(path, &findData)) == INVALID_HANDLE_VALUE)
+ {
+ return GetLastError();
+ }
+ else
+ {
+ *res = (findData.dwReserved0 == tag);
+ FindClose(hFind);
+ }
+ }
+ return ERROR_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+// Function: IsSymbolicLink
+//
+// Description:
+// Check if the given file is a symbolic link.
+//
+// Returns:
+// ERROR_SUCCESS on success
+// error code otherwise
+//
+// Notes:
+//
+DWORD SymbolicLinkCheck(__in LPCWSTR pathName, __out PBOOL res)
+{
+ return ReparseTagCheck(pathName, IO_REPARSE_TAG_SYMLINK, res);
+}
+
+//----------------------------------------------------------------------------
+// Function: IsJunctionPoint
+//
+// Description:
+// Check if the given file is a junction point.
+//
+// Returns:
+// ERROR_SUCCESS on success
+// error code otherwise
+//
+// Notes:
+//
+DWORD JunctionPointCheck(__in LPCWSTR pathName, __out PBOOL res)
+{
+ return ReparseTagCheck(pathName, IO_REPARSE_TAG_MOUNT_POINT, res);
+}
+
+//----------------------------------------------------------------------------
+// Function: GetSidFromAcctNameW
+//
+// Description:
+// To retrieve the SID for a user account
+//
+// Returns:
+// ERROR_SUCCESS: on success
+// Other error code: otherwise
+//
+// Notes:
+// Caller needs to destroy the memory of Sid by calling LocalFree()
+//
+DWORD GetSidFromAcctNameW(LPCWSTR acctName, PSID *ppSid)
+{
+ DWORD dwSidSize = 0;
+ DWORD cchDomainName = 0;
+ DWORD dwDomainNameSize = 0;
+ LPWSTR domainName = NULL;
+ SID_NAME_USE eSidType;
+
+ DWORD dwErrorCode = ERROR_SUCCESS;
+
+ // Validate the input parameters.
+ //
+ assert (acctName != NULL && ppSid != NULL);
+
+  // An empty name is invalid. However, the LookupAccountName() function will
+  // return a bogus SID, i.e. the SID for 'BUILTIN', for an empty name instead
+  // of failing. We report the error before calling the LookupAccountName()
+  // function for this special case. The error code returned here is the same
+  // as the last error code set by LookupAccountName() for an invalid name.
+ //
+ if (wcslen(acctName) == 0)
+ return ERROR_NONE_MAPPED;
+
+ // First pass to retrieve the buffer size.
+ //
+ LookupAccountName(
+ NULL, // Computer name. NULL for the local computer
+ acctName,
+ NULL, // pSid. NULL to retrieve buffer size
+ &dwSidSize,
+ NULL, // Domain Name. NULL to retrieve buffer size
+ &cchDomainName,
+ &eSidType);
+
+ if((dwErrorCode = GetLastError()) != ERROR_INSUFFICIENT_BUFFER)
+ {
+ return dwErrorCode;
+ }
+ else
+ {
+ // Reallocate memory for the buffers.
+ //
+ *ppSid = (PSID)LocalAlloc(LPTR, dwSidSize);
+ if (*ppSid == NULL)
+ {
+ return GetLastError();
+ }
+ dwDomainNameSize = (cchDomainName + 1) * sizeof(wchar_t);
+ domainName = (LPWSTR)LocalAlloc(LPTR, dwDomainNameSize);
+ if (domainName == NULL)
+ {
+ return GetLastError();
+ }
+
+ // Second pass to retrieve the SID and domain name.
+ //
+ if (!LookupAccountNameW(
+ NULL, // Computer name. NULL for the local computer
+ acctName,
+ *ppSid,
+ &dwSidSize,
+ domainName,
+ &cchDomainName,
+ &eSidType))
+ {
+ LocalFree(domainName);
+ return GetLastError();
+ }
+
+ assert(IsValidSid(*ppSid));
+ }
+
+ LocalFree(domainName);
+ return ERROR_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+// Function: GetUnixAccessMask
+//
+// Description:
+// Compute the 3-bit Unix mask for the owner, group, or others
+//
+// Returns:
+// The 3-bit Unix mask as an INT
+//
+// Notes:
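+// For example, an ACCESS_MASK that contains FILE_READ_DATA and FILE_EXECUTE
+// but not all of the WIN_WRITE rights maps to the Unix bits 101, i.e. r-x.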
+//
+static INT GetUnixAccessMask(ACCESS_MASK Mask)
+{
+ static const INT exe = 0x0001;
+ static const INT write = 0x0002;
+ static const INT read = 0x0004;
+ INT mask = 0;
+
+ if ((Mask & WinMasks[WIN_READ]) == WinMasks[WIN_READ])
+ mask |= read;
+ if ((Mask & WinMasks[WIN_WRITE]) == WinMasks[WIN_WRITE])
+ mask |= write;
+ if ((Mask & WinMasks[WIN_EXECUTE]) == WinMasks[WIN_EXECUTE])
+ mask |= exe;
+ return mask;
+}
+
+//----------------------------------------------------------------------------
+// Function: GetAccess
+//
+// Description:
+// Get the Windows access mask by AuthZ methods
+//
+// Returns:
+// ERROR_SUCCESS: on success
+//
+// Notes:
+//
+static DWORD GetAccess(AUTHZ_CLIENT_CONTEXT_HANDLE hAuthzClient,
+ PSECURITY_DESCRIPTOR psd, PACCESS_MASK pAccessRights)
+{
+ AUTHZ_ACCESS_REQUEST AccessRequest = {0};
+ AUTHZ_ACCESS_REPLY AccessReply = {0};
+ BYTE Buffer[1024];
+
+ assert (pAccessRights != NULL);
+
+ // Do AccessCheck
+ AccessRequest.DesiredAccess = MAXIMUM_ALLOWED;
+ AccessRequest.PrincipalSelfSid = NULL;
+ AccessRequest.ObjectTypeList = NULL;
+ AccessRequest.ObjectTypeListLength = 0;
+ AccessRequest.OptionalArguments = NULL;
+
+ RtlZeroMemory(Buffer, sizeof(Buffer));
+ AccessReply.ResultListLength = 1;
+ AccessReply.GrantedAccessMask = (PACCESS_MASK) (Buffer);
+ AccessReply.Error = (PDWORD) (Buffer + sizeof(ACCESS_MASK));
+
+ if (!AuthzAccessCheck(0,
+ hAuthzClient,
+ &AccessRequest,
+ NULL,
+ psd,
+ NULL,
+ 0,
+ &AccessReply,
+ NULL))
+ {
+ return GetLastError();
+ }
+ *pAccessRights = (*(PACCESS_MASK)(AccessReply.GrantedAccessMask));
+ return ERROR_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+// Function: GetEffectiveRightsForSid
+//
+// Description:
+// Get the effective Windows access mask for a given SID by AuthZ methods
+//
+// Returns:
+// ERROR_SUCCESS: on success
+//
+// Notes:
+// We run into problems for local user accounts when using the method
+// GetEffectiveRightsFromAcl(). We resort to using AuthZ methods as
+// an alternative way suggested on MSDN:
+// http://msdn.microsoft.com/en-us/library/windows/desktop/aa446637.aspx
+//
+static DWORD GetEffectiveRightsForSid(PSECURITY_DESCRIPTOR psd,
+ PSID pSid,
+ PACCESS_MASK pAccessRights)
+{
+ AUTHZ_RESOURCE_MANAGER_HANDLE hManager;
+ LUID unusedId = { 0 };
+ AUTHZ_CLIENT_CONTEXT_HANDLE hAuthzClientContext = NULL;
+ DWORD dwRtnCode = ERROR_SUCCESS;
+ DWORD ret = ERROR_SUCCESS;
+
+ assert (pAccessRights != NULL);
+
+ if (!AuthzInitializeResourceManager(AUTHZ_RM_FLAG_NO_AUDIT,
+ NULL, NULL, NULL, NULL, &hManager))
+ {
+ return GetLastError();
+ }
+
+ if(!AuthzInitializeContextFromSid(AUTHZ_SKIP_TOKEN_GROUPS,
+ pSid, hManager, NULL, unusedId, NULL, &hAuthzClientContext))
+ {
+ ret = GetLastError();
+ goto GetEffectiveRightsForSidEnd;
+ }
+
+ if ((dwRtnCode = GetAccess(hAuthzClientContext, psd, pAccessRights))
+ != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto GetEffectiveRightsForSidEnd;
+ }
+ if (!AuthzFreeContext(hAuthzClientContext))
+ {
+ ret = GetLastError();
+ goto GetEffectiveRightsForSidEnd;
+ }
+
+GetEffectiveRightsForSidEnd:
+ return ret;
+}
+
+//----------------------------------------------------------------------------
+// Function: FindFileOwnerAndPermission
+//
+// Description:
+// Find the owner, primary group and permissions of a file object
+//
+// Returns:
+// ERROR_SUCCESS: on success
+// Error code otherwise
+//
+// Notes:
+// - Caller needs to destroy the memory of owner and group names by calling
+// LocalFree() function.
+//
+// - If the user or group name does not exist, the user or group SID will be
+// returned as the name.
+//
+DWORD FindFileOwnerAndPermission(
+ __in LPCWSTR pathName,
+ __out_opt LPWSTR *pOwnerName,
+ __out_opt LPWSTR *pGroupName,
+ __out_opt PINT pMask)
+{
+ DWORD dwRtnCode = 0;
+
+ PSECURITY_DESCRIPTOR pSd = NULL;
+
+ PSID psidOwner = NULL;
+ PSID psidGroup = NULL;
+ PSID psidEveryone = NULL;
+ DWORD cbSid = SECURITY_MAX_SID_SIZE;
+ PACL pDacl = NULL;
+
+ ACCESS_MASK ownerAccessRights = 0;
+ ACCESS_MASK groupAccessRights = 0;
+ ACCESS_MASK worldAccessRights = 0;
+
+ DWORD ret = ERROR_SUCCESS;
+
+  // Do nothing if the caller requests nothing
+ //
+ if (pOwnerName == NULL && pGroupName == NULL && pMask == NULL)
+ {
+ return ret;
+ }
+
+ dwRtnCode = GetNamedSecurityInfo(pathName, SE_FILE_OBJECT,
+ OWNER_SECURITY_INFORMATION | GROUP_SECURITY_INFORMATION |
+ DACL_SECURITY_INFORMATION,
+ &psidOwner, &psidGroup, &pDacl, NULL, &pSd);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto FindFileOwnerAndPermissionEnd;
+ }
+
+ if (pOwnerName != NULL)
+ {
+ dwRtnCode = GetAccntNameFromSid(psidOwner, pOwnerName);
+ if (dwRtnCode == ERROR_NONE_MAPPED)
+ {
+ if (!ConvertSidToStringSid(psidOwner, pOwnerName))
+ {
+ ret = GetLastError();
+ goto FindFileOwnerAndPermissionEnd;
+ }
+ }
+ else if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto FindFileOwnerAndPermissionEnd;
+ }
+ }
+
+ if (pGroupName != NULL)
+ {
+ dwRtnCode = GetAccntNameFromSid(psidGroup, pGroupName);
+ if (dwRtnCode == ERROR_NONE_MAPPED)
+ {
+ if (!ConvertSidToStringSid(psidGroup, pGroupName))
+ {
+ ret = GetLastError();
+ goto FindFileOwnerAndPermissionEnd;
+ }
+ }
+ else if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto FindFileOwnerAndPermissionEnd;
+ }
+ }
+
+ if (pMask == NULL) goto FindFileOwnerAndPermissionEnd;
+
+ if ((dwRtnCode = GetEffectiveRightsForSid(pSd,
+ psidOwner, &ownerAccessRights)) != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto FindFileOwnerAndPermissionEnd;
+ }
+
+ if ((dwRtnCode = GetEffectiveRightsForSid(pSd,
+ psidGroup, &groupAccessRights)) != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto FindFileOwnerAndPermissionEnd;
+ }
+
+ if ((psidEveryone = LocalAlloc(LPTR, cbSid)) == NULL)
+ {
+ ret = GetLastError();
+ goto FindFileOwnerAndPermissionEnd;
+ }
+ if (!CreateWellKnownSid(WinWorldSid, NULL, psidEveryone, &cbSid))
+ {
+ ret = GetLastError();
+ goto FindFileOwnerAndPermissionEnd;
+ }
+ if ((dwRtnCode = GetEffectiveRightsForSid(pSd,
+ psidEveryone, &worldAccessRights)) != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto FindFileOwnerAndPermissionEnd;
+ }
+
+ *pMask |= GetUnixAccessMask(ownerAccessRights) << 6;
+ *pMask |= GetUnixAccessMask(groupAccessRights) << 3;
+ *pMask |= GetUnixAccessMask(worldAccessRights);
+
+FindFileOwnerAndPermissionEnd:
+ LocalFree(psidEveryone);
+ LocalFree(pSd);
+
+ return ret;
+}
+
+//----------------------------------------------------------------------------
+// Function: GetWindowsAccessMask
+//
+// Description:
+// Get the Windows AccessMask for user, group and everyone based on the Unix
+// permission mask
+//
+// Returns:
+// none
+//
+// Notes:
+// none
+//
+static void GetWindowsAccessMask(INT unixMask,
+ ACCESS_MASK *userAllow,
+ ACCESS_MASK *userDeny,
+ ACCESS_MASK *groupAllow,
+ ACCESS_MASK *groupDeny,
+ ACCESS_MASK *otherAllow)
+{
+ assert (userAllow != NULL && userDeny != NULL &&
+ groupAllow != NULL && groupDeny != NULL &&
+ otherAllow != NULL);
+
+ *userAllow = WinMasks[WIN_ALL] | WinMasks[WIN_OWNER_SE];
+ if ((unixMask & UX_U_READ) == UX_U_READ)
+ *userAllow |= WinMasks[WIN_READ];
+
+ if ((unixMask & UX_U_WRITE) == UX_U_WRITE)
+ *userAllow |= WinMasks[WIN_WRITE];
+
+ if ((unixMask & UX_U_EXECUTE) == UX_U_EXECUTE)
+ *userAllow |= WinMasks[WIN_EXECUTE];
+
+ *userDeny = 0;
+ if ((unixMask & UX_U_READ) != UX_U_READ &&
+ ((unixMask & UX_G_READ) == UX_G_READ ||
+ (unixMask & UX_O_READ) == UX_O_READ))
+ *userDeny |= WinMasks[WIN_READ];
+
+ if ((unixMask & UX_U_WRITE) != UX_U_WRITE &&
+ ((unixMask & UX_G_WRITE) == UX_G_WRITE ||
+ (unixMask & UX_O_WRITE) == UX_O_WRITE))
+ *userDeny |= WinMasks[WIN_WRITE];
+
+ if ((unixMask & UX_U_EXECUTE) != UX_U_EXECUTE &&
+ ((unixMask & UX_G_EXECUTE) == UX_G_EXECUTE ||
+ (unixMask & UX_O_EXECUTE) == UX_O_EXECUTE))
+ *userDeny |= WinMasks[WIN_EXECUTE];
+
+ *groupAllow = WinMasks[WIN_ALL];
+ if ((unixMask & UX_G_READ) == UX_G_READ)
+    *groupAllow |= WinMasks[WIN_READ];
+
+ if ((unixMask & UX_G_WRITE) == UX_G_WRITE)
+ *groupAllow |= WinMasks[WIN_WRITE];
+
+ if ((unixMask & UX_G_EXECUTE) == UX_G_EXECUTE)
+ *groupAllow |= WinMasks[WIN_EXECUTE];
+
+ *groupDeny = 0;
+ if ((unixMask & UX_G_READ) != UX_G_READ &&
+ (unixMask & UX_O_READ) == UX_O_READ)
+ *groupDeny |= WinMasks[WIN_READ];
+
+ if ((unixMask & UX_G_WRITE) != UX_G_WRITE &&
+ (unixMask & UX_O_WRITE) == UX_O_WRITE)
+ *groupDeny |= WinMasks[WIN_WRITE];
+
+ if ((unixMask & UX_G_EXECUTE) != UX_G_EXECUTE &&
+ (unixMask & UX_O_EXECUTE) == UX_O_EXECUTE)
+ *groupDeny |= WinMasks[WIN_EXECUTE];
+
+ *otherAllow = WinMasks[WIN_ALL];
+ if ((unixMask & UX_O_READ) == UX_O_READ)
+ *otherAllow |= WinMasks[WIN_READ];
+
+ if ((unixMask & UX_O_WRITE) == UX_O_WRITE)
+ *otherAllow |= WinMasks[WIN_WRITE];
+
+ if ((unixMask & UX_O_EXECUTE) == UX_O_EXECUTE)
+ *otherAllow |= WinMasks[WIN_EXECUTE];
+}
+
+//----------------------------------------------------------------------------
+// Function: GetWindowsDACLs
+//
+// Description:
+// Get the Windows DACLs based on the Unix access mask
+//
+// Returns:
+// ERROR_SUCCESS: on success
+// Error code: otherwise
+//
+// Notes:
+// - Administrators and SYSTEM are always given full permission to the file,
+// unless Administrators or SYSTEM itself is the file owner and the user
+//    explicitly sets the permission to something else. For example, if file
+//    'foo' belongs to Administrators, 'chmod 000' on the file will not
+//    directly assign Administrators full permission on the file.
+//  - Only the full permissions for Administrators and SYSTEM are inheritable.
+// - CREATOR OWNER is always given full permission and the permission is
+// inheritable, more specifically OBJECT_INHERIT_ACE, CONTAINER_INHERIT_ACE
+// flags are set. The reason is to give the creator of child file full
+// permission, i.e., the child file will have permission mode 700 for
+// a user other than Administrator or SYSTEM.
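+//
+//  - For example, for a file owned by a regular user with Unix mode 0644, the
+//    DACL built here contains, in order: inheritable full-control allow ACEs
+//    for CREATOR OWNER, SYSTEM and Administrators, an allow ACE granting the
+//    owner read and write, an allow ACE granting the group read, and an allow
+//    ACE granting Everyone read.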
+//
+static DWORD GetWindowsDACLs(__in INT unixMask,
+ __in PSID pOwnerSid, __in PSID pGroupSid, __out PACL *ppNewDACL)
+{
+ DWORD winUserAccessDenyMask;
+ DWORD winUserAccessAllowMask;
+ DWORD winGroupAccessDenyMask;
+ DWORD winGroupAccessAllowMask;
+ DWORD winOtherAccessAllowMask;
+
+ PSID pEveryoneSid = NULL;
+ DWORD cbEveryoneSidSize = SECURITY_MAX_SID_SIZE;
+
+ PSID pSystemSid = NULL;
+ DWORD cbSystemSidSize = SECURITY_MAX_SID_SIZE;
+ BOOL bAddSystemAcls = FALSE;
+
+ PSID pAdministratorsSid = NULL;
+ DWORD cbAdministratorsSidSize = SECURITY_MAX_SID_SIZE;
+ BOOL bAddAdministratorsAcls = FALSE;
+
+ PSID pCreatorOwnerSid = NULL;
+ DWORD cbCreatorOwnerSidSize = SECURITY_MAX_SID_SIZE;
+
+ PACL pNewDACL = NULL;
+ DWORD dwNewAclSize = 0;
+
+ DWORD ret = ERROR_SUCCESS;
+
+ GetWindowsAccessMask(unixMask,
+ &winUserAccessAllowMask, &winUserAccessDenyMask,
+ &winGroupAccessAllowMask, &winGroupAccessDenyMask,
+ &winOtherAccessAllowMask);
+
+ // Create a well-known SID for the Everyone group
+ //
+ if ((pEveryoneSid = LocalAlloc(LPTR, cbEveryoneSidSize)) == NULL)
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!CreateWellKnownSid(WinWorldSid, NULL, pEveryoneSid, &cbEveryoneSidSize))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+
+ // Create a well-known SID for the Administrators group
+ //
+ if ((pAdministratorsSid = LocalAlloc(LPTR, cbAdministratorsSidSize)) == NULL)
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!CreateWellKnownSid(WinBuiltinAdministratorsSid, NULL,
+ pAdministratorsSid, &cbAdministratorsSidSize))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!EqualSid(pAdministratorsSid, pOwnerSid)
+ && !EqualSid(pAdministratorsSid, pGroupSid))
+ bAddAdministratorsAcls = TRUE;
+
+ // Create a well-known SID for the SYSTEM
+ //
+ if ((pSystemSid = LocalAlloc(LPTR, cbSystemSidSize)) == NULL)
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!CreateWellKnownSid(WinLocalSystemSid, NULL,
+ pSystemSid, &cbSystemSidSize))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!EqualSid(pSystemSid, pOwnerSid)
+ && !EqualSid(pSystemSid, pGroupSid))
+ bAddSystemAcls = TRUE;
+
+ // Create a well-known SID for the Creator Owner
+ //
+ if ((pCreatorOwnerSid = LocalAlloc(LPTR, cbCreatorOwnerSidSize)) == NULL)
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!CreateWellKnownSid(WinCreatorOwnerSid, NULL,
+ pCreatorOwnerSid, &cbCreatorOwnerSidSize))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+
+ // Create the new DACL
+ //
+ dwNewAclSize = sizeof(ACL);
+ dwNewAclSize += sizeof(ACCESS_ALLOWED_ACE) +
+ GetLengthSid(pOwnerSid) - sizeof(DWORD);
+ if (winUserAccessDenyMask)
+ dwNewAclSize += sizeof(ACCESS_DENIED_ACE) +
+ GetLengthSid(pOwnerSid) - sizeof(DWORD);
+ dwNewAclSize += sizeof(ACCESS_ALLOWED_ACE) +
+ GetLengthSid(pGroupSid) - sizeof(DWORD);
+ if (winGroupAccessDenyMask)
+ dwNewAclSize += sizeof(ACCESS_DENIED_ACE) +
+ GetLengthSid(pGroupSid) - sizeof(DWORD);
+ dwNewAclSize += sizeof(ACCESS_ALLOWED_ACE) +
+ GetLengthSid(pEveryoneSid) - sizeof(DWORD);
+
+ if (bAddSystemAcls)
+ {
+ dwNewAclSize += sizeof(ACCESS_ALLOWED_ACE) +
+ cbSystemSidSize - sizeof(DWORD);
+ }
+
+ if (bAddAdministratorsAcls)
+ {
+ dwNewAclSize += sizeof(ACCESS_ALLOWED_ACE) +
+ cbAdministratorsSidSize - sizeof(DWORD);
+ }
+
+ dwNewAclSize += sizeof(ACCESS_ALLOWED_ACE) +
+ cbCreatorOwnerSidSize - sizeof(DWORD);
+
+ pNewDACL = (PACL)LocalAlloc(LPTR, dwNewAclSize);
+ if (pNewDACL == NULL)
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!InitializeAcl(pNewDACL, dwNewAclSize, ACL_REVISION))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+
+ if (!AddAccessAllowedAceEx(pNewDACL, ACL_REVISION,
+ CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE,
+ GENERIC_ALL, pCreatorOwnerSid))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+
+ if (bAddSystemAcls &&
+ !AddAccessAllowedAceEx(pNewDACL, ACL_REVISION,
+ CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE,
+ GENERIC_ALL, pSystemSid))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+
+ if (bAddAdministratorsAcls &&
+ !AddAccessAllowedAceEx(pNewDACL, ACL_REVISION,
+ CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE,
+ GENERIC_ALL, pAdministratorsSid))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+
+ if (winUserAccessDenyMask &&
+ !AddAccessDeniedAceEx(pNewDACL, ACL_REVISION,
+ NO_PROPAGATE_INHERIT_ACE,
+ winUserAccessDenyMask, pOwnerSid))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!AddAccessAllowedAceEx(pNewDACL, ACL_REVISION,
+ NO_PROPAGATE_INHERIT_ACE,
+ winUserAccessAllowMask, pOwnerSid))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (winGroupAccessDenyMask &&
+ !AddAccessDeniedAceEx(pNewDACL, ACL_REVISION,
+ NO_PROPAGATE_INHERIT_ACE,
+ winGroupAccessDenyMask, pGroupSid))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!AddAccessAllowedAceEx(pNewDACL, ACL_REVISION,
+ NO_PROPAGATE_INHERIT_ACE,
+ winGroupAccessAllowMask, pGroupSid))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+ if (!AddAccessAllowedAceEx(pNewDACL, ACL_REVISION,
+ NO_PROPAGATE_INHERIT_ACE,
+ winOtherAccessAllowMask, pEveryoneSid))
+ {
+ ret = GetLastError();
+ goto GetWindowsDACLsEnd;
+ }
+
+ *ppNewDACL = pNewDACL;
+
+GetWindowsDACLsEnd:
+ LocalFree(pEveryoneSid);
+ LocalFree(pAdministratorsSid);
+ LocalFree(pSystemSid);
+ LocalFree(pCreatorOwnerSid);
+ if (ret != ERROR_SUCCESS) LocalFree(pNewDACL);
+
+ return ret;
+}
+
+//----------------------------------------------------------------------------
+// Function: ChangeFileModeByMask
+//
+// Description:
+// Change the mode of a file or directory at the given path to the Unix mode
+//
+// Returns:
+// ERROR_SUCCESS: on success
+// Error code: otherwise
+//
+// Notes:
+// This function is long path safe, i.e. the path will be converted to the
+// long path format if not already converted. So the caller does not need to
+// do the conversion before calling the method.
+//
+DWORD ChangeFileModeByMask(__in LPCWSTR path, INT mode)
+{
+ LPWSTR longPathName = NULL;
+ PACL pNewDACL = NULL;
+ PSID pOwnerSid = NULL;
+ PSID pGroupSid = NULL;
+ PSECURITY_DESCRIPTOR pSD = NULL;
+
+ SECURITY_DESCRIPTOR_CONTROL control;
+ DWORD revision = 0;
+
+ PSECURITY_DESCRIPTOR pAbsSD = NULL;
+ PACL pAbsDacl = NULL;
+ PACL pAbsSacl = NULL;
+ PSID pAbsOwner = NULL;
+ PSID pAbsGroup = NULL;
+
+ DWORD dwRtnCode = 0;
+ DWORD dwErrorCode = 0;
+
+ DWORD ret = ERROR_SUCCESS;
+
+ dwRtnCode = ConvertToLongPath(path, &longPathName);
+ if (dwRtnCode != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto ChangeFileModeByMaskEnd;
+ }
+
+ // Get owner and group Sids
+ //
+ dwRtnCode = GetNamedSecurityInfoW(
+ longPathName,
+ SE_FILE_OBJECT,
+ OWNER_SECURITY_INFORMATION | GROUP_SECURITY_INFORMATION,
+ &pOwnerSid,
+ &pGroupSid,
+ NULL,
+ NULL,
+ &pSD);
+ if (ERROR_SUCCESS != dwRtnCode)
+ {
+ ret = dwRtnCode;
+ goto ChangeFileModeByMaskEnd;
+ }
+
+ // SetSecurityDescriptorDacl function used below only accepts security
+ // descriptor in absolute format, meaning that its members must be pointers to
+ // other structures, rather than offsets to contiguous data.
+ // To determine whether a security descriptor is self-relative or absolute,
+ // call the GetSecurityDescriptorControl function and check the
+ // SE_SELF_RELATIVE flag of the SECURITY_DESCRIPTOR_CONTROL parameter.
+ //
+ if (!GetSecurityDescriptorControl(pSD, &control, &revision))
+ {
+ ret = GetLastError();
+ goto ChangeFileModeByMaskEnd;
+ }
+
+ // If the security descriptor is self-relative, we use MakeAbsoluteSD function
+ // to convert it to absolute format.
+ //
+ if ((control & SE_SELF_RELATIVE) == SE_SELF_RELATIVE)
+ {
+ DWORD absSDSize = 0;
+ DWORD daclSize = 0;
+ DWORD saclSize = 0;
+ DWORD ownerSize = 0;
+ DWORD primaryGroupSize = 0;
+ MakeAbsoluteSD(pSD, NULL, &absSDSize, NULL, &daclSize, NULL,
+ &saclSize, NULL, &ownerSize, NULL, &primaryGroupSize);
+ if ((dwErrorCode = GetLastError()) != ERROR_INSUFFICIENT_BUFFER)
+ {
+ ret = dwErrorCode;
+ goto ChangeFileModeByMaskEnd;
+ }
+
+ if ((pAbsSD = (PSECURITY_DESCRIPTOR) LocalAlloc(LPTR, absSDSize)) == NULL)
+ {
+ ret = GetLastError();
+ goto ChangeFileModeByMaskEnd;
+ }
+ if ((pAbsDacl = (PACL) LocalAlloc(LPTR, daclSize)) == NULL)
+ {
+ ret = GetLastError();
+ goto ChangeFileModeByMaskEnd;
+ }
+ if ((pAbsSacl = (PACL) LocalAlloc(LPTR, saclSize)) == NULL)
+ {
+ ret = GetLastError();
+ goto ChangeFileModeByMaskEnd;
+ }
+ if ((pAbsOwner = (PSID) LocalAlloc(LPTR, ownerSize)) == NULL)
+ {
+ ret = GetLastError();
+ goto ChangeFileModeByMaskEnd;
+ }
+ if ((pAbsGroup = (PSID) LocalAlloc(LPTR, primaryGroupSize)) == NULL)
+ {
+ ret = GetLastError();
+ goto ChangeFileModeByMaskEnd;
+ }
+
+ if (!MakeAbsoluteSD(pSD, pAbsSD, &absSDSize, pAbsDacl, &daclSize, pAbsSacl,
+ &saclSize, pAbsOwner, &ownerSize, pAbsGroup, &primaryGroupSize))
+ {
+ ret = GetLastError();
+ goto ChangeFileModeByMaskEnd;
+ }
+ }
+
+ // Get Windows DACLs based on Unix access mask
+ //
+ if ((dwRtnCode = GetWindowsDACLs(mode, pOwnerSid, pGroupSid, &pNewDACL))
+ != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto ChangeFileModeByMaskEnd;
+ }
+
+ // Set the DACL information in the security descriptor; if a DACL is already
+ // present in the security descriptor, the DACL is replaced. The security
+ // descriptor is then used to set the security of a file or directory.
+ //
+ if (!SetSecurityDescriptorDacl(pAbsSD, TRUE, pNewDACL, FALSE))
+ {
+ ret = GetLastError();
+ goto ChangeFileModeByMaskEnd;
+ }
+
+ // MSDN states "This function is obsolete. Use the SetNamedSecurityInfo
+ // function instead." However we have the following problem when using
+ // SetNamedSecurityInfo:
+ // - When PROTECTED_DACL_SECURITY_INFORMATION is not passed in as part of
+ // security information, the object will include inheritable permissions
+ // from its parent.
+  //  - When PROTECTED_DACL_SECURITY_INFORMATION is passed in to set
+  //    permissions on a directory, the child objects of the directory will lose
+  //    inheritable permissions from their parent (the current directory).
+  //  By using SetFileSecurity, we have the nice property that the new
+  //  permissions of the object do not include the inheritable permissions from
+  //  its parent, and the child objects will not lose their inherited permissions
+ // from the current object.
+ //
+ if (!SetFileSecurity(longPathName, DACL_SECURITY_INFORMATION, pAbsSD))
+ {
+ ret = GetLastError();
+ goto ChangeFileModeByMaskEnd;
+ }
+
+ChangeFileModeByMaskEnd:
+ LocalFree(longPathName);
+ LocalFree(pSD);
+ LocalFree(pNewDACL);
+ LocalFree(pAbsDacl);
+ LocalFree(pAbsSacl);
+ LocalFree(pAbsOwner);
+ LocalFree(pAbsGroup);
+ LocalFree(pAbsSD);
+
+ return ret;
+}
+
+//----------------------------------------------------------------------------
+// Function: GetAccntNameFromSid
+//
+// Description:
+// To retrieve an account name given the SID
+//
+// Returns:
+// ERROR_SUCCESS: on success
+// Other error code: otherwise
+//
+// Notes:
+//  The caller needs to release the memory of the account name by calling LocalFree()
+//
+DWORD GetAccntNameFromSid(PSID pSid, LPWSTR *ppAcctName)
+{
+ LPWSTR lpName = NULL;
+ DWORD cchName = 0;
+ LPWSTR lpDomainName = NULL;
+ DWORD cchDomainName = 0;
+ SID_NAME_USE eUse = SidTypeUnknown;
+ DWORD cchAcctName = 0;
+ DWORD dwErrorCode = ERROR_SUCCESS;
+ HRESULT hr = S_OK;
+
+ DWORD ret = ERROR_SUCCESS;
+
+ assert(ppAcctName != NULL);
+
+ // NOTE:
+  // MSDN says the returned length for the buffer size includes the terminating
+  // null character. However, we found during debugging that this is not always true.
+ //
+ LookupAccountSid(NULL, pSid, NULL, &cchName, NULL, &cchDomainName, &eUse);
+ if ((dwErrorCode = GetLastError()) != ERROR_INSUFFICIENT_BUFFER)
+ return dwErrorCode;
+ lpName = (LPWSTR) LocalAlloc(LPTR, (cchName + 1) * sizeof(WCHAR));
+ if (lpName == NULL)
+ {
+ ret = GetLastError();
+ goto GetAccntNameFromSidEnd;
+ }
+ lpDomainName = (LPWSTR) LocalAlloc(LPTR, (cchDomainName + 1) * sizeof(WCHAR));
+ if (lpDomainName == NULL)
+ {
+ ret = GetLastError();
+ goto GetAccntNameFromSidEnd;
+ }
+
+ if (!LookupAccountSid(NULL, pSid,
+ lpName, &cchName, lpDomainName, &cchDomainName, &eUse))
+ {
+ ret = GetLastError();
+ goto GetAccntNameFromSidEnd;
+ }
+
+ // Buffer size = name length + 1 for '\' + domain length + 1 for NULL
+ cchAcctName = cchName + cchDomainName + 2;
+ *ppAcctName = (LPWSTR) LocalAlloc(LPTR, cchAcctName * sizeof(WCHAR));
+ if (*ppAcctName == NULL)
+ {
+ ret = GetLastError();
+ goto GetAccntNameFromSidEnd;
+ }
+
+ hr = StringCchCopyW(*ppAcctName, cchAcctName, lpDomainName);
+ if (FAILED(hr))
+ {
+ ret = HRESULT_CODE(hr);
+ goto GetAccntNameFromSidEnd;
+ }
+
+ hr = StringCchCatW(*ppAcctName, cchAcctName, L"\\");
+ if (FAILED(hr))
+ {
+ ret = HRESULT_CODE(hr);
+ goto GetAccntNameFromSidEnd;
+ }
+
+ hr = StringCchCatW(*ppAcctName, cchAcctName, lpName);
+ if (FAILED(hr))
+ {
+ ret = HRESULT_CODE(hr);
+ goto GetAccntNameFromSidEnd;
+ }
+
+GetAccntNameFromSidEnd:
+ LocalFree(lpName);
+ LocalFree(lpDomainName);
+ if (ret != ERROR_SUCCESS)
+ {
+ LocalFree(*ppAcctName);
+ *ppAcctName = NULL;
+ }
+ return ret;
+}
+
+//----------------------------------------------------------------------------
+// Function: GetLocalGroupsForUser
+//
+// Description:
+// Get an array of groups for the given user.
+//
+// Returns:
+// ERROR_SUCCESS on success
+// Other error code on failure
+//
+// Notes:
+// - The NetUserGetLocalGroups() function only accepts a full user name in the
+//   format [domain name]\[user name]. The input to this function may be only the
+//   user name. In this case, NetUserGetLocalGroups() will fail on the first try,
+//   and we then try to find the full user name using the LookupAccountNameW()
+//   method and call NetUserGetLocalGroups() again with the full user name.
+//   However, it is not always possible to find the full user name given only the
+//   user name. For example, a computer named 'win1' joined to domain 'redmond' can
+//   have two different users, 'win1\alex' and 'redmond\alex'. Given only 'alex',
+//   we cannot tell which one is correct.
+//
+// - The caller needs to release the memory of the groups buffer by using the
+//   NetApiBufferFree() function
+//
+DWORD GetLocalGroupsForUser(
+ __in LPCWSTR user,
+ __out LPLOCALGROUP_USERS_INFO_0 *groups,
+ __out LPDWORD entries)
+{
+ DWORD dwEntriesRead = 0;
+ DWORD dwTotalEntries = 0;
+ NET_API_STATUS nStatus = NERR_Success;
+
+ PSID pUserSid = NULL;
+ LPWSTR fullName = NULL;
+
+ DWORD dwRtnCode = ERROR_SUCCESS;
+
+ DWORD ret = ERROR_SUCCESS;
+
+ *groups = NULL;
+ *entries = 0;
+
+ nStatus = NetUserGetLocalGroups(NULL,
+ user,
+ 0,
+ 0,
+ (LPBYTE *) groups,
+ MAX_PREFERRED_LENGTH,
+ &dwEntriesRead,
+ &dwTotalEntries);
+
+ if (nStatus == NERR_Success)
+ {
+ *entries = dwEntriesRead;
+ return ERROR_SUCCESS;
+ }
+ else if (nStatus != NERR_UserNotFound)
+ {
+ return nStatus;
+ }
+
+ if ((dwRtnCode = GetSidFromAcctNameW(user, &pUserSid)) != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto GetLocalGroupsForUserEnd;
+ }
+
+ if ((dwRtnCode = GetAccntNameFromSid(pUserSid, &fullName)) != ERROR_SUCCESS)
+ {
+ ret = dwRtnCode;
+ goto GetLocalGroupsForUserEnd;
+ }
+
+ nStatus = NetUserGetLocalGroups(NULL,
+ fullName,
+ 0,
+ 0,
+ (LPBYTE *) groups,
+ MAX_PREFERRED_LENGTH,
+ &dwEntriesRead,
+ &dwTotalEntries);
+ if (nStatus != NERR_Success)
+ {
+ // NERR_DCNotFound (2453) and NERR_UserNotFound (2221) are not published
+ // Windows System Error Code. All other error codes returned by
+ // NetUserGetLocalGroups() are valid System Error Codes according to MSDN.
+ ret = nStatus;
+ goto GetLocalGroupsForUserEnd;
+ }
+
+ *entries = dwEntriesRead;
+
+GetLocalGroupsForUserEnd:
+ LocalFree(pUserSid);
+ LocalFree(fullName);
+ return ret;
+}
+
+//----------------------------------------------------------------------------
+// Function: EnablePrivilege
+//
+// Description:
+// Check if the process has the given privilege. If yes, enable the privilege
+// to the process's access token.
+//
+// Returns:
+// TRUE: on success
+//
+// Notes:
+//
+BOOL EnablePrivilege(__in LPCWSTR privilegeName)
+{
+ HANDLE hToken = INVALID_HANDLE_VALUE;
+ TOKEN_PRIVILEGES tp = { 0 };
+ DWORD dwErrCode = ERROR_SUCCESS;
+
+ if (!OpenProcessToken(GetCurrentProcess(),
+ TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken))
+ {
+ ReportErrorCode(L"OpenProcessToken", GetLastError());
+ return FALSE;
+ }
+
+ tp.PrivilegeCount = 1;
+ if (!LookupPrivilegeValueW(NULL,
+ privilegeName, &(tp.Privileges[0].Luid)))
+ {
+ ReportErrorCode(L"LookupPrivilegeValue", GetLastError());
+ CloseHandle(hToken);
+ return FALSE;
+ }
+ tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+
+ // As stated on MSDN, we need to use GetLastError() to check if
+ // AdjustTokenPrivileges() adjusted all of the specified privileges.
+ //
+ AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL);
+ dwErrCode = GetLastError();
+ CloseHandle(hToken);
+
+ return dwErrCode == ERROR_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+// Function: ReportErrorCode
+//
+// Description:
+// Report an error. Use FormatMessage function to get the system error message.
+//
+// Returns:
+// None
+//
+// Notes:
+//
+//
+void ReportErrorCode(LPCWSTR func, DWORD err)
+{
+ DWORD len = 0;
+ LPWSTR msg = NULL;
+
+ assert(func != NULL);
+
+ len = FormatMessageW(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, err,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPWSTR)&msg, 0, NULL);
+ if (len > 0)
+ {
+ fwprintf(stderr, L"%s error (%d): %s\n", func, err, msg);
+ }
+ else
+ {
+ fwprintf(stderr, L"%s error code: %d.\n", func, err);
+ }
+ if (msg != NULL) LocalFree(msg);
+}
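
As a minimal sketch of the calling convention documented in the notes above (the
caller owns the returned buffer and must release it with NetApiBufferFree(), not
LocalFree()), a hypothetical consumer of GetLocalGroupsForUser() could look like
the following. The PrintGroupsForUser helper is illustrative only and assumes that
winutils.h, as in this patch, declares GetLocalGroupsForUser and ReportErrorCode
and pulls in the headers for the Net* APIs.

    // Illustrative sketch, not part of the patch: print the local groups of a user.
    // Assumes winutils.h provides LOCALGROUP_USERS_INFO_0, NetApiBufferFree,
    // GetLocalGroupsForUser, and ReportErrorCode as used elsewhere in this file.
    static DWORD PrintGroupsForUser(__in LPCWSTR user)
    {
      LPLOCALGROUP_USERS_INFO_0 groups = NULL;
      DWORD entries = 0;
      DWORD i = 0;
      DWORD dwRtnCode = GetLocalGroupsForUser(user, &groups, &entries);

      if (dwRtnCode != ERROR_SUCCESS)
      {
        ReportErrorCode(L"GetLocalGroupsForUser", dwRtnCode);
        return dwRtnCode;
      }

      for (i = 0; i < entries; i++)
      {
        fwprintf(stdout, L"%s\n", groups[i].lgrui0_name);
      }

      // The groups buffer comes from NetUserGetLocalGroups(), so it must be freed
      // with NetApiBufferFree() and not LocalFree().
      if (groups != NULL) NetApiBufferFree(groups);
      return ERROR_SUCCESS;
    }
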
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.vcxproj b/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.vcxproj
new file mode 100644
index 0000000000..fc0519dff7
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.vcxproj
@@ -0,0 +1,171 @@
+
+
+
+
+
+
+
+ Debug
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ Win32
+
+
+ Release
+ x64
+
+
+
+ {12131AA7-902E-4a6d-9CE3-043261D22A12}
+ Win32Proj
+ winutils
+
+
+
+ StaticLibrary
+ true
+ Unicode
+
+
+ StaticLibrary
+ true
+ Unicode
+
+
+ StaticLibrary
+ false
+ true
+ Unicode
+
+
+ StaticLibrary
+ false
+ true
+ Unicode
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ include;$(IncludePath)
+
+
+ true
+
+
+ true
+
+ ..\..\..\target\winutils\$(Configuration)\
+
+
+ false
+
+
+ false
+ ..\..\..\target\bin\
+ ..\..\..\target\winutils\$(Platform)\$(Configuration)\
+
+
+
+
+
+ Level3
+ Disabled
+ WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
+
+
+ Console
+ true
+
+
+
+
+
+
+ Level4
+ Disabled
+ WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
+
+
+ Console
+ true
+
+
+
+
+ Level3
+
+
+ MaxSpeed
+ true
+ true
+ WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
+
+
+ Console
+ true
+ true
+ true
+
+
+
+
+ Level3
+
+
+ MaxSpeed
+ true
+ true
+ WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
+
+
+ Console
+ true
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/ls.c b/hadoop-common-project/hadoop-common/src/main/winutils/ls.c
new file mode 100644
index 0000000000..8c9892d48a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/ls.c
@@ -0,0 +1,346 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+#include "winutils.h"
+
+//----------------------------------------------------------------------------
+// Function: GetMaskString
+//
+// Description:
+//  Get the mask string that is used for output to the console.
+//
+// Returns:
+// TRUE: on success
+//
+// Notes:
+// The function only sets the existed permission in the mask string. If the
+//  The function only sets the permissions that exist in the mask string. If a
+//  permission does not exist, the corresponding character in the mask string is
+//  not altered. The caller needs to initialize the mask string to all '-' to get
+//
+static BOOL GetMaskString(INT accessMask, LPWSTR maskString)
+{
+ if(wcslen(maskString) != 10)
+ return FALSE;
+
+ if ((accessMask & UX_DIRECTORY) == UX_DIRECTORY)
+ maskString[0] = L'd';
+ else if ((accessMask & UX_SYMLINK) == UX_SYMLINK)
+ maskString[0] = L'l';
+
+ if ((accessMask & UX_U_READ) == UX_U_READ)
+ maskString[1] = L'r';
+ if ((accessMask & UX_U_WRITE) == UX_U_WRITE)
+ maskString[2] = L'w';
+ if ((accessMask & UX_U_EXECUTE) == UX_U_EXECUTE)
+ maskString[3] = L'x';
+
+ if ((accessMask & UX_G_READ) == UX_G_READ)
+ maskString[4] = L'r';
+ if ((accessMask & UX_G_WRITE) == UX_G_WRITE)
+ maskString[5] = L'w';
+ if ((accessMask & UX_G_EXECUTE) == UX_G_EXECUTE)
+ maskString[6] = L'x';
+
+ if ((accessMask & UX_O_READ) == UX_O_READ)
+ maskString[7] = L'r';
+ if ((accessMask & UX_O_WRITE) == UX_O_WRITE)
+ maskString[8] = L'w';
+ if ((accessMask & UX_O_EXECUTE) == UX_O_EXECUTE)
+ maskString[9] = L'x';
+
+ return TRUE;
+}
+
+//----------------------------------------------------------------------------
+// Function: LsPrintLine
+//
+// Description:
+// Print one line of 'ls' command given all the information needed
+//
+// Returns:
+// None
+//
+// Notes:
+//  If useSeparator is false, the output tokens are separated with a space
+//  character; otherwise, with a pipe character.
+//
+static BOOL LsPrintLine(
+ const INT mask,
+ const DWORD hardlinkCount,
+ LPCWSTR ownerName,
+ LPCWSTR groupName,
+ const FILETIME *lpFileWritetime,
+ const LARGE_INTEGER fileSize,
+ LPCWSTR path,
+ BOOL useSeparator)
+{
+ // 'd' + 'rwx' for user, group, other
+ static const size_t ck_ullMaskLen = 1 + 3 * 3;
+
+ LPWSTR maskString = NULL;
+ SYSTEMTIME stFileWriteTime;
+ BOOL ret = FALSE;
+
+ maskString = (LPWSTR)LocalAlloc(LPTR, (ck_ullMaskLen+1)*sizeof(WCHAR));
+ if (maskString == NULL)
+ {
+ ReportErrorCode(L"LocalAlloc", GetLastError());
+ return FALSE;
+ }
+
+ // Build mask string from mask mode
+ if (FAILED(StringCchCopyW(maskString, (ck_ullMaskLen+1), L"----------")))
+ {
+ goto LsPrintLineEnd;
+ }
+
+ if (!GetMaskString(mask, maskString))
+ {
+ goto LsPrintLineEnd;
+ }
+
+ // Convert file time to system time
+ if (!FileTimeToSystemTime(lpFileWritetime, &stFileWriteTime))
+ {
+ goto LsPrintLineEnd;
+ }
+
+ if (useSeparator)
+ {
+ fwprintf(stdout, L"%10s|%d|%s|%s|%lld|%3s|%2d|%4d|%s\n",
+ maskString, hardlinkCount, ownerName, groupName, fileSize.QuadPart,
+ MONTHS[stFileWriteTime.wMonth-1], stFileWriteTime.wDay,
+ stFileWriteTime.wYear, path);
+ }
+ else
+ {
+ fwprintf(stdout, L"%10s %d %s %s %lld %3s %2d %4d %s\n",
+ maskString, hardlinkCount, ownerName, groupName, fileSize.QuadPart,
+ MONTHS[stFileWriteTime.wMonth-1], stFileWriteTime.wDay,
+ stFileWriteTime.wYear, path);
+ }
+
+ ret = TRUE;
+
+LsPrintLineEnd:
+ LocalFree(maskString);
+
+ return ret;
+}
+
+// List of command line options supported by "winutils ls"
+enum CmdLineOption
+{
+ CmdLineOptionFollowSymlink = 0x1, // "-L"
+ CmdLineOptionSeparator = 0x2 // "-F"
+ // options should be powers of 2 (aka next is 0x4)
+};
+
+static wchar_t* CurrentDir = L".";
+
+//----------------------------------------------------------------------------
+// Function: ParseCommandLine
+//
+// Description:
+// Parses the command line
+//
+// Returns:
+// TRUE on the valid command line, FALSE otherwise
+//
+BOOL ParseCommandLine(
+ int argc, wchar_t *argv[], wchar_t** path, int *optionsMask)
+{
+ int MaxOptions = 2; // Should be equal to the number of elems in CmdLineOption
+ int i = 0;
+
+ assert(optionsMask != NULL);
+ assert(argv != NULL);
+ assert(path != NULL);
+
+ *optionsMask = 0;
+
+ if (argc == 1)
+ {
+ // no path specified, assume "."
+ *path = CurrentDir;
+ return TRUE;
+ }
+
+ if (argc == 2)
+ {
+ // only path specified, no other options
+ *path = argv[1];
+ return TRUE;
+ }
+
+ if (argc > 2 + MaxOptions)
+ {
+ // too many parameters
+ return FALSE;
+ }
+
+ for (i = 1; i < argc - 1; ++i)
+ {
+ if (wcscmp(argv[i], L"-L") == 0)
+ {
+ // Check if this option was already specified
+ BOOL alreadySet = *optionsMask & CmdLineOptionFollowSymlink;
+ if (alreadySet)
+ return FALSE;
+
+ *optionsMask |= CmdLineOptionFollowSymlink;
+ }
+ else if (wcscmp(argv[i], L"-F") == 0)
+ {
+ // Check if this option was already specified
+ BOOL alreadySet = *optionsMask & CmdLineOptionSeparator;
+ if (alreadySet)
+ return FALSE;
+
+ *optionsMask |= CmdLineOptionSeparator;
+ }
+ else
+ {
+ return FALSE;
+ }
+ }
+
+ *path = argv[argc - 1];
+
+ return TRUE;
+}
+
+//----------------------------------------------------------------------------
+// Function: Ls
+//
+// Description:
+// The main method for ls command
+//
+// Returns:
+// 0: on success
+//
+// Notes:
+//
+int Ls(int argc, wchar_t *argv[])
+{
+ LPWSTR pathName = NULL;
+ LPWSTR longPathName = NULL;
+
+ BY_HANDLE_FILE_INFORMATION fileInformation;
+
+ LPWSTR ownerName = NULL;
+ LPWSTR groupName = NULL;
+ INT unixAccessMode = 0;
+ DWORD dwErrorCode = ERROR_SUCCESS;
+
+ LARGE_INTEGER fileSize;
+
+ BOOL isSymlink = FALSE;
+
+ int ret = EXIT_FAILURE;
+ int optionsMask = 0;
+
+ if (!ParseCommandLine(argc, argv, &pathName, &optionsMask))
+ {
+ fwprintf(stderr, L"Incorrect command line arguments.\n\n");
+ LsUsage(argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ assert(pathName != NULL);
+
+ if (wcsspn(pathName, L"/?|><:*\"") != 0)
+ {
+ fwprintf(stderr, L"Incorrect file name format: %s\n", pathName);
+ return EXIT_FAILURE;
+ }
+
+  // Convert the path to the long path format
+ //
+ dwErrorCode = ConvertToLongPath(pathName, &longPathName);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"ConvertToLongPath", dwErrorCode);
+ goto LsEnd;
+ }
+
+ dwErrorCode = GetFileInformationByName(
+ longPathName, optionsMask & CmdLineOptionFollowSymlink, &fileInformation);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"GetFileInformationByName", dwErrorCode);
+ goto LsEnd;
+ }
+
+ dwErrorCode = SymbolicLinkCheck(longPathName, &isSymlink);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"IsSymbolicLink", dwErrorCode);
+ goto LsEnd;
+ }
+
+ if (isSymlink)
+ unixAccessMode |= UX_SYMLINK;
+ else if (IsDirFileInfo(&fileInformation))
+ unixAccessMode |= UX_DIRECTORY;
+
+ dwErrorCode = FindFileOwnerAndPermission(longPathName,
+ &ownerName, &groupName, &unixAccessMode);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"FindFileOwnerAndPermission", dwErrorCode);
+ goto LsEnd;
+ }
+
+ fileSize.HighPart = fileInformation.nFileSizeHigh;
+ fileSize.LowPart = fileInformation.nFileSizeLow;
+
+ // Print output using the input path name (not the long one)
+ //
+ if (!LsPrintLine(unixAccessMode,
+ fileInformation.nNumberOfLinks,
+ ownerName, groupName,
+ &fileInformation.ftLastWriteTime,
+ fileSize,
+ pathName,
+ optionsMask & CmdLineOptionSeparator))
+ goto LsEnd;
+
+ ret = EXIT_SUCCESS;
+
+LsEnd:
+ LocalFree(ownerName);
+ LocalFree(groupName);
+ LocalFree(longPathName);
+
+ return ret;
+}
+
+void LsUsage(LPCWSTR program)
+{
+ fwprintf(stdout, L"\
+Usage: %s [OPTIONS] [FILE]\n\
+List information about the FILE (the current directory by default).\n\
+Uses the long listing format, lists directory entries instead of contents,\n\
+and does not dereference symbolic links by default.\n\
+Provides functionality equivalent or similar to 'ls -ld' on GNU/Linux.\n\
+\n\
+OPTIONS: -L dereference symbolic links\n\
+ -F format the output by separating tokens with a pipe\n",
+program);
+}
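
For reference, when -F is passed the output of LsPrintLine follows the
pipe-separated format string above: mask|hardlinks|owner|group|size|month|day|year|path.
A hypothetical invocation and its output might look like this (the path, account
names, size, and date are made up for illustration):

    > winutils ls -F D:\hdc\test.txt
    -rwx------|1|HOSTNAME\user|HOSTNAME\None|5120|Jan| 2|2013|D:\hdc\test.txt

Without -F the same fields are printed space-separated, in the spirit of 'ls -ld'
output on GNU/Linux.
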
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/main.c b/hadoop-common-project/hadoop-common/src/main/winutils/main.c
new file mode 100644
index 0000000000..8e5f695ca8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/main.c
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+#include "winutils.h"
+
+static void Usage(LPCWSTR program);
+
+int wmain(int argc, wchar_t* argv[])
+{
+ LPCWSTR cmd = NULL;
+
+ if (argc < 2)
+ {
+ Usage(argv[0]);
+ return EXIT_FAILURE;
+ }
+
+ cmd = argv[1];
+
+ if (wcscmp(L"ls", cmd) == 0)
+ {
+ return Ls(argc - 1, argv + 1);
+ }
+ else if (wcscmp(L"chmod", cmd) == 0)
+ {
+ return Chmod(argc - 1, argv + 1);
+ }
+ else if (wcscmp(L"chown", cmd) == 0)
+ {
+ return Chown(argc - 1, argv + 1);
+ }
+ else if (wcscmp(L"groups", cmd) == 0)
+ {
+ return Groups(argc - 1, argv + 1);
+ }
+ else if (wcscmp(L"hardlink", cmd) == 0)
+ {
+ return Hardlink(argc - 1, argv + 1);
+ }
+ else if (wcscmp(L"symlink", cmd) == 0)
+ {
+ return Symlink(argc - 1, argv + 1);
+ }
+ else if (wcscmp(L"task", cmd) == 0)
+ {
+ return Task(argc - 1, argv + 1);
+ }
+ else if (wcscmp(L"systeminfo", cmd) == 0)
+ {
+ return SystemInfo();
+ }
+ else if (wcscmp(L"help", cmd) == 0)
+ {
+ Usage(argv[0]);
+ return EXIT_SUCCESS;
+ }
+ else
+ {
+ Usage(argv[0]);
+ return EXIT_FAILURE;
+ }
+}
+
+static void Usage(LPCWSTR program)
+{
+ fwprintf(stdout, L"Usage: %s [command] ...\n\
+Provide basic command line utilities for Hadoop on Windows.\n\n\
+The available commands and their usages are:\n\n", program);
+
+ fwprintf(stdout, L"%-15s%s\n\n", L"chmod", L"Change file mode bits.");
+ ChmodUsage(L"chmod");
+ fwprintf(stdout, L"\n\n");
+
+ fwprintf(stdout, L"%-15s%s\n\n", L"chown", L"Change file owner.");
+ ChownUsage(L"chown");
+ fwprintf(stdout, L"\n\n");
+
+ fwprintf(stdout, L"%-15s%s\n\n", L"groups", L"List user groups.");
+ GroupsUsage(L"groups");
+ fwprintf(stdout, L"\n\n");
+
+ fwprintf(stdout, L"%-15s%s\n\n", L"hardlink", L"Hard link operations.");
+ HardlinkUsage();
+ fwprintf(stdout, L"\n\n");
+
+ fwprintf(stdout, L"%-15s%s\n\n", L"ls", L"List file information.");
+ LsUsage(L"ls");
+ fwprintf(stdout, L"\n\n");
+
+  fwprintf(stdout, L"%-15s%s\n\n", L"symlink", L"Create a symbolic link.");
+ SymlinkUsage();
+ fwprintf(stdout, L"\n\n");
+
+ fwprintf(stdout, L"%-15s%s\n\n", L"systeminfo", L"System information.");
+ SystemInfoUsage();
+ fwprintf(stdout, L"\n\n");
+
+ fwprintf(stdout, L"%-15s%s\n\n", L"task", L"Task operations.");
+ TaskUsage();
+ fwprintf(stdout, L"\n\n");
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/symlink.c b/hadoop-common-project/hadoop-common/src/main/winutils/symlink.c
new file mode 100644
index 0000000000..564459a454
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/symlink.c
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+#include "winutils.h"
+
+//----------------------------------------------------------------------------
+// Function: Symlink
+//
+// Description:
+// The main method for symlink command
+//
+// Returns:
+// 0: on success
+//
+// Notes:
+//
+int Symlink(int argc, wchar_t *argv[])
+{
+ PWSTR longLinkName = NULL;
+ PWSTR longFileName = NULL;
+ DWORD dwErrorCode = ERROR_SUCCESS;
+
+ BOOL isDir = FALSE;
+
+ DWORD dwRtnCode = ERROR_SUCCESS;
+ DWORD dwFlag = 0;
+
+ int ret = SUCCESS;
+
+ if (argc != 3)
+ {
+ SymlinkUsage();
+ return FAILURE;
+ }
+
+ dwErrorCode = ConvertToLongPath(argv[1], &longLinkName);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ret = FAILURE;
+ goto SymlinkEnd;
+ }
+ dwErrorCode = ConvertToLongPath(argv[2], &longFileName);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ret = FAILURE;
+ goto SymlinkEnd;
+ }
+
+  // Check if the process's access token has the privilege to create symbolic
+  // links. Without this step, the call to CreateSymbolicLink() from users who
+  // have the privilege to create symbolic links will still succeed.
+  // This is just an additional step that performs the privilege check without
+  // relying on the error code from the CreateSymbolicLink() method.
+ //
+ if (!EnablePrivilege(L"SeCreateSymbolicLinkPrivilege"))
+ {
+ fwprintf(stderr,
+ L"No privilege to create symbolic links.\n");
+ ret = SYMLINK_NO_PRIVILEGE;
+ goto SymlinkEnd;
+ }
+
+ if ((dwRtnCode = DirectoryCheck(longFileName, &isDir)) != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"DirectoryCheck", dwRtnCode);
+ ret = FAILURE;
+ goto SymlinkEnd;
+ }
+
+ if (isDir)
+ dwFlag = SYMBOLIC_LINK_FLAG_DIRECTORY;
+
+ if (!CreateSymbolicLinkW(longLinkName, longFileName, dwFlag))
+ {
+ ReportErrorCode(L"CreateSymbolicLink", GetLastError());
+ ret = FAILURE;
+ goto SymlinkEnd;
+ }
+
+SymlinkEnd:
+ LocalFree(longLinkName);
+ LocalFree(longFileName);
+ return ret;
+}
+
+void SymlinkUsage()
+{
+ fwprintf(stdout, L"\
+Usage: symlink [LINKNAME] [FILENAME]\n\
+Creates a symbolic link\n\
+\n\
+0 is returned on success.\n\
+2 is returned if the user does not have the privilege to create symbolic links.\n\
+1 is returned for all other errors.\n\
+\n\
+The default security settings in Windows disallow non-elevated administrators\n\
+and all non-administrators from creating symbolic links. The security settings\n\
+for symbolic links can be changed in the Local Security Policy management\n\
+console.\n");
+}
+
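
A hypothetical invocation from a console whose token holds the Create Symbolic
Links privilege (the paths are illustrative):

    > winutils symlink D:\hdc\link.txt D:\hdc\target.txt

The command prints nothing on success and exits with 0; it exits with 2 when the
privilege is missing and 1 for other errors, per the usage text above.
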
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/systeminfo.c b/hadoop-common-project/hadoop-common/src/main/winutils/systeminfo.c
new file mode 100644
index 0000000000..00c0f0b6e1
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/systeminfo.c
@@ -0,0 +1,120 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements. See the NOTICE file distributed with this
+* work for additional information regarding copyright ownership. The ASF
+* licenses this file to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations under
+* the License.
+*/
+
+#include "winutils.h"
+#include <psapi.h>
+#include <powrprof.h>
+
+#define PSAPI_VERSION 1
+#pragma comment(lib, "psapi.lib")
+#pragma comment(lib, "Powrprof.lib")
+
+typedef struct _PROCESSOR_POWER_INFORMATION {
+ ULONG Number;
+ ULONG MaxMhz;
+ ULONG CurrentMhz;
+ ULONG MhzLimit;
+ ULONG MaxIdleState;
+ ULONG CurrentIdleState;
+} PROCESSOR_POWER_INFORMATION, *PPROCESSOR_POWER_INFORMATION;
+
+//----------------------------------------------------------------------------
+// Function: SystemInfo
+//
+// Description:
+//  Prints the resource information about the machine to stdout
+//
+// Returns:
+// EXIT_SUCCESS: On success
+// EXIT_FAILURE: otherwise
+int SystemInfo()
+{
+ size_t vmemSize, vmemFree, memSize, memFree;
+ PERFORMANCE_INFORMATION memInfo;
+ SYSTEM_INFO sysInfo;
+ FILETIME idleTimeFt, kernelTimeFt, userTimeFt;
+ ULARGE_INTEGER idleTime, kernelTime, userTime;
+ ULONGLONG cpuTimeMs;
+ size_t size;
+ LPBYTE pBuffer;
+ PPROCESSOR_POWER_INFORMATION ppi;
+ long cpuFrequencyKhz;
+ NTSTATUS status;
+
+ ZeroMemory(&memInfo, sizeof(PERFORMANCE_INFORMATION));
+ memInfo.cb = sizeof(PERFORMANCE_INFORMATION);
+ if(!GetPerformanceInfo(&memInfo, sizeof(PERFORMANCE_INFORMATION)))
+ {
+ ReportErrorCode(L"GetPerformanceInfo", GetLastError());
+ return EXIT_FAILURE;
+ }
+ vmemSize = memInfo.CommitLimit*memInfo.PageSize;
+ vmemFree = vmemSize - memInfo.CommitTotal*memInfo.PageSize;
+ memSize = memInfo.PhysicalTotal*memInfo.PageSize;
+ memFree = memInfo.PhysicalAvailable*memInfo.PageSize;
+
+ GetSystemInfo(&sysInfo);
+
+ if(!GetSystemTimes(&idleTimeFt, &kernelTimeFt, &userTimeFt))
+ {
+ ReportErrorCode(L"GetSystemTimes", GetLastError());
+ return EXIT_FAILURE;
+ }
+ idleTime.HighPart = idleTimeFt.dwHighDateTime;
+ idleTime.LowPart = idleTimeFt.dwLowDateTime;
+ kernelTime.HighPart = kernelTimeFt.dwHighDateTime;
+ kernelTime.LowPart = kernelTimeFt.dwLowDateTime;
+ userTime.HighPart = userTimeFt.dwHighDateTime;
+ userTime.LowPart = userTimeFt.dwLowDateTime;
+
+ cpuTimeMs = (kernelTime.QuadPart - idleTime.QuadPart + userTime.QuadPart)/10000;
+
+ // allocate buffer to get info for each processor
+ size = sysInfo.dwNumberOfProcessors * sizeof(PROCESSOR_POWER_INFORMATION);
+ pBuffer = (BYTE*) LocalAlloc(LPTR, size);
+ if(!pBuffer)
+ {
+ ReportErrorCode(L"LocalAlloc", GetLastError());
+ return EXIT_FAILURE;
+ }
+ status = CallNtPowerInformation(ProcessorInformation, NULL, 0, pBuffer, (long)size);
+ if(0 != status)
+ {
+ fwprintf_s(stderr, L"Error in CallNtPowerInformation. Err:%d\n", status);
+ LocalFree(pBuffer);
+ return EXIT_FAILURE;
+ }
+ ppi = (PPROCESSOR_POWER_INFORMATION)pBuffer;
+ cpuFrequencyKhz = ppi->MaxMhz*1000;
+ LocalFree(pBuffer);
+
+ fwprintf_s(stdout, L"%Iu,%Iu,%Iu,%Iu,%Iu,%Iu,%Iu\n", vmemSize, memSize, vmemFree, memFree, sysInfo.dwNumberOfProcessors, cpuFrequencyKhz, cpuTimeMs);
+
+ return EXIT_SUCCESS;
+}
+
+void SystemInfoUsage()
+{
+ fwprintf(stdout, L"\
+ Usage: systeminfo\n\
+ Prints machine information on stdout\n\
+ Comma separated list of the following values.\n\
+ VirtualMemorySize(bytes),PhysicalMemorySize(bytes),\n\
+ FreeVirtualMemory(bytes),FreePhysicalMemory(bytes),\n\
+ NumberOfProcessors,CpuFrequency(Khz),\n\
+ CpuTime(MilliSec,Kernel+User)\n");
+}
\ No newline at end of file
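
The systeminfo output is a single comma-separated line in the field order listed
in the usage text above. A hypothetical run on a 4-processor machine might look
like this (all numbers are illustrative):

    > winutils systeminfo
    17175674880,8587837440,9822040064,4294967296,4,2400000,1234567
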
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/task.c b/hadoop-common-project/hadoop-common/src/main/winutils/task.c
new file mode 100644
index 0000000000..5a5345beae
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/task.c
@@ -0,0 +1,461 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements. See the NOTICE file distributed with this
+* work for additional information regarding copyright ownership. The ASF
+* licenses this file to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+* License for the specific language governing permissions and limitations under
+* the License.
+*/
+
+#include "winutils.h"
+#include <psapi.h>
+#include <tchar.h>
+
+#define PSAPI_VERSION 1
+#pragma comment(lib, "psapi.lib")
+
+#define ERROR_TASK_NOT_ALIVE 1
+
+// List of different task related command line options supported by
+// winutils.
+typedef enum TaskCommandOptionType
+{
+ TaskInvalid,
+ TaskCreate,
+ TaskIsAlive,
+ TaskKill,
+ TaskProcessList
+} TaskCommandOption;
+
+//----------------------------------------------------------------------------
+// Function: ParseCommandLine
+//
+// Description:
+// Parses the given command line. On success, out param 'command' contains
+// the user specified command.
+//
+// Returns:
+// TRUE: If the command line is valid
+// FALSE: otherwise
+static BOOL ParseCommandLine(__in int argc,
+ __in wchar_t *argv[],
+ __out TaskCommandOption *command)
+{
+ *command = TaskInvalid;
+
+ if (wcscmp(argv[0], L"task") != 0 )
+ {
+ return FALSE;
+ }
+
+ if (argc == 3) {
+ if (wcscmp(argv[1], L"isAlive") == 0)
+ {
+ *command = TaskIsAlive;
+ return TRUE;
+ }
+ if (wcscmp(argv[1], L"kill") == 0)
+ {
+ *command = TaskKill;
+ return TRUE;
+ }
+ if (wcscmp(argv[1], L"processList") == 0)
+ {
+ *command = TaskProcessList;
+ return TRUE;
+ }
+ }
+
+ if (argc == 4) {
+ if (wcscmp(argv[1], L"create") == 0)
+ {
+ *command = TaskCreate;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+//----------------------------------------------------------------------------
+// Function: createTask
+//
+// Description:
+// Creates a task via a jobobject. Outputs the
+// appropriate information to stdout on success, or stderr on failure.
+//
+// Returns:
+// ERROR_SUCCESS: On success
+// GetLastError: otherwise
+DWORD createTask(_TCHAR* jobObjName, _TCHAR* cmdLine)
+{
+ DWORD err = ERROR_SUCCESS;
+ DWORD exitCode = EXIT_FAILURE;
+ STARTUPINFO si;
+ PROCESS_INFORMATION pi;
+ HANDLE jobObject = NULL;
+ JOBOBJECT_EXTENDED_LIMIT_INFORMATION jeli = { 0 };
+
+  // Create an un-inheritable job object handle and set the job object to
+  // terminate when its last handle is closed, so the winutils.exe invocation has
+  // the only open job object handle. Exit of winutils.exe then ensures termination
+  // of the job object, whether by clean exit, crash, or external termination.
+ jobObject = CreateJobObject(NULL, jobObjName);
+ err = GetLastError();
+ if(jobObject == NULL || err == ERROR_ALREADY_EXISTS)
+ {
+ return err;
+ }
+ jeli.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
+ if(SetInformationJobObject(jobObject,
+ JobObjectExtendedLimitInformation,
+ &jeli,
+ sizeof(jeli)) == 0)
+ {
+ err = GetLastError();
+ CloseHandle(jobObject);
+ return err;
+ }
+
+ if(AssignProcessToJobObject(jobObject, GetCurrentProcess()) == 0)
+ {
+ err = GetLastError();
+ CloseHandle(jobObject);
+ return err;
+ }
+
+ // the child JVM uses this env var to send the task OS process identifier
+ // to the TaskTracker. We pass the job object name.
+ if(SetEnvironmentVariable(_T("JVM_PID"), jobObjName) == 0)
+ {
+ err = GetLastError();
+ CloseHandle(jobObject);
+ return err;
+ }
+
+ ZeroMemory( &si, sizeof(si) );
+ si.cb = sizeof(si);
+ ZeroMemory( &pi, sizeof(pi) );
+ if(CreateProcess(NULL, cmdLine, NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi) == 0)
+ {
+ err = GetLastError();
+ CloseHandle(jobObject);
+ return err;
+ }
+ CloseHandle(pi.hThread);
+
+ // Wait until child process exits.
+ WaitForSingleObject( pi.hProcess, INFINITE );
+ if(GetExitCodeProcess(pi.hProcess, &exitCode) == 0)
+ {
+ err = GetLastError();
+ }
+ CloseHandle( pi.hProcess );
+
+ // Terminate job object so that all spawned processes are also killed.
+ // This is needed because once this process closes the handle to the job
+ // object and none of the spawned objects have the handle open (via
+ // inheritance on creation) then it will not be possible for any other external
+ // program (say winutils task kill) to terminate this job object via its name.
+ if(TerminateJobObject(jobObject, exitCode) == 0)
+ {
+ err = GetLastError();
+ }
+
+  // Execution reaches this point only on failure or after TerminateJobObject
+ CloseHandle(jobObject);
+
+ if(err != ERROR_SUCCESS)
+ {
+ return err;
+ }
+ return exitCode;
+}
+
+//----------------------------------------------------------------------------
+// Function: isTaskAlive
+//
+// Description:
+// Checks if a task is alive via a jobobject. Outputs the
+// appropriate information to stdout on success, or stderr on failure.
+//
+// Returns:
+// ERROR_SUCCESS: On success
+// GetLastError: otherwise
+DWORD isTaskAlive(const _TCHAR* jobObjName, int* isAlive, int* procsInJob)
+{
+ PJOBOBJECT_BASIC_PROCESS_ID_LIST procList;
+ HANDLE jobObject = NULL;
+ int numProcs = 100;
+
+ *isAlive = FALSE;
+
+ jobObject = OpenJobObject(JOB_OBJECT_QUERY, FALSE, jobObjName);
+
+ if(jobObject == NULL)
+ {
+ DWORD err = GetLastError();
+ if(err == ERROR_FILE_NOT_FOUND)
+ {
+      // The job object does not exist, so assume it is not alive
+ return ERROR_SUCCESS;
+ }
+ return err;
+ }
+
+ procList = (PJOBOBJECT_BASIC_PROCESS_ID_LIST) LocalAlloc(LPTR, sizeof (JOBOBJECT_BASIC_PROCESS_ID_LIST) + numProcs*32);
+ if (!procList)
+ {
+ DWORD err = GetLastError();
+ CloseHandle(jobObject);
+ return err;
+ }
+ if(QueryInformationJobObject(jobObject, JobObjectBasicProcessIdList, procList, sizeof(JOBOBJECT_BASIC_PROCESS_ID_LIST)+numProcs*32, NULL) == 0)
+ {
+ DWORD err = GetLastError();
+ if(err != ERROR_MORE_DATA)
+ {
+ CloseHandle(jobObject);
+ LocalFree(procList);
+ return err;
+ }
+ }
+
+ if(procList->NumberOfAssignedProcesses > 0)
+ {
+ *isAlive = TRUE;
+ *procsInJob = procList->NumberOfAssignedProcesses;
+ }
+
+ LocalFree(procList);
+
+ return ERROR_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+// Function: killTask
+//
+// Description:
+// Kills a task via a jobobject. Outputs the
+// appropriate information to stdout on success, or stderr on failure.
+//
+// Returns:
+// ERROR_SUCCESS: On success
+// GetLastError: otherwise
+DWORD killTask(_TCHAR* jobObjName)
+{
+ HANDLE jobObject = OpenJobObject(JOB_OBJECT_TERMINATE, FALSE, jobObjName);
+ if(jobObject == NULL)
+ {
+ DWORD err = GetLastError();
+ if(err == ERROR_FILE_NOT_FOUND)
+ {
+      // The job object does not exist, so assume it is not alive
+ return ERROR_SUCCESS;
+ }
+ return err;
+ }
+
+ if(TerminateJobObject(jobObject, 1) == 0)
+ {
+ return GetLastError();
+ }
+ CloseHandle(jobObject);
+
+ return ERROR_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+// Function: printTaskProcessList
+//
+// Description:
+// Prints resource usage of all processes in the task jobobject
+//
+// Returns:
+// ERROR_SUCCESS: On success
+// GetLastError: otherwise
+DWORD printTaskProcessList(const _TCHAR* jobObjName)
+{
+ DWORD i;
+ PJOBOBJECT_BASIC_PROCESS_ID_LIST procList;
+ int numProcs = 100;
+ HANDLE jobObject = OpenJobObject(JOB_OBJECT_QUERY, FALSE, jobObjName);
+ if(jobObject == NULL)
+ {
+ DWORD err = GetLastError();
+ return err;
+ }
+
+ procList = (PJOBOBJECT_BASIC_PROCESS_ID_LIST) LocalAlloc(LPTR, sizeof (JOBOBJECT_BASIC_PROCESS_ID_LIST) + numProcs*32);
+ if (!procList)
+ {
+ DWORD err = GetLastError();
+ CloseHandle(jobObject);
+ return err;
+ }
+ while(QueryInformationJobObject(jobObject, JobObjectBasicProcessIdList, procList, sizeof(JOBOBJECT_BASIC_PROCESS_ID_LIST)+numProcs*32, NULL) == 0)
+ {
+ DWORD err = GetLastError();
+ if(err != ERROR_MORE_DATA)
+ {
+ CloseHandle(jobObject);
+ LocalFree(procList);
+ return err;
+ }
+ numProcs = procList->NumberOfAssignedProcesses;
+ LocalFree(procList);
+ procList = (PJOBOBJECT_BASIC_PROCESS_ID_LIST) LocalAlloc(LPTR, sizeof (JOBOBJECT_BASIC_PROCESS_ID_LIST) + numProcs*32);
+ if (!procList)
+ {
+ DWORD err = GetLastError();
+ CloseHandle(jobObject);
+ return err;
+ }
+ }
+
+  for(i=0; i<procList->NumberOfProcessIdsInList; ++i)
+ {
+ HANDLE hProcess = OpenProcess( PROCESS_QUERY_INFORMATION, FALSE, (DWORD)procList->ProcessIdList[i] );
+ if( hProcess != NULL )
+ {
+ PROCESS_MEMORY_COUNTERS_EX pmc;
+ if ( GetProcessMemoryInfo( hProcess, (PPROCESS_MEMORY_COUNTERS)&pmc, sizeof(pmc)) )
+ {
+ FILETIME create, exit, kernel, user;
+ if( GetProcessTimes( hProcess, &create, &exit, &kernel, &user) )
+ {
+ ULARGE_INTEGER kernelTime, userTime;
+ ULONGLONG cpuTimeMs;
+ kernelTime.HighPart = kernel.dwHighDateTime;
+ kernelTime.LowPart = kernel.dwLowDateTime;
+ userTime.HighPart = user.dwHighDateTime;
+ userTime.LowPart = user.dwLowDateTime;
+ cpuTimeMs = (kernelTime.QuadPart+userTime.QuadPart)/10000;
+ _ftprintf_s(stdout, TEXT("%u,%Iu,%Iu,%Iu\n"), procList->ProcessIdList[i], pmc.PrivateUsage, pmc.WorkingSetSize, cpuTimeMs);
+ }
+ }
+ CloseHandle( hProcess );
+ }
+ }
+
+ LocalFree(procList);
+ CloseHandle(jobObject);
+
+ return ERROR_SUCCESS;
+}
+
+//----------------------------------------------------------------------------
+// Function: Task
+//
+// Description:
+// Manages a task via a jobobject (create/isAlive/kill). Outputs the
+// appropriate information to stdout on success, or stderr on failure.
+//
+// Returns:
+// ERROR_SUCCESS: On success
+//   Error code: otherwise
+int Task(int argc, wchar_t *argv[])
+{
+ DWORD dwErrorCode = ERROR_SUCCESS;
+ TaskCommandOption command = TaskInvalid;
+
+ if (!ParseCommandLine(argc, argv, &command)) {
+ dwErrorCode = ERROR_INVALID_COMMAND_LINE;
+
+ fwprintf(stderr, L"Incorrect command line arguments.\n\n");
+ TaskUsage();
+ goto TaskExit;
+ }
+
+ if (command == TaskCreate)
+ {
+ // Create the task jobobject
+ //
+ dwErrorCode = createTask(argv[2], argv[3]);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"createTask", dwErrorCode);
+ goto TaskExit;
+ }
+ } else if (command == TaskIsAlive)
+ {
+    // Check if the task jobobject is alive
+ //
+ int isAlive;
+ int numProcs;
+ dwErrorCode = isTaskAlive(argv[2], &isAlive, &numProcs);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"isTaskAlive", dwErrorCode);
+ goto TaskExit;
+ }
+
+ // Output the result
+ if(isAlive == TRUE)
+ {
+ fwprintf(stdout, L"IsAlive,%d\n", numProcs);
+ }
+ else
+ {
+ dwErrorCode = ERROR_TASK_NOT_ALIVE;
+ ReportErrorCode(L"isTaskAlive returned false", dwErrorCode);
+ goto TaskExit;
+ }
+ } else if (command == TaskKill)
+ {
+    // Kill the task jobobject
+ //
+ dwErrorCode = killTask(argv[2]);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"killTask", dwErrorCode);
+ goto TaskExit;
+ }
+ } else if (command == TaskProcessList)
+ {
+    // Print resource usage of the processes in the task jobobject
+ //
+ dwErrorCode = printTaskProcessList(argv[2]);
+ if (dwErrorCode != ERROR_SUCCESS)
+ {
+ ReportErrorCode(L"printTaskProcessList", dwErrorCode);
+ goto TaskExit;
+ }
+ } else
+ {
+ // Should not happen
+ //
+ assert(FALSE);
+ }
+
+TaskExit:
+ return dwErrorCode;
+}
+
+void TaskUsage()
+{
+ // Hadoop code checks for this string to determine if
+  // jobobjects are being used.
+ // ProcessTree.isSetsidSupported()
+ fwprintf(stdout, L"\
+ Usage: task create [TASKNAME] [COMMAND_LINE] |\n\
+ task isAlive [TASKNAME] |\n\
+ task kill [TASKNAME]\n\
+ task processList [TASKNAME]\n\
+ Creates a new task jobobject with taskname\n\
+ Checks if task jobobject is alive\n\
+ Kills task jobobject\n\
+ Prints to stdout a list of processes in the task\n\
+ along with their resource usage. One process per line\n\
+ and comma separated info per process\n\
+ ProcessId,VirtualMemoryCommitted(bytes),\n\
+ WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n");
+}
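
A hypothetical task lifecycle from the command line is sketched below; the task
name, command, and reported numbers are illustrative. Note that 'task create'
waits for the spawned command to exit (WaitForSingleObject with INFINITE above),
so the isAlive, processList, and kill commands would be issued from a second
console:

    > winutils task create task_001 "cmd /c ping -n 30 localhost"

    > winutils task isAlive task_001
    IsAlive,2

    > winutils task processList task_001
    3476,24117248,31850496,1203

    > winutils task kill task_001

Each processList line is ProcessId,VirtualMemoryCommitted(bytes),
WorkingSetSize(bytes),CpuTime(ms), as printed by printTaskProcessList above.
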
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/winutils.sln b/hadoop-common-project/hadoop-common/src/main/winutils/winutils.sln
new file mode 100644
index 0000000000..d4e019e60d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/winutils.sln
@@ -0,0 +1,55 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "winutils", "winutils.vcxproj", "{D94B3BD7-39CC-47A0-AE9A-353FDE506F33}"
+ ProjectSection(ProjectDependencies) = postProject
+ {12131AA7-902E-4A6D-9CE3-043261D22A12} = {12131AA7-902E-4A6D-9CE3-043261D22A12}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libwinutils", "libwinutils.vcxproj", "{12131AA7-902E-4A6D-9CE3-043261D22A12}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Win32 = Debug|Win32
+ Debug|x64 = Debug|x64
+ Release|Win32 = Release|Win32
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {D94B3BD7-39CC-47A0-AE9A-353FDE506F33}.Debug|Win32.ActiveCfg = Debug|x64
+ {D94B3BD7-39CC-47A0-AE9A-353FDE506F33}.Debug|Win32.Build.0 = Debug|x64
+ {D94B3BD7-39CC-47A0-AE9A-353FDE506F33}.Debug|x64.ActiveCfg = Debug|x64
+ {D94B3BD7-39CC-47A0-AE9A-353FDE506F33}.Debug|x64.Build.0 = Debug|x64
+ {D94B3BD7-39CC-47A0-AE9A-353FDE506F33}.Release|Win32.ActiveCfg = Release|Win32
+ {D94B3BD7-39CC-47A0-AE9A-353FDE506F33}.Release|Win32.Build.0 = Release|Win32
+ {D94B3BD7-39CC-47A0-AE9A-353FDE506F33}.Release|x64.ActiveCfg = Release|x64
+ {D94B3BD7-39CC-47A0-AE9A-353FDE506F33}.Release|x64.Build.0 = Release|x64
+ {12131AA7-902E-4A6D-9CE3-043261D22A12}.Debug|Win32.ActiveCfg = Debug|x64
+ {12131AA7-902E-4A6D-9CE3-043261D22A12}.Debug|Win32.Build.0 = Debug|x64
+ {12131AA7-902E-4A6D-9CE3-043261D22A12}.Debug|x64.ActiveCfg = Debug|x64
+ {12131AA7-902E-4A6D-9CE3-043261D22A12}.Debug|x64.Build.0 = Debug|x64
+ {12131AA7-902E-4A6D-9CE3-043261D22A12}.Release|Win32.ActiveCfg = Release|Win32
+ {12131AA7-902E-4A6D-9CE3-043261D22A12}.Release|Win32.Build.0 = Release|Win32
+ {12131AA7-902E-4A6D-9CE3-043261D22A12}.Release|x64.ActiveCfg = Release|x64
+ {12131AA7-902E-4A6D-9CE3-043261D22A12}.Release|x64.Build.0 = Release|x64
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj b/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
new file mode 100644
index 0000000000..9ae4c8745e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
@@ -0,0 +1,181 @@
+
+
+
+
+
+
+
+ Debug
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ Win32
+
+
+ Release
+ x64
+
+
+
+ {D94B3BD7-39CC-47A0-AE9A-353FDE506F33}
+ Win32Proj
+ winutils
+
+
+
+ Application
+ true
+ Unicode
+
+
+ Application
+ true
+ Unicode
+
+
+ Application
+ false
+ true
+ Unicode
+
+
+ Application
+ false
+ true
+ Unicode
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ include;$(IncludePath)
+
+
+ true
+
+
+ true
+
+ ..\..\..\target\winutils\$(Configuration)\
+
+
+ false
+
+
+ false
+ ..\..\..\target\winutils\$(Platform)\$(Configuration)\
+ ..\..\..\target\bin\
+
+
+
+
+
+ Level3
+ Disabled
+ WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
+
+
+ Console
+ true
+
+
+
+
+
+
+ Level4
+ Disabled
+ WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
+
+
+ Console
+ true
+
+
+
+
+ Level3
+
+
+ MaxSpeed
+ true
+ true
+ WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
+
+
+ Console
+ true
+ true
+ true
+
+
+
+
+ Level3
+
+
+ MaxSpeed
+ true
+ true
+ WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
+
+
+ Console
+ true
+ true
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {12131aa7-902e-4a6d-9ce3-043261d22a12}
+
+
+
+
+
+
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm
index c6a2d6b318..9951857108 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm
@@ -33,9 +33,7 @@ Single Node Setup
* GNU/Linux is supported as a development and production platform.
Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
- * Win32 is supported as a development platform. Distributed operation
- has not been well tested on Win32, so it is not supported as a
- production platform.
+ * Windows is also a supported platform.
** Required Software
@@ -46,11 +44,6 @@ Single Node Setup
[[2]] ssh must be installed and sshd must be running to use the Hadoop
scripts that manage remote Hadoop daemons.
- Additional requirements for Windows include:
-
- [[1]] Cygwin - Required for shell support in addition to the required
- software above.
-
** Installing Software
If your cluster doesn't have the requisite software you will need to
@@ -63,11 +56,6 @@ Single Node Setup
$ sudo apt-get install rsync
----
- On Windows, if you did not install the required software when you
- installed cygwin, start the cygwin installer and select the packages:
-
- * openssh - the Net category
-
* Download
To get a Hadoop distribution, download a recent stable release from one
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
index 8d09540b1c..97ae6a256d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
@@ -68,7 +68,7 @@ public static Path getTestRootPath(FileContext fc, String pathString) {
public static String getAbsoluteTestRootDir(FileContext fc)
throws IOException {
if (absTestRootDir == null) {
- if (TEST_ROOT_DIR.startsWith("/")) {
+ if (new Path(TEST_ROOT_DIR).isAbsolute()) {
absTestRootDir = TEST_ROOT_DIR;
} else {
absTestRootDir = fc.getWorkingDirectory().toString() + "/"
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java
index 0acd416dd8..506e941e35 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java
@@ -20,9 +20,11 @@
import java.io.*;
import java.util.ArrayList;
+import java.util.regex.Pattern;
import junit.framework.Assert;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -52,6 +54,12 @@ public abstract class FileContextURIBase {
private static final String basePath = System.getProperty("test.build.data",
"build/test/data") + "/testContextURI";
private static final Path BASE = new Path(basePath);
+
+ // Matches anything containing <, >, :, ", |, ?, *, or anything that ends with
+ // space or dot.
+ private static final Pattern WIN_INVALID_FILE_NAME_PATTERN = Pattern.compile(
+ "^(.*?[<>\\:\"\\|\\?\\*].*?)|(.*?[ \\.])$");
+
protected FileContext fc1;
protected FileContext fc2;
@@ -81,6 +89,10 @@ public void testCreateFile() throws IOException {
" ", "^ " };
for (String f : fileNames) {
+ if (!isTestableFileNameOnPlatform(f)) {
+ continue;
+ }
+
// Create a file on fc2's file system using fc1
Path testPath = qualifiedPath(f, fc2);
// Ensure file does not exist
@@ -205,6 +217,10 @@ public void testCreateDirectory() throws IOException {
"deleteTest/()&^%$#@!~_+}{>", " ", "^ " };
for (String f : dirNames) {
+ if (!isTestableFileNameOnPlatform(f)) {
+ continue;
+ }
+
// Create a file on fc2's file system using fc1
Path testPath = qualifiedPath(f, fc2);
// Ensure file does not exist
@@ -374,6 +390,10 @@ public void testDeleteDirectory() throws IOException {
"deleteTest/()&^%$#@!~_+}{>", " ", "^ " };
for (String f : dirNames) {
+ if (!isTestableFileNameOnPlatform(f)) {
+ continue;
+ }
+
// Create a file on fc2's file system using fc1
Path testPath = qualifiedPath(f, fc2);
// Ensure file does not exist
@@ -492,6 +512,10 @@ public void testListStatus() throws Exception {
     ArrayList<Path> testDirs = new ArrayList<Path>();
for (String d : dirs) {
+ if (!isTestableFileNameOnPlatform(d)) {
+ continue;
+ }
+
testDirs.add(qualifiedPath(d, fc2));
}
Assert.assertFalse(exists(fc1, testDirs.get(0)));
@@ -506,15 +530,17 @@ public void testListStatus() throws Exception {
Assert.assertEquals(qualifiedPath(hPrefix, fc1), paths[0].getPath());
paths = fc1.util().listStatus(qualifiedPath(hPrefix, fc1));
- Assert.assertEquals(6, paths.length);
- for (int i = 0; i < dirs.length; i++) {
+ Assert.assertEquals(testDirs.size(), paths.length);
+ for (int i = 0; i < testDirs.size(); i++) {
boolean found = false;
for (int j = 0; j < paths.length; j++) {
- if (qualifiedPath(dirs[i],fc1).equals(paths[j].getPath())) {
+ if (qualifiedPath(testDirs.get(i).toString(), fc1).equals(
+ paths[j].getPath())) {
+
found = true;
}
}
- Assert.assertTrue(dirs[i] + " not found", found);
+ Assert.assertTrue(testDirs.get(i) + " not found", found);
}
paths = fc1.util().listStatus(qualifiedPath(dirs[0], fc1));
@@ -539,9 +565,32 @@ public void testListStatus() throws Exception {
}
Assert.assertTrue(stat.getPath() + " not found", found);
}
- Assert.assertEquals(6, dirLen);
+ Assert.assertEquals(testDirs.size(), dirLen);
pathsItor = fc1.listStatus(qualifiedPath(dirs[0], fc1));
Assert.assertFalse(pathsItor.hasNext());
}
+
+ /**
+ * Returns true if the argument is a file name that is testable on the platform
+ * currently running the test. This is intended for use by tests so that they
+ * can skip checking file names that aren't supported by the underlying
+ * platform. The current implementation specifically checks for patterns that
+ * are not valid file names on Windows when the tests are running on Windows.
+ *
+ * @param fileName String file name to check
+ * @return boolean true if the argument is valid as a file name
+ */
+ private static boolean isTestableFileNameOnPlatform(String fileName) {
+ boolean valid = true;
+
+ if (Shell.WINDOWS) {
+ // Disallow reserved characters: <, >, :, ", |, ?, *.
+ // Disallow trailing space or period.
+ // See http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
+ valid = !WIN_INVALID_FILE_NAME_PATTERN.matcher(fileName).matches();
+ }
+
+ return valid;
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
index c066aade28..47e201db97 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
@@ -86,7 +86,7 @@ static String getAbsoluteTestRootDir(FileSystem fSys)
throws IOException {
// NOTE: can't cache because of different filesystems!
//if (absTestRootDir == null)
- if (TEST_ROOT_DIR.startsWith("/")) {
+ if (new Path(TEST_ROOT_DIR).isAbsolute()) {
absTestRootDir = TEST_ROOT_DIR;
} else {
absTestRootDir = fSys.getWorkingDirectory().toString() + "/"
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java
index 90378f780a..ca9de83c52 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java
@@ -43,13 +43,14 @@ public void setup() throws IOException {
fc = FileContext.getFileContext();
}
- @Test
+ @Test (timeout = 30000)
public void testFileContextResolveAfs() throws IOException {
Configuration conf = new Configuration();
localFs = FileSystem.get(conf);
Path localPath = new Path(TEST_ROOT_DIR_LOCAL + "/TestFileContextResolveAfs1");
- Path linkPath = new Path("file://" + TEST_ROOT_DIR_LOCAL + "/TestFileContextResolveAfs2");
+ Path linkPath = localFs.makeQualified(new Path(TEST_ROOT_DIR_LOCAL,
+ "TestFileContextResolveAfs2"));
localFs.mkdirs(new Path(TEST_ROOT_DIR_LOCAL));
localFs.create(localPath);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index e73c644fb0..720811746d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -20,16 +20,24 @@
import org.junit.Before;
import java.io.BufferedReader;
import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintWriter;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.jar.Attributes;
+import java.util.jar.JarFile;
+import java.util.jar.Manifest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
@@ -121,7 +129,7 @@ private void createFile(File directory, String name, String contents)
}
}
- @Test
+ @Test (timeout = 30000)
public void testListFiles() throws IOException {
setupDirs();
//Test existing files case
@@ -148,7 +156,7 @@ public void testListFiles() throws IOException {
}
}
- @Test
+ @Test (timeout = 30000)
public void testListAPI() throws IOException {
setupDirs();
//Test existing files case
@@ -196,7 +204,7 @@ private void cleanupImpl() throws IOException {
Assert.assertTrue(!partitioned.exists());
}
- @Test
+ @Test (timeout = 30000)
public void testFullyDelete() throws IOException {
setupDirs();
boolean ret = FileUtil.fullyDelete(del);
@@ -211,7 +219,7 @@ public void testFullyDelete() throws IOException {
* (b) symlink to dir only and not the dir pointed to by symlink.
* @throws IOException
*/
- @Test
+ @Test (timeout = 30000)
public void testFullyDeleteSymlinks() throws IOException {
setupDirs();
@@ -241,7 +249,7 @@ public void testFullyDeleteSymlinks() throws IOException {
* (b) dangling symlink to directory properly
* @throws IOException
*/
- @Test
+ @Test (timeout = 30000)
public void testFullyDeleteDanglingSymlinks() throws IOException {
setupDirs();
// delete the directory tmp to make tmpDir a dangling link to dir tmp and
@@ -268,7 +276,7 @@ public void testFullyDeleteDanglingSymlinks() throws IOException {
Assert.assertEquals(3, del.list().length);
}
- @Test
+ @Test (timeout = 30000)
public void testFullyDeleteContents() throws IOException {
setupDirs();
boolean ret = FileUtil.fullyDeleteContents(del);
@@ -384,15 +392,19 @@ private void validateAndSetWritablePermissions(
zlink.exists());
}
- @Test
+ @Test (timeout = 30000)
public void testFailFullyDelete() throws IOException {
+ if(Shell.WINDOWS) {
+ // On Windows, File#setWritable(false) does not work for directories
+ return;
+ }
LOG.info("Running test to verify failure of fullyDelete()");
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDelete(new MyFile(del));
validateAndSetWritablePermissions(true, ret);
}
- @Test
+ @Test (timeout = 30000)
public void testFailFullyDeleteGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDelete(new MyFile(del), true);
@@ -461,15 +473,19 @@ public File[] listFiles() {
}
}
- @Test
+ @Test (timeout = 30000)
public void testFailFullyDeleteContents() throws IOException {
+ if(Shell.WINDOWS) {
+ // On Windows, File#setWritable(false) does not work for directories
+ return;
+ }
LOG.info("Running test to verify failure of fullyDeleteContents()");
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDeleteContents(new MyFile(del));
validateAndSetWritablePermissions(true, ret);
}
- @Test
+ @Test (timeout = 30000)
public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDeleteContents(new MyFile(del), true);
@@ -477,7 +493,7 @@ public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
validateAndSetWritablePermissions(false, ret);
}
- @Test
+ @Test (timeout = 30000)
public void testCopyMergeSingleDirectory() throws IOException {
setupDirs();
boolean copyMergeResult = copyMerge("partitioned", "tmp/merged");
@@ -536,7 +552,7 @@ private boolean copyMerge(String src, String dst)
* and that directory sizes are not added to the final calculated size
* @throws IOException
*/
- @Test
+ @Test (timeout = 30000)
public void testGetDU() throws IOException {
setupDirs();
@@ -547,6 +563,136 @@ public void testGetDU() throws IOException {
Assert.assertEquals(expected, du);
}
+ @Test (timeout = 30000)
+ public void testSymlink() throws Exception {
+ Assert.assertFalse(del.exists());
+ del.mkdirs();
+
+ byte[] data = "testSymLink".getBytes();
+
+ File file = new File(del, FILE);
+ File link = new File(del, "_link");
+
+ //write some data to the file
+ FileOutputStream os = new FileOutputStream(file);
+ os.write(data);
+ os.close();
+
+ //create the symlink
+ FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
+
+ //ensure that symlink length is correctly reported by Java
+ Assert.assertEquals(data.length, file.length());
+ Assert.assertEquals(data.length, link.length());
+
+ //ensure that we can read from link.
+ FileInputStream in = new FileInputStream(link);
+ long len = 0;
+ while (in.read() > 0) {
+ len++;
+ }
+ in.close();
+ Assert.assertEquals(data.length, len);
+ }
+
+ /**
+ * Test that rename on a symlink works as expected.
+ */
+ @Test (timeout = 30000)
+ public void testSymlinkRenameTo() throws Exception {
+ Assert.assertFalse(del.exists());
+ del.mkdirs();
+
+ File file = new File(del, FILE);
+ file.createNewFile();
+ File link = new File(del, "_link");
+
+ // create the symlink
+ FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
+
+ Assert.assertTrue(file.exists());
+ Assert.assertTrue(link.exists());
+
+ File link2 = new File(del, "_link2");
+
+ // Rename the symlink
+ Assert.assertTrue(link.renameTo(link2));
+
+ // Make sure the file still exists
+ // (NOTE: this would fail on Java6 on Windows if we didn't
+ // copy the file in FileUtil#symlink)
+ Assert.assertTrue(file.exists());
+
+ Assert.assertTrue(link2.exists());
+ Assert.assertFalse(link.exists());
+ }
+
+ /**
+ * Test that deletion of a symlink works as expected.
+ */
+ @Test (timeout = 30000)
+ public void testSymlinkDelete() throws Exception {
+ Assert.assertFalse(del.exists());
+ del.mkdirs();
+
+ File file = new File(del, FILE);
+ file.createNewFile();
+ File link = new File(del, "_link");
+
+ // create the symlink
+ FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
+
+ Assert.assertTrue(file.exists());
+ Assert.assertTrue(link.exists());
+
+ // make sure that deleting a symlink works properly
+ Assert.assertTrue(link.delete());
+ Assert.assertFalse(link.exists());
+ Assert.assertTrue(file.exists());
+ }
+
+ /**
+ * Test that length on a symlink works as expected.
+ */
+ @Test (timeout = 30000)
+ public void testSymlinkLength() throws Exception {
+ Assert.assertFalse(del.exists());
+ del.mkdirs();
+
+ byte[] data = "testSymLinkData".getBytes();
+
+ File file = new File(del, FILE);
+ File link = new File(del, "_link");
+
+ // write some data to the file
+ FileOutputStream os = new FileOutputStream(file);
+ os.write(data);
+ os.close();
+
+ Assert.assertEquals(0, link.length());
+
+ // create the symlink
+ FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
+
+ // ensure that File#length returns the target file and link size
+ Assert.assertEquals(data.length, file.length());
+ Assert.assertEquals(data.length, link.length());
+
+ file.delete();
+ Assert.assertFalse(file.exists());
+
+ if (Shell.WINDOWS && !Shell.isJava7OrAbove()) {
+ // On Java6 on Windows, we copied the file
+ Assert.assertEquals(data.length, link.length());
+ } else {
+ // Otherwise, the target file size is zero
+ Assert.assertEquals(0, link.length());
+ }
+
+ link.delete();
+ Assert.assertFalse(link.exists());
+ }
+
private void doUntarAndVerify(File tarFile, File untarDir)
throws IOException {
if (untarDir.exists() && !FileUtil.fullyDelete(untarDir)) {
@@ -574,7 +720,7 @@ private void doUntarAndVerify(File tarFile, File untarDir)
Assert.assertTrue(testFile.length() == 8);
}
- @Test
+ @Test (timeout = 30000)
public void testUntar() throws IOException {
String tarGzFileName = System.getProperty("test.cache.data",
"build/test/cache") + "/test-untar.tgz";
@@ -586,4 +732,69 @@ public void testUntar() throws IOException {
doUntarAndVerify(new File(tarGzFileName), untarDir);
doUntarAndVerify(new File(tarFileName), untarDir);
}
+
+ @Test (timeout = 30000)
+ public void testCreateJarWithClassPath() throws Exception {
+ // setup test directory for files
+ Assert.assertFalse(tmp.exists());
+ Assert.assertTrue(tmp.mkdirs());
+
+ // create files expected to match a wildcard
+ List<File> wildcardMatches = Arrays.asList(new File(tmp, "wildcard1.jar"),
+ new File(tmp, "wildcard2.jar"), new File(tmp, "wildcard3.JAR"),
+ new File(tmp, "wildcard4.JAR"));
+ for (File wildcardMatch: wildcardMatches) {
+ Assert.assertTrue("failure creating file: " + wildcardMatch,
+ wildcardMatch.createNewFile());
+ }
+
+ // create non-jar files, which we expect to not be included in the classpath
+ Assert.assertTrue(new File(tmp, "text.txt").createNewFile());
+ Assert.assertTrue(new File(tmp, "executable.exe").createNewFile());
+ Assert.assertTrue(new File(tmp, "README").createNewFile());
+
+ // create classpath jar
+ String wildcardPath = tmp.getCanonicalPath() + File.separator + "*";
+ List<String> classPaths = Arrays.asList("cp1.jar", "cp2.jar", wildcardPath,
+ "cp3.jar");
+ String inputClassPath = StringUtils.join(File.pathSeparator, classPaths);
+ String classPathJar = FileUtil.createJarWithClassPath(inputClassPath,
+ new Path(tmp.getCanonicalPath()));
+
+ // verify classpath by reading manifest from jar file
+ JarFile jarFile = null;
+ try {
+ jarFile = new JarFile(classPathJar);
+ Manifest jarManifest = jarFile.getManifest();
+ Assert.assertNotNull(jarManifest);
+ Attributes mainAttributes = jarManifest.getMainAttributes();
+ Assert.assertNotNull(mainAttributes);
+ Assert.assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
+ String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
+ Assert.assertNotNull(classPathAttr);
+ List<String> expectedClassPaths = new ArrayList<String>();
+ for (String classPath: classPaths) {
+ if (!wildcardPath.equals(classPath)) {
+ expectedClassPaths.add(new File(classPath).toURI().toURL()
+ .toExternalForm());
+ } else {
+ // add wildcard matches
+ for (File wildcardMatch: wildcardMatches) {
+ expectedClassPaths.add(wildcardMatch.toURI().toURL()
+ .toExternalForm());
+ }
+ }
+ }
+ List<String> actualClassPaths = Arrays.asList(classPathAttr.split(" "));
+ Assert.assertEquals(expectedClassPaths, actualClassPaths);
+ } finally {
+ if (jarFile != null) {
+ try {
+ jarFile.close();
+ } catch (IOException e) {
+ LOG.warn("exception closing jarFile: " + classPathJar, e);
+ }
+ }
+ }
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
index 9298077663..d64292b39d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java
@@ -121,20 +121,22 @@ private void change(int exit, String owner, String group, String...files)
*
* @throws Exception
*/
- @Test
+ @Test (timeout = 30000)
public void testChmod() throws Exception {
+ Path p1 = new Path(TEST_ROOT_DIR, "testChmod/fileExists");
- final String f1 = TEST_ROOT_DIR + "/" + "testChmod/fileExists";
- final String f2 = TEST_ROOT_DIR + "/" + "testChmod/fileDoesNotExist";
- final String f3 = TEST_ROOT_DIR + "/" + "testChmod/nonExistingfiles*";
+ final String f1 = p1.toUri().getPath();
+ final String f2 = new Path(TEST_ROOT_DIR, "testChmod/fileDoesNotExist")
+ .toUri().getPath();
+ final String f3 = new Path(TEST_ROOT_DIR, "testChmod/nonExistingfiles*")
+ .toUri().getPath();
- Path p1 = new Path(f1);
+ final Path p4 = new Path(TEST_ROOT_DIR, "testChmod/file1");
+ final Path p5 = new Path(TEST_ROOT_DIR, "testChmod/file2");
+ final Path p6 = new Path(TEST_ROOT_DIR, "testChmod/file3");
- final Path p4 = new Path(TEST_ROOT_DIR + "/" + "testChmod/file1");
- final Path p5 = new Path(TEST_ROOT_DIR + "/" + "testChmod/file2");
- final Path p6 = new Path(TEST_ROOT_DIR + "/" + "testChmod/file3");
-
- final String f7 = TEST_ROOT_DIR + "/" + "testChmod/file*";
+ final String f7 = new Path(TEST_ROOT_DIR, "testChmod/file*").toUri()
+ .getPath();
// create and write test file
writeFile(fileSys, p1);
@@ -175,20 +177,23 @@ public void testChmod() throws Exception {
*
* @throws Exception
*/
- @Test
+ @Test (timeout = 30000)
public void testChown() throws Exception {
+ Path p1 = new Path(TEST_ROOT_DIR, "testChown/fileExists");
- final String f1 = TEST_ROOT_DIR + "/" + "testChown/fileExists";
- final String f2 = TEST_ROOT_DIR + "/" + "testChown/fileDoesNotExist";
- final String f3 = TEST_ROOT_DIR + "/" + "testChown/nonExistingfiles*";
+ final String f1 = p1.toUri().getPath();
+ final String f2 = new Path(TEST_ROOT_DIR, "testChown/fileDoesNotExist")
+ .toUri().getPath();
+ final String f3 = new Path(TEST_ROOT_DIR, "testChown/nonExistingfiles*")
+ .toUri().getPath();
- Path p1 = new Path(f1);
- final Path p4 = new Path(TEST_ROOT_DIR + "/" + "testChown/file1");
- final Path p5 = new Path(TEST_ROOT_DIR + "/" + "testChown/file2");
- final Path p6 = new Path(TEST_ROOT_DIR + "/" + "testChown/file3");
+ final Path p4 = new Path(TEST_ROOT_DIR, "testChown/file1");
+ final Path p5 = new Path(TEST_ROOT_DIR, "testChown/file2");
+ final Path p6 = new Path(TEST_ROOT_DIR, "testChown/file3");
- final String f7 = TEST_ROOT_DIR + "/" + "testChown/file*";
+ final String f7 = new Path(TEST_ROOT_DIR, "testChown/file*").toUri()
+ .getPath();
// create and write test file
writeFile(fileSys, p1);
@@ -228,20 +233,22 @@ public void testChown() throws Exception {
*
* @throws Exception
*/
- @Test
+ @Test (timeout = 30000)
public void testChgrp() throws Exception {
+ Path p1 = new Path(TEST_ROOT_DIR, "testChgrp/fileExists");
- final String f1 = TEST_ROOT_DIR + "/" + "testChgrp/fileExists";
- final String f2 = TEST_ROOT_DIR + "/" + "testChgrp/fileDoesNotExist";
- final String f3 = TEST_ROOT_DIR + "/" + "testChgrp/nonExistingfiles*";
+ final String f1 = p1.toUri().getPath();
+ final String f2 = new Path(TEST_ROOT_DIR, "testChgrp/fileDoesNotExist")
+ .toUri().getPath();
+ final String f3 = new Path(TEST_ROOT_DIR, "testChgrp/nonExistingfiles*")
+ .toUri().getPath();
- Path p1 = new Path(f1);
+ final Path p4 = new Path(TEST_ROOT_DIR, "testChgrp/file1");
+ final Path p5 = new Path(TEST_ROOT_DIR, "testChgrp/file2");
+ final Path p6 = new Path(TEST_ROOT_DIR, "testChgrp/file3");
- final Path p4 = new Path(TEST_ROOT_DIR + "/" + "testChgrp/file1");
- final Path p5 = new Path(TEST_ROOT_DIR + "/" + "testChgrp/file2");
- final Path p6 = new Path(TEST_ROOT_DIR + "/" + "testChgrp/file3");
-
- final String f7 = TEST_ROOT_DIR + "/" + "testChgrp/file*";
+ final String f7 = new Path(TEST_ROOT_DIR, "testChgrp/file*").toUri()
+ .getPath();
// create and write test file
writeFile(fileSys, p1);
@@ -271,7 +278,7 @@ public void testChgrp() throws Exception {
change(1, null, "admin", f2, f7);
}
- @Test
+ @Test (timeout = 30000)
public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole()
throws Exception {
Configuration conf = new Configuration();
@@ -288,8 +295,8 @@ public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole()
fileSys.mkdirs(tdir);
String[] args = new String[3];
args[0] = "-get";
- args[1] = tdir+"/invalidSrc";
- args[2] = tdir+"/invalidDst";
+ args[1] = new Path(tdir.toUri().getPath(), "/invalidSrc").toString();
+ args[2] = new Path(tdir.toUri().getPath(), "/invalidDst").toString();
assertTrue("file exists", !fileSys.exists(new Path(args[1])));
assertTrue("file exists", !fileSys.exists(new Path(args[2])));
int run = shell.run(args);
@@ -303,7 +310,7 @@ public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole()
}
}
- @Test
+ @Test (timeout = 30000)
public void testRmWithNonexistentGlob() throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
@@ -324,7 +331,7 @@ public void testRmWithNonexistentGlob() throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testRmForceWithNonexistentGlob() throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
@@ -343,7 +350,7 @@ public void testRmForceWithNonexistentGlob() throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testInvalidDefaultFS() throws Exception {
// if default fs doesn't exist or is invalid, but the path provided in
// arguments is valid - fsshell should work
@@ -374,7 +381,7 @@ public void testInvalidDefaultFS() throws Exception {
}
- @Test
+ @Test (timeout = 30000)
public void testInterrupt() throws Exception {
MyFsShell shell = new MyFsShell();
shell.setConf(new Configuration());
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
index 3b76947246..1e06864c3b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHardLink.java
@@ -54,17 +54,6 @@
* NOTICE: This test class only tests the functionality of the OS
* upon which the test is run! (although you're pretty safe with the
* unix-like OS's, unless a typo sneaks in.)
- *
- * Notes about Windows testing:
- * (a) In order to create hardlinks, the process must be run with
- * administrative privs, in both the account AND the invocation.
- * For instance, to run within Eclipse, the Eclipse application must be
- * launched by right-clicking on it, and selecting "Run as Administrator"
- * (and that option will only be available if the current user id does
- * in fact have admin privs).
- * (b) The getLinkCount() test case will fail for Windows, unless Cygwin
- * is set up properly. In particular, ${cygwin}/bin must be in
- * the PATH environment variable, so the cygwin utilities can be found.
*/
public class TestHardLink {
@@ -221,9 +210,6 @@ private String fetchFileContents(File file)
* Sanity check the simplest case of HardLink.getLinkCount()
* to make sure we get back "1" for ordinary single-linked files.
* Tests with multiply-linked files are in later test cases.
- *
- * If this fails on Windows but passes on Unix, the most likely cause is
- * incorrect configuration of the Cygwin installation; see above.
*/
@Test
public void testGetLinkCount() throws IOException {
@@ -412,7 +398,7 @@ class win extends HardLinkCGWin {};
assertEquals(5, win.hardLinkCommand.length);
assertEquals(7, win.hardLinkMultPrefix.length);
assertEquals(8, win.hardLinkMultSuffix.length);
- assertEquals(3, win.getLinkCountCommand.length);
+ assertEquals(4, win.getLinkCountCommand.length);
assertTrue(win.hardLinkMultPrefix[4].equals("%f"));
//make sure "%f" was not munged
@@ -423,7 +409,7 @@ class win extends HardLinkCGWin {};
assertTrue(win.hardLinkMultSuffix[7].equals("1>NUL"));
//make sure "1>NUL" was not munged
assertEquals(5, ("1>NUL").length());
- assertTrue(win.getLinkCountCommand[1].equals("-c%h"));
+ assertTrue(win.getLinkCountCommand[1].equals("hardlink"));
//make sure "-c%h" was not munged
assertEquals(4, ("-c%h").length());
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
index 719480844a..ab887b901d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
@@ -129,7 +129,7 @@ private String buildBufferDir(String dir, int i) {
* The second dir exists & is RW
* @throws Exception
*/
- @Test
+ @Test (timeout = 30000)
public void test0() throws Exception {
if (isWindows) return;
String dir0 = buildBufferDir(ROOT, 0);
@@ -141,7 +141,8 @@ public void test0() throws Exception {
validateTempDirCreation(dir1);
validateTempDirCreation(dir1);
} finally {
- Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
+ Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
+ BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
@@ -150,7 +151,7 @@ public void test0() throws Exception {
* The second dir exists & is RW
* @throws Exception
*/
- @Test
+ @Test (timeout = 30000)
public void testROBufferDirAndRWBufferDir() throws Exception {
if (isWindows) return;
String dir1 = buildBufferDir(ROOT, 1);
@@ -162,14 +163,15 @@ public void testROBufferDirAndRWBufferDir() throws Exception {
validateTempDirCreation(dir2);
validateTempDirCreation(dir2);
} finally {
- Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
+ Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
+ BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
/** Two buffer dirs. Both do not exist but on a RW disk.
* Check if tmp dirs are allocated in a round-robin
*/
- @Test
+ @Test (timeout = 30000)
public void testDirsNotExist() throws Exception {
if (isWindows) return;
String dir2 = buildBufferDir(ROOT, 2);
@@ -195,7 +197,7 @@ public void testDirsNotExist() throws Exception {
* Later disk1 becomes read-only.
* @throws Exception
*/
- @Test
+ @Test (timeout = 30000)
public void testRWBufferDirBecomesRO() throws Exception {
if (isWindows) return;
String dir3 = buildBufferDir(ROOT, 3);
@@ -233,7 +235,7 @@ public void testRWBufferDirBecomesRO() throws Exception {
* @throws Exception
*/
static final int TRIALS = 100;
- @Test
+ @Test (timeout = 30000)
public void testCreateManyFiles() throws Exception {
if (isWindows) return;
String dir5 = buildBufferDir(ROOT, 5);
@@ -270,7 +272,7 @@ public void testCreateManyFiles() throws Exception {
* directory. With checkAccess true, the directory should not be created.
* @throws Exception
*/
- @Test
+ @Test (timeout = 30000)
public void testLocalPathForWriteDirCreation() throws IOException {
String dir0 = buildBufferDir(ROOT, 0);
String dir1 = buildBufferDir(ROOT, 1);
@@ -291,7 +293,8 @@ public void testLocalPathForWriteDirCreation() throws IOException {
assertEquals(e.getClass(), FileNotFoundException.class);
}
} finally {
- Shell.execCommand(new String[] { "chmod", "u+w", BUFFER_DIR_ROOT });
+ Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
+ BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
@@ -300,7 +303,7 @@ public void testLocalPathForWriteDirCreation() throws IOException {
* Test when mapred.local.dir not configured and called
* getLocalPathForWrite
*/
- @Test
+ @Test (timeout = 30000)
public void testShouldNotthrowNPE() throws Exception {
Configuration conf1 = new Configuration();
try {
@@ -319,7 +322,7 @@ public void testShouldNotthrowNPE() throws Exception {
* are mistakenly created from fully qualified path strings.
* @throws IOException
*/
- @Test
+ @Test (timeout = 30000)
public void testNoSideEffects() throws IOException {
assumeTrue(!isWindows);
String dir = buildBufferDir(ROOT, 0);
@@ -330,7 +333,8 @@ public void testNoSideEffects() throws IOException {
assertTrue(result.getParentFile().delete());
assertFalse(new File(dir).exists());
} finally {
- Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
+ Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
+ BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
@@ -340,7 +344,7 @@ public void testNoSideEffects() throws IOException {
*
* @throws IOException
*/
- @Test
+ @Test (timeout = 30000)
public void testGetLocalPathToRead() throws IOException {
assumeTrue(!isWindows);
String dir = buildBufferDir(ROOT, 0);
@@ -353,7 +357,8 @@ public void testGetLocalPathToRead() throws IOException {
assertEquals(f1.getName(), p1.getName());
assertEquals("file", p1.getFileSystem(conf).getUri().getScheme());
} finally {
- Shell.execCommand(new String[] { "chmod", "u+w", BUFFER_DIR_ROOT });
+ Shell.execCommand(Shell.getSetPermissionCommand("u+w", false,
+ BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
@@ -364,7 +369,7 @@ public void testGetLocalPathToRead() throws IOException {
*
* @throws IOException
*/
- @Test
+ @Test (timeout = 30000)
public void testGetAllLocalPathsToRead() throws IOException {
assumeTrue(!isWindows);
@@ -412,7 +417,7 @@ public void testGetAllLocalPathsToRead() throws IOException {
}
}
- @Test
+ @Test (timeout = 30000)
public void testRemoveContext() throws IOException {
String dir = buildBufferDir(ROOT, 0);
try {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
index 5032caab91..7a5843a8a7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.fs;
+import org.junit.Test;
import java.io.IOException;
import java.net.URI;
@@ -25,10 +26,14 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.AvroTestUtil;
+import org.apache.hadoop.util.Shell;
import junit.framework.TestCase;
+import static org.junit.Assert.fail;
+
public class TestPath extends TestCase {
+ @Test (timeout = 30000)
public void testToString() {
toStringTest("/");
toStringTest("/foo");
@@ -61,6 +66,7 @@ private void toStringTest(String pathString) {
assertEquals(pathString, new Path(pathString).toString());
}
+ @Test (timeout = 30000)
public void testNormalize() throws URISyntaxException {
assertEquals("", new Path(".").toString());
assertEquals("..", new Path("..").toString());
@@ -82,6 +88,7 @@ public void testNormalize() throws URISyntaxException {
}
}
+ @Test (timeout = 30000)
public void testIsAbsolute() {
assertTrue(new Path("/").isAbsolute());
assertTrue(new Path("/foo").isAbsolute());
@@ -94,6 +101,7 @@ public void testIsAbsolute() {
}
}
+ @Test (timeout = 30000)
public void testParent() {
assertEquals(new Path("/foo"), new Path("/foo/bar").getParent());
assertEquals(new Path("foo"), new Path("foo/bar").getParent());
@@ -104,6 +112,7 @@ public void testParent() {
}
}
+ @Test (timeout = 30000)
public void testChild() {
assertEquals(new Path("."), new Path(".", "."));
assertEquals(new Path("/"), new Path("/", "."));
@@ -123,10 +132,12 @@ public void testChild() {
}
}
+ @Test (timeout = 30000)
public void testEquals() {
assertFalse(new Path("/").equals(new Path("/foo")));
}
+ @Test (timeout = 30000)
public void testDots() {
// Test Path(String)
assertEquals(new Path("/foo/bar/baz").toString(), "/foo/bar/baz");
@@ -164,18 +175,54 @@ public void testDots() {
assertEquals(new Path("foo/bar/baz","../../../../..").toString(), "../..");
}
+ /** Test that Windows paths are correctly handled */
+ @Test (timeout = 5000)
+ public void testWindowsPaths() throws URISyntaxException, IOException {
+ if (!Path.WINDOWS) {
+ return;
+ }
+
+ assertEquals(new Path("c:\\foo\\bar").toString(), "c:/foo/bar");
+ assertEquals(new Path("c:/foo/bar").toString(), "c:/foo/bar");
+ assertEquals(new Path("/c:/foo/bar").toString(), "c:/foo/bar");
+ assertEquals(new Path("file://c:/foo/bar").toString(), "file://c:/foo/bar");
+ }
+
+ /** Test invalid paths on Windows are correctly rejected */
+ @Test (timeout = 5000)
+ public void testInvalidWindowsPaths() throws URISyntaxException, IOException {
+ if (!Path.WINDOWS) {
+ return;
+ }
+
+ String [] invalidPaths = {
+ "hdfs:\\\\\\tmp"
+ };
+
+ for (String path : invalidPaths) {
+ try {
+ Path item = new Path(path);
+ fail("Did not throw for invalid path " + path);
+ } catch (IllegalArgumentException iae) {
+ }
+ }
+ }
+
/** Test Path objects created from other Path objects */
+ @Test (timeout = 30000)
public void testChildParentResolution() throws URISyntaxException, IOException {
Path parent = new Path("foo1://bar1/baz1");
Path child = new Path("foo2://bar2/baz2");
assertEquals(child, new Path(parent, child));
}
+ @Test (timeout = 30000)
public void testScheme() throws java.io.IOException {
assertEquals("foo:/bar", new Path("foo:/","/bar").toString());
assertEquals("foo://bar/baz", new Path("foo://bar/","/baz").toString());
}
+ @Test (timeout = 30000)
public void testURI() throws URISyntaxException, IOException {
URI uri = new URI("file:///bar#baz");
Path path = new Path(uri);
@@ -198,6 +245,7 @@ public void testURI() throws URISyntaxException, IOException {
}
/** Test URIs created from Path objects */
+ @Test (timeout = 30000)
public void testPathToUriConversion() throws URISyntaxException, IOException {
// Path differs from URI in that it ignores the query part..
assertEquals(new URI(null, null, "/foo?bar", null, null), new Path("/foo?bar").toUri());
@@ -218,6 +266,7 @@ public void testPathToUriConversion() throws URISyntaxException, IOException {
}
/** Test reserved characters in URIs (and therefore Paths) */
+ @Test (timeout = 30000)
public void testReservedCharacters() throws URISyntaxException, IOException {
// URI encodes the path
assertEquals("/foo%20bar", new URI(null, null, "/foo bar", null, null).getRawPath());
@@ -239,6 +288,7 @@ public void testReservedCharacters() throws URISyntaxException, IOException {
assertEquals("/foo%3Fbar", new URI("http", "localhost", "/foo?bar", null, null).toURL().getPath());
}
+ @Test (timeout = 30000)
public void testMakeQualified() throws URISyntaxException {
URI defaultUri = new URI("hdfs://host1/dir1");
URI wd = new URI("hdfs://host2/dir2");
@@ -252,6 +302,7 @@ public void testMakeQualified() throws URISyntaxException {
new Path("file").makeQualified(defaultUri, new Path(wd)));
}
+ @Test (timeout = 30000)
public void testGetName() {
assertEquals("", new Path("/").getName());
assertEquals("foo", new Path("foo").getName());
@@ -261,13 +312,17 @@ public void testGetName() {
assertEquals("bar", new Path("hdfs://host/foo/bar").getName());
}
+ @Test (timeout = 30000)
public void testAvroReflect() throws Exception {
AvroTestUtil.testReflect
(new Path("foo"),
"{\"type\":\"string\",\"java-class\":\"org.apache.hadoop.fs.Path\"}");
}
+ @Test (timeout = 30000)
public void testGlobEscapeStatus() throws Exception {
+ // This test is not meaningful on Windows, where '*' is disallowed in file names.
+ if (Shell.WINDOWS) return;
FileSystem lfs = FileSystem.getLocal(new Configuration());
Path testRoot = lfs.makeQualified(new Path(
System.getProperty("test.build.data","test/build/data"),
@@ -324,4 +379,31 @@ public void testGlobEscapeStatus() throws Exception {
assertEquals(1, stats.length);
assertEquals(new Path(testRoot, "*/f"), stats[0].getPath());
}
+
+ @Test (timeout = 30000)
+ public void testMergePaths() {
+ assertEquals(new Path("/foo/bar"),
+ Path.mergePaths(new Path("/foo"),
+ new Path("/bar")));
+
+ assertEquals(new Path("/foo/bar/baz"),
+ Path.mergePaths(new Path("/foo/bar"),
+ new Path("/baz")));
+
+ assertEquals(new Path("/foo/bar/baz"),
+ Path.mergePaths(new Path("/foo"),
+ new Path("/bar/baz")));
+
+ assertEquals(new Path(Shell.WINDOWS ? "/C:/foo/bar" : "/C:/foo/C:/bar"),
+ Path.mergePaths(new Path("/C:/foo"),
+ new Path("/C:/bar")));
+
+ assertEquals(new Path("viewfs:///foo/bar"),
+ Path.mergePaths(new Path("viewfs:///foo"),
+ new Path("file:///bar")));
+
+ assertEquals(new Path("viewfs://vfsauthority/foo/bar"),
+ Path.mergePaths(new Path("viewfs://vfsauthority/foo"),
+ new Path("file://fileauthority/bar")));
+ }
}
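Path.mergePaths, exercised above and used by the TestTrash change below, appears to keep the scheme and authority of the first argument and append the second argument's path component onto the first's. A small usage sketch consistent with the assertions above (host and path names are made up for illustration):

  // Sketch only: combine a trash root with a deleted file's absolute path.
  Path trashRoot = new Path("hdfs://nn1/user/alice/.Trash/Current");
  Path deleted = new Path("/user/alice/data/file1");
  Path inTrash = Path.mergePaths(trashRoot, deleted);
  // Expected, per the behavior asserted above:
  // hdfs://nn1/user/alice/.Trash/Current/user/alice/data/file1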
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index bc5e4bd170..a675e30a0a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -55,7 +55,7 @@ protected static Path mkdir(FileSystem fs, Path p) throws IOException {
// check that the specified file is in Trash
protected static void checkTrash(FileSystem trashFs, Path trashRoot,
Path path) throws IOException {
- Path p = new Path(trashRoot+"/"+ path.toUri().getPath());
+ Path p = Path.mergePaths(trashRoot, path);
assertTrue("Could not find file in trash: "+ p , trashFs.exists(p));
}
@@ -399,7 +399,8 @@ public static void trashShell(final Configuration conf, final Path base,
assertTrue(val==0);
}
// current trash directory
- Path trashDir = new Path(trashRoot.toUri().getPath() + myFile.getParent().toUri().getPath());
+ Path trashDir = Path.mergePaths(new Path(trashRoot.toUri().getPath()),
+ new Path(myFile.getParent().toUri().getPath()));
System.out.println("Deleting same myFile: myFile.parent=" + myFile.getParent().toUri().getPath() +
"; trashroot="+trashRoot.toUri().getPath() +
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
index 4f3ae6f04e..320a79ecca 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
@@ -19,8 +19,10 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.File;
+import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
@@ -59,7 +61,7 @@ public void cleanup() throws Exception {
fs.close();
}
- @Test
+ @Test (timeout = 30000)
public void testWithDirStringAndConf() throws Exception {
String dirString = "d1";
PathData item = new PathData(dirString, conf);
@@ -72,7 +74,7 @@ public void testWithDirStringAndConf() throws Exception {
checkPathData(dirString, item);
}
- @Test
+ @Test (timeout = 30000)
public void testUnqualifiedUriContents() throws Exception {
String dirString = "d1";
PathData item = new PathData(dirString, conf);
@@ -83,7 +85,7 @@ public void testUnqualifiedUriContents() throws Exception {
);
}
- @Test
+ @Test (timeout = 30000)
public void testQualifiedUriContents() throws Exception {
String dirString = fs.makeQualified(new Path("d1")).toString();
PathData item = new PathData(dirString, conf);
@@ -94,7 +96,7 @@ public void testQualifiedUriContents() throws Exception {
);
}
- @Test
+ @Test (timeout = 30000)
public void testCwdContents() throws Exception {
String dirString = Path.CUR_DIR;
PathData item = new PathData(dirString, conf);
@@ -105,7 +107,7 @@ public void testCwdContents() throws Exception {
);
}
- @Test
+ @Test (timeout = 30000)
public void testToFile() throws Exception {
PathData item = new PathData(".", conf);
assertEquals(new File(testDir.toString()), item.toFile());
@@ -115,7 +117,56 @@ public void testToFile() throws Exception {
assertEquals(new File(testDir + "/d1/f1"), item.toFile());
}
- @Test
+ @Test (timeout = 5000)
+ public void testToFileRawWindowsPaths() throws Exception {
+ if (!Path.WINDOWS) {
+ return;
+ }
+
+ // Can we handle raw Windows paths? The files need not exist for
+ // these tests to succeed.
+ String[] winPaths = {
+ "n:\\",
+ "N:\\",
+ "N:\\foo",
+ "N:\\foo\\bar",
+ "N:/",
+ "N:/foo",
+ "N:/foo/bar"
+ };
+
+ PathData item;
+
+ for (String path : winPaths) {
+ item = new PathData(path, conf);
+ assertEquals(new File(path), item.toFile());
+ }
+
+ item = new PathData("foo\\bar", conf);
+ assertEquals(new File(testDir + "\\foo\\bar"), item.toFile());
+ }
+
+ @Test (timeout = 5000)
+ public void testInvalidWindowsPath() throws Exception {
+ if (!Path.WINDOWS) {
+ return;
+ }
+
+ // Verify that the following invalid paths are rejected.
+ String [] winPaths = {
+ "N:\\foo/bar"
+ };
+
+ for (String path : winPaths) {
+ try {
+ PathData item = new PathData(path, conf);
+ fail("Did not throw for invalid path " + path);
+ } catch (IOException ioe) {
+ }
+ }
+ }
+
+ @Test (timeout = 30000)
public void testAbsoluteGlob() throws Exception {
PathData[] items = PathData.expandAsGlob(testDir+"/d1/f1*", conf);
assertEquals(
@@ -124,7 +175,7 @@ public void testAbsoluteGlob() throws Exception {
);
}
- @Test
+ @Test (timeout = 30000)
public void testRelativeGlob() throws Exception {
PathData[] items = PathData.expandAsGlob("d1/f1*", conf);
assertEquals(
@@ -133,7 +184,7 @@ public void testRelativeGlob() throws Exception {
);
}
- @Test
+ @Test (timeout = 30000)
public void testRelativeGlobBack() throws Exception {
fs.setWorkingDirectory(new Path("d1"));
PathData[] items = PathData.expandAsGlob("../d2/*", conf);
@@ -143,7 +194,7 @@ public void testRelativeGlobBack() throws Exception {
);
}
- @Test
+ @Test (timeout = 30000)
public void testWithStringAndConfForBuggyPath() throws Exception {
String dirString = "file:///tmp";
Path tmpDir = new Path(dirString);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
index 99c1ae7b04..0c8a6acf4a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
@@ -26,9 +26,11 @@
import java.io.IOException;
import java.io.StringWriter;
import java.lang.reflect.Method;
+import java.net.URI;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
@@ -38,12 +40,13 @@
public class TestTextCommand {
private static final String TEST_ROOT_DIR =
System.getProperty("test.build.data", "build/test/data/") + "/testText";
- private static final String AVRO_FILENAME = TEST_ROOT_DIR + "/weather.avro";
+ private static final String AVRO_FILENAME =
+ new Path(TEST_ROOT_DIR, "weather.avro").toUri().getPath();
/**
* Tests whether binary Avro data files are displayed correctly.
*/
- @Test
+ @Test (timeout = 30000)
public void testDisplayForAvroFiles() throws Exception {
// Create a small Avro data file on the local file system.
createAvroFile(generateWeatherAvroBinaryData());
@@ -51,7 +54,7 @@ public void testDisplayForAvroFiles() throws Exception {
// Prepare and call the Text command's protected getInputStream method
// using reflection.
Configuration conf = new Configuration();
- File localPath = new File(AVRO_FILENAME);
+ URI localPath = new URI(AVRO_FILENAME);
PathData pathData = new PathData(localPath, conf);
Display.Text text = new Display.Text();
text.setConf(conf);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
index f77e7288d6..0602d30272 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
@@ -21,6 +21,8 @@
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.FileWriter;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.ArrayList;
@@ -60,11 +62,15 @@ public void setupTestDir() {
TEST_DIR.mkdirs();
}
- @Test
+ @Test (timeout = 30000)
public void testFstat() throws Exception {
+ if (Path.WINDOWS) {
+ return;
+ }
+
FileOutputStream fos = new FileOutputStream(
new File(TEST_DIR, "testfstat"));
- NativeIO.Stat stat = NativeIO.getFstat(fos.getFD());
+ NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
fos.close();
LOG.info("Stat: " + String.valueOf(stat));
@@ -72,7 +78,8 @@ public void testFstat() throws Exception {
assertNotNull(stat.getGroup());
assertTrue(!stat.getGroup().isEmpty());
assertEquals("Stat mode field should indicate a regular file",
- NativeIO.Stat.S_IFREG, stat.getMode() & NativeIO.Stat.S_IFMT);
+ NativeIO.POSIX.Stat.S_IFREG,
+ stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
}
/**
@@ -81,8 +88,12 @@ public void testFstat() throws Exception {
* NOTE: this test is likely to fail on RHEL 6.0 which has a non-threadsafe
* implementation of getpwuid_r.
*/
- @Test
+ @Test (timeout = 30000)
public void testMultiThreadedFstat() throws Exception {
+ if (Path.WINDOWS) {
+ return;
+ }
+
final FileOutputStream fos = new FileOutputStream(
new File(TEST_DIR, "testfstat"));
@@ -96,12 +107,13 @@ public void run() {
long et = Time.now() + 5000;
while (Time.now() < et) {
try {
- NativeIO.Stat stat = NativeIO.getFstat(fos.getFD());
+ NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
assertEquals(System.getProperty("user.name"), stat.getOwner());
assertNotNull(stat.getGroup());
assertTrue(!stat.getGroup().isEmpty());
assertEquals("Stat mode field should indicate a regular file",
- NativeIO.Stat.S_IFREG, stat.getMode() & NativeIO.Stat.S_IFMT);
+ NativeIO.POSIX.Stat.S_IFREG,
+ stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
} catch (Throwable t) {
thrown.set(t);
}
@@ -122,26 +134,123 @@ public void run() {
}
}
- @Test
+ @Test (timeout = 30000)
public void testFstatClosedFd() throws Exception {
+ if (Path.WINDOWS) {
+ return;
+ }
+
FileOutputStream fos = new FileOutputStream(
new File(TEST_DIR, "testfstat2"));
fos.close();
try {
- NativeIO.Stat stat = NativeIO.getFstat(fos.getFD());
+ NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
} catch (NativeIOException nioe) {
LOG.info("Got expected exception", nioe);
assertEquals(Errno.EBADF, nioe.getErrno());
}
}
- @Test
+ @Test (timeout = 30000)
+ public void testSetFilePointer() throws Exception {
+ if (!Path.WINDOWS) {
+ return;
+ }
+
+ LOG.info("Set a file pointer on Windows");
+ try {
+ File testfile = new File(TEST_DIR, "testSetFilePointer");
+ assertTrue("Create test subject",
+ testfile.exists() || testfile.createNewFile());
+ FileWriter writer = new FileWriter(testfile);
+ try {
+ for (int i = 0; i < 200; i++)
+ if (i < 100)
+ writer.write('a');
+ else
+ writer.write('b');
+ writer.flush();
+ } catch (Exception writerException) {
+ fail("Got unexpected exception: " + writerException.getMessage());
+ } finally {
+ writer.close();
+ }
+
+ FileDescriptor fd = NativeIO.Windows.createFile(
+ testfile.getCanonicalPath(),
+ NativeIO.Windows.GENERIC_READ,
+ NativeIO.Windows.FILE_SHARE_READ |
+ NativeIO.Windows.FILE_SHARE_WRITE |
+ NativeIO.Windows.FILE_SHARE_DELETE,
+ NativeIO.Windows.OPEN_EXISTING);
+ NativeIO.Windows.setFilePointer(fd, 120, NativeIO.Windows.FILE_BEGIN);
+ FileReader reader = new FileReader(fd);
+ try {
+ int c = reader.read();
+ assertTrue("Unexpected character: " + c, c == 'b');
+ } catch (Exception readerException) {
+ fail("Got unexpected exception: " + readerException.getMessage());
+ } finally {
+ reader.close();
+ }
+ } catch (Exception e) {
+ fail("Got unexpected exception: " + e.getMessage());
+ }
+ }
+
+ @Test (timeout = 30000)
+ public void testCreateFile() throws Exception {
+ if (!Path.WINDOWS) {
+ return;
+ }
+
+ LOG.info("Open a file on Windows with SHARE_DELETE shared mode");
+ try {
+ File testfile = new File(TEST_DIR, "testCreateFile");
+ assertTrue("Create test subject",
+ testfile.exists() || testfile.createNewFile());
+
+ FileDescriptor fd = NativeIO.Windows.createFile(
+ testfile.getCanonicalPath(),
+ NativeIO.Windows.GENERIC_READ,
+ NativeIO.Windows.FILE_SHARE_READ |
+ NativeIO.Windows.FILE_SHARE_WRITE |
+ NativeIO.Windows.FILE_SHARE_DELETE,
+ NativeIO.Windows.OPEN_EXISTING);
+
+ FileInputStream fin = new FileInputStream(fd);
+ try {
+ fin.read();
+
+ File newfile = new File(TEST_DIR, "testRenamedFile");
+
+ boolean renamed = testfile.renameTo(newfile);
+ assertTrue("Rename failed.", renamed);
+
+ fin.read();
+ } catch (Exception e) {
+ fail("Got unexpected exception: " + e.getMessage());
+ }
+ finally {
+ fin.close();
+ }
+ } catch (Exception e) {
+ fail("Got unexpected exception: " + e.getMessage());
+ }
+
+ }
+
+ @Test (timeout = 30000)
public void testOpenMissingWithoutCreate() throws Exception {
+ if (Path.WINDOWS) {
+ return;
+ }
+
LOG.info("Open a missing file without O_CREAT and it should fail");
try {
- FileDescriptor fd = NativeIO.open(
+ FileDescriptor fd = NativeIO.POSIX.open(
new File(TEST_DIR, "doesntexist").getAbsolutePath(),
- NativeIO.O_WRONLY, 0700);
+ NativeIO.POSIX.O_WRONLY, 0700);
fail("Able to open a new file without O_CREAT");
} catch (NativeIOException nioe) {
LOG.info("Got expected exception", nioe);
@@ -149,12 +258,16 @@ public void testOpenMissingWithoutCreate() throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testOpenWithCreate() throws Exception {
+ if (Path.WINDOWS) {
+ return;
+ }
+
LOG.info("Test creating a file with O_CREAT");
- FileDescriptor fd = NativeIO.open(
+ FileDescriptor fd = NativeIO.POSIX.open(
new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(),
- NativeIO.O_WRONLY | NativeIO.O_CREAT, 0700);
+ NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT, 0700);
assertNotNull(true);
assertTrue(fd.valid());
FileOutputStream fos = new FileOutputStream(fd);
@@ -165,9 +278,9 @@ public void testOpenWithCreate() throws Exception {
LOG.info("Test exclusive create");
try {
- fd = NativeIO.open(
+ fd = NativeIO.POSIX.open(
new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(),
- NativeIO.O_WRONLY | NativeIO.O_CREAT | NativeIO.O_EXCL, 0700);
+ NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT | NativeIO.POSIX.O_EXCL, 0700);
fail("Was able to create existing file with O_EXCL");
} catch (NativeIOException nioe) {
LOG.info("Got expected exception for failed exclusive create", nioe);
@@ -179,12 +292,16 @@ public void testOpenWithCreate() throws Exception {
* Test that opens and closes a file 10000 times - this would crash with
* "Too many open files" if we leaked fds using this access pattern.
*/
- @Test
+ @Test (timeout = 30000)
public void testFDDoesntLeak() throws IOException {
+ if (Path.WINDOWS) {
+ return;
+ }
+
for (int i = 0; i < 10000; i++) {
- FileDescriptor fd = NativeIO.open(
+ FileDescriptor fd = NativeIO.POSIX.open(
new File(TEST_DIR, "testNoFdLeak").getAbsolutePath(),
- NativeIO.O_WRONLY | NativeIO.O_CREAT, 0700);
+ NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT, 0700);
assertNotNull(true);
assertTrue(fd.valid());
FileOutputStream fos = new FileOutputStream(fd);
@@ -196,10 +313,14 @@ public void testFDDoesntLeak() throws IOException {
/**
* Test basic chmod operation
*/
- @Test
+ @Test (timeout = 30000)
public void testChmod() throws Exception {
+ if (Path.WINDOWS) {
+ return;
+ }
+
try {
- NativeIO.chmod("/this/file/doesnt/exist", 777);
+ NativeIO.POSIX.chmod("/this/file/doesnt/exist", 777);
fail("Chmod of non-existent file didn't fail");
} catch (NativeIOException nioe) {
assertEquals(Errno.ENOENT, nioe.getErrno());
@@ -208,21 +329,26 @@ public void testChmod() throws Exception {
File toChmod = new File(TEST_DIR, "testChmod");
assertTrue("Create test subject",
toChmod.exists() || toChmod.mkdir());
- NativeIO.chmod(toChmod.getAbsolutePath(), 0777);
+ NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), 0777);
assertPermissions(toChmod, 0777);
- NativeIO.chmod(toChmod.getAbsolutePath(), 0000);
+ NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), 0000);
assertPermissions(toChmod, 0000);
- NativeIO.chmod(toChmod.getAbsolutePath(), 0644);
+ NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), 0644);
assertPermissions(toChmod, 0644);
}
- @Test
+ @Test (timeout = 30000)
public void testPosixFadvise() throws Exception {
+ if (Path.WINDOWS) {
+ return;
+ }
+
FileInputStream fis = new FileInputStream("/dev/zero");
try {
- NativeIO.posix_fadvise(fis.getFD(), 0, 0,
- NativeIO.POSIX_FADV_SEQUENTIAL);
+ NativeIO.POSIX.posix_fadvise(
+ fis.getFD(), 0, 0,
+ NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
} catch (UnsupportedOperationException uoe) {
// we should just skip the unit test on machines where we don't
// have fadvise support
@@ -235,8 +361,9 @@ public void testPosixFadvise() throws Exception {
}
try {
- NativeIO.posix_fadvise(fis.getFD(), 0, 1024,
- NativeIO.POSIX_FADV_SEQUENTIAL);
+ NativeIO.POSIX.posix_fadvise(
+ fis.getFD(), 0, 1024,
+ NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
fail("Did not throw on bad file");
} catch (NativeIOException nioe) {
@@ -244,8 +371,9 @@ public void testPosixFadvise() throws Exception {
}
try {
- NativeIO.posix_fadvise(null, 0, 1024,
- NativeIO.POSIX_FADV_SEQUENTIAL);
+ NativeIO.POSIX.posix_fadvise(
+ null, 0, 1024,
+ NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
fail("Did not throw on null file");
} catch (NullPointerException npe) {
@@ -253,14 +381,15 @@ public void testPosixFadvise() throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testSyncFileRange() throws Exception {
FileOutputStream fos = new FileOutputStream(
new File(TEST_DIR, "testSyncFileRange"));
try {
fos.write("foo".getBytes());
- NativeIO.sync_file_range(fos.getFD(), 0, 1024,
- NativeIO.SYNC_FILE_RANGE_WRITE);
+ NativeIO.POSIX.sync_file_range(
+ fos.getFD(), 0, 1024,
+ NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
// no way to verify that this actually has synced,
// but if it doesn't throw, we can assume it worked
} catch (UnsupportedOperationException uoe) {
@@ -271,8 +400,9 @@ public void testSyncFileRange() throws Exception {
fos.close();
}
try {
- NativeIO.sync_file_range(fos.getFD(), 0, 1024,
- NativeIO.SYNC_FILE_RANGE_WRITE);
+ NativeIO.POSIX.sync_file_range(
+ fos.getFD(), 0, 1024,
+ NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
fail("Did not throw on bad file");
} catch (NativeIOException nioe) {
assertEquals(Errno.EBADF, nioe.getErrno());
@@ -286,17 +416,25 @@ private void assertPermissions(File f, int expected) throws IOException {
assertEquals(expected, perms.toShort());
}
- @Test
+ @Test (timeout = 30000)
public void testGetUserName() throws IOException {
- assertFalse(NativeIO.getUserName(0).isEmpty());
+ if (Path.WINDOWS) {
+ return;
+ }
+
+ assertFalse(NativeIO.POSIX.getUserName(0).isEmpty());
}
- @Test
+ @Test (timeout = 30000)
public void testGetGroupName() throws IOException {
- assertFalse(NativeIO.getGroupName(0).isEmpty());
+ if (Path.WINDOWS) {
+ return;
+ }
+
+ assertFalse(NativeIO.POSIX.getGroupName(0).isEmpty());
}
- @Test
+ @Test (timeout = 30000)
public void testRenameTo() throws Exception {
final File TEST_DIR = new File(new File(
System.getProperty("test.build.data","build/test/data")), "renameTest");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
index 858e33c3d1..12f4b313ec 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
@@ -42,6 +42,7 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import static org.apache.hadoop.test.MetricsAsserts.*;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.util.Shell;
public class TestUserGroupInformation {
final private static String USER_NAME = "user1@HADOOP.APACHE.ORG";
@@ -90,17 +91,17 @@ public void resetUgi() {
UserGroupInformation.setLoginUser(null);
}
- @Test
+ @Test (timeout = 30000)
public void testSimpleLogin() throws IOException {
tryLoginAuthenticationMethod(AuthenticationMethod.SIMPLE, true);
}
- @Test
+ @Test (timeout = 30000)
public void testTokenLogin() throws IOException {
tryLoginAuthenticationMethod(AuthenticationMethod.TOKEN, false);
}
- @Test
+ @Test (timeout = 30000)
public void testProxyLogin() throws IOException {
tryLoginAuthenticationMethod(AuthenticationMethod.PROXY, false);
}
@@ -129,7 +130,7 @@ private void tryLoginAuthenticationMethod(AuthenticationMethod method,
}
}
- @Test
+ @Test (timeout = 30000)
public void testGetRealAuthenticationMethod() {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user1");
ugi.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
@@ -140,7 +141,7 @@ public void testGetRealAuthenticationMethod() {
assertEquals(AuthenticationMethod.SIMPLE, ugi.getRealAuthenticationMethod());
}
/** Test login method */
- @Test
+ @Test (timeout = 30000)
public void testLogin() throws Exception {
// login from unix
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
@@ -167,7 +168,7 @@ public UserGroupInformation run() throws IOException {
* given user name - get all the groups.
* Needs to happen before creating the test users
*/
- @Test
+ @Test (timeout = 30000)
public void testGetServerSideGroups() throws IOException,
InterruptedException {
// get the user name
@@ -175,19 +176,38 @@ public void testGetServerSideGroups() throws IOException,
BufferedReader br = new BufferedReader
(new InputStreamReader(pp.getInputStream()));
String userName = br.readLine().trim();
+ // If on a Windows domain, the token format is DOMAIN\\user and we want to
+ // extract only the user name
+ if(Shell.WINDOWS) {
+ int sp = userName.lastIndexOf('\\');
+ if (sp != -1) {
+ userName = userName.substring(sp + 1);
+ }
+ // user names are case insensitive on Windows. Make consistent
+ userName = userName.toLowerCase();
+ }
// get the groups
- pp = Runtime.getRuntime().exec("id -Gn " + userName);
+ pp = Runtime.getRuntime().exec(Shell.WINDOWS ?
+ Shell.WINUTILS + " groups -F" : "id -Gn");
br = new BufferedReader(new InputStreamReader(pp.getInputStream()));
String line = br.readLine();
+
System.out.println(userName + ":" + line);
Set<String> groups = new LinkedHashSet<String>();
- for(String s: line.split("[\\s]")) {
+ String[] tokens = line.split(Shell.TOKEN_SEPARATOR_REGEX);
+ for(String s: tokens) {
groups.add(s);
}
final UserGroupInformation login = UserGroupInformation.getCurrentUser();
- assertEquals(userName, login.getShortUserName());
+ String loginUserName = login.getShortUserName();
+ if(Shell.WINDOWS) {
+ // user names are case insensitive on Windows. Make consistent
+ loginUserName = loginUserName.toLowerCase();
+ }
+ assertEquals(userName, loginUserName);
+
String[] gi = login.getGroupNames();
assertEquals(groups.size(), gi.length);
for(int i=0; i < gi.length; i++) {
@@ -208,7 +228,7 @@ public Object run() throws IOException {
}
/** test constructor */
- @Test
+ @Test (timeout = 30000)
public void testConstructor() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting("user2/cron@HADOOP.APACHE.ORG",
@@ -234,7 +254,7 @@ private void testConstructorFailures(String userName) {
assertTrue(gotException);
}
- @Test
+ @Test (timeout = 30000)
public void testEquals() throws Exception {
UserGroupInformation uugi =
UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
@@ -252,7 +272,7 @@ public void testEquals() throws Exception {
assertEquals(uugi.hashCode(), ugi3.hashCode());
}
- @Test
+ @Test (timeout = 30000)
public void testEqualsWithRealUser() throws Exception {
UserGroupInformation realUgi1 = UserGroupInformation.createUserForTesting(
"RealUser", GROUP_NAMES);
@@ -265,7 +285,7 @@ public void testEqualsWithRealUser() throws Exception {
assertFalse(remoteUgi.equals(proxyUgi1));
}
- @Test
+ @Test (timeout = 30000)
public void testGettingGroups() throws Exception {
UserGroupInformation uugi =
UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
@@ -275,7 +295,7 @@ public void testGettingGroups() throws Exception {
}
@SuppressWarnings("unchecked") // from Mockito mocks
- @Test
+ @Test (timeout = 30000)
public void testAddToken() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("someone");
@@ -313,7 +333,7 @@ public void testAddToken() throws Exception {
}
@SuppressWarnings("unchecked") // from Mockito mocks
- @Test
+ @Test (timeout = 30000)
public void testGetCreds() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("someone");
@@ -339,7 +359,7 @@ public void testGetCreds() throws Exception {
}
@SuppressWarnings("unchecked") // from Mockito mocks
- @Test
+ @Test (timeout = 30000)
public void testAddCreds() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("someone");
@@ -364,7 +384,7 @@ public void testAddCreds() throws Exception {
assertSame(secret, ugi.getCredentials().getSecretKey(secretKey));
}
- @Test
+ @Test (timeout = 30000)
public void testGetCredsNotSame()
throws Exception {
UserGroupInformation ugi =
@@ -392,7 +412,7 @@ private void checkTokens(UserGroupInformation ugi, Token<?> ... tokens) {
}
@SuppressWarnings("unchecked") // from Mockito mocks
- @Test
+ @Test (timeout = 30000)
public void testAddNamedToken() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("someone");
@@ -413,7 +433,7 @@ public void testAddNamedToken() throws Exception {
}
@SuppressWarnings("unchecked") // from Mockito mocks
- @Test
+ @Test (timeout = 30000)
public void testUGITokens() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting("TheDoctor",
@@ -459,7 +479,7 @@ public Collection<Token<?>> run() throws IOException {
assertTrue(otherSet.contains(t2));
}
- @Test
+ @Test (timeout = 30000)
public void testTokenIdentifiers() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
"TheDoctor", new String[] { "TheTARDIS" });
@@ -487,7 +507,7 @@ public Collection<TokenIdentifier> run() throws IOException {
assertEquals(2, otherSet.size());
}
- @Test
+ @Test (timeout = 30000)
public void testTestAuthMethod() throws Exception {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
// verify the reverse mappings works
@@ -499,7 +519,7 @@ public void testTestAuthMethod() throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testUGIAuthMethod() throws Exception {
final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
final AuthenticationMethod am = AuthenticationMethod.KERBEROS;
@@ -515,7 +535,7 @@ public Object run() throws IOException {
});
}
- @Test
+ @Test (timeout = 30000)
public void testUGIAuthMethodInRealUser() throws Exception {
final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(
@@ -550,7 +570,7 @@ public Object run() throws IOException {
Assert.assertEquals(proxyUgi3, proxyUgi4);
}
- @Test
+ @Test (timeout = 30000)
public void testLoginObjectInSubject() throws Exception {
UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
UserGroupInformation anotherUgi = new UserGroupInformation(loginUgi
@@ -563,7 +583,7 @@ public void testLoginObjectInSubject() throws Exception {
Assert.assertTrue(login1 == login2);
}
- @Test
+ @Test (timeout = 30000)
public void testLoginModuleCommit() throws Exception {
UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
User user1 = loginUgi.getSubject().getPrincipals(User.class).iterator()
@@ -597,7 +617,7 @@ public static void verifyLoginMetrics(long success, int failure)
* with it, but that Subject was not created by Hadoop (ie it has no
* associated User principal)
*/
- @Test
+ @Test (timeout = 30000)
public void testUGIUnderNonHadoopContext() throws Exception {
Subject nonHadoopSubject = new Subject();
    Subject.doAs(nonHadoopSubject, new PrivilegedExceptionAction<Void>() {
@@ -611,7 +631,7 @@ public Void run() throws IOException {
}
/** Test hasSufficientTimeElapsed method */
- @Test
+ @Test (timeout = 30000)
public void testHasSufficientTimeElapsed() throws Exception {
// Make hasSufficientTimeElapsed public
Method method = UserGroupInformation.class
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
index e8ff9fffd8..7dcc4aedb6 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskChecker.java
@@ -30,24 +30,29 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.Shell;
public class TestDiskChecker {
final FsPermission defaultPerm = new FsPermission("755");
final FsPermission invalidPerm = new FsPermission("000");
- @Test public void testMkdirs_dirExists() throws Throwable {
+ @Test (timeout = 30000)
+ public void testMkdirs_dirExists() throws Throwable {
_mkdirs(true, defaultPerm, defaultPerm);
}
- @Test public void testMkdirs_noDir() throws Throwable {
+ @Test (timeout = 30000)
+ public void testMkdirs_noDir() throws Throwable {
_mkdirs(false, defaultPerm, defaultPerm);
}
- @Test public void testMkdirs_dirExists_badUmask() throws Throwable {
+ @Test (timeout = 30000)
+ public void testMkdirs_dirExists_badUmask() throws Throwable {
_mkdirs(true, defaultPerm, invalidPerm);
}
- @Test public void testMkdirs_noDir_badUmask() throws Throwable {
+ @Test (timeout = 30000)
+ public void testMkdirs_noDir_badUmask() throws Throwable {
_mkdirs(false, defaultPerm, invalidPerm);
}
@@ -78,23 +83,28 @@ private void _mkdirs(boolean exists, FsPermission before, FsPermission after)
}
}
- @Test public void testCheckDir_normal() throws Throwable {
+ @Test (timeout = 30000)
+ public void testCheckDir_normal() throws Throwable {
_checkDirs(true, new FsPermission("755"), true);
}
- @Test public void testCheckDir_notDir() throws Throwable {
+ @Test (timeout = 30000)
+ public void testCheckDir_notDir() throws Throwable {
_checkDirs(false, new FsPermission("000"), false);
}
- @Test public void testCheckDir_notReadable() throws Throwable {
+ @Test (timeout = 30000)
+ public void testCheckDir_notReadable() throws Throwable {
_checkDirs(true, new FsPermission("000"), false);
}
- @Test public void testCheckDir_notWritable() throws Throwable {
+ @Test (timeout = 30000)
+ public void testCheckDir_notWritable() throws Throwable {
_checkDirs(true, new FsPermission("444"), false);
}
- @Test public void testCheckDir_notListable() throws Throwable {
+ @Test (timeout = 30000)
+ public void testCheckDir_notListable() throws Throwable {
_checkDirs(true, new FsPermission("666"), false); // not listable
}
@@ -130,27 +140,27 @@ private void _checkDirs(boolean isDir, FsPermission perm, boolean success)
* permission for result of mapper.
*/
- @Test
+ @Test (timeout = 30000)
public void testCheckDir_normal_local() throws Throwable {
_checkDirs(true, "755", true);
}
- @Test
+ @Test (timeout = 30000)
public void testCheckDir_notDir_local() throws Throwable {
_checkDirs(false, "000", false);
}
- @Test
+ @Test (timeout = 30000)
public void testCheckDir_notReadable_local() throws Throwable {
_checkDirs(true, "000", false);
}
- @Test
+ @Test (timeout = 30000)
public void testCheckDir_notWritable_local() throws Throwable {
_checkDirs(true, "444", false);
}
- @Test
+ @Test (timeout = 30000)
public void testCheckDir_notListable_local() throws Throwable {
_checkDirs(true, "666", false);
}
@@ -160,8 +170,8 @@ private void _checkDirs(boolean isDir, String perm, boolean success)
File localDir = File.createTempFile("test", "tmp");
localDir.delete();
localDir.mkdir();
- Runtime.getRuntime().exec(
- "chmod " + perm + " " + localDir.getAbsolutePath()).waitFor();
+ Shell.execCommand(Shell.getSetPermissionCommand(perm, false,
+ localDir.getAbsolutePath()));
try {
DiskChecker.checkDir(localDir);
assertTrue("checkDir success", success);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java
index 920d9a2c67..9b767a812b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericOptionsParser.java
@@ -44,7 +44,9 @@ public void testFilesOption() throws Exception {
String[] args = new String[2];
// pass a files option
args[0] = "-files";
- args[1] = tmpFile.toString();
+ // Convert a file to a URI as File.toString() is not a valid URI on
+ // all platforms and GenericOptionsParser accepts only valid URIs
+ args[1] = tmpFile.toURI().toString();
new GenericOptionsParser(conf, args);
String files = conf.get("tmpfiles");
assertNotNull("files is null", files);
@@ -53,7 +55,7 @@ public void testFilesOption() throws Exception {
// pass file as uri
Configuration conf1 = new Configuration();
- URI tmpURI = new URI(tmpFile.toString() + "#link");
+ URI tmpURI = new URI(tmpFile.toURI().toString() + "#link");
args[0] = "-files";
args[1] = tmpURI.toString();
new GenericOptionsParser(conf1, args);
@@ -148,7 +150,7 @@ public void testTokenCacheOption() throws IOException {
String[] args = new String[2];
// pass a files option
args[0] = "-tokenCacheFile";
- args[1] = tmpFile.toString();
+ args[1] = tmpFile.toURI().toString();
// test non existing file
Throwable th = null;
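
The switch from File.toString() to File.toURI().toString() matters because a Windows path
such as C:\hdc\tmpfile.txt contains a drive colon and backslashes, so it is not a valid URI,
whereas toURI() yields a well-formed file: URI on every platform. A small illustration with
a made-up path:

    import java.io.File;

    public class FileUriSketch {
      public static void main(String[] args) {
        // Hypothetical Windows-style path; any existing or non-existing path works.
        File f = new File("C:\\hdc\\tmpfile.txt");
        // On Windows this prints C:\hdc\tmpfile.txt, which URI parsing rejects.
        System.out.println("toString: " + f.toString());
        // toURI() produces a parseable URI, e.g. file:/C:/hdc/tmpfile.txt on Windows.
        System.out.println("toURI   : " + f.toURI().toString());
      }
    }
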
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
index 4c247f85f1..ab436f8bbb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShell.java
@@ -81,6 +81,10 @@ public void testShellCommandExecutorToString() throws Throwable {
}
public void testShellCommandTimeout() throws Throwable {
+ if(Shell.WINDOWS) {
+ // setExecutable does not work on Windows
+ return;
+ }
String rootDir = new File(System.getProperty(
"test.build.data", "/tmp")).getAbsolutePath();
File shellFile = new File(rootDir, "timeout.sh");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 3dcf8dd397..4f06a31649 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -25,7 +25,10 @@
import static org.junit.Assert.fail;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.regex.Pattern;
import org.apache.hadoop.test.UnitTestcaseTimeLimit;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
@@ -43,7 +46,7 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
final private static String ESCAPED_STR_WITH_BOTH2 =
"\\,A\\\\\\,\\,B\\\\\\\\\\,";
- @Test
+ @Test (timeout = 30000)
public void testEscapeString() throws Exception {
assertEquals(NULL_STR, StringUtils.escapeString(NULL_STR));
assertEquals(EMPTY_STR, StringUtils.escapeString(EMPTY_STR));
@@ -57,7 +60,7 @@ public void testEscapeString() throws Exception {
StringUtils.escapeString(STR_WITH_BOTH2));
}
- @Test
+ @Test (timeout = 30000)
public void testSplit() throws Exception {
assertEquals(NULL_STR, StringUtils.split(NULL_STR));
String[] splits = StringUtils.split(EMPTY_STR);
@@ -87,7 +90,7 @@ public void testSplit() throws Exception {
assertEquals(ESCAPED_STR_WITH_BOTH2, splits[0]);
}
- @Test
+ @Test (timeout = 30000)
public void testSimpleSplit() throws Exception {
final String[] TO_TEST = {
"a/b/c",
@@ -103,7 +106,7 @@ public void testSimpleSplit() throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testUnescapeString() throws Exception {
assertEquals(NULL_STR, StringUtils.unEscapeString(NULL_STR));
assertEquals(EMPTY_STR, StringUtils.unEscapeString(EMPTY_STR));
@@ -135,7 +138,7 @@ public void testUnescapeString() throws Exception {
StringUtils.unEscapeString(ESCAPED_STR_WITH_BOTH2));
}
- @Test
+ @Test (timeout = 30000)
public void testTraditionalBinaryPrefix() throws Exception {
//test string2long(..)
String[] symbol = {"k", "m", "g", "t", "p", "e"};
@@ -261,7 +264,7 @@ public void testTraditionalBinaryPrefix() throws Exception {
assertEquals("0.5430%", StringUtils.formatPercent(0.00543, 4));
}
- @Test
+ @Test (timeout = 30000)
public void testJoin() {
    List<String> s = new ArrayList<String>();
s.add("a");
@@ -273,7 +276,7 @@ public void testJoin() {
assertEquals("a:b:c", StringUtils.join(":", s.subList(0, 3)));
}
- @Test
+ @Test (timeout = 30000)
public void testGetTrimmedStrings() throws Exception {
String compactDirList = "/spindle1/hdfs,/spindle2/hdfs,/spindle3/hdfs";
String spacedDirList = "/spindle1/hdfs, /spindle2/hdfs, /spindle3/hdfs";
@@ -295,7 +298,7 @@ public void testGetTrimmedStrings() throws Exception {
assertArrayEquals(emptyArray, estring);
}
- @Test
+ @Test (timeout = 30000)
public void testCamelize() {
// common use cases
assertEquals("Map", StringUtils.camelize("MAP"));
@@ -331,7 +334,7 @@ public void testCamelize() {
assertEquals("Zz", StringUtils.camelize("zZ"));
}
- @Test
+ @Test (timeout = 30000)
public void testStringToURI() {
String[] str = new String[] { "file://" };
try {
@@ -342,7 +345,7 @@ public void testStringToURI() {
}
}
- @Test
+ @Test (timeout = 30000)
public void testSimpleHostName() {
assertEquals("Should return hostname when FQDN is specified",
"hadoop01",
@@ -355,6 +358,49 @@ public void testSimpleHostName() {
StringUtils.simpleHostname("10.10.5.68"));
}
+ @Test (timeout = 5000)
+ public void testReplaceTokensShellEnvVars() {
+ Pattern pattern = StringUtils.SHELL_ENV_VAR_PATTERN;
+    Map<String, String> replacements = new HashMap<String, String>();
+ replacements.put("FOO", "one");
+ replacements.put("BAZ", "two");
+ replacements.put("NUMBERS123", "one-two-three");
+ replacements.put("UNDER_SCORES", "___");
+
+ assertEquals("one", StringUtils.replaceTokens("$FOO", pattern,
+ replacements));
+ assertEquals("two", StringUtils.replaceTokens("$BAZ", pattern,
+ replacements));
+ assertEquals("", StringUtils.replaceTokens("$BAR", pattern, replacements));
+ assertEquals("", StringUtils.replaceTokens("", pattern, replacements));
+ assertEquals("one-two-three", StringUtils.replaceTokens("$NUMBERS123",
+ pattern, replacements));
+ assertEquals("___", StringUtils.replaceTokens("$UNDER_SCORES", pattern,
+ replacements));
+ assertEquals("//one//two//", StringUtils.replaceTokens("//$FOO/$BAR/$BAZ//",
+ pattern, replacements));
+ }
+
+ @Test (timeout = 5000)
+ public void testReplaceTokensWinEnvVars() {
+ Pattern pattern = StringUtils.WIN_ENV_VAR_PATTERN;
+    Map<String, String> replacements = new HashMap<String, String>();
+ replacements.put("foo", "zoo");
+ replacements.put("baz", "zaz");
+
+ assertEquals("zoo", StringUtils.replaceTokens("%foo%", pattern,
+ replacements));
+ assertEquals("zaz", StringUtils.replaceTokens("%baz%", pattern,
+ replacements));
+ assertEquals("", StringUtils.replaceTokens("%bar%", pattern,
+ replacements));
+ assertEquals("", StringUtils.replaceTokens("", pattern, replacements));
+ assertEquals("zoo__zaz", StringUtils.replaceTokens("%foo%_%bar%_%baz%",
+ pattern, replacements));
+ assertEquals("begin zoo__zaz end", StringUtils.replaceTokens(
+ "begin %foo%_%bar%_%baz% end", pattern, replacements));
+ }
+
// Benchmark for StringUtils split
public static void main(String []args) {
final String TO_SPLIT = "foo,bar,baz,blah,blah";
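
The two replaceTokens tests exercise substitution of $VAR-style and %VAR%-style variables.
A rough standalone approximation of that behavior follows; the authoritative patterns are
StringUtils.SHELL_ENV_VAR_PATTERN and WIN_ENV_VAR_PATTERN, and the regexes below are only
guesses that satisfy the same assertions:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ReplaceTokensSketch {
      // Approximate patterns: $NAME for shell-style, %NAME% for Windows-style.
      static final Pattern SHELL_VAR = Pattern.compile("\\$([A-Za-z_][A-Za-z0-9_]*)");
      static final Pattern WIN_VAR = Pattern.compile("%(.*?)%");

      // Replace each matched variable with its value, or "" when unknown,
      // matching the behavior asserted in the tests above.
      static String replaceTokens(String template, Pattern p, Map<String, String> vals) {
        Matcher m = p.matcher(template);
        StringBuffer sb = new StringBuffer();
        while (m.find()) {
          String v = vals.get(m.group(1));
          m.appendReplacement(sb, Matcher.quoteReplacement(v == null ? "" : v));
        }
        m.appendTail(sb);
        return sb.toString();
      }

      public static void main(String[] args) {
        Map<String, String> vals = new HashMap<String, String>();
        vals.put("FOO", "one");
        System.out.println(replaceTokens("//$FOO/$BAR//", SHELL_VAR, vals)); // prints //one///
        System.out.println(replaceTokens("%FOO%_%BAR%", WIN_VAR, vals));     // prints one_
      }
    }
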
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
new file mode 100644
index 0000000000..29140db3b9
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test cases for helper Windows winutils.exe utility.
+ */
+public class TestWinUtils {
+
+ private static final Log LOG = LogFactory.getLog(TestWinUtils.class);
+ private static File TEST_DIR = new File(System.getProperty("test.build.data",
+ "/tmp"), TestWinUtils.class.getSimpleName());
+
+ @Before
+ public void setUp() {
+ TEST_DIR.mkdirs();
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ FileUtil.fullyDelete(TEST_DIR);
+ }
+
+ // Helper routine that writes the given content to the file.
+ private void writeFile(File file, String content) throws IOException {
+ byte[] data = content.getBytes();
+ FileOutputStream os = new FileOutputStream(file);
+ os.write(data);
+ os.close();
+ }
+
+ // Helper routine that reads the first 100 bytes from the file.
+ private String readFile(File file) throws IOException {
+    FileInputStream fis = new FileInputStream(file);
+    try {
+      byte[] b = new byte[100];
+      // Convert only the bytes actually read; calling toString() on the array
+      // would return its identity rather than the file content.
+      int len = fis.read(b);
+      return len > 0 ? new String(b, 0, len) : "";
+    } finally {
+      fis.close();
+    }
+ }
+
+ @Test (timeout = 30000)
+ public void testLs() throws IOException {
+ if (!Shell.WINDOWS) {
+ // Not supported on non-Windows platforms
+ return;
+ }
+
+ final String content = "6bytes";
+ final int contentSize = content.length();
+ File testFile = new File(TEST_DIR, "file1");
+ writeFile(testFile, content);
+
+ // Verify permissions and file name return tokens
+ String output = Shell.execCommand(
+ Shell.WINUTILS, "ls", testFile.getCanonicalPath());
+ String[] outputArgs = output.split("[ \r\n]");
+ assertTrue(outputArgs[0].equals("-rwx------"));
+ assertTrue(outputArgs[outputArgs.length - 1]
+ .equals(testFile.getCanonicalPath()));
+
+ // Verify most tokens when using a formatted output (other tokens
+ // will be verified with chmod/chown)
+ output = Shell.execCommand(
+ Shell.WINUTILS, "ls", "-F", testFile.getCanonicalPath());
+ outputArgs = output.split("[|\r\n]");
+ assertEquals(9, outputArgs.length);
+ assertTrue(outputArgs[0].equals("-rwx------"));
+ assertEquals(contentSize, Long.parseLong(outputArgs[4]));
+ assertTrue(outputArgs[8].equals(testFile.getCanonicalPath()));
+
+ testFile.delete();
+ assertFalse(testFile.exists());
+ }
+
+ @Test (timeout = 30000)
+ public void testGroups() throws IOException {
+ if (!Shell.WINDOWS) {
+ // Not supported on non-Windows platforms
+ return;
+ }
+
+ String currentUser = System.getProperty("user.name");
+
+ // Verify that groups command returns information about the current user
+ // groups when invoked with no args
+ String outputNoArgs = Shell.execCommand(
+ Shell.WINUTILS, "groups").trim();
+ String output = Shell.execCommand(
+ Shell.WINUTILS, "groups", currentUser).trim();
+ assertEquals(output, outputNoArgs);
+
+ // Verify that groups command with the -F flag returns the same information
+ String outputFormat = Shell.execCommand(
+ Shell.WINUTILS, "groups", "-F", currentUser).trim();
+ outputFormat = outputFormat.replace("|", " ");
+ assertEquals(output, outputFormat);
+ }
+
+ private void chmod(String mask, File file) throws IOException {
+ Shell.execCommand(
+ Shell.WINUTILS, "chmod", mask, file.getCanonicalPath());
+ }
+
+ private void chmodR(String mask, File file) throws IOException {
+ Shell.execCommand(
+ Shell.WINUTILS, "chmod", "-R", mask, file.getCanonicalPath());
+ }
+
+ private String ls(File file) throws IOException {
+ return Shell.execCommand(
+ Shell.WINUTILS, "ls", file.getCanonicalPath());
+ }
+
+ private String lsF(File file) throws IOException {
+ return Shell.execCommand(
+ Shell.WINUTILS, "ls", "-F", file.getCanonicalPath());
+ }
+
+ private void assertPermissions(File file, String expected)
+ throws IOException {
+ String output = ls(file).split("[ \r\n]")[0];
+ assertEquals(expected, output);
+ }
+
+  // Helper, not a JUnit test by itself: JUnit 4 requires @Test methods to be
+  // public and take no parameters. Invoked from testChmod below.
+  private void testChmodInternal(String mode, String expectedPerm)
+ throws IOException {
+ File a = new File(TEST_DIR, "file1");
+ assertTrue(a.createNewFile());
+
+ // Reset permissions on the file to default
+ chmod("700", a);
+
+ // Apply the mode mask
+ chmod(mode, a);
+
+ // Compare the output
+ assertPermissions(a, expectedPerm);
+
+ a.delete();
+ assertFalse(a.exists());
+ }
+
+  // Helper invoked from testChmod below; see note on testChmodInternal.
+  private void testNewFileChmodInternal(String expectedPerm) throws IOException {
+ // Create a new directory
+ File dir = new File(TEST_DIR, "dir1");
+
+ assertTrue(dir.mkdir());
+
+ // Set permission use chmod
+ chmod("755", dir);
+
+ // Create a child file in the directory
+ File child = new File(dir, "file1");
+ assertTrue(child.createNewFile());
+
+ // Verify the child file has correct permissions
+ assertPermissions(child, expectedPerm);
+
+ child.delete();
+ dir.delete();
+ assertFalse(dir.exists());
+ }
+
+  // Helper invoked from testChmod below; see note on testChmodInternal.
+  private void testChmodInternalR(String mode, String expectedPerm,
+ String expectedPermx) throws IOException {
+ // Setup test folder hierarchy
+ File a = new File(TEST_DIR, "a");
+ assertTrue(a.mkdir());
+ chmod("700", a);
+ File aa = new File(a, "a");
+ assertTrue(aa.createNewFile());
+ chmod("600", aa);
+ File ab = new File(a, "b");
+ assertTrue(ab.mkdir());
+ chmod("700", ab);
+ File aba = new File(ab, "a");
+ assertTrue(aba.mkdir());
+ chmod("700", aba);
+ File abb = new File(ab, "b");
+ assertTrue(abb.createNewFile());
+ chmod("600", abb);
+ File abx = new File(ab, "x");
+ assertTrue(abx.createNewFile());
+ chmod("u+x", abx);
+
+ // Run chmod recursive
+ chmodR(mode, a);
+
+ // Verify outcome
+ assertPermissions(a, "d" + expectedPermx);
+ assertPermissions(aa, "-" + expectedPerm);
+ assertPermissions(ab, "d" + expectedPermx);
+ assertPermissions(aba, "d" + expectedPermx);
+ assertPermissions(abb, "-" + expectedPerm);
+ assertPermissions(abx, "-" + expectedPermx);
+
+ assertTrue(FileUtil.fullyDelete(a));
+ }
+
+ @Test (timeout = 30000)
+ public void testBasicChmod() throws IOException {
+ if (!Shell.WINDOWS) {
+ // Not supported on non-Windows platforms
+ return;
+ }
+
+ // - Create a file.
+ // - Change mode to 377 so owner does not have read permission.
+ // - Verify the owner truly does not have the permissions to read.
+ File a = new File(TEST_DIR, "a");
+ a.createNewFile();
+ chmod("377", a);
+
+ try {
+ readFile(a);
+ assertFalse("readFile should have failed!", true);
+ } catch (IOException ex) {
+ LOG.info("Expected: Failed read from a file with permissions 377");
+ }
+ // restore permissions
+ chmod("700", a);
+
+ // - Create a file.
+ // - Change mode to 577 so owner does not have write permission.
+ // - Verify the owner truly does not have the permissions to write.
+ chmod("577", a);
+
+ try {
+ writeFile(a, "test");
+ assertFalse("writeFile should have failed!", true);
+ } catch (IOException ex) {
+ LOG.info("Expected: Failed write to a file with permissions 577");
+ }
+ // restore permissions
+ chmod("700", a);
+ assertTrue(a.delete());
+
+ // - Copy WINUTILS to a new executable file, a.exe.
+ // - Change mode to 677 so owner does not have execute permission.
+ // - Verify the owner truly does not have the permissions to execute the file.
+
+ File winutilsFile = new File(Shell.WINUTILS);
+ File aExe = new File(TEST_DIR, "a.exe");
+ FileUtils.copyFile(winutilsFile, aExe);
+ chmod("677", aExe);
+
+ try {
+ Shell.execCommand(aExe.getCanonicalPath(), "ls");
+ assertFalse("executing " + aExe + " should have failed!", true);
+ } catch (IOException ex) {
+ LOG.info("Expected: Failed to execute a file with permissions 677");
+ }
+ assertTrue(aExe.delete());
+ }
+
+ @Test (timeout = 30000)
+ public void testChmod() throws IOException {
+ if (!Shell.WINDOWS) {
+ // Not supported on non-Windows platforms
+ return;
+ }
+
+ testChmodInternal("7", "-------rwx");
+ testChmodInternal("70", "----rwx---");
+ testChmodInternal("u-x,g+r,o=g", "-rw-r--r--");
+ testChmodInternal("u-x,g+rw", "-rw-rw----");
+ testChmodInternal("u-x,g+rwx-x,o=u", "-rw-rw-rw-");
+ testChmodInternal("+", "-rwx------");
+
+ // Recursive chmod tests
+ testChmodInternalR("755", "rwxr-xr-x", "rwxr-xr-x");
+ testChmodInternalR("u-x,g+r,o=g", "rw-r--r--", "rw-r--r--");
+ testChmodInternalR("u-x,g+rw", "rw-rw----", "rw-rw----");
+ testChmodInternalR("u-x,g+rwx-x,o=u", "rw-rw-rw-", "rw-rw-rw-");
+ testChmodInternalR("a+rX", "rw-r--r--", "rwxr-xr-x");
+
+ // Test a new file created in a chmod'ed directory has expected permission
+ testNewFileChmodInternal("-rwx------");
+ }
+
+ private void chown(String userGroup, File file) throws IOException {
+ Shell.execCommand(
+ Shell.WINUTILS, "chown", userGroup, file.getCanonicalPath());
+ }
+
+ private void assertOwners(File file, String expectedUser,
+ String expectedGroup) throws IOException {
+ String [] args = lsF(file).trim().split("[\\|]");
+ assertEquals(expectedUser.toLowerCase(), args[2].toLowerCase());
+ assertEquals(expectedGroup.toLowerCase(), args[3].toLowerCase());
+ }
+
+ @Test (timeout = 30000)
+ public void testChown() throws IOException {
+ if (!Shell.WINDOWS) {
+ // Not supported on non-Windows platforms
+ return;
+ }
+
+ File a = new File(TEST_DIR, "a");
+ assertTrue(a.createNewFile());
+ String username = System.getProperty("user.name");
+ // username including the domain aka DOMAIN\\user
+ String qualifiedUsername = Shell.execCommand("whoami").trim();
+ String admins = "Administrators";
+ String qualifiedAdmins = "BUILTIN\\Administrators";
+
+ chown(username + ":" + admins, a);
+ assertOwners(a, qualifiedUsername, qualifiedAdmins);
+
+ chown(username, a);
+ chown(":" + admins, a);
+ assertOwners(a, qualifiedUsername, qualifiedAdmins);
+
+ chown(":" + admins, a);
+ chown(username + ":", a);
+ assertOwners(a, qualifiedUsername, qualifiedAdmins);
+
+ assertTrue(a.delete());
+ assertFalse(a.exists());
+ }
+}
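
The ls -F assertions above imply a pipe-separated record in which field 0 is the permission
string, fields 2 and 3 are owner and group, field 4 is the length, and field 8 is the path.
A tiny parser sketch built only from those asserted positions (the real winutils output
format may carry more detail):

    public class WinUtilsLsRecord {
      final String permissions;
      final String owner;
      final String group;
      final long length;
      final String path;

      // Positions 0, 2, 3, 4 and 8 are the ones TestWinUtils asserts on.
      WinUtilsLsRecord(String lsFOutputLine) {
        String[] f = lsFOutputLine.trim().split("\\|");
        if (f.length != 9) {
          throw new IllegalArgumentException("unexpected ls -F record: " + lsFOutputLine);
        }
        permissions = f[0];
        owner = f[2];
        group = f[3];
        length = Long.parseLong(f[4]);
        path = f[8];
      }

      public static void main(String[] args) {
        // Made-up sample record in the shape the test assertions describe.
        WinUtilsLsRecord r = new WinUtilsLsRecord(
            "-rwx------|1|DOMAIN\\user|BUILTIN\\Administrators|6|0|0|0|C:\\hdc\\file1");
        System.out.println(r.permissions + " " + r.owner + " " + r.length + " " + r.path);
      }
    }
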
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 2e03c0ebab..44ed401264 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -107,7 +107,7 @@
fi
}
- ROOT=`cd ${basedir}/..;pwd`
+ ROOT=`cd ../..;pwd`
echo
echo "Current directory `pwd`"
echo
@@ -151,7 +151,8 @@
fi
}
- run tar czf hadoop-${project.version}.tar.gz hadoop-${project.version}
+ run tar cf hadoop-${project.version}.tar hadoop-${project.version}
+ run gzip hadoop-${project.version}.tar
echo
echo "Hadoop dist tar available at: ${project.build.directory}/hadoop-${project.version}.tar.gz"
echo
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index fb5febbe18..c590f5ec00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -539,15 +539,8 @@
-
- which cygpath 2> /dev/null
- if [ $? = 1 ]; then
- BUILD_DIR="${project.build.directory}"
- else
- BUILD_DIR=`cygpath --unix '${project.build.directory}'`
- fi
- cd $BUILD_DIR/tomcat.exp
- tar xzf ${basedir}/downloads/apache-tomcat-${tomcat.version}.tar.gz
+ cd "${project.build.directory}/tomcat.exp"
+ gzip -cd ../../downloads/apache-tomcat-${tomcat.version}.tar.gz | tar xf -
@@ -582,15 +575,8 @@
-
- which cygpath 2> /dev/null
- if [ $? = 1 ]; then
- BUILD_DIR="${project.build.directory}"
- else
- BUILD_DIR=`cygpath --unix '${project.build.directory}'`
- fi
- cd $BUILD_DIR
- tar czf ${project.artifactId}-${project.version}.tar.gz ${project.artifactId}-${project.version}
+ cd "${project.build.directory}"
+ tar cf - ${project.artifactId}-${project.version} | gzip > ${project.artifactId}-${project.version}.tar.gz
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.branch-trunk-win.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.branch-trunk-win.txt
new file mode 100644
index 0000000000..3f6c402abe
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.branch-trunk-win.txt
@@ -0,0 +1,13 @@
+branch-trunk-win changes - unreleased
+
+ HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,
+ Bikas Saha, Lauren Yang, Chuan Liu, Thejas M Nair and Ivan Mitic via suresh)
+
+ HDFS-4163. HDFS distribution build fails on Windows. (Chris Nauroth via
+ suresh)
+
+ HDFS-4316. branch-trunk-win contains test code accidentally added during
+ work on fixing tests on Windows. (Chris Nauroth via suresh)
+
+ HDFS-4297. Fix issues related to datanode concurrent reading and writing on
+ Windows. (Arpit Agarwal, Chuan Liu via suresh)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 4116cd4136..a5f87e4bef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -515,6 +515,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <exclude>CHANGES.txt</exclude>
            <exclude>CHANGES.HDFS-1623.txt</exclude>
+            <exclude>CHANGES.branch-trunk-win.txt</exclude>
            <exclude>.idea/**</exclude>
            <exclude>src/main/conf/*</exclude>
            <exclude>src/main/docs/**</exclude>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 7f7836f7dd..709520f4a7 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -146,9 +146,6 @@ else
CLASS="$COMMAND"
fi
-if $cygwin; then
- CLASSPATH=`cygpath -p -w "$CLASSPATH"`
-fi
export CLASSPATH=$CLASSPATH
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.cmd
new file mode 100644
index 0000000000..f3aa7338ee
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.cmd
@@ -0,0 +1,43 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem included in all the hdfs scripts with source command
+@rem should not be executed directly
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+if exist %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd (
+ call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+) else if exist %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd (
+ call %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd %*
+) else if exist %HADOOP_HOME%\libexec\hadoop-config.cmd (
+ call %HADOOP_HOME%\libexec\hadoop-config.cmd %*
+) else (
+ echo Hadoop common not found.
+)
+
+:eof
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
new file mode 100644
index 0000000000..70af80c7d5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -0,0 +1,171 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+:main
+ if exist %HADOOP_CONF_DIR%\hadoop-env.cmd (
+ call %HADOOP_CONF_DIR%\hadoop-env.cmd
+ )
+
+ set hdfs-command=%1
+ call :make_command_arguments %*
+
+ if not defined hdfs-command (
+ goto print_usage
+ )
+
+ call :%hdfs-command% %hdfs-command-arguments%
+ set java_arguments=%JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% %CLASS% %hdfs-command-arguments%
+ call %JAVA% %java_arguments%
+
+goto :eof
+
+:namenode
+ set CLASS=org.apache.hadoop.hdfs.server.namenode.NameNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_NAMENODE_OPTS%
+ goto :eof
+
+:zkfc
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSZKFailoverController
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ZKFC_OPTS%
+ goto :eof
+
+:secondarynamenode
+ set CLASS=org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_SECONDARYNAMENODE_OPTS%
+ goto :eof
+
+:datanode
+ set CLASS=org.apache.hadoop.hdfs.server.datanode.DataNode
+ set HADOOP_OPTS=%HADOOP_OPTS% -server %HADOOP_DATANODE_OPTS%
+ goto :eof
+
+:dfs
+ set CLASS=org.apache.hadoop.fs.FsShell
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:dfsadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:haadmin
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+ set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:fsck
+ set CLASS=org.apache.hadoop.hdfs.tools.DFSck
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS%
+ goto :eof
+
+:balancer
+ set CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_BALANCER_OPTS%
+ goto :eof
+
+:jmxget
+ set CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+ goto :eof
+
+:oiv
+ set CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
+ goto :eof
+
+:oev
+ set CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+ goto :eof
+
+:fetchdt
+ set CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+ goto :eof
+
+:getconf
+ set CLASS=org.apache.hadoop.hdfs.tools.GetConf
+ goto :eof
+
+:groups
+ set CLASS=org.apache.hadoop.hdfs.tools.GetGroups
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
+ if [%2] == [] goto :eof
+ shift
+ set _hdfsarguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _hdfsarguments (
+ set _hdfsarguments=%1
+ ) else (
+ set _hdfsarguments=!_hdfsarguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set hdfs-command-arguments=%_hdfsarguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: hdfs [--config confdir] COMMAND
+ @echo where COMMAND is one of:
+ @echo dfs run a filesystem command on the file systems supported in Hadoop.
+ @echo namenode -format format the DFS filesystem
+ @echo secondarynamenode run the DFS secondary namenode
+ @echo namenode run the DFS namenode
+ @echo zkfc run the ZK Failover Controller daemon
+ @echo datanode run a DFS datanode
+ @echo dfsadmin run a DFS admin client
+ @echo fsck run a DFS filesystem checking utility
+ @echo balancer run a cluster balancing utility
+ @echo jmxget get JMX exported values from NameNode or DataNode.
+ @echo oiv apply the offline fsimage viewer to an fsimage
+ @echo oev apply the offline edits viewer to an edits file
+ @echo fetchdt fetch a delegation token from the NameNode
+ @echo getconf get config values from configuration
+ @echo groups get the groups which users belong to
+ @echo Use -help to see options
+ @echo.
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.cmd
new file mode 100644
index 0000000000..9f20e5afa3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.cmd
@@ -0,0 +1,41 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hdfs-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+start "Apache Hadoop Distribution" hadoop namenode
+start "Apache Hadoop Distribution" hadoop datanode
+
+endlocal
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.cmd
new file mode 100644
index 0000000000..f0cf015080
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.cmd
@@ -0,0 +1,41 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - hadoop namenode"
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - hadoop datanode"
+
+endlocal
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml
index 19cc1b592b..ffb3219833 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/site.xml
@@ -76,7 +76,6 @@ See http://forrest.apache.org/docs/linking.html for more info.
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index cc32224b70..3cf1679b6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -602,13 +602,13 @@ private void dropOsCacheBehindWriter(long offsetInBlock) {
offsetInBlock > lastCacheDropOffset + CACHE_DROP_LAG_BYTES) {
long twoWindowsAgo = lastCacheDropOffset - CACHE_DROP_LAG_BYTES;
if (twoWindowsAgo > 0 && dropCacheBehindWrites) {
- NativeIO.posixFadviseIfPossible(outFd, 0, lastCacheDropOffset,
- NativeIO.POSIX_FADV_DONTNEED);
+ NativeIO.POSIX.posixFadviseIfPossible(outFd, 0, lastCacheDropOffset,
+ NativeIO.POSIX.POSIX_FADV_DONTNEED);
}
if (syncBehindWrites) {
- NativeIO.syncFileRangeIfPossible(outFd, lastCacheDropOffset, CACHE_DROP_LAG_BYTES,
- NativeIO.SYNC_FILE_RANGE_WRITE);
+ NativeIO.POSIX.syncFileRangeIfPossible(outFd, lastCacheDropOffset, CACHE_DROP_LAG_BYTES,
+ NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
}
lastCacheDropOffset += CACHE_DROP_LAG_BYTES;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index fdade84f0e..0e1e35c733 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -338,9 +338,9 @@ public void close() throws IOException {
if (blockInFd != null && shouldDropCacheBehindRead && isLongRead()) {
// drop the last few MB of the file from cache
try {
- NativeIO.posixFadviseIfPossible(
+ NativeIO.POSIX.posixFadviseIfPossible(
blockInFd, lastCacheDropOffset, offset - lastCacheDropOffset,
- NativeIO.POSIX_FADV_DONTNEED);
+ NativeIO.POSIX.POSIX_FADV_DONTNEED);
} catch (Exception e) {
LOG.warn("Unable to drop cache on file close", e);
}
@@ -637,7 +637,8 @@ long sendBlock(DataOutputStream out, OutputStream baseStream,
if (isLongRead() && blockInFd != null) {
// Advise that this file descriptor will be accessed sequentially.
- NativeIO.posixFadviseIfPossible(blockInFd, 0, 0, NativeIO.POSIX_FADV_SEQUENTIAL);
+ NativeIO.POSIX.posixFadviseIfPossible(
+ blockInFd, 0, 0, NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
}
// Trigger readahead of beginning of file if configured.
@@ -725,9 +726,9 @@ private void manageOsCache() throws IOException {
offset >= nextCacheDropOffset) {
long dropLength = offset - lastCacheDropOffset;
if (dropLength >= 1024) {
- NativeIO.posixFadviseIfPossible(blockInFd,
+ NativeIO.POSIX.posixFadviseIfPossible(blockInFd,
lastCacheDropOffset, dropLength,
- NativeIO.POSIX_FADV_DONTNEED);
+ NativeIO.POSIX.POSIX_FADV_DONTNEED);
}
lastCacheDropOffset += CACHE_DROP_INTERVAL_BYTES;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index caf970de6a..86e9797fcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -41,6 +41,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -91,6 +92,15 @@
@InterfaceAudience.Private
class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
static final Log LOG = LogFactory.getLog(FsDatasetImpl.class);
+ private final static boolean isNativeIOAvailable;
+ static {
+ isNativeIOAvailable = NativeIO.isAvailable();
+ if (Path.WINDOWS && !isNativeIOAvailable) {
+ LOG.warn("Data node cannot fully support concurrent reading"
+ + " and writing without native code extensions on Windows.");
+ }
+ }
+
@Override // FsDatasetSpi
  public List<FsVolumeImpl> getVolumes() {
@@ -148,6 +158,11 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
if (meta == null || !meta.exists()) {
return null;
}
+ if (isNativeIOAvailable) {
+ return new LengthInputStream(
+ NativeIO.getShareDeleteFileInputStream(meta),
+ meta.length());
+ }
return new LengthInputStream(new FileInputStream(meta), meta.length());
}
@@ -323,18 +338,22 @@ private File getBlockFileNoExistsCheck(ExtendedBlock b)
public InputStream getBlockInputStream(ExtendedBlock b,
long seekOffset) throws IOException {
File blockFile = getBlockFileNoExistsCheck(b);
- RandomAccessFile blockInFile;
- try {
- blockInFile = new RandomAccessFile(blockFile, "r");
- } catch (FileNotFoundException fnfe) {
- throw new IOException("Block " + b + " is not valid. " +
- "Expected block file at " + blockFile + " does not exist.");
- }
+ if (isNativeIOAvailable) {
+ return NativeIO.getShareDeleteFileInputStream(blockFile, seekOffset);
+ } else {
+ RandomAccessFile blockInFile;
+ try {
+ blockInFile = new RandomAccessFile(blockFile, "r");
+ } catch (FileNotFoundException fnfe) {
+ throw new IOException("Block " + b + " is not valid. " +
+ "Expected block file at " + blockFile + " does not exist.");
+ }
- if (seekOffset > 0) {
- blockInFile.seek(seekOffset);
+ if (seekOffset > 0) {
+ blockInFile.seek(seekOffset);
+ }
+ return new FileInputStream(blockInFile.getFD());
}
- return new FileInputStream(blockInFile.getFD());
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/overview.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/overview.html
index c0cafc6408..759c093aa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/overview.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/overview.html
@@ -60,9 +60,7 @@
Platforms
Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
- Win32 is supported as a development platform. Distributed operation
- has not been well tested on Win32, so this is not a production
- platform.
+ Windows is also a supported platform.
@@ -84,15 +82,6 @@
Requisite Software
-
-Additional requirements for Windows
-
-  Cygwin - Required for shell support in
-  addition to the required software above.
-
Installing Required Software
If your platform does not have the required software listed above, you
@@ -104,13 +93,6 @@
Installing Required Software
$ sudo apt-get install rsync
-
-On Windows, if you did not install the required software when you
-installed cygwin, start the cygwin installer and select the packages:
-
-  openssh - the "Net" category
-  rsync - the "Net" category
-
Getting Started
First, you need to get a copy of the Hadoop code.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
index 51bb15f45d..3407e9cf8e 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/tests/test-libhdfs.sh
@@ -82,7 +82,7 @@ unset IFS
findlibjvm () {
javabasedir=$JAVA_HOME
case $OS_NAME in
- cygwin* | mingw* | pw23* )
+ mingw* | pw23* )
lib_jvm_dir=`find $javabasedir -follow \( \
\( -name client -type d -prune \) -o \
\( -name "jvm.dll" -exec dirname {} \; \) \) 2> /dev/null | tr "\n" " "`
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 4b26e77d80..12f15685be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -103,7 +103,7 @@ static void show(String s) {
System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
}
- @Test
+ @Test (timeout = 30000)
public void testZeroSizeFile() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -146,7 +146,7 @@ public void testZeroSizeFile() throws IOException {
}
}
- @Test
+ @Test (timeout = 30000)
public void testRecrusiveRm() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -172,7 +172,7 @@ public void testRecrusiveRm() throws IOException {
}
}
- @Test
+ @Test (timeout = 30000)
public void testDu() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -222,7 +222,8 @@ public void testDu() throws IOException {
}
}
- @Test
+
+ @Test (timeout = 30000)
public void testPut() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -321,7 +322,7 @@ public void checkPermission(Permission perm) {
/** check command error outputs and exit statuses. */
- @Test
+ @Test (timeout = 30000)
public void testErrOutPut() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
@@ -471,7 +472,7 @@ public void testErrOutPut() throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testURIPaths() throws Exception {
Configuration srcConf = new HdfsConfiguration();
Configuration dstConf = new HdfsConfiguration();
@@ -564,7 +565,7 @@ public void testURIPaths() throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testText() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
@@ -680,7 +681,7 @@ private void textTest(Path root, Configuration conf) throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testCopyToLocal() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -778,7 +779,7 @@ static String createTree(FileSystem fs, String name) throws IOException {
return path;
}
- @Test
+ @Test (timeout = 30000)
public void testCount() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -945,7 +946,7 @@ private void confirmOwner(String owner, String group,
}
}
- @Test
+ @Test (timeout = 30000)
public void testFilePermissions() throws IOException {
Configuration conf = new HdfsConfiguration();
@@ -1011,7 +1012,7 @@ public void testFilePermissions() throws IOException {
/**
* Tests various options of DFSShell.
*/
- @Test
+ @Test (timeout = 120000)
public void testDFSShell() throws IOException {
Configuration conf = new HdfsConfiguration();
/* This tests some properties of ChecksumFileSystem as well.
@@ -1391,7 +1392,7 @@ static interface TestGetRunner {
String run(int exitcode, String... options) throws IOException;
}
- @Test
+ @Test (timeout = 30000)
public void testRemoteException() throws Exception {
UserGroupInformation tmpUGI =
UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
@@ -1435,73 +1436,96 @@ public Object run() throws Exception {
}
}
- @Test
+ @Test (timeout = 30000)
public void testGet() throws IOException {
DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
+
+ final String fname = "testGet.txt";
+ Path root = new Path("/test/get");
+ final Path remotef = new Path(root, fname);
final Configuration conf = new HdfsConfiguration();
- // Race can happen here: block scanner is reading the file when test tries
- // to corrupt the test file, which will fail the test on Windows platform.
- // Disable block scanner to avoid this race.
- conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
-
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
- DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+ TestGetRunner runner = new TestGetRunner() {
+ private int count = 0;
+ private FsShell shell = new FsShell(conf);
+
+ public String run(int exitcode, String... options) throws IOException {
+ String dst = TEST_ROOT_DIR + "/" + fname+ ++count;
+ String[] args = new String[options.length + 3];
+ args[0] = "-get";
+ args[args.length - 2] = remotef.toString();
+ args[args.length - 1] = dst;
+ for(int i = 0; i < options.length; i++) {
+ args[i + 1] = options[i];
+ }
+ show("args=" + Arrays.asList(args));
+
+ try {
+ assertEquals(exitcode, shell.run(args));
+ } catch (Exception e) {
+ assertTrue(StringUtils.stringifyException(e), false);
+ }
+ return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null;
+ }
+ };
+
+ File localf = createLocalFile(new File(TEST_ROOT_DIR, fname));
+ MiniDFSCluster cluster = null;
+ DistributedFileSystem dfs = null;
try {
- final String fname = "testGet.txt";
- final File localf = createLocalFile(new File(TEST_ROOT_DIR, fname));
- final String localfcontent = DFSTestUtil.readFile(localf);
- final Path root = mkdir(dfs, new Path("/test/get"));
- final Path remotef = new Path(root, fname);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true)
+ .build();
+ dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+ mkdir(dfs, root);
dfs.copyFromLocalFile(false, false, new Path(localf.getPath()), remotef);
-
- final FsShell shell = new FsShell();
- shell.setConf(conf);
- TestGetRunner runner = new TestGetRunner() {
- private int count = 0;
-
- @Override
- public String run(int exitcode, String... options) throws IOException {
- String dst = TEST_ROOT_DIR + "/" + fname+ ++count;
- String[] args = new String[options.length + 3];
- args[0] = "-get";
- args[args.length - 2] = remotef.toString();
- args[args.length - 1] = dst;
- for(int i = 0; i < options.length; i++) {
- args[i + 1] = options[i];
- }
- show("args=" + Arrays.asList(args));
-
- try {
- assertEquals(exitcode, shell.run(args));
- } catch (Exception e) {
- assertTrue(StringUtils.stringifyException(e), false);
- }
- return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null;
- }
- };
+ String localfcontent = DFSTestUtil.readFile(localf);
assertEquals(localfcontent, runner.run(0));
assertEquals(localfcontent, runner.run(0, "-ignoreCrc"));
- //find and modify the block files
+ // find block files to modify later
      List<File> files = getBlockFiles(cluster);
+
+ // Shut down cluster and then corrupt the block files by overwriting a
+ // portion with junk data. We must shut down the cluster so that threads
+ // in the data node do not hold locks on the block files while we try to
+ // write into them. Particularly on Windows, the data node's use of the
+ // FileChannel.transferTo method can cause block files to be memory mapped
+ // in read-only mode during the transfer to a client, and this causes a
+ // locking conflict. The call to shutdown the cluster blocks until all
+ // DataXceiver threads exit, preventing this problem.
+ dfs.close();
+ cluster.shutdown();
+
show("files=" + files);
corrupt(files);
+ // Start the cluster again, but do not reformat, so prior files remain.
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false)
+ .build();
+ dfs = (DistributedFileSystem)cluster.getFileSystem();
+
assertEquals(null, runner.run(1));
String corruptedcontent = runner.run(0, "-ignoreCrc");
assertEquals(localfcontent.substring(1), corruptedcontent.substring(1));
assertEquals(localfcontent.charAt(0)+1, corruptedcontent.charAt(0));
-
- localf.delete();
} finally {
- try {dfs.close();} catch (Exception e) {}
- cluster.shutdown();
+ if (null != dfs) {
+ try {
+ dfs.close();
+ } catch (Exception e) {
+ }
+ }
+ if (null != cluster) {
+ cluster.shutdown();
+ }
+ localf.delete();
}
}
- @Test
+ @Test (timeout = 30000)
public void testLsr() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -1559,7 +1583,7 @@ private static String runLsr(final FsShell shell, String root, int returnvalue
* and return -1 exit code.
* @throws Exception
*/
- @Test
+ @Test (timeout = 30000)
public void testInvalidShell() throws Exception {
Configuration conf = new Configuration(); // default FS (non-DFS)
DFSAdmin admin = new DFSAdmin();
@@ -1569,7 +1593,7 @@ public void testInvalidShell() throws Exception {
}
// force Copy Option is -f
- @Test
+ @Test (timeout = 30000)
public void testCopyCommandsWithForceOption() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
@@ -1696,7 +1720,7 @@ private void deleteFileUsingTrash(
* Test that the server trash configuration is respected when
* the client configuration is not set.
*/
- @Test
+ @Test (timeout = 30000)
public void testServerConfigRespected() throws Exception {
deleteFileUsingTrash(true, false);
}
@@ -1705,7 +1729,7 @@ public void testServerConfigRespected() throws Exception {
* Test that server trash configuration is respected even when the
* client configuration is set.
*/
- @Test
+ @Test (timeout = 30000)
public void testServerConfigRespectedWithClient() throws Exception {
deleteFileUsingTrash(true, true);
}
@@ -1714,7 +1738,7 @@ public void testServerConfigRespectedWithClient() throws Exception {
* Test that the client trash configuration is respected when
* the server configuration is not set.
*/
- @Test
+ @Test (timeout = 30000)
public void testClientConfigRespected() throws Exception {
deleteFileUsingTrash(false, true);
}
@@ -1722,7 +1746,7 @@ public void testClientConfigRespected() throws Exception {
/**
* Test that trash is disabled by default.
*/
- @Test
+ @Test (timeout = 30000)
public void testNoTrashConfig() throws Exception {
deleteFileUsingTrash(false, false);
}
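
A note on the corruption sequence above: the cluster must be fully shut down before the test writes junk into the block files, because on Windows the DataNode's transferTo path can leave those files memory-mapped and locked. A minimal sketch of the same shutdown/corrupt/restart pattern, reusing the test's names (corruptFirstByte is a hypothetical stand-in for the test's corrupt(files) helper):

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2).format(true).build();
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    List<File> blockFiles = getBlockFiles(cluster);

    // Shutdown waits for DataXceiver threads, so no block file stays locked.
    dfs.close();
    cluster.shutdown();

    for (File blockFile : blockFiles) {
      corruptFirstByte(blockFile);   // hypothetical helper; safe to write now
    }

    // Restart without reformatting so the corrupted blocks are still served.
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2).format(false).build();
    dfs = (DistributedFileSystem) cluster.getFileSystem();
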
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
index 97659eeab3..c1aa9d1f09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
@@ -151,7 +151,7 @@ private void waitForBlocks(FileSystem fileSys, Path name)
/**
 * Test that writes to an incomplete block are available to a reader
*/
- @Test
+ @Test (timeout = 30000)
public void testUnfinishedBlockRead()
throws IOException {
 // create a new file in the root, write data, do not close
@@ -174,7 +174,7 @@ public void testUnfinishedBlockRead()
* would result in too small a buffer to do the buffer-copy needed
* for partial chunks.
*/
- @Test
+ @Test (timeout = 30000)
public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
// check that / exists
Path path = new Path("/");
@@ -200,7 +200,7 @@ public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
// use a small block size and a large write so that DN is busy creating
 // new blocks. This makes it almost certain that we can reproduce the
 // case of a client getting a DN that hasn't yet created the blocks
- @Test
+ @Test (timeout = 30000)
public void testImmediateReadOfNewFile()
throws IOException {
final int blockSize = 64 * 1024;
@@ -277,12 +277,12 @@ public void run() {
 // for some reason, using transferTo evokes the race condition more often
// so test separately
- @Test
+ @Test (timeout = 30000)
public void testUnfinishedBlockCRCErrorTransferTo() throws IOException {
runTestUnfinishedBlockCRCError(true, SyncType.SYNC, DEFAULT_WRITE_SIZE);
}
- @Test
+ @Test (timeout = 30000)
public void testUnfinishedBlockCRCErrorTransferToVerySmallWrite()
throws IOException {
runTestUnfinishedBlockCRCError(true, SyncType.SYNC, SMALL_WRITE_SIZE);
@@ -290,18 +290,17 @@ public void testUnfinishedBlockCRCErrorTransferToVerySmallWrite()
// fails due to issue w/append, disable
@Ignore
- @Test
public void _testUnfinishedBlockCRCErrorTransferToAppend()
throws IOException {
runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
}
- @Test
+ @Test (timeout = 30000)
public void testUnfinishedBlockCRCErrorNormalTransfer() throws IOException {
runTestUnfinishedBlockCRCError(false, SyncType.SYNC, DEFAULT_WRITE_SIZE);
}
- @Test
+ @Test (timeout = 30000)
public void testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite()
throws IOException {
runTestUnfinishedBlockCRCError(false, SyncType.SYNC, SMALL_WRITE_SIZE);
@@ -309,7 +308,6 @@ public void testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite()
// fails due to issue w/append, disable
@Ignore
- @Test
public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
throws IOException {
runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
@@ -338,33 +336,33 @@ private void runTestUnfinishedBlockCRCError(
final AtomicBoolean writerDone = new AtomicBoolean(false);
final AtomicBoolean writerStarted = new AtomicBoolean(false);
final AtomicBoolean error = new AtomicBoolean(false);
- final FSDataOutputStream initialOutputStream = fileSystem.create(file);
- final Thread writer = new Thread(new Runnable() {
- private FSDataOutputStream outputStream = initialOutputStream;
+ final Thread writer = new Thread(new Runnable() {
@Override
public void run() {
try {
- for (int i = 0; !error.get() && i < numWrites; i++) {
- try {
+ FSDataOutputStream outputStream = fileSystem.create(file);
+ if (syncType == SyncType.APPEND) {
+ outputStream.close();
+ outputStream = fileSystem.append(file);
+ }
+ try {
+ for (int i = 0; !error.get() && i < numWrites; i++) {
final byte[] writeBuf =
- DFSTestUtil.generateSequentialBytes(i * writeSize, writeSize);
+ DFSTestUtil.generateSequentialBytes(i * writeSize, writeSize);
outputStream.write(writeBuf);
if (syncType == SyncType.SYNC) {
outputStream.hflush();
- } else { // append
- outputStream.close();
- outputStream = fileSystem.append(file);
}
writerStarted.set(true);
- } catch (IOException e) {
- error.set(true);
- LOG.error("error writing to file", e);
}
+ } catch (IOException e) {
+ error.set(true);
+ LOG.error("error writing to file", e);
+ } finally {
+ outputStream.close();
}
-
writerDone.set(true);
- outputStream.close();
} catch (Exception e) {
LOG.error("error in writer", e);
@@ -415,7 +413,6 @@ public void run() {
Thread.currentThread().interrupt();
}
- initialOutputStream.close();
}
private boolean validateSequentialBytes(byte[] buf, int startPos, int len) {
diff --git a/hadoop-mapreduce-project/CHANGES.branch-trunk-win.txt b/hadoop-mapreduce-project/CHANGES.branch-trunk-win.txt
new file mode 100644
index 0000000000..0fae532fe0
--- /dev/null
+++ b/hadoop-mapreduce-project/CHANGES.branch-trunk-win.txt
@@ -0,0 +1,17 @@
+branch-trunk-win changes - unreleased
+
+ MAPREDUCE-4739. Some MapReduce tests fail to find winutils.
+ (Chris Nauroth via suresh)
+
+ MAPREDUCE-4780. MapReduce distribution build fails on Windows.
+ (Chris Nauroth via suresh)
+
+ MAPREDUCE-4790. MapReduce build script would be more readable using abspath.
+ (Chris Nauroth via suresh)
+
+ MAPREDUCE-4869. Fix TestMapReduceChildJVM. (Chris Nauroth via acmurthy)
+
+ MAPREDUCE-4870. Fix TestMRJobsWithHistoryService. (Chris Nauroth via acmurthy)
+
+ MAPREDUCE-4983. Fixed various platform specific assumptions in various tests,
+ so that they can pass on Windows too. (Chris Nauroth via vinodkv)
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index 263d3d8793..0faa091dc3 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -135,10 +135,6 @@ for f in $HADOOP_MAPRED_HOME/modules/*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
-if $cygwin; then
- CLASSPATH=`cygpath -p -w "$CLASSPATH"`
-fi
-
if [ "$COMMAND" = "classpath" ] ; then
echo $CLASSPATH
exit
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index 375c69fd0c..1052324e1a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -164,7 +164,6 @@ public static List getVMCommand(
Vector vargs = new Vector(8);
- vargs.add("exec");
vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
// Add child (task) java-vm options.
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestMapReduceChildJVM.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestMapReduceChildJVM.java
index ea0a342d62..566be8bbd1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestMapReduceChildJVM.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestMapReduceChildJVM.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
+import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.junit.Test;
@@ -37,7 +38,7 @@ public class TestMapReduceChildJVM {
private static final Log LOG = LogFactory.getLog(TestMapReduceChildJVM.class);
- @Test
+ @Test (timeout = 30000)
public void testCommandLine() throws Exception {
MyMRApp app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
@@ -46,10 +47,10 @@ public void testCommandLine() throws Exception {
app.verifyCompleted();
Assert.assertEquals(
- "[exec $JAVA_HOME/bin/java" +
+ "[" + envVar("JAVA_HOME") + "/bin/java" +
" -Djava.net.preferIPv4Stack=true" +
" -Dhadoop.metrics.log.level=WARN" +
- " -Xmx200m -Djava.io.tmpdir=$PWD/tmp" +
+ " -Xmx200m -Djava.io.tmpdir=" + envVar("PWD") + "/tmp" +
" -Dlog4j.configuration=container-log4j.properties" +
" -Dyarn.app.mapreduce.container.log.dir=" +
" -Dyarn.app.mapreduce.container.log.filesize=0" +
@@ -88,4 +89,16 @@ public void handle(ContainerLauncherEvent event) {
};
}
}
+
+ /**
+ * Returns a platform-specific string for retrieving the value of an environment
+ * variable with the given name. On Unix, this returns $name. On Windows,
+ * this returns %name%.
+ *
+ * @param name String environment variable name
+ * @return String for retrieving value of environment variable
+ */
+ private static String envVar(String name) {
+ return Shell.WINDOWS ? '%' + name + '%' : '$' + name;
+ }
}
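
As a quick illustration of the helper above, the following standalone sketch (assuming org.apache.hadoop.util.Shell is on the classpath) shows how the same expected command line can be built once for both platforms:

    import org.apache.hadoop.util.Shell;

    public class EnvVarDemo {
      // Same pattern as the test helper: platform-specific env var reference.
      private static String envVar(String name) {
        return Shell.WINDOWS ? '%' + name + '%' : '$' + name;
      }

      public static void main(String[] args) {
        // Prints "$JAVA_HOME/bin/java" on Unix, "%JAVA_HOME%/bin/java" on Windows.
        System.out.println(envVar("JAVA_HOME") + "/bin/java");
      }
    }
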
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
index b196c18f12..05497cc0c7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -22,6 +22,7 @@
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
@@ -39,6 +40,8 @@
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
@@ -69,31 +72,35 @@ public static void cleanupTestDirs() throws IOException {
}
private static void delete(File dir) throws IOException {
- Path p = new Path("file://"+dir.getAbsolutePath());
Configuration conf = new Configuration();
- FileSystem fs = p.getFileSystem(conf);
+ FileSystem fs = FileSystem.getLocal(conf);
+ Path p = fs.makeQualified(new Path(dir.getAbsolutePath()));
fs.delete(p, true);
}
- @Test public void testJobIDtoString() {
+ @Test (timeout = 120000)
+ public void testJobIDtoString() {
JobId jid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
jid.setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class));
assertEquals("job_0_0000", MRApps.toString(jid));
}
- @Test public void testToJobID() {
+ @Test (timeout = 120000)
+ public void testToJobID() {
JobId jid = MRApps.toJobID("job_1_1");
assertEquals(1, jid.getAppId().getClusterTimestamp());
assertEquals(1, jid.getAppId().getId());
assertEquals(1, jid.getId()); // tests against some proto.id and not a job.id field
}
- @Test(expected=IllegalArgumentException.class) public void testJobIDShort() {
+ @Test (timeout = 120000, expected=IllegalArgumentException.class)
+ public void testJobIDShort() {
MRApps.toJobID("job_0_0_0");
}
//TODO_get.set
- @Test public void testTaskIDtoString() {
+ @Test (timeout = 120000)
+ public void testTaskIDtoString() {
TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
tid.setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
tid.getJobId().setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class));
@@ -108,7 +115,8 @@ private static void delete(File dir) throws IOException {
assertEquals("task_0_0000_r_000000", MRApps.toString(tid));
}
- @Test public void testToTaskID() {
+ @Test (timeout = 120000)
+ public void testToTaskID() {
TaskId tid = MRApps.toTaskID("task_1_2_r_3");
assertEquals(1, tid.getJobId().getAppId().getClusterTimestamp());
assertEquals(2, tid.getJobId().getAppId().getId());
@@ -120,16 +128,19 @@ private static void delete(File dir) throws IOException {
assertEquals(TaskType.MAP, tid.getTaskType());
}
- @Test(expected=IllegalArgumentException.class) public void testTaskIDShort() {
+ @Test(timeout = 120000, expected=IllegalArgumentException.class)
+ public void testTaskIDShort() {
MRApps.toTaskID("task_0_0000_m");
}
- @Test(expected=IllegalArgumentException.class) public void testTaskIDBadType() {
+ @Test(timeout = 120000, expected=IllegalArgumentException.class)
+ public void testTaskIDBadType() {
MRApps.toTaskID("task_0_0000_x_000000");
}
//TODO_get.set
- @Test public void testTaskAttemptIDtoString() {
+ @Test (timeout = 120000)
+ public void testTaskAttemptIDtoString() {
TaskAttemptId taid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class);
taid.setTaskId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class));
taid.getTaskId().setTaskType(TaskType.MAP);
@@ -138,7 +149,8 @@ private static void delete(File dir) throws IOException {
assertEquals("attempt_0_0000_m_000000_0", MRApps.toString(taid));
}
- @Test public void testToTaskAttemptID() {
+ @Test (timeout = 120000)
+ public void testToTaskAttemptID() {
TaskAttemptId taid = MRApps.toTaskAttemptID("attempt_0_1_m_2_3");
assertEquals(0, taid.getTaskId().getJobId().getAppId().getClusterTimestamp());
assertEquals(1, taid.getTaskId().getJobId().getAppId().getId());
@@ -147,11 +159,13 @@ private static void delete(File dir) throws IOException {
assertEquals(3, taid.getId());
}
- @Test(expected=IllegalArgumentException.class) public void testTaskAttemptIDShort() {
+ @Test(timeout = 120000, expected=IllegalArgumentException.class)
+ public void testTaskAttemptIDShort() {
MRApps.toTaskAttemptID("attempt_0_0_0_m_0");
}
- @Test public void testGetJobFileWithUser() {
+ @Test (timeout = 120000)
+ public void testGetJobFileWithUser() {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/my/path/to/staging");
String jobFile = MRApps.getJobFile(conf, "dummy-user",
@@ -161,49 +175,57 @@ private static void delete(File dir) throws IOException {
"/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml", jobFile);
}
- @Test public void testSetClasspath() throws IOException {
+ @Test (timeout = 120000)
+ public void testSetClasspath() throws IOException {
Job job = Job.getInstance();
Map environment = new HashMap();
MRApps.setClasspath(environment, job.getConfiguration());
- assertTrue(environment.get("CLASSPATH").startsWith("$PWD:"));
+ assertTrue(environment.get("CLASSPATH").startsWith(
+ ApplicationConstants.Environment.PWD.$() + File.pathSeparator));
String yarnAppClasspath =
job.getConfiguration().get(
YarnConfiguration.YARN_APPLICATION_CLASSPATH);
if (yarnAppClasspath != null) {
- yarnAppClasspath = yarnAppClasspath.replaceAll(",\\s*", ":").trim();
+ yarnAppClasspath = yarnAppClasspath.replaceAll(",\\s*", File.pathSeparator)
+ .trim();
}
assertTrue(environment.get("CLASSPATH").contains(yarnAppClasspath));
String mrAppClasspath =
job.getConfiguration().get(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH);
if (mrAppClasspath != null) {
- mrAppClasspath = mrAppClasspath.replaceAll(",\\s*", ":").trim();
+ mrAppClasspath = mrAppClasspath.replaceAll(",\\s*", File.pathSeparator)
+ .trim();
}
assertTrue(environment.get("CLASSPATH").contains(mrAppClasspath));
}
- @Test public void testSetClasspathWithArchives () throws IOException {
+ @Test (timeout = 120000)
+ public void testSetClasspathWithArchives () throws IOException {
File testTGZ = new File(testWorkDir, "test.tgz");
FileOutputStream out = new FileOutputStream(testTGZ);
out.write(0);
out.close();
Job job = Job.getInstance();
Configuration conf = job.getConfiguration();
- conf.set(MRJobConfig.CLASSPATH_ARCHIVES, "file://"
- + testTGZ.getAbsolutePath());
- conf.set(MRJobConfig.CACHE_ARCHIVES, "file://"
- + testTGZ.getAbsolutePath() + "#testTGZ");
+ String testTGZQualifiedPath = FileSystem.getLocal(conf).makeQualified(new Path(
+ testTGZ.getAbsolutePath())).toString();
+ conf.set(MRJobConfig.CLASSPATH_ARCHIVES, testTGZQualifiedPath);
+ conf.set(MRJobConfig.CACHE_ARCHIVES, testTGZQualifiedPath + "#testTGZ");
Map environment = new HashMap();
MRApps.setClasspath(environment, conf);
- assertTrue(environment.get("CLASSPATH").startsWith("$PWD:"));
+ assertTrue(environment.get("CLASSPATH").startsWith(
+ ApplicationConstants.Environment.PWD.$() + File.pathSeparator));
String confClasspath = job.getConfiguration().get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
if (confClasspath != null) {
- confClasspath = confClasspath.replaceAll(",\\s*", ":").trim();
+ confClasspath = confClasspath.replaceAll(",\\s*", File.pathSeparator)
+ .trim();
}
assertTrue(environment.get("CLASSPATH").contains(confClasspath));
assertTrue(environment.get("CLASSPATH").contains("testTGZ"));
}
- @Test public void testSetClasspathWithUserPrecendence() {
+ @Test (timeout = 120000)
+ public void testSetClasspathWithUserPrecendence() {
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);
Map env = new HashMap();
@@ -213,11 +235,16 @@ private static void delete(File dir) throws IOException {
fail("Got exception while setting classpath");
}
String env_str = env.get("CLASSPATH");
- assertSame("MAPREDUCE_JOB_USER_CLASSPATH_FIRST set, but not taking effect!",
- env_str.indexOf("$PWD:job.jar/job.jar:job.jar/classes/:job.jar/lib/*:$PWD/*"), 0);
+ String expectedClasspath = StringUtils.join(File.pathSeparator,
+ Arrays.asList(ApplicationConstants.Environment.PWD.$(), "job.jar/job.jar",
+ "job.jar/classes/", "job.jar/lib/*",
+ ApplicationConstants.Environment.PWD.$() + "/*"));
+ assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST set, but not taking effect!",
+ env_str.startsWith(expectedClasspath));
}
- @Test public void testSetClasspathWithNoUserPrecendence() {
+ @Test (timeout = 120000)
+ public void testSetClasspathWithNoUserPrecendence() {
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
Map env = new HashMap();
@@ -227,31 +254,36 @@ private static void delete(File dir) throws IOException {
fail("Got exception while setting classpath");
}
String env_str = env.get("CLASSPATH");
- int index =
- env_str.indexOf("job.jar/job.jar:job.jar/classes/:job.jar/lib/*:$PWD/*");
- assertNotSame("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, and job.jar is not"
- + " in the classpath!", index, -1);
- assertNotSame("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking effect!",
- index, 0);
+ String expectedClasspath = StringUtils.join(File.pathSeparator,
+ Arrays.asList("job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
+ ApplicationConstants.Environment.PWD.$() + "/*"));
+ assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, and job.jar is not in"
+ + " the classpath!", env_str.contains(expectedClasspath));
+ assertFalse("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking effect!",
+ env_str.startsWith(expectedClasspath));
}
- @Test public void testSetClasspathWithJobClassloader() throws IOException {
+ @Test (timeout = 120000)
+ public void testSetClasspathWithJobClassloader() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
Map env = new HashMap();
MRApps.setClasspath(env, conf);
String cp = env.get("CLASSPATH");
String appCp = env.get("APP_CLASSPATH");
- assertSame("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is"
- + " in the classpath!", cp.indexOf("jar:job"), -1);
- assertSame("MAPREDUCE_JOB_CLASSLOADER true, but PWD is"
- + " in the classpath!", cp.indexOf("PWD"), -1);
- assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not"
- + " in the app classpath!",
- "$PWD:job.jar/job.jar:job.jar/classes/:job.jar/lib/*:$PWD/*", appCp);
+ assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the"
+ + " classpath!", cp.contains("jar" + File.pathSeparator + "job"));
+ assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",
+ cp.contains("PWD"));
+ String expectedAppClasspath = StringUtils.join(File.pathSeparator,
+ Arrays.asList(ApplicationConstants.Environment.PWD.$(), "job.jar/job.jar",
+ "job.jar/classes/", "job.jar/lib/*",
+ ApplicationConstants.Environment.PWD.$() + "/*"));
+ assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app"
+ + " classpath!", expectedAppClasspath, appCp);
}
- @Test
+ @Test (timeout = 30000)
public void testSetupDistributedCacheEmpty() throws IOException {
Configuration conf = new Configuration();
Map localResources = new HashMap();
@@ -261,7 +293,7 @@ public void testSetupDistributedCacheEmpty() throws IOException {
}
@SuppressWarnings("deprecation")
- @Test(expected = InvalidJobConfException.class)
+ @Test(timeout = 120000, expected = InvalidJobConfException.class)
public void testSetupDistributedCacheConflicts() throws Exception {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
@@ -292,7 +324,7 @@ public void testSetupDistributedCacheConflicts() throws Exception {
}
@SuppressWarnings("deprecation")
- @Test(expected = InvalidJobConfException.class)
+ @Test(timeout = 120000, expected = InvalidJobConfException.class)
public void testSetupDistributedCacheConflictsFiles() throws Exception {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
@@ -320,7 +352,7 @@ public void testSetupDistributedCacheConflictsFiles() throws Exception {
}
@SuppressWarnings("deprecation")
- @Test
+ @Test (timeout = 30000)
public void testSetupDistributedCache() throws Exception {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java
index 105d364621..99130ca331 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRClientClusterFactory.java
@@ -45,7 +45,7 @@ public static MiniMRClientCluster create(Class> caller, int noOfNMs,
FileSystem fs = FileSystem.get(conf);
- Path testRootDir = new Path("target", caller.getName() + "-tmpDir")
+ Path testRootDir = new Path("target", caller.getSimpleName() + "-tmpDir")
.makeQualified(fs);
Path appJar = new Path(testRootDir, "MRAppJar.jar");
@@ -66,9 +66,9 @@ public static MiniMRClientCluster create(Class> caller, int noOfNMs,
job.addFileToClassPath(remoteCallerJar);
MiniMRYarnCluster miniMRYarnCluster = new MiniMRYarnCluster(caller
- .getName(), noOfNMs);
+ .getSimpleName(), noOfNMs);
job.getConfiguration().set("minimrclientcluster.caller.name",
- caller.getName());
+ caller.getSimpleName());
job.getConfiguration().setInt("minimrclientcluster.nodemanagers.number",
noOfNMs);
miniMRYarnCluster.init(job.getConfiguration());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapProgress.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapProgress.java
index 982d405221..bb4a2de801 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapProgress.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapProgress.java
@@ -61,8 +61,12 @@
*/
public class TestMapProgress extends TestCase {
public static final Log LOG = LogFactory.getLog(TestMapProgress.class);
- private static String TEST_ROOT_DIR = new File(System.getProperty(
- "test.build.data", "/tmp")).getAbsolutePath() + "/mapPahseprogress";
+ private static String TEST_ROOT_DIR;
+ static {
+ String root = new File(System.getProperty("test.build.data", "/tmp"))
+ .getAbsolutePath();
+ TEST_ROOT_DIR = new Path(root, "mapPhaseprogress").toString();
+ }
static class FakeUmbilical implements TaskUmbilicalProtocol {
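
The static initializer above joins the two components with org.apache.hadoop.fs.Path rather than string concatenation, so the separator stays consistent even when File.getAbsolutePath() returns a backslash-separated Windows path. A small sketch of the same idea:

    import java.io.File;
    import org.apache.hadoop.fs.Path;

    public class TestRootDirDemo {
      public static void main(String[] args) {
        String root = new File(System.getProperty("test.build.data", "/tmp"))
            .getAbsolutePath();
        // Path keeps the separators consistent instead of blindly appending
        // "/mapPhaseprogress" to a Windows-style absolute path.
        System.out.println(new Path(root, "mapPhaseprogress").toString());
      }
    }
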
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
index 4a30c3cfa6..348f3794ef 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
@@ -25,6 +25,8 @@
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.Map;
import java.util.jar.JarOutputStream;
import java.util.zip.ZipEntry;
import org.apache.commons.io.FileUtils;
@@ -103,6 +105,8 @@ public class TestMRJobs {
private static Path TEST_ROOT_DIR = new Path("target",
TestMRJobs.class.getName() + "-tmpDir").makeQualified(localFs);
static Path APP_JAR = new Path(TEST_ROOT_DIR, "MRAppJar.jar");
+ private static final String OUTPUT_ROOT_DIR = "/tmp/" +
+ TestMRJobs.class.getSimpleName();
@BeforeClass
public static void setup() throws IOException {
@@ -140,7 +144,7 @@ public static void tearDown() {
}
}
- @Test
+ @Test (timeout = 30000)
public void testSleepJob() throws IOException, InterruptedException,
ClassNotFoundException {
LOG.info("\n\n\nStarting testSleepJob().");
@@ -211,7 +215,7 @@ protected void verifyTaskProgress(Job job) throws InterruptedException,
}
}
- @Test
+ @Test (timeout = 30000)
public void testRandomWriter() throws IOException, InterruptedException,
ClassNotFoundException {
@@ -226,8 +230,7 @@ public void testRandomWriter() throws IOException, InterruptedException,
mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES, "3072");
mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP, "1024");
Job job = randomWriterJob.createJob(mrCluster.getConfig());
- Path outputDir =
- new Path(mrCluster.getTestWorkDir().getAbsolutePath(), "random-output");
+ Path outputDir = new Path(OUTPUT_ROOT_DIR, "random-output");
FileOutputFormat.setOutputPath(job, outputDir);
job.setSpeculativeExecution(false);
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
@@ -274,7 +277,7 @@ protected void verifyRandomWriterCounters(Job job)
&& counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
- @Test
+ @Test (timeout = 30000)
public void testFailingMapper() throws IOException, InterruptedException,
ClassNotFoundException {
@@ -342,9 +345,8 @@ protected Job runFailingMapperJob()
job.setMapperClass(FailingMapper.class);
job.setNumReduceTasks(0);
- FileOutputFormat.setOutputPath(job,
- new Path(mrCluster.getTestWorkDir().getAbsolutePath(),
- "failmapper-output"));
+ FileOutputFormat.setOutputPath(job, new Path(OUTPUT_ROOT_DIR,
+ "failmapper-output"));
job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
job.submit();
String trackingUrl = job.getTrackingURL();
@@ -357,7 +359,7 @@ protected Job runFailingMapperJob()
return job;
}
- //@Test
+ //@Test (timeout = 30000)
public void testSleepJobWithSecurityOn() throws IOException,
InterruptedException, ClassNotFoundException {
@@ -425,14 +427,22 @@ public void setup(Context context) throws IOException {
Assert.assertEquals(2, archives.length);
// Check lengths of the files
- Assert.assertEquals(1, localFs.getFileStatus(files[1]).getLen());
- Assert.assertTrue(localFs.getFileStatus(files[2]).getLen() > 1);
+ Map filesMap = pathsToMap(files);
+ Assert.assertTrue(filesMap.containsKey("distributed.first.symlink"));
+ Assert.assertEquals(1, localFs.getFileStatus(
+ filesMap.get("distributed.first.symlink")).getLen());
+ Assert.assertTrue(filesMap.containsKey("distributed.second.jar"));
+ Assert.assertTrue(localFs.getFileStatus(
+ filesMap.get("distributed.second.jar")).getLen() > 1);
// Check extraction of the archive
- Assert.assertTrue(localFs.exists(new Path(archives[0],
- "distributed.jar.inside3")));
- Assert.assertTrue(localFs.exists(new Path(archives[1],
- "distributed.jar.inside4")));
+ Map archivesMap = pathsToMap(archives);
+ Assert.assertTrue(archivesMap.containsKey("distributed.third.jar"));
+ Assert.assertTrue(localFs.exists(new Path(
+ archivesMap.get("distributed.third.jar"), "distributed.jar.inside3")));
+ Assert.assertTrue(archivesMap.containsKey("distributed.fourth.jar"));
+ Assert.assertTrue(localFs.exists(new Path(
+ archivesMap.get("distributed.fourth.jar"), "distributed.jar.inside4")));
// Check the class loaders
LOG.info("Java Classpath: " + System.getProperty("java.class.path"));
@@ -460,6 +470,23 @@ public void setup(Context context) throws IOException {
Assert.assertTrue(FileUtils.isSymlink(jobJarDir));
Assert.assertTrue(jobJarDir.isDirectory());
}
+
+ /**
+ * Returns a mapping of the final component of each path to the corresponding
+ * Path instance. This assumes that every given Path has a unique string in
+ * the final path component, which is true for these tests.
+ *
+ * @param paths Path[] to map
+ * @return Map mapping the final component of each path to the
+ * corresponding Path instance
+ */
+ private static Map pathsToMap(Path[] paths) {
+ Map map = new HashMap();
+ for (Path path: paths) {
+ map.put(path.getName(), path);
+ }
+ return map;
+ }
}
public void _testDistributedCache(String jobJarPath) throws Exception {
@@ -515,7 +542,7 @@ public void _testDistributedCache(String jobJarPath) throws Exception {
trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
}
- @Test
+ @Test (timeout = 30000)
public void testDistributedCache() throws Exception {
// Test with a local (file:///) Job Jar
Path localJobJarPath = makeJobJarWithLib(TEST_ROOT_DIR.toUri().toString());
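
The pathsToMap helper above makes the distributed-cache assertions independent of the order in which the localized paths come back, which is not stable across platforms. A standalone sketch of the same lookup-by-name idea (the sample paths are illustrative only):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.fs.Path;

    public class PathsToMapDemo {
      private static Map<String, Path> pathsToMap(Path[] paths) {
        Map<String, Path> map = new HashMap<String, Path>();
        for (Path path : paths) {
          map.put(path.getName(), path);
        }
        return map;
      }

      public static void main(String[] args) {
        Path[] files = {
            new Path("/local/0/distributed.first.symlink"),
            new Path("/local/1/distributed.second.jar") };
        // Assertions key off the file name, not the array index.
        System.out.println(pathsToMap(files).get("distributed.second.jar"));
      }
    }
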
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
index 0808eed922..fc842b0493 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
@@ -20,6 +20,7 @@
import java.io.File;
import java.io.IOException;
+import java.util.EnumSet;
import java.util.List;
import junit.framework.Assert;
@@ -58,6 +59,9 @@ public class TestMRJobsWithHistoryService {
private static final Log LOG =
LogFactory.getLog(TestMRJobsWithHistoryService.class);
+ private static final EnumSet TERMINAL_RM_APP_STATES =
+ EnumSet.of(RMAppState.FINISHED, RMAppState.FAILED, RMAppState.KILLED);
+
private static MiniMRYarnCluster mrCluster;
private static Configuration conf = new Configuration();
@@ -108,7 +112,7 @@ public void tearDown() {
}
}
- @Test
+ @Test (timeout = 30000)
public void testJobHistoryData() throws IOException, InterruptedException,
AvroRemoteException, ClassNotFoundException {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
@@ -129,12 +133,24 @@ public void testJobHistoryData() throws IOException, InterruptedException,
Counters counterMR = job.getCounters();
JobId jobId = TypeConverter.toYarn(job.getJobID());
ApplicationId appID = jobId.getAppId();
+ int pollElapsed = 0;
while (true) {
Thread.sleep(1000);
- if (mrCluster.getResourceManager().getRMContext().getRMApps()
- .get(appID).getState().equals(RMAppState.FINISHED))
+ pollElapsed += 1000;
+
+ if (TERMINAL_RM_APP_STATES.contains(
+ mrCluster.getResourceManager().getRMContext().getRMApps().get(appID)
+ .getState())) {
break;
+ }
+
+ if (pollElapsed >= 60000) {
+ LOG.warn("application did not reach terminal state within 60 seconds");
+ break;
+ }
}
+ Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager()
+ .getRMContext().getRMApps().get(appID).getState());
Counters counterHS = job.getCounters();
//TODO the Assert below worked. need to check
//Should we compare each field or convert to V2 counter and compare
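
The polling change above bounds the wait and breaks out of the loop on any terminal state, then asserts on the final state. Roughly, the pattern is (reusing the fields defined in the test, so this is a sketch rather than standalone code):

    int pollElapsed = 0;
    RMAppState state;
    do {
      Thread.sleep(1000);
      pollElapsed += 1000;
      state = mrCluster.getResourceManager().getRMContext()
          .getRMApps().get(appID).getState();
    } while (!TERMINAL_RM_APP_STATES.contains(state) && pollElapsed < 60000);
    // A FAILED or KILLED application (or a 60-second timeout) now fails the
    // test immediately instead of hanging the old while(true) loop forever.
    Assert.assertEquals(RMAppState.FINISHED, state);
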
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testshell/ExternalMapReduce.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testshell/ExternalMapReduce.java
index 5b8647e6f9..45fb071e29 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testshell/ExternalMapReduce.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testshell/ExternalMapReduce.java
@@ -72,7 +72,7 @@ public void map(WritableComparable key, Writable value,
}
//fork off ls to see if the file exists.
// java file.exists() will not work on
- // cygwin since it is a symlink
+ // Windows since it is a symlink
String[] argv = new String[7];
argv[0] = "ls";
argv[1] = "files_tmp";
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java
index d3b181b69b..f0840841fb 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java
@@ -69,8 +69,10 @@ public void close() throws Exception {
}
if (manageOsCache && getEndOffset() - getStartOffset() > 0) {
try {
- NativeIO.posixFadviseIfPossible(fd, getStartOffset(), getEndOffset()
- - getStartOffset(), NativeIO.POSIX_FADV_DONTNEED);
+ NativeIO.POSIX.posixFadviseIfPossible(
+ fd,
+ getStartOffset(), getEndOffset() - getStartOffset(),
+ NativeIO.POSIX.POSIX_FADV_DONTNEED);
} catch (Throwable t) {
LOG.warn("Failed to manage OS cache for " + identifier, t);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
index 6ccbe251f8..9bb3fb0180 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
@@ -71,8 +71,9 @@ public void releaseExternalResources() {
}
if (manageOsCache && getCount() > 0) {
try {
- NativeIO.posixFadviseIfPossible(fd, getPosition(), getCount(),
- NativeIO.POSIX_FADV_DONTNEED);
+ NativeIO.POSIX.posixFadviseIfPossible(
+ fd, getPosition(), getCount(),
+ NativeIO.POSIX.POSIX_FADV_DONTNEED);
} catch (Throwable t) {
LOG.warn("Failed to manage OS cache for " + identifier, t);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index b3846c90b7..d95c27e356 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -176,6 +176,10 @@
org.apache.maven.pluginsmaven-surefire-plugin
+
+
+ ${basedir}/../../../hadoop-common-project/hadoop-common/target
+ listener
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index cbc90eebbd..568a8f6c30 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -182,15 +182,8 @@
-
- which cygpath 2> /dev/null
- if [ $? = 1 ]; then
- BUILD_DIR="${project.build.directory}"
- else
- BUILD_DIR=`cygpath --unix '${project.build.directory}'`
- fi
- cd $BUILD_DIR
- tar czf ${project.artifactId}-${project.version}.tar.gz ${project.artifactId}-${project.version}
+ cd "${project.build.directory}"
+ tar cf - ${project.artifactId}-${project.version} | gzip > ${project.artifactId}-${project.version}.tar.gz
@@ -217,6 +210,7 @@
.eclipse.templates/CHANGES.txt
+ CHANGES.branch-trunk-win.txtlib/jdiff/**
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index e732fb2bb4..342fbfec33 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -335,13 +335,7 @@
-
- which cygpath 2> /dev/null
- if [ $? = 1 ]; then
- BUILD_DIR="${project.build.directory}"
- else
- BUILD_DIR=`cygpath --unix '${project.build.directory}'`
- fi
+ BUILD_DIR="${project.build.directory}"
TAR='tar cf -'
UNTAR='tar xfBp -'
LIB_DIR="${BUILD_DIR}/native/target/usr/local/lib"
@@ -355,6 +349,13 @@
$$TAR *snappy* | (cd $${TARGET_DIR}/; $$UNTAR)
fi
fi
+ BIN_DIR="${BUILD_DIR}/bin"
+ if [ -d $${BIN_DIR} ] ; then
+ TARGET_BIN_DIR="${BUILD_DIR}/${project.artifactId}-${project.version}/bin"
+ mkdir -p $${TARGET_BIN_DIR}
+ cd $${BIN_DIR}
+ $$TAR * | (cd $${TARGET_BIN_DIR}/; $$UNTAR)
+ fi
@@ -372,15 +373,8 @@
-
- which cygpath 2> /dev/null
- if [ $? = 1 ]; then
- BUILD_DIR="${project.build.directory}"
- else
- BUILD_DIR=`cygpath --unix '${project.build.directory}'`
- fi
- cd ${BUILD_DIR}
- tar czf ${project.artifactId}-${project.version}.tar.gz ${project.artifactId}-${project.version}
+ cd "${project.build.directory}"
+ tar cf - ${project.artifactId}-${project.version} | gzip > ${project.artifactId}-${project.version}.tar.gz
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index e859b80399..2cefe62976 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -810,6 +810,8 @@
900-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError
+
+ ${basedir}/../../hadoop-common-project/hadoop-common/target${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${basedir}/../../hadoop-common-project/hadoop-common/target/native/target/usr/local/lib/4
@@ -883,6 +885,28 @@
Mac_OS_X-${sun.arch.data.model}
+
+ native-win
+
+
+ Windows
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+
+
+
+ ${env.PATH};${basedir}/../../hadoop-common-project/hadoop-common/target/bin
+
+
+
+
+
+ test-patch
diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingTaskLog.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingTaskLog.java
index ba8cf090d3..823433c4c0 100644
--- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingTaskLog.java
+++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingTaskLog.java
@@ -83,7 +83,7 @@ String[] genArgs() {
* (b) hadoop.tasklog.totalLogFileSize
* for the children of java tasks in streaming jobs.
*/
- @Test
+ @Test (timeout = 30000)
public void testStreamingTaskLogWithHadoopCmd() {
try {
final int numSlaves = 1;
@@ -124,8 +124,8 @@ private File createScript(String script) throws IOException {
"echo $HADOOP_ROOT_LOGGER $HADOOP_CLIENT_OPTS").getBytes());
in.close();
- Shell.execCommand(new String[]{"chmod", "+x",
- scriptFile.getAbsolutePath()});
+ Shell.execCommand(Shell.getSetPermissionCommand("+x", false,
+ scriptFile.getAbsolutePath()));
return scriptFile;
}
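
Using Shell.getSetPermissionCommand instead of invoking chmod directly lets the same test mark the script executable on Windows (via winutils) as well as on Unix. A minimal sketch of the call, with a hypothetical script path:

    import org.apache.hadoop.util.Shell;

    public class SetExecutableDemo {
      public static void main(String[] args) throws Exception {
        // Returns a chmod command line on Unix and the winutils equivalent
        // on Windows; the path below is illustrative only.
        String[] cmd = Shell.getSetPermissionCommand("+x", false,
            "/tmp/TestStreamingTaskLog/script");
        Shell.execCommand(cmd);
      }
    }
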
diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestSymLink.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestSymLink.java
index 2c9547ad82..dba676a32d 100644
--- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestSymLink.java
+++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestSymLink.java
@@ -53,7 +53,7 @@ public class TestSymLink
String cacheString = "This is just the cache string";
StreamJob job;
- @Test
+ @Test (timeout = 60000)
public void testSymLink() throws Exception
{
boolean mayExit = false;
diff --git a/hadoop-yarn-project/CHANGES.branch-trunk-win.txt b/hadoop-yarn-project/CHANGES.branch-trunk-win.txt
new file mode 100644
index 0000000000..7316926d4d
--- /dev/null
+++ b/hadoop-yarn-project/CHANGES.branch-trunk-win.txt
@@ -0,0 +1,29 @@
+branch-trunk-win changes - unreleased
+
+ YARN-158. Yarn creating package-info.java must not depend on sh.
+ (Chris Nauroth via suresh)
+
+ YARN-176. Some YARN tests fail to find winutils. (Chris Nauroth via suresh)
+
+ YARN-207. YARN distribution build fails on Windows. (Chris Nauroth via
+ suresh)
+
+ YARN-199. Yarn cmd line scripts for windows. (Ivan Mitic via suresh)
+
+ YARN-213. YARN build script would be more readable using abspath.
+ (Chris Nauroth via suresh)
+
+ YARN-233. Added support for running containers in MS Windows to YARN. (Chris
+ Nauroth via acmurthy)
+
+ YARN-234. Added support for process tree and resource calculator in MS Windows
+ to YARN. (Chris Nauroth via acmurthy)
+
+ YARN-259. Fix LocalDirsHandlerService to use Path rather than URIs. (Xuan
+ Gong via acmurthy)
+
+ YARN-316. YARN container launch may exceed maximum Windows command line
+ length due to long classpath. (Chris Nauroth via suresh)
+
+ YARN-359. Fixing commands for container signalling in Windows. (Chris Nauroth
+ via vinodkv)
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.cmd
new file mode 100644
index 0000000000..989510b5e3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.cmd
@@ -0,0 +1,47 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+echo starting yarn daemons
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem start resourceManager
+start "Apache Hadoop Distribution" yarn resourcemanager
+@rem start nodeManager
+start "Apache Hadoop Distribution" yarn nodemanager
+@rem start proxyserver
+@rem start "Apache Hadoop Distribution" yarn proxyserver
+
+endlocal
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.cmd
new file mode 100644
index 0000000000..09143379dd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/bin/stop-yarn.cmd
@@ -0,0 +1,47 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+setlocal enabledelayedexpansion
+
+echo stopping yarn daemons
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %HADOOP_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+@rem stop resourceManager
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn resourcemanager"
+@rem stop nodeManager
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn nodemanager"
+@rem stop proxy server
+Taskkill /FI "WINDOWTITLE eq Apache Hadoop Distribution - yarn proxyserver"
+
+endlocal
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 4694c4d30b..e0e3b09628 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -72,11 +72,6 @@ function print_usage(){
echo "Most commands print help when invoked w/o parameters."
}
-cygwin=false
-case "`uname`" in
-CYGWIN*) cygwin=true;;
-esac
-
# if no args specified, show usage
if [ $# = 0 ]; then
print_usage
@@ -177,9 +172,6 @@ unset IFS
# figure out which class to run
if [ "$COMMAND" = "classpath" ] ; then
- if $cygwin; then
- CLASSPATH=`cygpath -p -w "$CLASSPATH"`
- fi
echo $CLASSPATH
exit
elif [ "$COMMAND" = "rmadmin" ] ; then
@@ -227,19 +219,6 @@ else
CLASS=$COMMAND
fi
-# cygwin path translation
-if $cygwin; then
- CLASSPATH=`cygpath -p -w "$CLASSPATH"`
- HADOOP_YARN_HOME=`cygpath -w "$HADOOP_YARN_HOME"`
- YARN_LOG_DIR=`cygpath -w "$YARN_LOG_DIR"`
- TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`
-fi
-
-# cygwin path translation
-if $cygwin; then
- JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
-fi
-
YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.cmd
new file mode 100644
index 0000000000..41c143424b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.cmd
@@ -0,0 +1,72 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem included in all the yarn scripts with source command
+@rem should not be executed directly
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+if exist %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd (
+ call %HADOOP_LIBEXEC_DIR%\hadoop-config.cmd %*
+) else if exist %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd (
+ call %HADOOP_COMMON_HOME%\libexec\hadoop-config.cmd %*
+) else if exist %HADOOP_HOME%\libexec\hadoop-config.cmd (
+ call %HADOOP_HOME%\libexec\hadoop-config.cmd %*
+) else (
+ echo Hadoop common not found.
+)
+
+@rem
+@rem Allow alternate conf dir location.
+@rem
+
+if "%1" == "--config" (
+ shift
+ set YARN_CONF_DIR=%2
+ shift
+)
+
+if not defined YARN_CONF_DIR (
+ if not defined HADOOP_CONF_DIR (
+ set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf
+ ) else (
+ set YARN_CONF_DIR=%HADOOP_CONF_DIR%
+ )
+)
+
+@rem
+@rem check to see whether the slaves or the masters file is specified
+@rem
+
+if "%1" == "--hosts" (
+ set YARN_SLAVES=%YARN_CONF_DIR%\%2
+ shift
+ shift
+)
+
+:eof
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
new file mode 100644
index 0000000000..031cce7bd6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -0,0 +1,250 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem The Hadoop command script
+@rem
+@rem Environment Variables
+@rem
+@rem JAVA_HOME The java implementation to use. Overrides JAVA_HOME.
+@rem
+@rem YARN_CLASSPATH Extra Java CLASSPATH entries.
+@rem
+@rem YARN_HEAPSIZE The maximum amount of heap to use, in MB.
+@rem Default is 1000.
+@rem
+@rem YARN_{COMMAND}_HEAPSIZE overrides YARN_HEAPSIZE for a given command
+@rem eg YARN_NODEMANAGER_HEAPSIZE sets the heap
+@rem size for the NodeManager. If you set the
+@rem heap size in YARN_{COMMAND}_OPTS or YARN_OPTS
+@rem they take precedence.
+@rem
+@rem YARN_OPTS Extra Java runtime options.
+@rem
+@rem YARN_CLIENT_OPTS         Extra options applied when the respective client
+@rem                          command is run; YARN_CLIENT_OPTS covers commands
+@rem                          that serve more than one purpose (fs, dfs, fsck,
+@rem                          dfsadmin etc).
+@rem YARN_{COMMAND}_OPTS      Extra options for a single command, e.g.
+@rem                          YARN_NODEMANAGER_OPTS applies to the NodeManager.
+@rem
+@rem YARN_CONF_DIR Alternate conf dir. Default is ${HADOOP_YARN_HOME}/conf.
+@rem
+@rem YARN_ROOT_LOGGER The root appender. Default is INFO,console
+@rem
+
+setlocal enabledelayedexpansion
+
+if not defined HADOOP_BIN_PATH (
+ set HADOOP_BIN_PATH=%~dp0
+)
+
+if "%HADOOP_BIN_PATH:~-1%" == "\" (
+ set HADOOP_BIN_PATH=%HADOOP_BIN_PATH:~0,-1%
+)
+
+set DEFAULT_LIBEXEC_DIR=%HADOOP_BIN_PATH%\..\libexec
+if not defined HADOOP_LIBEXEC_DIR (
+ set HADOOP_LIBEXEC_DIR=%DEFAULT_LIBEXEC_DIR%
+)
+
+call %DEFAULT_LIBEXEC_DIR%\yarn-config.cmd %*
+if "%1" == "--config" (
+ shift
+ shift
+)
+
+:main
+ if exist %YARN_CONF_DIR%\yarn-env.cmd (
+ call %YARN_CONF_DIR%\yarn-env.cmd
+ )
+
+ set yarn-command=%1
+ call :make_command_arguments %*
+
+ if not defined yarn-command (
+ goto print_usage
+ )
+
+  @rem JAVA and JAVA_HEAP_MAX are set in hadoop-config.cmd
+
+ if defined YARN_HEAPSIZE (
+ @rem echo run with Java heapsize %YARN_HEAPSIZE%
+ set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
+ )
+
+ @rem CLASSPATH initially contains HADOOP_CONF_DIR & YARN_CONF_DIR
+ if not defined HADOOP_CONF_DIR (
+ echo No HADOOP_CONF_DIR set.
+ echo Please specify it either in yarn-env.cmd or in the environment.
+ goto :eof
+ )
+
+ set CLASSPATH=%HADOOP_CONF_DIR%;%YARN_CONF_DIR%;%CLASSPATH%
+
+ @rem for developers, add Hadoop classes to CLASSPATH
+ if exist %HADOOP_YARN_HOME%\yarn-api\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-api\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-common\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-common\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-mapreduce\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-mapreduce\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-master-worker\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-master-worker\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-nodemanager\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-common\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-resourcemanager\target\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\build\test\classes (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes
+ )
+
+ if exist %HADOOP_YARN_HOME%\build\tools (
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\tools
+ )
+
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_DIR%\*
+ set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
+
+ call :%yarn-command% %yarn-command-arguments%
+
+ set java_arguments=%JAVA_HEAP_MAX% %YARN_OPTS% -classpath %CLASSPATH% %CLASS% %yarn-command-arguments%
+ call %JAVA% %java_arguments%
+
+goto :eof
+
+:classpath
+ @echo %CLASSPATH%
+ goto :eof
+
+:rmadmin
+ set CLASS=org.apache.hadoop.yarn.server.resourcemanager.tools.RMAdmin
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:application
+ set CLASS=org.apache.hadoop.yarn.client.cli.ApplicationCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:node
+ set CLASS=org.apache.hadoop.yarn.client.cli.NodeCLI
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:resourcemanager
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\rm-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.resourcemanager.ResourceManager
+ set YARN_OPTS=%YARN_OPTS% %HADOOP_RESOURCEMANAGER_OPTS%
+ if defined YARN_RESOURCEMANAGER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_RESOURCEMANAGER_HEAPSIZE%m
+ )
+ goto :eof
+
+:nodemanager
+ set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\nm-config\log4j.properties
+ set CLASS=org.apache.hadoop.yarn.server.nodemanager.NodeManager
+ set YARN_OPTS=%YARN_OPTS% -server %HADOOP_NODEMANAGER_OPTS%
+ if defined YARN_NODEMANAGER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_NODEMANAGER_HEAPSIZE%m
+ )
+ goto :eof
+
+:proxyserver
+ set CLASS=org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer
+ set YARN_OPTS=%YARN_OPTS% %HADOOP_PROXYSERVER_OPTS%
+ if defined YARN_PROXYSERVER_HEAPSIZE (
+ set JAVA_HEAP_MAX=-Xmx%YARN_PROXYSERVER_HEAPSIZE%m
+ )
+ goto :eof
+
+:version
+ set CLASS=org.apache.hadoop.util.VersionInfo
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:jar
+ set CLASS=org.apache.hadoop.util.RunJar
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:logs
+ set CLASS=org.apache.hadoop.yarn.logaggregation.LogDumper
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+:daemonlog
+ set CLASS=org.apache.hadoop.log.LogLevel
+ set YARN_OPTS=%YARN_OPTS% %YARN_CLIENT_OPTS%
+ goto :eof
+
+@rem This changes %1, %2 etc. Hence those cannot be used after calling this.
+:make_command_arguments
+ if "%1" == "--config" (
+ shift
+ shift
+ )
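+ @rem %1 is now the yarn command itself; shift it away and collect the remaining parameters into yarn-command-arguments.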
+ if [%2] == [] goto :eof
+ shift
+ set _yarnarguments=
+ :MakeCmdArgsLoop
+ if [%1]==[] goto :EndLoop
+
+ if not defined _yarnarguments (
+ set _yarnarguments=%1
+ ) else (
+ set _yarnarguments=!_yarnarguments! %1
+ )
+ shift
+ goto :MakeCmdArgsLoop
+ :EndLoop
+ set yarn-command-arguments=%_yarnarguments%
+ goto :eof
+
+:print_usage
+ @echo Usage: yarn [--config confdir] COMMAND
+ @echo where COMMAND is one of:
+ @echo resourcemanager run the ResourceManager
+ @echo nodemanager run a nodemanager on each slave
+ @echo historyserver run job history servers as a standalone daemon
+ @echo rmadmin admin tools
+ @echo version print the version
+ @echo jar ^<jar^> run a jar file
+ @echo application prints application(s) report/kill application
+ @echo node prints node report(s)
+ @echo logs dump container logs
+ @echo classpath prints the class path needed to get the
+ @echo Hadoop jar and the required libraries
+ @echo daemonlog get/set the log level for each daemon
+ @echo or
+ @echo CLASSNAME run the class named CLASSNAME
+ @echo Most commands print help when invoked w/o parameters.
+
+endlocal
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.cmd b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.cmd
new file mode 100644
index 0000000000..3329f8fdc3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.cmd
@@ -0,0 +1,60 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements. See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License. You may obtain a copy of the License at
+@rem
+@rem http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem User for YARN daemons
+if not defined HADOOP_YARN_USER (
+ set HADOOP_YARN_USER=%yarn%
+)
+
+if not defined YARN_CONF_DIR (
+ set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf
+)
+
+if defined YARN_HEAPSIZE (
+ @rem echo run with Java heapsize %YARN_HEAPSIZE%
+ set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
+)
+
+if not defined YARN_LOG_DIR (
+ set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs
+)
+
+if not defined YARN_LOGFILE (
+ set YARN_LOGFILE=yarn.log
+)
+
+@rem default policy file for service-level authorization
+if not defined YARN_POLICYFILE (
+ set YARN_POLICYFILE=hadoop-policy.xml
+)
+
+if not defined YARN_ROOT_LOGGER (
+ set YARN_ROOT_LOGGER=INFO,console
+)
+
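+@rem Build YARN_OPTS with the log directory, log file, home directory, and root logger settings shared by all YARN daemons.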
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER%
+if defined JAVA_LIBRARY_PATH (
+ set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
+)
+set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE%
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index c86b3f9b86..4934b0c801 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.api;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Shell;
/**
* This is the API for the applications comprising of constants that YARN sets
@@ -192,7 +193,11 @@ public String toString() {
}
public String $() {
- return "$" + variable;
+ if (Shell.WINDOWS) {
+ return "%" + variable + "%";
+ } else {
+ return "$" + variable;
+ }
}
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
index 1237a0d247..78eee7ea6b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
@@ -32,6 +32,28 @@
    <module>hadoop-yarn-applications-distributedshell</module>
    <module>hadoop-yarn-applications-unmanaged-am-launcher</module>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <environmentVariables>
+            <!-- required on Windows so the tests can locate winutils -->
+            <HADOOP_HOME>${basedir}/../../../../hadoop-common-project/hadoop-common/target</HADOOP_HOME>
+          </environmentVariables>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
clover
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 7123042d8c..9e4ed6d3fa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -36,6 +36,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
@@ -59,32 +60,30 @@ public class ProcfsBasedProcessTree extends ResourceCalculatorProcessTree {
public static final String PROCFS_STAT_FILE = "stat";
public static final String PROCFS_CMDLINE_FILE = "cmdline";
public static final long PAGE_SIZE;
+ public static final long JIFFY_LENGTH_IN_MILLIS; // in millisecond
+
static {
- ShellCommandExecutor shellExecutor =
- new ShellCommandExecutor(new String[]{"getconf", "PAGESIZE"});
+ long jiffiesPerSecond = -1;
long pageSize = -1;
try {
- shellExecutor.execute();
- pageSize = Long.parseLong(shellExecutor.getOutput().replace("\n", ""));
- } catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
- } finally {
- PAGE_SIZE = pageSize;
- }
- }
- public static final long JIFFY_LENGTH_IN_MILLIS; // in millisecond
- static {
- ShellCommandExecutor shellExecutor =
- new ShellCommandExecutor(new String[]{"getconf", "CLK_TCK"});
- long jiffiesPerSecond = -1;
- try {
- shellExecutor.execute();
- jiffiesPerSecond = Long.parseLong(shellExecutor.getOutput().replace("\n", ""));
+ if(Shell.LINUX) {
+ ShellCommandExecutor shellExecutorClk = new ShellCommandExecutor(
+ new String[] { "getconf", "CLK_TCK" });
+ shellExecutorClk.execute();
+ jiffiesPerSecond = Long.parseLong(shellExecutorClk.getOutput().replace("\n", ""));
+
+ ShellCommandExecutor shellExecutorPage = new ShellCommandExecutor(
+ new String[] { "getconf", "PAGESIZE" });
+ shellExecutorPage.execute();
+ pageSize = Long.parseLong(shellExecutorPage.getOutput().replace("\n", ""));
+
+ }
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
} finally {
JIFFY_LENGTH_IN_MILLIS = jiffiesPerSecond != -1 ?
Math.round(1000D / jiffiesPerSecond) : -1;
+ PAGE_SIZE = pageSize;
}
}
@@ -126,8 +125,7 @@ public ProcfsBasedProcessTree(String pid, String procfsDir) {
*/
public static boolean isAvailable() {
try {
- String osName = System.getProperty("os.name");
- if (!osName.startsWith("Linux")) {
+ if (!Shell.LINUX) {
LOG.info("ProcfsBasedProcessTree currently is supported only on "
+ "Linux.");
return false;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
index 2e43812494..a3f27969c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Shell;
/**
* Plugin to calculate resource information on the system.
@@ -31,6 +32,18 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class ResourceCalculatorPlugin extends Configured {
+
+ protected String processPid = null;
+
+ /**
+ * set the pid of the process for which getProcResourceValues
+ * will be invoked
+ *
+ * @param pid
+ */
+ public void setProcessPid(String pid) {
+ processPid = pid;
+ }
/**
* Obtain the total size of the virtual memory present in the system.
@@ -109,10 +122,12 @@ public static ResourceCalculatorPlugin getResourceCalculatorPlugin(
// No class given, try a os specific class
try {
- String osName = System.getProperty("os.name");
- if (osName.startsWith("Linux")) {
+ if (Shell.LINUX) {
return new LinuxResourceCalculatorPlugin();
}
+ if (Shell.WINDOWS) {
+ return new WindowsResourceCalculatorPlugin();
+ }
} catch (SecurityException se) {
// Failed to get Operating System name.
return null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
index 2ecc1ce251..3606c453f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorProcessTree.java
@@ -145,14 +145,11 @@ public static ResourceCalculatorProcessTree getResourceCalculatorProcessTree(
}
// No class given, try a os specific class
- try {
- String osName = System.getProperty("os.name");
- if (osName.startsWith("Linux")) {
- return new ProcfsBasedProcessTree(pid);
- }
- } catch (SecurityException se) {
- // Failed to get Operating System name.
- return null;
+ if (ProcfsBasedProcessTree.isAvailable()) {
+ return new ProcfsBasedProcessTree(pid);
+ }
+ if (WindowsBasedProcessTree.isAvailable()) {
+ return new WindowsBasedProcessTree(pid);
}
// Not supported on this system.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
new file mode 100644
index 0000000000..da179ffb9b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsBasedProcessTree.java
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.StringUtils;
+
+
+public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
+
+ static final Log LOG = LogFactory
+ .getLog(WindowsBasedProcessTree.class);
+
+ static class ProcessInfo {
+ String pid; // process pid
+ long vmem; // virtual memory
+ long workingSet; // working set, RAM used
+ long cpuTimeMs; // total cpuTime in millisec
+ long cpuTimeMsDelta; // delta of cpuTime since last update
+ int age = 1;
+ }
+
+ private String taskProcessId = null;
+ private long cpuTimeMs = 0;
+ private Map<String, ProcessInfo> processTree =
+ new HashMap<String, ProcessInfo>();
+
+ public static boolean isAvailable() {
+ if (Shell.WINDOWS) {
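+ // Probe winutils by running "winutils help"; the output check below confirms this build supports task management.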
+ ShellCommandExecutor shellExecutor = new ShellCommandExecutor(
+ new String[] { Shell.WINUTILS, "help" });
+ try {
+ shellExecutor.execute();
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ } finally {
+ String output = shellExecutor.getOutput();
+ if (output != null &&
+ output.contains("Prints to stdout a list of processes in the task")) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public WindowsBasedProcessTree(String pid) {
+ super(pid);
+ taskProcessId = pid;
+ }
+
+ // helper method to override while testing
+ String getAllProcessInfoFromShell() {
+ ShellCommandExecutor shellExecutor = new ShellCommandExecutor(
+ new String[] { Shell.WINUTILS, "task", "processList", taskProcessId });
+ try {
+ shellExecutor.execute();
+ return shellExecutor.getOutput();
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ return null;
+ }
+
+ /**
+ * Parses string of process info lines into ProcessInfo objects
+ * @param processesInfoStr
+ * @return Map of pid string to ProcessInfo objects
+ */
+ Map<String, ProcessInfo> createProcessInfo(String processesInfoStr) {
+ String[] processesStr = processesInfoStr.split("\r\n");
+ Map<String, ProcessInfo> allProcs = new HashMap<String, ProcessInfo>();
+ final int procInfoSplitCount = 4;
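+ // Each line is expected to hold four comma-separated fields: pid, virtual memory, working set, and cpu time in milliseconds.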
+ for (String processStr : processesStr) {
+ if (processStr != null) {
+ String[] procInfo = processStr.split(",");
+ if (procInfo.length == procInfoSplitCount) {
+ try {
+ ProcessInfo pInfo = new ProcessInfo();
+ pInfo.pid = procInfo[0];
+ pInfo.vmem = Long.parseLong(procInfo[1]);
+ pInfo.workingSet = Long.parseLong(procInfo[2]);
+ pInfo.cpuTimeMs = Long.parseLong(procInfo[3]);
+ allProcs.put(pInfo.pid, pInfo);
+ } catch (NumberFormatException nfe) {
+ LOG.debug("Error parsing procInfo." + nfe);
+ }
+ } else {
+ LOG.debug("Expected split length of proc info to be "
+ + procInfoSplitCount + ". Got " + procInfo.length);
+ }
+ }
+ }
+ return allProcs;
+ }
+
+ @Override
+ public void updateProcessTree() {
+ if(taskProcessId != null) {
+ // taskProcessId can be null in some tests
+ String processesInfoStr = getAllProcessInfoFromShell();
+ if (processesInfoStr != null && processesInfoStr.length() > 0) {
+ Map<String, ProcessInfo> allProcessInfo = createProcessInfo(processesInfoStr);
+
+ for (Map.Entry<String, ProcessInfo> entry : allProcessInfo.entrySet()) {
+ String pid = entry.getKey();
+ ProcessInfo pInfo = entry.getValue();
+ ProcessInfo oldInfo = processTree.get(pid);
+ if (oldInfo != null) {
+ // existing process, update age and replace value
+ pInfo.age += oldInfo.age;
+ // calculate the delta since the last refresh. totals are being kept
+ // in the WindowsBasedProcessTree object
+ pInfo.cpuTimeMsDelta = pInfo.cpuTimeMs - oldInfo.cpuTimeMs;
+ } else {
+ // new process. delta cpu == total cpu
+ pInfo.cpuTimeMsDelta = pInfo.cpuTimeMs;
+ }
+ }
+ processTree.clear();
+ processTree = allProcessInfo;
+ } else {
+ // clearing process tree to mimic semantics of existing Procfs impl
+ processTree.clear();
+ }
+ }
+ }
+
+ @Override
+ public boolean checkPidPgrpidForMatch() {
+ // This is always true on Windows, because the pid doubles as a job object
+ // name for task management.
+ return true;
+ }
+
+ @Override
+ public String getProcessTreeDump() {
+ StringBuilder ret = new StringBuilder();
+ // The header.
+ ret.append(String.format("\t|- PID " + "CPU_TIME(MILLIS) "
+ + "VMEM(BYTES) WORKING_SET(BYTES)\n"));
+ for (ProcessInfo p : processTree.values()) {
+ if (p != null) {
+ ret.append(String.format("\t|- %s %d %d %d\n", p.pid,
+ p.cpuTimeMs, p.vmem, p.workingSet));
+ }
+ }
+ return ret.toString();
+ }
+
+ @Override
+ public long getCumulativeVmem(int olderThanAge) {
+ long total = 0;
+ for (ProcessInfo p : processTree.values()) {
+ if ((p != null) && (p.age > olderThanAge)) {
+ total += p.vmem;
+ }
+ }
+ return total;
+ }
+
+ @Override
+ public long getCumulativeRssmem(int olderThanAge) {
+ long total = 0;
+ for (ProcessInfo p : processTree.values()) {
+ if ((p != null) && (p.age > olderThanAge)) {
+ total += p.workingSet;
+ }
+ }
+ return total;
+ }
+
+ @Override
+ public long getCumulativeCpuTime() {
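+ // cpuTimeMs accumulates the per-process deltas from each update, so CPU time from processes that have since exited is retained.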
+ for (ProcessInfo p : processTree.values()) {
+ cpuTimeMs += p.cpuTimeMsDelta;
+ }
+ return cpuTimeMs;
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
new file mode 100644
index 0000000000..53c8cc9647
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/WindowsResourceCalculatorPlugin.java
@@ -0,0 +1,168 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.StringUtils;
+
+public class WindowsResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+
+ static final Log LOG = LogFactory
+ .getLog(WindowsResourceCalculatorPlugin.class);
+
+ long vmemSize;
+ long memSize;
+ long vmemAvailable;
+ long memAvailable;
+ int numProcessors;
+ long cpuFrequencyKhz;
+ long cumulativeCpuTimeMs;
+ float cpuUsage;
+
+ long lastRefreshTime;
+ private final int refreshIntervalMs = 1000;
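+ // Cached system information is refreshed at most once per interval, so getters do not shell out to winutils on every call.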
+
+ WindowsBasedProcessTree pTree = null;
+
+ public WindowsResourceCalculatorPlugin() {
+ lastRefreshTime = 0;
+ reset();
+ }
+
+ void reset() {
+ vmemSize = -1;
+ memSize = -1;
+ vmemAvailable = -1;
+ memAvailable = -1;
+ numProcessors = -1;
+ cpuFrequencyKhz = -1;
+ cumulativeCpuTimeMs = -1;
+ cpuUsage = -1;
+ }
+
+ String getSystemInfoInfoFromShell() {
+ ShellCommandExecutor shellExecutor = new ShellCommandExecutor(
+ new String[] { Shell.WINUTILS, "systeminfo" });
+ try {
+ shellExecutor.execute();
+ return shellExecutor.getOutput();
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ return null;
+ }
+
+ void refreshIfNeeded() {
+ long now = System.currentTimeMillis();
+ if (now - lastRefreshTime > refreshIntervalMs) {
+ long refreshInterval = now - lastRefreshTime;
+ lastRefreshTime = now;
+ long lastCumCpuTimeMs = cumulativeCpuTimeMs;
+ reset();
+ String sysInfoStr = getSystemInfoInfoFromShell();
+ if (sysInfoStr != null) {
+ final int sysInfoSplitCount = 7;
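+ // Expected field order: vmem size, mem size, vmem available, mem available, processor count, cpu frequency (kHz), cumulative cpu time (ms).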
+ String[] sysInfo = sysInfoStr.substring(0, sysInfoStr.indexOf("\r\n"))
+ .split(",");
+ if (sysInfo.length == sysInfoSplitCount) {
+ try {
+ vmemSize = Long.parseLong(sysInfo[0]);
+ memSize = Long.parseLong(sysInfo[1]);
+ vmemAvailable = Long.parseLong(sysInfo[2]);
+ memAvailable = Long.parseLong(sysInfo[3]);
+ numProcessors = Integer.parseInt(sysInfo[4]);
+ cpuFrequencyKhz = Long.parseLong(sysInfo[5]);
+ cumulativeCpuTimeMs = Long.parseLong(sysInfo[6]);
+ if (lastCumCpuTimeMs != -1) {
+ cpuUsage = (cumulativeCpuTimeMs - lastCumCpuTimeMs)
+ / (refreshInterval * 1.0f);
+ }
+
+ } catch (NumberFormatException nfe) {
+ LOG.warn("Error parsing sysInfo." + nfe);
+ }
+ } else {
+ LOG.warn("Expected split length of sysInfo to be "
+ + sysInfoSplitCount + ". Got " + sysInfo.length);
+ }
+ }
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getVirtualMemorySize() {
+ refreshIfNeeded();
+ return vmemSize;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getPhysicalMemorySize() {
+ refreshIfNeeded();
+ return memSize;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailableVirtualMemorySize() {
+ refreshIfNeeded();
+ return vmemAvailable;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getAvailablePhysicalMemorySize() {
+ refreshIfNeeded();
+ return memAvailable;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public int getNumProcessors() {
+ refreshIfNeeded();
+ return numProcessors;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCpuFrequency() {
+ refreshIfNeeded();
+ return -1;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public long getCumulativeCpuTime() {
+ refreshIfNeeded();
+ return cumulativeCpuTimeMs;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public float getCpuUsage() {
+ refreshIfNeeded();
+ return cpuUsage;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index 528e03e4ea..db3fd29558 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
@@ -104,17 +105,21 @@ public void setup() throws IOException {
new Path(TEST_ROOT_DIR.getAbsolutePath()), true);
}
- @Test
+ @Test (timeout = 30000)
public void testProcessTree() throws Exception {
+ if (!Shell.LINUX) {
+ System.out
+ .println("ProcfsBasedProcessTree is not available on this system. Not testing");
+ return;
+
+ }
try {
- if (!ProcfsBasedProcessTree.isAvailable()) {
- System.out
- .println("ProcfsBasedProcessTree is not available on this system. Not testing");
- return;
- }
+ Assert.assertTrue(ProcfsBasedProcessTree.isAvailable());
} catch (Exception e) {
LOG.info(StringUtils.stringifyException(e));
+ Assert.assertTrue("ProcfsBaseProcessTree should be available on Linux",
+ false);
return;
}
// create shell script
@@ -328,7 +333,7 @@ public String getStatLine() {
* @throws IOException if there was a problem setting up the
* fake procfs directories or files.
*/
- @Test
+ @Test (timeout = 30000)
public void testCpuAndMemoryForProcessTree() throws IOException {
// test processes
@@ -402,7 +407,7 @@ public void testCpuAndMemoryForProcessTree() throws IOException {
* @throws IOException if there was a problem setting up the
* fake procfs directories or files.
*/
- @Test
+ @Test (timeout = 30000)
public void testMemForOlderProcesses() throws IOException {
// initial list of processes
String[] pids = { "100", "200", "300", "400" };
@@ -509,7 +514,7 @@ public void testMemForOlderProcesses() throws IOException {
* @throws IOException if there was a problem setting up the
* fake procfs directories or files.
*/
- @Test
+ @Test (timeout = 30000)
public void testDestroyProcessTree() throws IOException {
// test process
String pid = "100";
@@ -535,7 +540,7 @@ public void testDestroyProcessTree() throws IOException {
*
* @throws IOException
*/
- @Test
+ @Test (timeout = 30000)
public void testProcessTreeDump()
throws IOException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
new file mode 100644
index 0000000000..ef1ee39c49
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsBasedProcessTree.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.Shell;
+
+import junit.framework.TestCase;
+import org.junit.Test;
+
+public class TestWindowsBasedProcessTree extends TestCase {
+ private static final Log LOG = LogFactory
+ .getLog(TestWindowsBasedProcessTree.class);
+
+ class WindowsBasedProcessTreeTester extends WindowsBasedProcessTree {
+ String infoStr = null;
+ public WindowsBasedProcessTreeTester(String pid) {
+ super(pid);
+ }
+ @Override
+ String getAllProcessInfoFromShell() {
+ return infoStr;
+ }
+ }
+
+ @Test (timeout = 30000)
+ public void testTree() {
+ if( !Shell.WINDOWS) {
+ LOG.info("Platform not Windows. Not testing");
+ return;
+ }
+ assertTrue("WindowsBasedProcessTree should be available on Windows",
+ WindowsBasedProcessTree.isAvailable());
+
+
+ WindowsBasedProcessTreeTester pTree = new WindowsBasedProcessTreeTester("-1");
+ pTree.infoStr = "3524,1024,1024,500\r\n2844,1024,1024,500\r\n";
+ pTree.updateProcessTree();
+ assertTrue(pTree.getCumulativeVmem() == 2048);
+ assertTrue(pTree.getCumulativeVmem(0) == 2048);
+ assertTrue(pTree.getCumulativeRssmem() == 2048);
+ assertTrue(pTree.getCumulativeRssmem(0) == 2048);
+ assertTrue(pTree.getCumulativeCpuTime() == 1000);
+
+ pTree.infoStr = "3524,1024,1024,1000\r\n2844,1024,1024,1000\r\n1234,1024,1024,1000\r\n";
+ pTree.updateProcessTree();
+ assertTrue(pTree.getCumulativeVmem() == 3072);
+ assertTrue(pTree.getCumulativeVmem(1) == 2048);
+ assertTrue(pTree.getCumulativeRssmem() == 3072);
+ assertTrue(pTree.getCumulativeRssmem(1) == 2048);
+ assertTrue(pTree.getCumulativeCpuTime() == 3000);
+
+ pTree.infoStr = "3524,1024,1024,1500\r\n2844,1024,1024,1500\r\n";
+ pTree.updateProcessTree();
+ assertTrue(pTree.getCumulativeVmem() == 2048);
+ assertTrue(pTree.getCumulativeVmem(2) == 2048);
+ assertTrue(pTree.getCumulativeRssmem() == 2048);
+ assertTrue(pTree.getCumulativeRssmem(2) == 2048);
+ assertTrue(pTree.getCumulativeCpuTime() == 4000);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsResourceCalculatorPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsResourceCalculatorPlugin.java
new file mode 100644
index 0000000000..70dde32318
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestWindowsResourceCalculatorPlugin.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import junit.framework.TestCase;
+import org.junit.Test;
+
+public class TestWindowsResourceCalculatorPlugin extends TestCase {
+
+
+ class WindowsResourceCalculatorPluginTester extends WindowsResourceCalculatorPlugin {
+ private String infoStr = null;
+ @Override
+ String getSystemInfoInfoFromShell() {
+ return infoStr;
+ }
+ }
+
+ @Test (timeout = 30000)
+ public void testParseSystemInfoString() {
+ WindowsResourceCalculatorPluginTester tester = new WindowsResourceCalculatorPluginTester();
+ // info str derived from windows shell command has \r\n termination
+ tester.infoStr = "17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n";
+ // call a method to refresh values
+ tester.getAvailablePhysicalMemorySize();
+ // verify information has been refreshed
+ assertTrue(tester.vmemSize == 17177038848L);
+ assertTrue(tester.memSize == 8589467648L);
+ assertTrue(tester.vmemAvailable == 15232745472L);
+ assertTrue(tester.memAvailable == 6400417792L);
+ assertTrue(tester.numProcessors == 1);
+ assertTrue(tester.cpuFrequencyKhz == 2805000L);
+ assertTrue(tester.cumulativeCpuTimeMs == 6261812L);
+ assertTrue(tester.cpuUsage == -1);
+ }
+
+ @Test (timeout = 20000)
+ public void testRefreshAndCpuUsage() throws InterruptedException {
+ WindowsResourceCalculatorPluginTester tester = new WindowsResourceCalculatorPluginTester();
+ // info str derived from windows shell command has \r\n termination
+ tester.infoStr = "17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n";
+ tester.getAvailablePhysicalMemorySize();
+ // verify information has been refreshed
+ assertTrue(tester.memAvailable == 6400417792L);
+ assertTrue(tester.cpuUsage == -1);
+
+ tester.infoStr = "17177038848,8589467648,15232745472,5400417792,1,2805000,6261812\r\n";
+ tester.getAvailablePhysicalMemorySize();
+ // verify information has not been refreshed
+ assertTrue(tester.memAvailable == 6400417792L);
+ assertTrue(tester.cpuUsage == -1);
+
+ Thread.sleep(1500);
+ tester.infoStr = "17177038848,8589467648,15232745472,5400417792,1,2805000,6286812\r\n";
+ tester.getAvailablePhysicalMemorySize();
+ // verify information has been refreshed
+ assertTrue(tester.memAvailable == 5400417792L);
+ assertTrue(tester.cpuUsage >= 0.1);
+ }
+
+ @Test (timeout = 20000)
+ public void testErrorInGetSystemInfo() {
+ WindowsResourceCalculatorPluginTester tester = new WindowsResourceCalculatorPluginTester();
+ // info str derived from windows shell command has \r\n termination
+ tester.infoStr = null;
+ // call a method to refresh values
+ tester.getAvailablePhysicalMemorySize();
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 9cffde1a65..31fe49c86f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader;
+import org.apache.hadoop.util.Shell;
public abstract class ContainerExecutor implements Configurable {
@@ -182,6 +183,33 @@ protected Path getPidFilePath(ContainerId containerId) {
readLock.unlock();
}
}
+
+ /** Return a command to execute the given command in OS shell.
+ * On Windows, the passed in groupId can be used to launch
+ * and associate the given groupId in a process group. On
+ * non-Windows, groupId is ignored. */
+ protected static String[] getRunCommand(String command,
+ String groupId) {
+ if (Shell.WINDOWS) {
+ return new String[] { Shell.WINUTILS, "task", "create", groupId,
+ "cmd /c " + command };
+ } else {
+ return new String[] { "bash", "-c", command };
+ }
+ }
+
+ /** Return a command for determining if process with specified pid is alive. */
+ protected static String[] getCheckProcessIsAliveCommand(String pid) {
+ return Shell.WINDOWS ?
+ new String[] { Shell.WINUTILS, "task", "isAlive", pid } :
+ new String[] { "kill", "-0", pid };
+ }
+
+ /** Return a command to send a signal to a given pid */
+ protected static String[] getSignalKillCommand(int code, String pid) {
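+ // The numeric signal code is not used on Windows; "winutils task kill" simply terminates the task identified by pid.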
+ return Shell.WINDOWS ? new String[] { Shell.WINUTILS, "task", "kill", pid } :
+ new String[] { "kill", "-" + code, pid };
+ }
/**
* Is the container still active?
@@ -253,6 +281,9 @@ public String getProcessId(ContainerId containerID) {
public static final boolean isSetsidAvailable = isSetsidSupported();
private static boolean isSetsidSupported() {
+ if (Shell.WINDOWS) {
+ return true;
+ }
ShellCommandExecutor shexec = null;
boolean setsidSupported = true;
try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 27730f421b..cc3fc76697 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -37,6 +37,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -53,10 +55,9 @@ public class DefaultContainerExecutor extends ContainerExecutor {
private static final Log LOG = LogFactory
.getLog(DefaultContainerExecutor.class);
- private final FileContext lfs;
+ private static final int WIN_MAX_PATH = 260;
- private static final String WRAPPER_LAUNCH_SCRIPT =
- "default_container_executor.sh";
+ private final FileContext lfs;
public DefaultContainerExecutor() {
try {
@@ -145,15 +146,24 @@ public int launchContainer(Container container,
lfs.util().copy(nmPrivateTokensPath, tokenDst);
// Create new local launch wrapper script
- Path wrapperScriptDst = new Path(containerWorkDir, WRAPPER_LAUNCH_SCRIPT);
- DataOutputStream wrapperScriptOutStream =
- lfs.create(wrapperScriptDst,
- EnumSet.of(CREATE, OVERWRITE));
+ LocalWrapperScriptBuilder sb = Shell.WINDOWS ?
+ new WindowsLocalWrapperScriptBuilder(containerIdStr, containerWorkDir) :
+ new UnixLocalWrapperScriptBuilder(containerWorkDir);
+
+ // Fail fast if attempting to launch the wrapper script would fail due to
+ // Windows path length limitation.
+ if (Shell.WINDOWS &&
+ sb.getWrapperScriptPath().toString().length() > WIN_MAX_PATH) {
+ throw new IOException(String.format(
+ "Cannot launch container using script at path %s, because it exceeds " +
+ "the maximum supported path length of %d characters. Consider " +
+ "configuring shorter directories in %s.", sb.getWrapperScriptPath(),
+ WIN_MAX_PATH, YarnConfiguration.NM_LOCAL_DIRS));
+ }
Path pidFile = getPidFilePath(containerId);
if (pidFile != null) {
- writeLocalWrapperScript(wrapperScriptOutStream, launchDst.toUri()
- .getPath().toString(), pidFile.toString());
+ sb.writeLocalWrapperScript(launchDst, pidFile);
} else {
LOG.info("Container " + containerIdStr
+ " was marked as inactive. Returning terminated error");
@@ -166,12 +176,13 @@ public int launchContainer(Container container,
try {
lfs.setPermission(launchDst,
ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
- lfs.setPermission(wrapperScriptDst,
+ lfs.setPermission(sb.getWrapperScriptPath(),
ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
// Setup command to run
- String[] command = {"bash",
- wrapperScriptDst.toUri().getPath().toString()};
+ String[] command = getRunCommand(sb.getWrapperScriptPath().toString(),
+ containerIdStr);
+
LOG.info("launchContainer: " + Arrays.toString(command));
shExec = new ShellCommandExecutor(
command,
@@ -202,28 +213,85 @@ public int launchContainer(Container container,
return 0;
}
- private void writeLocalWrapperScript(DataOutputStream out,
- String launchScriptDst, String pidFilePath) throws IOException {
- // We need to do a move as writing to a file is not atomic
- // Process reading a file being written to may get garbled data
- // hence write pid to tmp file first followed by a mv
- StringBuilder sb = new StringBuilder("#!/bin/bash\n\n");
- sb.append("echo $$ > " + pidFilePath + ".tmp\n");
- sb.append("/bin/mv -f " + pidFilePath + ".tmp " + pidFilePath + "\n");
- sb.append(ContainerExecutor.isSetsidAvailable? "exec setsid" : "exec");
- sb.append(" /bin/bash ");
- sb.append("\"");
- sb.append(launchScriptDst);
- sb.append("\"\n");
- PrintStream pout = null;
- try {
- pout = new PrintStream(out);
- pout.append(sb);
- } finally {
- if (out != null) {
- out.close();
+ private abstract class LocalWrapperScriptBuilder {
+
+ private final Path wrapperScriptPath;
+
+ public Path getWrapperScriptPath() {
+ return wrapperScriptPath;
+ }
+
+ public void writeLocalWrapperScript(Path launchDst, Path pidFile) throws IOException {
+ DataOutputStream out = null;
+ PrintStream pout = null;
+
+ try {
+ out = lfs.create(wrapperScriptPath, EnumSet.of(CREATE, OVERWRITE));
+ pout = new PrintStream(out);
+ writeLocalWrapperScript(launchDst, pidFile, pout);
+ } finally {
+ IOUtils.cleanup(LOG, pout, out);
}
}
+
+ protected abstract void writeLocalWrapperScript(Path launchDst, Path pidFile,
+ PrintStream pout);
+
+ protected LocalWrapperScriptBuilder(Path wrapperScriptPath) {
+ this.wrapperScriptPath = wrapperScriptPath;
+ }
+ }
+
+ private final class UnixLocalWrapperScriptBuilder
+ extends LocalWrapperScriptBuilder {
+
+ public UnixLocalWrapperScriptBuilder(Path containerWorkDir) {
+ super(new Path(containerWorkDir, "default_container_executor.sh"));
+ }
+
+ @Override
+ public void writeLocalWrapperScript(Path launchDst, Path pidFile,
+ PrintStream pout) {
+
+ // We need to do a move as writing to a file is not atomic
+ // Process reading a file being written to may get garbled data
+ // hence write pid to tmp file first followed by a mv
+ pout.println("#!/bin/bash");
+ pout.println();
+ pout.println("echo $$ > " + pidFile.toString() + ".tmp");
+ pout.println("/bin/mv -f " + pidFile.toString() + ".tmp " + pidFile);
+ String exec = ContainerExecutor.isSetsidAvailable? "exec setsid" : "exec";
+ pout.println(exec + " /bin/bash -c \"" +
+ launchDst.toUri().getPath().toString() + "\"");
+ }
+ }
+
+ private final class WindowsLocalWrapperScriptBuilder
+ extends LocalWrapperScriptBuilder {
+
+ private final String containerIdStr;
+
+ public WindowsLocalWrapperScriptBuilder(String containerIdStr,
+ Path containerWorkDir) {
+
+ super(new Path(containerWorkDir, "default_container_executor.cmd"));
+ this.containerIdStr = containerIdStr;
+ }
+
+ @Override
+ public void writeLocalWrapperScript(Path launchDst, Path pidFile,
+ PrintStream pout) {
+
+ // On Windows, the pid is the container ID, so that it can also serve as
+ // the name of the job object created by winutils for task management.
+ // Write to temp file followed by atomic move.
+ String normalizedPidFile = new File(pidFile.toString()).getPath();
+ pout.println("@echo " + containerIdStr + " > " + normalizedPidFile +
+ ".tmp");
+ pout.println("@move /Y " + normalizedPidFile + ".tmp " +
+ normalizedPidFile);
+ pout.println("@call " + launchDst.toString());
+ }
}
@Override
@@ -234,17 +302,13 @@ public boolean signalContainer(String user, String pid, Signal signal)
: pid;
LOG.debug("Sending signal " + signal.getValue() + " to pid " + sigpid
+ " as user " + user);
- try {
- sendSignal(sigpid, Signal.NULL);
- } catch (ExitCodeException e) {
+ if (!containerIsAlive(sigpid)) {
return false;
}
try {
- sendSignal(sigpid, signal);
+ killContainer(sigpid, signal);
} catch (IOException e) {
- try {
- sendSignal(sigpid, Signal.NULL);
- } catch (IOException ignore) {
+ if (!containerIsAlive(sigpid)) {
return false;
}
throw e;
@@ -252,6 +316,24 @@ public boolean signalContainer(String user, String pid, Signal signal)
return true;
}
+ /**
+ * Returns true if the process with the specified pid is alive.
+ *
+ * @param pid String pid
+ * @return boolean true if the process is alive
+ */
+ private boolean containerIsAlive(String pid) throws IOException {
+ try {
+ new ShellCommandExecutor(getCheckProcessIsAliveCommand(pid)).execute();
+ // successful execution means process is alive
+ return true;
+ }
+ catch (ExitCodeException e) {
+ // failure (non-zero exit code) means process is not alive
+ return false;
+ }
+ }
+
/**
* Send a specified signal to the specified pid
*
@@ -259,11 +341,9 @@ public boolean signalContainer(String user, String pid, Signal signal)
* @param signal signal to send
* (for logging).
*/
- protected void sendSignal(String pid, Signal signal) throws IOException {
- ShellCommandExecutor shexec = null;
- String[] arg = { "kill", "-" + signal.getValue(), pid };
- shexec = new ShellCommandExecutor(arg);
- shexec.execute();
+ private void killContainer(String pid, Signal signal) throws IOException {
+ new ShellCommandExecutor(getSignalKillCommand(signal.getValue(), pid))
+ .execute();
}
@Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
index 96a58dda19..517b365d06 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
@@ -20,7 +20,6 @@
import java.io.IOException;
import java.net.URI;
-import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.Timer;
@@ -305,7 +304,7 @@ public static String[] validatePaths(String[] paths) {
ArrayList<String> validPaths = new ArrayList<String>();
for (int i = 0; i < paths.length; ++i) {
try {
- URI uriPath = new URI(paths[i]);
+ URI uriPath = (new Path(paths[i])).toUri();
if (uriPath.getScheme() == null
|| uriPath.getScheme().equals(FILE_SCHEME)) {
validPaths.add(uriPath.getPath());
@@ -316,7 +315,7 @@ public static String[] validatePaths(String[] paths) {
+ " is not a valid path. Path should be with " + FILE_SCHEME
+ " scheme or without scheme");
}
- } catch (URISyntaxException e) {
+ } catch (IllegalArgumentException e) {
LOG.warn(e.getMessage());
throw new YarnException(paths[i]
+ " is not a valid path. Path should be with " + FILE_SCHEME
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index b06788341f..e1d66bde40 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -23,6 +23,7 @@
import java.io.DataOutputStream;
import java.io.IOException;
+import java.io.File;
import java.io.OutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
@@ -37,6 +38,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
@@ -69,7 +71,8 @@ public class ContainerLaunch implements Callable<Integer> {
private static final Log LOG = LogFactory.getLog(ContainerLaunch.class);
- public static final String CONTAINER_SCRIPT = "launch_container.sh";
+ public static final String CONTAINER_SCRIPT = Shell.WINDOWS ?
+ "launch_container.cmd" : "launch_container.sh";
public static final String FINAL_CONTAINER_TOKENS_FILE = "container_tokens";
private static final String PID_FILE_NAME_FMT = "%s.pid";
@@ -130,7 +133,7 @@ public Integer call() {
for (String str : command) {
// TODO: Should we instead work via symlinks without this grammar?
newCmds.add(str.replace(ApplicationConstants.LOG_DIR_EXPANSION_VAR,
- containerLogDir.toUri().getPath()));
+ containerLogDir.toString()));
}
launchContext.setCommands(newCmds);
@@ -141,7 +144,7 @@ public Integer call() {
entry.setValue(
value.replace(
ApplicationConstants.LOG_DIR_EXPANSION_VAR,
- containerLogDir.toUri().getPath())
+ containerLogDir.toString())
);
}
// /////////////////////////// End of variable expansion
@@ -411,28 +414,17 @@ private String getAppPrivateDir(String appIdStr) {
+ appIdStr;
}
- private static class ShellScriptBuilder {
-
- private final StringBuilder sb;
-
- public ShellScriptBuilder() {
- this(new StringBuilder("#!/bin/bash\n\n"));
- }
-
- protected ShellScriptBuilder(StringBuilder sb) {
- this.sb = sb;
- }
-
- public ShellScriptBuilder env(String key, String value) {
- line("export ", key, "=\"", value, "\"");
- return this;
- }
-
- public ShellScriptBuilder symlink(Path src, String dst) throws IOException {
- return symlink(src, new Path(dst));
- }
-
- public ShellScriptBuilder symlink(Path src, Path dst) throws IOException {
+ private static abstract class ShellScriptBuilder {
+
+ private static final String LINE_SEPARATOR =
+ System.getProperty("line.separator");
+ private final StringBuilder sb = new StringBuilder();
+
+ public abstract void command(List<String> command);
+
+ public abstract void env(String key, String value);
+
+ public final void symlink(Path src, Path dst) throws IOException {
if (!src.isAbsolute()) {
throw new IOException("Source must be absolute");
}
@@ -440,28 +432,89 @@ public ShellScriptBuilder symlink(Path src, Path dst) throws IOException {
throw new IOException("Destination must be relative");
}
if (dst.toUri().getPath().indexOf('/') != -1) {
- line("mkdir -p ", dst.getParent().toString());
+ mkdir(dst.getParent());
}
- line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
- return this;
+ link(src, dst);
}
-
- public void write(PrintStream out) throws IOException {
- out.append(sb);
- }
-
- public void line(String... command) {
- for (String s : command) {
- sb.append(s);
- }
- sb.append("\n");
- }
-
+
@Override
public String toString() {
return sb.toString();
}
+ public final void write(PrintStream out) throws IOException {
+ out.append(sb);
+ }
+
+ protected final void line(String... command) {
+ for (String s : command) {
+ sb.append(s);
+ }
+ sb.append(LINE_SEPARATOR);
+ }
+
+ protected abstract void link(Path src, Path dst) throws IOException;
+
+ protected abstract void mkdir(Path path);
+ }
+
+ private static final class UnixShellScriptBuilder extends ShellScriptBuilder {
+
+ public UnixShellScriptBuilder(){
+ line("#!/bin/bash");
+ line();
+ }
+
+ @Override
+ public void command(List<String> command) {
+ line("exec /bin/bash -c \"", StringUtils.join(" ", command), "\"");
+ }
+
+ @Override
+ public void env(String key, String value) {
+ line("export ", key, "=\"", value, "\"");
+ }
+
+ @Override
+ protected void link(Path src, Path dst) throws IOException {
+ line("ln -sf \"", src.toUri().getPath(), "\" \"", dst.toString(), "\"");
+ }
+
+ @Override
+ protected void mkdir(Path path) {
+ line("mkdir -p ", path.toString());
+ }
+ }
+
+ private static final class WindowsShellScriptBuilder
+ extends ShellScriptBuilder {
+
+ public WindowsShellScriptBuilder() {
+ line("@setlocal");
+ line();
+ }
+
+ @Override
+ public void command(List<String> command) {
+ line("@call ", StringUtils.join(" ", command));
+ }
+
+ @Override
+ public void env(String key, String value) {
+ line("@set ", key, "=", value);
+ }
+
+ @Override
+ protected void link(Path src, Path dst) throws IOException {
+ line(String.format("@%s symlink \"%s\" \"%s\"", Shell.WINUTILS,
+ new File(dst.toString()).getPath(),
+ new File(src.toUri().getPath()).getPath()));
+ }
+
+ @Override
+ protected void mkdir(Path path) {
+ line("@if not exist ", path.toString(), " mkdir ", path.toString());
+ }
}
private static void putEnvIfNotNull(
@@ -479,7 +532,7 @@ private static void putEnvIfAbsent(
}
public void sanitizeEnv(Map<String, String> environment,
- Path pwd, List<Path> appDirs) {
+ Path pwd, List<Path> appDirs) throws IOException {
/**
* Non-modifiable environment variables
*/
@@ -513,6 +566,14 @@ public void sanitizeEnv(Map environment,
environment.put("JVM_PID", "$$");
}
+ // TODO: Remove Windows check and use this approach on all platforms after
+ // additional testing. See YARN-358.
+ if (Shell.WINDOWS) {
+ String inputClassPath = environment.get(Environment.CLASSPATH.name());
+ environment.put(Environment.CLASSPATH.name(),
+ FileUtil.createJarWithClassPath(inputClassPath, pwd));
+ }
+
/**
* Modifiable environment variables
*/
@@ -537,7 +598,8 @@ static void writeLaunchEnv(OutputStream out,
Map<String, String> environment, Map<Path, List<String>> resources,
List<String> command)
throws IOException {
- ShellScriptBuilder sb = new ShellScriptBuilder();
+ ShellScriptBuilder sb = Shell.WINDOWS ? new WindowsShellScriptBuilder() :
+ new UnixShellScriptBuilder();
if (environment != null) {
for (Map.Entry<String, String> env : environment.entrySet()) {
sb.env(env.getKey().toString(), env.getValue().toString());
@@ -546,21 +608,13 @@ static void writeLaunchEnv(OutputStream out,
if (resources != null) {
for (Map.Entry<Path, List<String>> entry : resources.entrySet()) {
for (String linkName : entry.getValue()) {
- sb.symlink(entry.getKey(), linkName);
+ sb.symlink(entry.getKey(), new Path(linkName));
}
}
}
- ArrayList<String> cmd = new ArrayList<String>(2 * command.size() + 5);
- cmd.add("exec /bin/bash ");
- cmd.add("-c ");
- cmd.add("\"");
- for (String cs : command) {
- cmd.add(cs.toString());
- cmd.add(" ");
- }
- cmd.add("\"");
- sb.line(cmd.toArray(new String[cmd.size()]));
+ sb.command(command);
+
PrintStream pout = null;
try {
pout = new PrintStream(out);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
index 279aa29858..0c7c250523 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/ProcessIdFileReader.java
@@ -25,6 +25,8 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.util.ConverterUtils;
/**
* Helper functionality to read the pid from a file.
@@ -62,14 +64,28 @@ public static String getProcessId(Path path) throws IOException {
}
String temp = line.trim();
if (!temp.isEmpty()) {
- try {
- Long pid = Long.valueOf(temp);
- if (pid > 0) {
+ if (Shell.WINDOWS) {
+ // On Windows, pid is expected to be a container ID, so find first
+ // line that parses successfully as a container ID.
+ try {
+ ConverterUtils.toContainerId(temp);
processId = temp;
break;
+ } catch (Exception e) {
+ // do nothing
+ }
+ }
+ else {
+ // Otherwise, find first line containing a numeric pid.
+ try {
+ Long pid = Long.valueOf(temp);
+ if (pid > 0) {
+ processId = temp;
+ break;
+ }
+ } catch (Exception e) {
+ // do nothing
}
- } catch (Exception e) {
- // do nothing
}
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestProcessIdFileReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestProcessIdFileReader.java
index a8e3e8a989..0f9e64f305 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestProcessIdFileReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestProcessIdFileReader.java
@@ -26,13 +26,14 @@
import junit.framework.Assert;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader;
import org.junit.Test;
public class TestProcessIdFileReader {
- @Test
+ @Test (timeout = 30000)
public void testNullPath() {
String pid = null;
try {
@@ -44,22 +45,25 @@ public void testNullPath() {
assert(pid == null);
}
- @Test
+ @Test (timeout = 30000)
public void testSimpleGet() throws IOException {
String rootDir = new File(System.getProperty(
"test.build.data", "/tmp")).getAbsolutePath();
File testFile = null;
+ String expectedProcessId = Shell.WINDOWS ?
+ "container_1353742680940_0002_01_000001" :
+ "56789";
try {
testFile = new File(rootDir, "temp.txt");
PrintWriter fileWriter = new PrintWriter(testFile);
- fileWriter.println("56789");
+ fileWriter.println(expectedProcessId);
fileWriter.close();
String processId = null;
processId = ProcessIdFileReader.getProcessId(
new Path(rootDir + Path.SEPARATOR + "temp.txt"));
- Assert.assertEquals("56789", processId);
+ Assert.assertEquals(expectedProcessId, processId);
} finally {
if (testFile != null
@@ -70,12 +74,15 @@ public void testSimpleGet() throws IOException {
}
- @Test
+ @Test (timeout = 30000)
public void testComplexGet() throws IOException {
String rootDir = new File(System.getProperty(
"test.build.data", "/tmp")).getAbsolutePath();
File testFile = null;
-
+ String processIdInFile = Shell.WINDOWS ?
+ " container_1353742680940_0002_01_000001 " :
+ " 23 ";
+ String expectedProcessId = processIdInFile.trim();
try {
testFile = new File(rootDir, "temp.txt");
PrintWriter fileWriter = new PrintWriter(testFile);
@@ -84,14 +91,14 @@ public void testComplexGet() throws IOException {
fileWriter.println("abc");
fileWriter.println("-123");
fileWriter.println("-123 ");
- fileWriter.println(" 23 ");
+ fileWriter.println(processIdInFile);
fileWriter.println("6236");
fileWriter.close();
String processId = null;
processId = ProcessIdFileReader.getProcessId(
new Path(rootDir + Path.SEPARATOR + "temp.txt"));
- Assert.assertEquals("23", processId);
+ Assert.assertEquals(expectedProcessId, processId);
} finally {
if (testFile != null
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 1bb4dea0dd..452508ff4a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -29,6 +29,8 @@
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
@@ -83,15 +85,51 @@ public MiniYARNCluster(String testName, int noOfNodeManagers,
super(testName.replace("$", ""));
this.numLocalDirs = numLocalDirs;
this.numLogDirs = numLogDirs;
- this.testWorkDir = new File("target",
- testName.replace("$", ""));
+ String testSubDir = testName.replace("$", "");
+ File targetWorkDir = new File("target", testSubDir);
try {
FileContext.getLocalFSFileContext().delete(
- new Path(testWorkDir.getAbsolutePath()), true);
+ new Path(targetWorkDir.getAbsolutePath()), true);
} catch (Exception e) {
LOG.warn("COULD NOT CLEANUP", e);
throw new YarnException("could not cleanup test dir", e);
}
+
+ if (Shell.WINDOWS) {
+ // The test working directory can exceed the maximum path length supported
+ // by some Windows APIs and cmd.exe (260 characters). To work around this,
+ // create a symlink in temporary storage with a much shorter path,
+ // targeting the full path to the test working directory. Then, use the
+ // symlink as the test working directory.
+ String targetPath = targetWorkDir.getAbsolutePath();
+ File link = new File(System.getProperty("java.io.tmpdir"),
+ String.valueOf(System.currentTimeMillis()));
+ String linkPath = link.getAbsolutePath();
+
+ try {
+ FileContext.getLocalFSFileContext().delete(new Path(linkPath), true);
+ } catch (IOException e) {
+ throw new YarnException("could not cleanup symlink: " + linkPath, e);
+ }
+
+ // Guarantee target exists before creating symlink.
+ targetWorkDir.mkdirs();
+
+ ShellCommandExecutor shexec = new ShellCommandExecutor(
+ Shell.getSymlinkCommand(targetPath, linkPath));
+ try {
+ shexec.execute();
+ } catch (IOException e) {
+ throw new YarnException(String.format(
+ "failed to create symlink from %s to %s, shell output: %s", linkPath,
+ targetPath, shexec.getOutput()), e);
+ }
+
+ this.testWorkDir = link;
+ } else {
+ this.testWorkDir = targetWorkDir;
+ }
+
resourceManagerWrapper = new ResourceManagerWrapper();
addService(resourceManagerWrapper);
nodeManagers = new CustomNodeManager[noOfNodeManagers];
@@ -192,6 +230,19 @@ public synchronized void stop() {
resourceManager.stop();
}
super.stop();
+
+ if (Shell.WINDOWS) {
+ // On Windows, clean up the short temporary symlink that was created to
+ // work around path length limitation.
+ String testWorkDirPath = testWorkDir.getAbsolutePath();
+ try {
+ FileContext.getLocalFSFileContext().delete(new Path(testWorkDirPath),
+ true);
+ } catch (IOException e) {
+ LOG.warn("could not cleanup symlink: " +
+ testWorkDir.getAbsolutePath());
+ }
+ }
}
}
@@ -220,7 +271,7 @@ private String prepareDirs(String dirType, int numDirs) {
for (int i = 0; i < numDirs; i++) {
dirs[i]= new File(testWorkDir, MiniYARNCluster.this.getName()
+ "-" + dirType + "Dir-nm-" + index + "_" + i);
- dirs[i].mkdir();
+ dirs[i].mkdirs();
LOG.info("Created " + dirType + "Dir in " + dirs[i].getAbsolutePath());
String delimiter = (i > 0) ? "," : "";
dirsString = dirsString.concat(delimiter + dirs[i].getAbsolutePath());
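
The MiniYARNCluster change works around the roughly 260-character Windows path limit by running tests out of a short symlink in java.io.tmpdir that points at the deep target/ directory. The sketch below isolates just that workaround, assuming Shell.getSymlinkCommand and ShellCommandExecutor behave as they are used in the hunk above; the helper class and method names are hypothetical.

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.util.Shell;
    import org.apache.hadoop.util.Shell.ShellCommandExecutor;

    // Minimal sketch of the short-path workaround; not the MiniYARNCluster code.
    final class ShortPathWorkaround {

      // Returns the directory to use as the working dir: on Windows, a short
      // symlink under java.io.tmpdir targeting targetWorkDir; elsewhere,
      // targetWorkDir unchanged.
      static File shorten(File targetWorkDir) throws IOException {
        if (!Shell.WINDOWS) {
          return targetWorkDir;
        }
        targetWorkDir.mkdirs();  // the symlink target must exist first
        File link = new File(System.getProperty("java.io.tmpdir"),
            String.valueOf(System.currentTimeMillis()));
        ShellCommandExecutor shexec = new ShellCommandExecutor(
            Shell.getSymlinkCommand(targetWorkDir.getAbsolutePath(),
                link.getAbsolutePath()));
        shexec.execute();  // throws IOException if the link cannot be created
        return link;
      }
    }
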
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
index 9ba8a9a25f..47f7082ac9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/pom.xml
@@ -28,6 +28,27 @@
   <artifactId>hadoop-yarn-server</artifactId>
   <packaging>pom</packaging>
 
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <environmentVariables>
+            <!-- HADOOP_HOME required for tests on Windows to find winutils -->
+            <HADOOP_HOME>${basedir}/../../../../hadoop-common-project/hadoop-common/target</HADOOP_HOME>
+          </environmentVariables>
+          <properties>
+            <property>
+              <name>listener</name>
+              <value>org.apache.hadoop.test.TimedOutTestsListener</value>
+            </property>
+          </properties>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
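
The surefire configuration above exports HADOOP_HOME pointing at the hadoop-common build output (presumably so tests can locate the Windows native helpers) and registers org.apache.hadoop.test.TimedOutTestsListener. As a rough, hypothetical illustration of what such a listener can do (this is not the actual TimedOutTestsListener), a JUnit RunListener might dump all thread stacks when a failure looks like a timeout:

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;

    import org.junit.runner.notification.Failure;
    import org.junit.runner.notification.RunListener;

    // Hypothetical sketch: when a test failure looks like a timeout, dump every
    // thread's stack so a hung test leaves a diagnosable trace in the build log.
    public class TimeoutStackDumpListener extends RunListener {
      @Override
      public void testFailure(Failure failure) {
        Throwable cause = failure.getException();
        if (cause != null
            && String.valueOf(cause.getMessage()).contains("timed out")) {
          for (ThreadInfo info
              : ManagementFactory.getThreadMXBean().dumpAllThreads(true, true)) {
            System.err.print(info.toString());
          }
        }
      }
    }
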
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index cbf424b3c8..738d6c5fca 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -159,6 +159,10 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <configuration>
+          <environmentVariables>
+            <!-- HADOOP_HOME required for tests on Windows to find winutils -->
+            <HADOOP_HOME>${basedir}/../../../hadoop-common-project/hadoop-common/target</HADOOP_HOME>
+          </environmentVariables>
           <properties>
             <property>
               <name>listener</name>
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index 8da491dd92..0de9ca4e2d 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -180,15 +180,8 @@
-
- which cygpath 2> /dev/null
- if [ $? = 1 ]; then
- BUILD_DIR="${project.build.directory}"
- else
- BUILD_DIR=`cygpath --unix '${project.build.directory}'`
- fi
- cd $BUILD_DIR
- tar czf ${project.artifactId}-${project.version}.tar.gz ${project.artifactId}-${project.version}
+ cd "${project.build.directory}"
+ tar cf - ${project.artifactId}-${project.version} | gzip > ${project.artifactId}-${project.version}.tar.gz
@@ -204,6 +197,7 @@
CHANGES.txt
+ CHANGES.branch-trunk-win.txt