From 3334306512b5dc932814fded31a89ba1ee97cd9f Mon Sep 17 00:00:00 2001
From: Alejandro Abdelnur
Date: Thu, 8 Dec 2011 19:25:28 +0000
Subject: [PATCH] HDFS-2178. Contributing Hoop to HDFS, replacement for HDFS
proxy with read/write capabilities. (tucu)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1212060 13f79535-47bb-0310-9956-ffa450edef68
---
.gitignore | 1 +
.../assemblies/hadoop-httpfs-dist.xml | 60 ++
hadoop-common-project/hadoop-common/pom.xml | 5 +
hadoop-dist/pom.xml | 1 +
.../hadoop-hdfs-httpfs/README.txt | 17 +
.../hadoop-hdfs-httpfs/pom.xml | 530 +++++++++++
.../src/main/conf/httpfs-env.sh | 41 +
.../src/main/conf/httpfs-log4j.properties | 35 +
.../src/main/conf/httpfs-site.xml | 17 +
.../fs/http/client/HttpFSFileSystem.java | 863 ++++++++++++++++++
.../client/HttpKerberosAuthenticator.java | 41 +
.../http/client/HttpPseudoAuthenticator.java | 45 +
.../hadoop/fs/http/server/AuthFilter.java | 64 ++
.../hadoop/fs/http/server/FSOperations.java | 717 +++++++++++++++
.../http/server/HttpFSExceptionProvider.java | 91 ++
.../hadoop/fs/http/server/HttpFSParams.java | 536 +++++++++++
.../fs/http/server/HttpFSReleaseFilter.java | 41 +
.../hadoop/fs/http/server/HttpFSServer.java | 604 ++++++++++++
.../fs/http/server/HttpFSServerWebApp.java | 126 +++
.../hadoop/lib/lang/RunnableCallable.java | 96 ++
.../apache/hadoop/lib/lang/XException.java | 134 +++
.../apache/hadoop/lib/server/BaseService.java | 178 ++++
.../org/apache/hadoop/lib/server/Server.java | 766 ++++++++++++++++
.../hadoop/lib/server/ServerException.java | 90 ++
.../org/apache/hadoop/lib/server/Service.java | 79 ++
.../hadoop/lib/server/ServiceException.java | 41 +
.../hadoop/lib/service/FileSystemAccess.java | 42 +
.../service/FileSystemAccessException.java | 52 ++
.../org/apache/hadoop/lib/service/Groups.java | 28 +
.../hadoop/lib/service/Instrumentation.java | 50 +
.../apache/hadoop/lib/service/ProxyUser.java | 28 +
.../apache/hadoop/lib/service/Scheduler.java | 30 +
.../hadoop/FileSystemAccessService.java | 278 ++++++
.../InstrumentationService.java | 403 ++++++++
.../service/scheduler/SchedulerService.java | 129 +++
.../lib/service/security/GroupsService.java | 56 ++
.../service/security/ProxyUserService.java | 176 ++++
.../lib/servlet/FileSystemReleaseFilter.java | 110 +++
.../hadoop/lib/servlet/HostnameFilter.java | 91 ++
.../apache/hadoop/lib/servlet/MDCFilter.java | 101 ++
.../hadoop/lib/servlet/ServerWebApp.java | 159 ++++
.../org/apache/hadoop/lib/util/Check.java | 199 ++++
.../hadoop/lib/util/ConfigurationUtils.java | 157 ++++
.../apache/hadoop/lib/wsrs/BooleanParam.java | 43 +
.../org/apache/hadoop/lib/wsrs/ByteParam.java | 35 +
.../org/apache/hadoop/lib/wsrs/EnumParam.java | 42 +
.../hadoop/lib/wsrs/ExceptionProvider.java | 67 ++
.../hadoop/lib/wsrs/InputStreamEntity.java | 52 ++
.../apache/hadoop/lib/wsrs/IntegerParam.java | 35 +
.../hadoop/lib/wsrs/JSONMapProvider.java | 62 ++
.../apache/hadoop/lib/wsrs/JSONProvider.java | 62 ++
.../org/apache/hadoop/lib/wsrs/LongParam.java | 35 +
.../org/apache/hadoop/lib/wsrs/Param.java | 54 ++
.../apache/hadoop/lib/wsrs/ShortParam.java | 35 +
.../apache/hadoop/lib/wsrs/StringParam.java | 69 ++
.../apache/hadoop/lib/wsrs/UserProvider.java | 79 ++
.../src/main/libexec/httpfs-config.sh | 167 ++++
.../main/resources/default-log4j.properties | 20 +
.../src/main/resources/httpfs-default.xml | 204 +++++
.../src/main/resources/httpfs.properties | 21 +
.../src/main/sbin/httpfs.sh | 62 ++
.../src/main/tomcat/ROOT/WEB-INF/web.xml | 16 +
.../src/main/tomcat/ROOT/index.html | 21 +
.../src/main/tomcat/logging.properties | 67 ++
.../src/main/tomcat/server.xml | 150 +++
.../src/main/webapp/WEB-INF/web.xml | 88 ++
.../src/site/apt/ServerSetup.apt.vm | 121 +++
.../src/site/apt/UsingHttpTools.apt.vm | 91 ++
.../src/site/apt/index.apt.vm | 88 ++
.../src/site/configuration.xsl | 49 +
.../hadoop-hdfs-httpfs/src/site/site.xml | 34 +
.../fs/http/client/TestHttpFSFileSystem.java | 485 ++++++++++
.../fs/http/client/TestWebhdfsFileSystem.java | 55 ++
.../fs/http/server/TestHttpFSServer.java | 164 ++++
.../hadoop/lib/lang/TestRunnableCallable.java | 94 ++
.../hadoop/lib/lang/TestXException.java | 62 ++
.../hadoop/lib/server/TestBaseService.java | 68 ++
.../apache/hadoop/lib/server/TestServer.java | 790 ++++++++++++++++
.../lib/server/TestServerConstructor.java | 76 ++
.../hadoop/TestFileSystemAccessService.java | 306 +++++++
.../TestInstrumentationService.java | 404 ++++++++
.../scheduler/TestSchedulerService.java | 49 +
.../service/security/TestGroupsService.java | 62 ++
.../security/TestProxyUserService.java | 225 +++++
.../lib/servlet/TestHostnameFilter.java | 64 ++
.../hadoop/lib/servlet/TestMDCFilter.java | 117 +++
.../hadoop/lib/servlet/TestServerWebApp.java | 76 ++
.../org/apache/hadoop/lib/util/TestCheck.java | 144 +++
.../lib/util/TestConfigurationUtils.java | 125 +++
.../hadoop/lib/wsrs/TestBooleanParam.java | 50 +
.../apache/hadoop/lib/wsrs/TestByteParam.java | 53 ++
.../apache/hadoop/lib/wsrs/TestEnumParam.java | 52 ++
.../lib/wsrs/TestInputStreamEntity.java | 47 +
.../hadoop/lib/wsrs/TestIntegerParam.java | 52 ++
.../hadoop/lib/wsrs/TestJSONMapProvider.java | 45 +
.../hadoop/lib/wsrs/TestJSONProvider.java | 44 +
.../apache/hadoop/lib/wsrs/TestLongParam.java | 47 +
.../hadoop/lib/wsrs/TestShortParam.java | 53 ++
.../hadoop/lib/wsrs/TestStringParam.java | 64 ++
.../hadoop/lib/wsrs/TestUserProvider.java | 91 ++
.../org/apache/hadoop/test/HFSTestCase.java | 28 +
.../org/apache/hadoop/test/HTestCase.java | 174 ++++
.../test/HadoopUsersConfTestHelper.java | 177 ++++
.../hadoop/test/SysPropsForTestsLoader.java | 70 ++
.../java/org/apache/hadoop/test/TestDir.java | 34 +
.../org/apache/hadoop/test/TestDirHelper.java | 149 +++
.../org/apache/hadoop/test/TestException.java | 30 +
.../hadoop/test/TestExceptionHelper.java | 66 ++
.../apache/hadoop/test/TestHFSTestCase.java | 187 ++++
.../org/apache/hadoop/test/TestHTestCase.java | 154 ++++
.../java/org/apache/hadoop/test/TestHdfs.java | 40 +
.../apache/hadoop/test/TestHdfsHelper.java | 159 ++++
.../org/apache/hadoop/test/TestJetty.java | 40 +
.../apache/hadoop/test/TestJettyHelper.java | 118 +++
.../resources/TestServerWebApp1.properties | 13 +
.../resources/TestServerWebApp2.properties | 15 +
.../src/test/resources/classutils.txt | 1 +
.../test/resources/default-log4j.properties | 22 +
.../src/test/resources/server.properties | 13 +
.../src/test/resources/testserver-default.xml | 20 +
.../src/test/resources/testserver.properties | 13 +
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
hadoop-hdfs-project/pom.xml | 1 +
hadoop-project/pom.xml | 18 +-
124 files changed, 14685 insertions(+), 1 deletion(-)
create mode 100644 hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/README.txt
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-log4j.properties
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-site.xml
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpKerberosAuthenticator.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpPseudoAuthenticator.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/AuthFilter.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/RunnableCallable.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Service.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServiceException.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Instrumentation.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/ProxyUser.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Scheduler.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/ProxyUserService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ExceptionProvider.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONMapProvider.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONProvider.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/httpfs-config.sh
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/default-log4j.properties
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs.properties
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/WEB-INF/web.xml
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/configuration.xsl
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HFSTestCase.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/SysPropsForTestsLoader.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDir.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestException.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfs.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJetty.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/TestServerWebApp1.properties
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/TestServerWebApp2.properties
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/classutils.txt
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/default-log4j.properties
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/server.properties
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/testserver-default.xml
create mode 100644 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/testserver.properties
diff --git a/.gitignore b/.gitignore
index f223254b16..93e755ce9c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,4 @@
.settings
target
hadoop-hdfs-project/hadoop-hdfs/downloads
+hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
new file mode 100644
index 0000000000..79bad49122
--- /dev/null
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
@@ -0,0 +1,60 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<assembly>
+  <id>hadoop-httpfs-dist</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <!-- Configuration files -->
+    <fileSet>
+      <directory>${basedir}/src/main/conf</directory>
+      <outputDirectory>/etc/hadoop</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+    </fileSet>
+    <!-- Readme, licenses, etc. -->
+    <fileSet>
+      <directory>${basedir}</directory>
+      <outputDirectory>/</outputDirectory>
+      <includes>
+        <include>*.txt</include>
+      </includes>
+    </fileSet>
+    <!-- Scripts -->
+    <fileSet>
+      <directory>${basedir}/src/main/sbin</directory>
+      <outputDirectory>/sbin</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/libexec</directory>
+      <outputDirectory>/libexec</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <!-- Documentation -->
+    <fileSet>
+      <directory>${project.build.directory}/site</directory>
+      <outputDirectory>/share/doc/hadoop/httpfs</outputDirectory>
+    </fileSet>
+  </fileSets>
+</assembly>
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index daa82b4a8e..ab8bf4819d 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -264,6 +264,11 @@
       <artifactId>hadoop-auth</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>com.googlecode.json-simple</groupId>
+      <artifactId>json-simple</artifactId>
+      <scope>compile</scope>
+    </dependency>
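
The json-simple dependency added above is what the new HttpFS client code uses to decode server JSON responses (see HttpFSFileSystem further down in this patch). As a minimal sketch of the library's API, with an illustrative payload and class name:

    import org.json.simple.JSONObject;
    import org.json.simple.parser.JSONParser;

    public class JsonSimpleSketch {
      public static void main(String[] args) throws Exception {
        // Parse the kind of payload WebHDFS/HttpFS returns for boolean operations.
        JSONObject json = (JSONObject) new JSONParser()
            .parse("{\"boolean\": true}");
        System.out.println((Boolean) json.get("boolean")); // prints: true
      }
    }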
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 3c4bfc46a6..ed6b729a93 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -112,6 +112,7 @@
run cd hadoop-${project.version}
run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* .
run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .
+ run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* .
run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* .
COMMON_LIB=share/hadoop/common/lib
MODULES=../../../../modules
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/README.txt b/hadoop-hdfs-project/hadoop-hdfs-httpfs/README.txt
new file mode 100644
index 0000000000..c2f4d64e20
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/README.txt
@@ -0,0 +1,17 @@
+-----------------------------------------------------------------------------
+HttpFS - Hadoop HDFS over HTTP
+
+HttpFS is a server that provides a REST HTTP gateway to HDFS with full
+filesystem read & write capabilities.
+
+HttpFS can be used to transfer data between clusters running different
+versions of Hadoop (overcoming RPC versioning issues), for example using
+Hadoop DistCP.
+
+HttpFS can be used to access data in HDFS on a cluster behind a firewall
+(the HttpFS server acts as a gateway and is the only system that is allowed
+to cross the firewall into the cluster).
+
+HttpFS can be used to access data in HDFS using HTTP utilities (such as curl
+and wget) and HTTP libraries from languages other than Java (such as Perl).
+-----------------------------------------------------------------------------
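
As a sketch of the access pattern the README describes, the listing below fetches a directory listing over plain HTTP. The host, port, and user are illustrative, and the server is assumed to run with pseudo authentication:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class HttpFSListStatusSketch {
      public static void main(String[] args) throws Exception {
        // Equivalent to:
        //   curl "http://localhost:14000/webhdfs/v1/user/babu?op=LISTSTATUS&user.name=babu"
        URL url = new URL("http://localhost:14000/webhdfs/v1/user/babu"
                          + "?op=LISTSTATUS&user.name=babu");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        BufferedReader reader =
            new BufferedReader(new InputStreamReader(conn.getInputStream()));
        String line;
        while ((line = reader.readLine()) != null) {
          System.out.println(line); // a JSON "FileStatuses" payload
        }
        reader.close();
      }
    }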
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
new file mode 100644
index 0000000000..8ae1563541
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -0,0 +1,530 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>0.24.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-hdfs-httpfs</artifactId>
+  <version>0.24.0-SNAPSHOT</version>
+  <packaging>war</packaging>
+
+  <name>Apache Hadoop HttpFS</name>
+  <description>Apache Hadoop HttpFS</description>
+
+  <properties>
+    <tomcat.version>6.0.32</tomcat.version>
+    <httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
+    <httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
+    <httpfs.source.revision>REVISION NOT AVAIL</httpfs.source.revision>
+    <maven.build.timestamp.format>yyyy-MM-dd'T'HH:mm:ssZ</maven.build.timestamp.format>
+    <httpfs.build.timestamp>${maven.build.timestamp}</httpfs.build.timestamp>
+    <httpfs.tomcat.dist.dir>
+      ${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/httpfs/tomcat
+    </httpfs.tomcat.dist.dir>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>javax.servlet</groupId>
+      <artifactId>servlet-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>javax.servlet.jsp</groupId>
+      <artifactId>jsp-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.jdom</groupId>
+      <artifactId>jdom</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.googlecode.json-simple</groupId>
+      <artifactId>json-simple</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.xml.stream</groupId>
+          <artifactId>stax-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-cli</groupId>
+          <artifactId>commons-cli</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-httpclient</groupId>
+          <artifactId>commons-httpclient</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-api-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.java.dev.jets3t</groupId>
+          <artifactId>jets3t</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>hsqldb</groupId>
+          <artifactId>hsqldb</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.eclipse.jdt</groupId>
+          <artifactId>core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-cli</groupId>
+          <artifactId>commons-cli</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-httpclient</groupId>
+          <artifactId>commons-httpclient</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-compiler</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>tomcat</groupId>
+          <artifactId>jasper-runtime</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jsp-api-2.1</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.java.dev.jets3t</groupId>
+          <artifactId>jets3t</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>hsqldb</groupId>
+          <artifactId>hsqldb</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.eclipse.jdt</groupId>
+          <artifactId>core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-el</groupId>
+          <artifactId>commons-el</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <resources>
+      <resource>
+        <directory>src/main/resources</directory>
+        <filtering>true</filtering>
+        <includes>
+          <include>httpfs.properties</include>
+        </includes>
+      </resource>
+      <resource>
+        <directory>src/main/resources</directory>
+        <filtering>false</filtering>
+        <excludes>
+          <exclude>httpfs.properties</exclude>
+        </excludes>
+      </resource>
+    </resources>
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+
+ 1
+
+
+
+ org.apache.maven.plugins
+ maven-javadoc-plugin
+
+
+
+ javadoc
+
+ site
+
+ true
+ true
+ false
+
+ ${maven.compile.encoding}
+
+
+ HttpFs API
+ *
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-project-info-reports-plugin
+
+
+
+ false
+
+
+ dependencies
+
+ site
+
+
+
+
+ org.apache.rat
+ apache-rat-plugin
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+
+
+ create-web-xmls
+ generate-test-resources
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+ site
+ site
+
+ run
+
+
+
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-war-plugin
+
+
+ default-war
+ package
+
+ war
+
+
+ webhdfs
+ ${project.build.directory}/webhdfs
+
+
+
+
+
+
+
+
+
+ docs
+
+ false
+
+
+
+
+ org.apache.maven.plugins
+ maven-site-plugin
+
+
+ docs
+ prepare-package
+
+ site
+
+
+
+
+
+
+
+
+
+ dist
+
+ false
+
+
+
+
+ org.apache.maven.plugins
+ maven-assembly-plugin
+
+
+ org.apache.hadoop
+ hadoop-assemblies
+ ${project.version}
+
+
+
+
+ dist
+ package
+
+ single
+
+
+ ${project.artifactId}-${project.version}
+ false
+ false
+
+ hadoop-httpfs-dist
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+
+
+ dist
+
+ run
+
+ package
+
+
+
+
+
+
+
+
+
+
+ which cygpath 2> /dev/null
+ if [ $? = 1 ]; then
+ BUILD_DIR="${project.build.directory}"
+ else
+ BUILD_DIR=`cygpath --unix '${project.build.directory}'`
+ fi
+ cd $BUILD_DIR/tomcat.exp
+ tar xzf ${basedir}/downloads/tomcat.tar.gz
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ tar
+ package
+
+ run
+
+
+
+
+
+
+ which cygpath 2> /dev/null
+ if [ $? = 1 ]; then
+ BUILD_DIR="${project.build.directory}"
+ else
+ BUILD_DIR=`cygpath --unix '${project.build.directory}'`
+ fi
+ cd $BUILD_DIR
+ tar czf ${project.artifactId}-${project.version}.tar.gz ${project.artifactId}-${project.version}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh
new file mode 100644
index 0000000000..84c67b790a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# Set httpfs specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs HttpFS
+# Java System properties for HttpFS should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# HttpFS logs directory
+#
+# export HTTPFS_LOG=${HTTPFS_HOME}/logs
+
+# HttpFS temporary directory
+#
+# export HTTPFS_TEMP=${HTTPFS_HOME}/temp
+
+# The HTTP port used by HttpFS
+#
+# export HTTPFS_HTTP_PORT=14000
+
+# The Admin port used by HttpFS
+#
+# export HTTPFS_ADMIN_PORT=`expr ${HTTPFS_HTTP_PORT} + 1`
+
+# The hostname HttpFS server runs on
+#
+# export HTTPFS_HTTP_HOSTNAME=`hostname -f`
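+
+# Example (illustrative): a site keeping logs under /var/log and explicitly
+# pinning the default port could use:
+#
+# export HTTPFS_LOG=/var/log/httpfs
+# export HTTPFS_HTTP_PORT=14000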
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-log4j.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-log4j.properties
new file mode 100644
index 0000000000..284a81924c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-log4j.properties
@@ -0,0 +1,35 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java system property 'httpfs.log.dir' is not defined at HttpFSServer startup time,
+# the setup script sets its value to '${httpfs.home}/logs'.
+
+log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd
+log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log
+log4j.appender.httpfs.Append=true
+log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout
+log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
+
+log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log
+log4j.appender.httpfsaudit.Append=true
+log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n
+
+log4j.logger.httpfsaudit=INFO, httpfsaudit
+
+log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs
+log4j.logger.org.apache.hadoop.lib=INFO, httpfs
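
The %X{hostname}, %X{user}, %X{doAs} and %X{op} conversions in the patterns above are MDC lookups, populated per request by the HostnameFilter and MDCFilter servlet filters included in this patch. A minimal sketch of how such values reach the layout (the values are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.MDC;

    public class MdcSketch {
      public static void main(String[] args) {
        // With the slf4j-log4j12 binding, these MDC entries back the %X{...}
        // conversion characters in the log4j patterns above.
        MDC.put("hostname", "client.example.com");
        MDC.put("user", "babu");
        MDC.put("doAs", "babu");
        MDC.put("op", "LISTSTATUS");
        Logger log = LoggerFactory.getLogger("org.apache.hadoop.fs.http.server.HttpFSServer");
        log.info("request served");
        MDC.clear();
      }
    }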
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-site.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-site.xml
new file mode 100644
index 0000000000..4a718e1668
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-site.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+</configuration>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
new file mode 100644
index 0000000000..520c7325fa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -0,0 +1,863 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+import org.json.simple.parser.JSONParser;
+import org.json.simple.parser.ParseException;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.FileNotFoundException;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.lang.reflect.Constructor;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * HttpFSServer implementation of the FileSystemAccess FileSystem.
+ *
+ * This implementation allows a user to access HDFS over HTTP via a HttpFSServer server.
+ */
+public class HttpFSFileSystem extends FileSystem {
+
+ public static final String SERVICE_NAME = "/webhdfs";
+
+ public static final String SERVICE_VERSION = "/v1";
+
+ public static final String SERVICE_PREFIX = SERVICE_NAME + SERVICE_VERSION;
+
+ public static final String OP_PARAM = "op";
+ public static final String DO_AS_PARAM = "doas";
+ public static final String OVERWRITE_PARAM = "overwrite";
+ public static final String REPLICATION_PARAM = "replication";
+ public static final String BLOCKSIZE_PARAM = "blocksize";
+ public static final String PERMISSION_PARAM = "permission";
+ public static final String DESTINATION_PARAM = "destination";
+ public static final String RECURSIVE_PARAM = "recursive";
+ public static final String OWNER_PARAM = "owner";
+ public static final String GROUP_PARAM = "group";
+ public static final String MODIFICATION_TIME_PARAM = "modificationtime";
+ public static final String ACCESS_TIME_PARAM = "accesstime";
+ public static final String RENEWER_PARAM = "renewer";
+
+ public static final String DEFAULT_PERMISSION = "default";
+
+ public static final String RENAME_JSON = "boolean";
+
+ public static final String DELETE_JSON = "boolean";
+
+ public static final String MKDIRS_JSON = "boolean";
+
+ public static final String HOME_DIR_JSON = "Path";
+
+ public static final String SET_REPLICATION_JSON = "boolean";
+
+ public static enum FILE_TYPE {
+ FILE, DIRECTORY, SYMLINK;
+
+ public static FILE_TYPE getType(FileStatus fileStatus) {
+ if (fileStatus.isFile()) {
+ return FILE;
+ }
+ if (fileStatus.isDirectory()) {
+ return DIRECTORY;
+ }
+ if (fileStatus.isSymlink()) {
+ return SYMLINK;
+ }
+ throw new IllegalArgumentException("Could not determine filetype for: " +
+ fileStatus.getPath());
+ }
+ }
+
+ public static final String FILE_STATUSES_JSON = "FileStatuses";
+ public static final String FILE_STATUS_JSON = "FileStatus";
+ public static final String PATH_SUFFIX_JSON = "pathSuffix";
+ public static final String TYPE_JSON = "type";
+ public static final String LENGTH_JSON = "length";
+ public static final String OWNER_JSON = "owner";
+ public static final String GROUP_JSON = "group";
+ public static final String PERMISSION_JSON = "permission";
+ public static final String ACCESS_TIME_JSON = "accessTime";
+ public static final String MODIFICATION_TIME_JSON = "modificationTime";
+ public static final String BLOCK_SIZE_JSON = "blockSize";
+ public static final String REPLICATION_JSON = "replication";
+
+ public static final String FILE_CHECKSUM_JSON = "FileChecksum";
+ public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
+ public static final String CHECKSUM_BYTES_JSON = "bytes";
+ public static final String CHECKSUM_LENGTH_JSON = "length";
+
+ public static final String CONTENT_SUMMARY_JSON = "ContentSummary";
+ public static final String CONTENT_SUMMARY_DIRECTORY_COUNT_JSON = "directoryCount";
+ public static final String CONTENT_SUMMARY_FILE_COUNT_JSON = "fileCount";
+ public static final String CONTENT_SUMMARY_LENGTH_JSON = "length";
+ public static final String CONTENT_SUMMARY_QUOTA_JSON = "quota";
+ public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed";
+ public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota";
+
+ public static final String DELEGATION_TOKEN_JSON = "Token";
+ public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
+
+ public static final String ERROR_JSON = "RemoteException";
+ public static final String ERROR_EXCEPTION_JSON = "exception";
+ public static final String ERROR_CLASSNAME_JSON = "javaClassName";
+ public static final String ERROR_MESSAGE_JSON = "message";
+
+ public static final int HTTP_TEMPORARY_REDIRECT = 307;
+
+
+ /**
+ * Get operations.
+ */
+ public enum GetOpValues {
+ OPEN, GETFILESTATUS, LISTSTATUS, GETHOMEDIR, GETCONTENTSUMMARY, GETFILECHECKSUM,
+ GETDELEGATIONTOKEN, GETFILEBLOCKLOCATIONS, INSTRUMENTATION
+ }
+
+ /**
+ * Post operations.
+ */
+ public static enum PostOpValues {
+ APPEND
+ }
+
+ /**
+ * Put operations.
+ */
+ public static enum PutOpValues {
+ CREATE, MKDIRS, RENAME, SETOWNER, SETPERMISSION, SETREPLICATION, SETTIMES,
+ RENEWDELEGATIONTOKEN, CANCELDELEGATIONTOKEN
+ }
+
+ /**
+ * Delete operations.
+ */
+ public static enum DeleteOpValues {
+ DELETE
+ }
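+
+ // For example (illustrative host and paths), GetOpValues.LISTSTATUS is issued as
+ //   GET http://localhost:14000/webhdfs/v1/user/babu?op=LISTSTATUS
+ // and PutOpValues.MKDIRS as
+ //   PUT http://localhost:14000/webhdfs/v1/user/babu/dir?op=MKDIRS&permission=755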
+
+ private static final String HTTP_GET = "GET";
+ private static final String HTTP_PUT = "PUT";
+ private static final String HTTP_POST = "POST";
+ private static final String HTTP_DELETE = "DELETE";
+
+ private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
+ private URI uri;
+ private Path workingDir;
+ private String doAs;
+
+ /**
+ * Convenience method that creates a <code>HttpURLConnection</code> for the
+ * HttpFSServer file system operations.
+ *
+ * This method injects any needed authentication credentials
+ * via the {@link #getConnection(URL, String)} method.
+ *
+ * @param method the HTTP method.
+ * @param params the query string parameters.
+ * @param path the file path.
+ * @param makeQualified if the path should be 'makeQualified'.
+ *
+ * @return a <code>HttpURLConnection</code> for the HttpFSServer server,
+ * authenticated and ready to use for the specified path and file system operation.
+ *
+ * @throws IOException thrown if an IO error occurs.
+ */
+ private HttpURLConnection getConnection(String method, Map<String, String> params,
+ Path path, boolean makeQualified) throws IOException {
+ params.put(DO_AS_PARAM, doAs);
+ if (makeQualified) {
+ path = makeQualified(path);
+ }
+ URI uri = path.toUri();
+ StringBuilder sb = new StringBuilder();
+ sb.append(uri.getScheme()).append("://").append(uri.getAuthority()).
+ append(SERVICE_PREFIX).append(uri.getPath());
+
+ String separator = "?";
+ for (Map.Entry<String, String> entry : params.entrySet()) {
+ sb.append(separator).append(entry.getKey()).append("=").
+ append(URLEncoder.encode(entry.getValue(), "UTF8"));
+ separator = "&";
+ }
+ URL url = new URL(sb.toString());
+ return getConnection(url, method);
+ }
+
+ /**
+ * Convenience method that creates a <code>HttpURLConnection</code> for the specified URL.
+ *
+ * This method injects any needed authentication credentials.
+ *
+ * @param url url to connect to.
+ * @param method the HTTP method.
+ *
+ * @return a <code>HttpURLConnection</code> for the HttpFSServer server, authenticated and ready to use for
+ * the specified path and file system operation.
+ *
+ * @throws IOException thrown if an IO error occurs.
+ */
+ private HttpURLConnection getConnection(URL url, String method) throws IOException {
+ Class<? extends Authenticator> klass =
+ getConf().getClass("httpfs.authenticator.class", HttpKerberosAuthenticator.class, Authenticator.class);
+ Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
+ try {
+ HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken);
+ conn.setRequestMethod(method);
+ if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
+ conn.setDoOutput(true);
+ }
+ return conn;
+ } catch (Exception ex) {
+ throw new IOException(ex);
+ }
+ }
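+
+ // Note: the 'httpfs.authenticator.class' property makes the client-side
+ // authenticator pluggable; for example, configuring HttpPseudoAuthenticator
+ // (also part of this patch) switches from Kerberos SPNEGO to pseudo authentication.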
+
+ /**
+ * Convenience method that JSON parses the <code>InputStream</code> of a <code>HttpURLConnection</code>.
+ *
+ * @param conn the <code>HttpURLConnection</code>.
+ *
+ * @return the parsed JSON object.
+ *
+ * @throws IOException thrown if the <code>InputStream</code> could not be JSON parsed.
+ */
+ private static Object jsonParse(HttpURLConnection conn) throws IOException {
+ try {
+ JSONParser parser = new JSONParser();
+ return parser.parse(new InputStreamReader(conn.getInputStream()));
+ } catch (ParseException ex) {
+ throw new IOException("JSON parser error, " + ex.getMessage(), ex);
+ }
+ }
+
+ /**
+ * Validates the status of an <code>HttpURLConnection</code> against an expected HTTP
+ * status code. If the current status code is not the expected one it throws an exception
+ * with a detail message using server-side error messages if available.
+ *
+ * @param conn the <code>HttpURLConnection</code>.
+ * @param expected the expected HTTP status code.
+ *
+ * @throws IOException thrown if the current status code does not match the expected one.
+ */
+ private static void validateResponse(HttpURLConnection conn, int expected) throws IOException {
+ int status = conn.getResponseCode();
+ if (status != expected) {
+ try {
+ JSONObject json = (JSONObject) jsonParse(conn);
+ json = (JSONObject) json.get(ERROR_JSON);
+ String message = (String) json.get(ERROR_MESSAGE_JSON);
+ String exception = (String) json.get(ERROR_EXCEPTION_JSON);
+ String className = (String) json.get(ERROR_CLASSNAME_JSON);
+
+ try {
+ ClassLoader cl = HttpFSFileSystem.class.getClassLoader();
+ Class klass = cl.loadClass(className);
+ Constructor constr = klass.getConstructor(String.class);
+ throw (IOException) constr.newInstance(message);
+ } catch (IOException ex) {
+ throw ex;
+ } catch (Exception ex) {
+ throw new IOException(MessageFormat.format("{0} - {1}", exception, message));
+ }
+ } catch (IOException ex) {
+ if (ex.getCause() instanceof IOException) {
+ throw (IOException) ex.getCause();
+ }
+ throw new IOException(MessageFormat.format("HTTP status [{0}], {1}", status, conn.getResponseMessage()));
+ }
+ }
+ }
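+
+ // Illustrative error payload handled above, following the WebHDFS convention:
+ //   {"RemoteException":{"exception":"FileNotFoundException",
+ //                       "javaClassName":"java.io.FileNotFoundException",
+ //                       "message":"File /foo does not exist"}}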
+
+ /**
+ * Called after a new FileSystem instance is constructed.
+ *
+ * @param name a uri whose authority section names the host, port, etc. for this FileSystem
+ * @param conf the configuration
+ */
+ @Override
+ public void initialize(URI name, Configuration conf) throws IOException {
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ doAs = ugi.getUserName();
+ super.initialize(name, conf);
+ try {
+ uri = new URI(name.getScheme() + "://" + name.getHost() + ":" + name.getPort());
+ } catch (URISyntaxException ex) {
+ throw new IOException(ex);
+ }
+ }
+
+ /**
+ * Returns a URI whose scheme and authority identify this FileSystem.
+ *
+ * @return the URI whose scheme and authority identify this FileSystem.
+ */
+ @Override
+ public URI getUri() {
+ return uri;
+ }
+
+ /**
+ * HttpFSServer subclass of the <code>FSDataInputStream</code>.
+ *
+ * This implementation does not support the
+ * <code>PositionedReadable</code> and <code>Seekable</code> methods.
+ */
+ private static class HttpFSDataInputStream extends FilterInputStream implements Seekable, PositionedReadable {
+
+ protected HttpFSDataInputStream(InputStream in, int bufferSize) {
+ super(new BufferedInputStream(in, bufferSize));
+ }
+
+ @Override
+ public int read(long position, byte[] buffer, int offset, int length) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFully(long position, byte[] buffer) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void seek(long pos) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getPos() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean seekToNewSource(long targetPos) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+ }
+
+ /**
+ * Opens an FSDataInputStream at the indicated Path.
+ *
+ * IMPORTANT: the returned <code>FSDataInputStream</code> does not support the
+ * <code>PositionedReadable</code> and <code>Seekable</code> methods.
+ *
+ * @param f the file name to open
+ * @param bufferSize the size of the buffer to be used.
+ */
+ @Override
+ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, GetOpValues.OPEN.toString());
+ HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ return new FSDataInputStream(new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
+ }
+
+ /**
+ * HttpFSServer subclass of the <code>FSDataOutputStream</code>.
+ *
+ * This implementation closes the underlying HTTP connection, validating the HTTP connection status
+ * at closing time.
+ */
+ private static class HttpFSDataOutputStream extends FSDataOutputStream {
+ private HttpURLConnection conn;
+ private int closeStatus;
+
+ public HttpFSDataOutputStream(HttpURLConnection conn, OutputStream out, int closeStatus, Statistics stats)
+ throws IOException {
+ super(out, stats);
+ this.conn = conn;
+ this.closeStatus = closeStatus;
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ super.close();
+ } finally {
+ validateResponse(conn, closeStatus);
+ }
+ }
+
+ }
+
+ /**
+ * Converts a <code>FsPermission</code> to a Unix octal representation.
+ *
+ * @param p the permission.
+ *
+ * @return the Unix octal string representation.
+ */
+ public static String permissionToString(FsPermission p) {
+ return (p == null) ? DEFAULT_PERMISSION : Integer.toString(p.toShort(), 8);
+ }
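+
+ // For example, an FsPermission of rwxr-xr-x (0755) serializes as "755";
+ // a null permission serializes as DEFAULT_PERMISSION ("default").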
+
+ /*
+ * Common handling for uploading data for create and append operations.
+ */
+ private FSDataOutputStream uploadData(String method, Path f, Map<String, String> params,
+ int bufferSize, int expectedStatus) throws IOException {
+ HttpURLConnection conn = getConnection(method, params, f, true);
+ conn.setInstanceFollowRedirects(false);
+ boolean exceptionAlreadyHandled = false;
+ try {
+ if (conn.getResponseCode() == HTTP_TEMPORARY_REDIRECT) {
+ exceptionAlreadyHandled = true;
+ String location = conn.getHeaderField("Location");
+ if (location != null) {
+ conn = getConnection(new URL(location), method);
+ conn.setRequestProperty("Content-Type", "application/octet-stream");
+ try {
+ OutputStream os = new BufferedOutputStream(conn.getOutputStream(), bufferSize);
+ return new HttpFSDataOutputStream(conn, os, expectedStatus, statistics);
+ } catch (IOException ex) {
+ validateResponse(conn, expectedStatus);
+ throw ex;
+ }
+ } else {
+ validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
+ throw new IOException("Missing HTTP 'Location' header for [" + conn.getURL() + "]");
+ }
+ } else {
+ throw new IOException(
+ MessageFormat.format("Expected HTTP status was [307], received [{0}]",
+ conn.getResponseCode()));
+ }
+ } catch (IOException ex) {
+ if (exceptionAlreadyHandled) {
+ throw ex;
+ } else {
+ validateResponse(conn, HTTP_TEMPORARY_REDIRECT);
+ throw ex;
+ }
+ }
+ }
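+
+ // uploadData() implements the WebHDFS two-step write: the first request carries
+ // no data and is answered with a 307 whose 'Location' header names the URL that
+ // accepts the bytes; the data is then streamed in a second request to that URL.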
+
+
+ /**
+ * Opens an FSDataOutputStream at the indicated Path with write-progress
+ * reporting.
+ *
+ * IMPORTANT: The <code>Progressable</code> parameter is not used.
+ *
+ * @param f the file name to open.
+ * @param permission file permission.
+ * @param overwrite if a file with this name already exists, then if true,
+ * the file will be overwritten, and if false an error will be thrown.
+ * @param bufferSize the size of the buffer to be used.
+ * @param replication required block replication for the file.
+ * @param blockSize block size.
+ * @param progress progressable.
+ *
+ * @throws IOException
+ * @see #setPermission(Path, FsPermission)
+ */
+ @Override
+ public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
+ short replication, long blockSize, Progressable progress) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, PutOpValues.CREATE.toString());
+ params.put(OVERWRITE_PARAM, Boolean.toString(overwrite));
+ params.put(REPLICATION_PARAM, Short.toString(replication));
+ params.put(BLOCKSIZE_PARAM, Long.toString(blockSize));
+ params.put(PERMISSION_PARAM, permissionToString(permission));
+ return uploadData(HTTP_PUT, f, params, bufferSize, HttpURLConnection.HTTP_CREATED);
+ }
+
+
+ /**
+ * Append to an existing file (optional operation).
+ *
+ * IMPORTANT: The <code>Progressable</code> parameter is not used.
+ *
+ * @param f the existing file to be appended.
+ * @param bufferSize the size of the buffer to be used.
+ * @param progress for reporting progress if it is not null.
+ *
+ * @throws IOException
+ */
+ @Override
+ public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, PostOpValues.APPEND.toString());
+ return uploadData(HTTP_POST, f, params, bufferSize, HttpURLConnection.HTTP_OK);
+ }
+
+ /**
+ * Renames Path src to Path dst. Can take place on local fs
+ * or remote DFS.
+ */
+ @Override
+ public boolean rename(Path src, Path dst) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, PutOpValues.RENAME.toString());
+ params.put(DESTINATION_PARAM, dst.toString());
+ HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ JSONObject json = (JSONObject) jsonParse(conn);
+ return (Boolean) json.get(RENAME_JSON);
+ }
+
+ /**
+ * Delete a file.
+ *
+ * @deprecated Use delete(Path, boolean) instead
+ */
+ @SuppressWarnings({"deprecation"})
+ @Deprecated
+ @Override
+ public boolean delete(Path f) throws IOException {
+ return delete(f, false);
+ }
+
+ /**
+ * Delete a file.
+ *
+ * @param f the path to delete.
+ * @param recursive if path is a directory and set to
+ * true, the directory is deleted else throws an exception. In
+ * case of a file the recursive can be set to either true or false.
+ *
+ * @return true if delete is successful else false.
+ *
+ * @throws IOException
+ */
+ @Override
+ public boolean delete(Path f, boolean recursive) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, DeleteOpValues.DELETE.toString());
+ params.put(RECURSIVE_PARAM, Boolean.toString(recursive));
+ HttpURLConnection conn = getConnection(HTTP_DELETE, params, f, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ JSONObject json = (JSONObject) jsonParse(conn);
+ return (Boolean) json.get(DELETE_JSON);
+ }
+
+ /**
+ * List the statuses of the files/directories in the given path if the path is
+ * a directory.
+ *
+ * @param f given path
+ *
+ * @return the statuses of the files/directories in the given path
+ *
+ * @throws IOException
+ */
+ @Override
+ public FileStatus[] listStatus(Path f) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, GetOpValues.LISTSTATUS.toString());
+ HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ JSONObject json = (JSONObject) jsonParse(conn);
+ json = (JSONObject) json.get(FILE_STATUSES_JSON);
+ JSONArray jsonArray = (JSONArray) json.get(FILE_STATUS_JSON);
+ FileStatus[] array = new FileStatus[jsonArray.size()];
+ f = makeQualified(f);
+ for (int i = 0; i < jsonArray.size(); i++) {
+ array[i] = createFileStatus(f, (JSONObject) jsonArray.get(i));
+ }
+ return array;
+ }
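+
+ // Illustrative listing payload unwrapped above:
+ //   {"FileStatuses":{"FileStatus":[{"pathSuffix":"a.txt","type":"FILE",...},...]}}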
+
+ /**
+ * Set the current working directory for the given file system. All relative
+ * paths will be resolved relative to it.
+ *
+ * @param newDir new directory.
+ */
+ @Override
+ public void setWorkingDirectory(Path newDir) {
+ workingDir = newDir;
+ }
+
+ /**
+ * Get the current working directory for the given file system
+ *
+ * @return the directory pathname
+ */
+ @Override
+ public Path getWorkingDirectory() {
+ if (workingDir == null) {
+ workingDir = getHomeDirectory();
+ }
+ return workingDir;
+ }
+
+ /**
+ * Make the given file and all non-existent parents into
+ * directories. Has the semantics of Unix 'mkdir -p'.
+ * Existence of the directory hierarchy is not an error.
+ */
+ @Override
+ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, PutOpValues.MKDIRS.toString());
+ params.put(PERMISSION_PARAM, permissionToString(permission));
+ HttpURLConnection conn = getConnection(HTTP_PUT, params, f, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ JSONObject json = (JSONObject) jsonParse(conn);
+ return (Boolean) json.get(MKDIRS_JSON);
+ }
+
+ /**
+ * Return a file status object that represents the path.
+ *
+ * @param f The path we want information from
+ *
+ * @return a FileStatus object
+ *
+ * @throws FileNotFoundException when the path does not exist;
+ * IOException see specific implementation
+ */
+ @Override
+ public FileStatus getFileStatus(Path f) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, GetOpValues.GETFILESTATUS.toString());
+ HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ JSONObject json = (JSONObject) jsonParse(conn);
+ json = (JSONObject) json.get(FILE_STATUS_JSON);
+ f = makeQualified(f);
+ return createFileStatus(f, json);
+ }
+
+ /**
+ * Return the current user's home directory in this filesystem.
+ * The default implementation returns "/user/$USER/".
+ */
+ @Override
+ public Path getHomeDirectory() {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, GetOpValues.GETHOMEDIR.toString());
+ try {
+ HttpURLConnection conn = getConnection(HTTP_GET, params, new Path(getUri().toString(), "/"), false);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ JSONObject json = (JSONObject) jsonParse(conn);
+ return new Path((String) json.get(HOME_DIR_JSON));
+ } catch (IOException ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ /**
+ * Set owner of a path (i.e. a file or a directory).
+ * The parameters username and groupname cannot both be null.
+ *
+ * @param p The path
+ * @param username If it is null, the original username remains unchanged.
+ * @param groupname If it is null, the original groupname remains unchanged.
+ */
+ @Override
+ public void setOwner(Path p, String username, String groupname) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, PutOpValues.SETOWNER.toString());
+ params.put(OWNER_PARAM, username);
+ params.put(GROUP_PARAM, groupname);
+ HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ }
+
+ /**
+ * Set permission of a path.
+ *
+ * @param p path.
+ * @param permission permission.
+ */
+ @Override
+ public void setPermission(Path p, FsPermission permission) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, PutOpValues.SETPERMISSION.toString());
+ params.put(PERMISSION_PARAM, permissionToString(permission));
+ HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ }
+
+ /**
+ * Set the access and modification times of a file.
+ *
+ * @param p The path
+ * @param mtime Set the modification time of this file.
+ * The number of milliseconds since Jan 1, 1970.
+ * A value of -1 means that this call should not set modification time.
+ * @param atime Set the access time of this file.
+ * The number of milliseconds since Jan 1, 1970.
+ * A value of -1 means that this call should not set access time.
+ */
+ @Override
+ public void setTimes(Path p, long mtime, long atime) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, PutOpValues.SETTIMES.toString());
+ params.put(MODIFICATION_TIME_PARAM, Long.toString(mtime));
+ params.put(ACCESS_TIME_PARAM, Long.toString(atime));
+ HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ }
+
+ /**
+ * Set replication for an existing file.
+ *
+ * @param src file name
+ * @param replication new replication
+ *
+ * @return true if successful;
+ * false if file does not exist or is a directory
+ *
+ * @throws IOException
+ */
+ @Override
+ public boolean setReplication(Path src, short replication) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, PutOpValues.SETREPLICATION.toString());
+ params.put(REPLICATION_PARAM, Short.toString(replication));
+ HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ JSONObject json = (JSONObject) jsonParse(conn);
+ return (Boolean) json.get(SET_REPLICATION_JSON);
+ }
+
+ /**
+ * Creates a FileStatus object using a JSON file-status payload
+ * received from an HttpFSServer server.
+ *
+ * @param parent parent path of the file the status belongs to.
+ * @param json a JSON file-status payload received from an HttpFSServer server
+ *
+ * @return the corresponding FileStatus
+ */
+ private FileStatus createFileStatus(Path parent, JSONObject json) {
+ String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
+ Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
+ FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON));
+ long len = (Long) json.get(LENGTH_JSON);
+ String owner = (String) json.get(OWNER_JSON);
+ String group = (String) json.get(GROUP_JSON);
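+ // permissions travel as octal strings in the JSON payload, hence the base-8 parse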
+ FsPermission permission =
+ new FsPermission(Short.parseShort((String) json.get(PERMISSION_JSON), 8));
+ long aTime = (Long) json.get(ACCESS_TIME_JSON);
+ long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
+ long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
+ short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
+ FileStatus fileStatus = null;
+
+ switch (type) {
+ case FILE:
+ case DIRECTORY:
+ fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY),
+ replication, blockSize, mTime, aTime,
+ permission, owner, group, path);
+ break;
+ case SYMLINK:
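+ // the JSON payload carries no symlink target, so it is left as null here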
+ Path symLink = null;
+ fileStatus = new FileStatus(len, false,
+ replication, blockSize, mTime, aTime,
+ permission, owner, group, symLink,
+ path);
+ }
+ return fileStatus;
+ }
+
+ @Override
+ public ContentSummary getContentSummary(Path f) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, GetOpValues.GETCONTENTSUMMARY.toString());
+ HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
+ return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),
+ (Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON),
+ (Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON),
+ (Long) json.get(CONTENT_SUMMARY_QUOTA_JSON),
+ (Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON),
+ (Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)
+ );
+ }
+
+ @Override
+ public FileChecksum getFileChecksum(Path f) throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ params.put(OP_PARAM, GetOpValues.GETFILECHECKSUM.toString());
+ HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ final JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON);
+ return new FileChecksum() {
+ @Override
+ public String getAlgorithmName() {
+ return (String) json.get(CHECKSUM_ALGORITHM_JSON);
+ }
+
+ @Override
+ public int getLength() {
+ return ((Long) json.get(CHECKSUM_LENGTH_JSON)).intValue();
+ }
+
+ @Override
+ public byte[] getBytes() {
+ return StringUtils.hexStringToByte((String) json.get(CHECKSUM_BYTES_JSON));
+ }
+
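+ // this checksum is a read-only client-side view; Writable serialization is intentionally unsupported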
+ @Override
+ public void write(DataOutput out) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+
+}
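Taken together, these methods drive HDFS operations over plain HTTP. A minimal usage sketch follows; the URI, host, port, and paths are illustrative placeholders, not defined by this patch:

    // sketch only: wire up the client-side filesystem against a hypothetical HttpFS endpoint
    HttpFSFileSystem fs = new HttpFSFileSystem();
    fs.initialize(URI.create("http://httpfs-host:14000"), new Configuration());
    boolean created = fs.mkdirs(new Path("/tmp/example"), FsPermission.getDefault());
    FileStatus status = fs.getFileStatus(new Path("/tmp/example"));
    System.out.println(status.getOwner() + " " + status.getPermission());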
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpKerberosAuthenticator.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpKerberosAuthenticator.java
new file mode 100644
index 0000000000..8f781bbb76
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpKerberosAuthenticator.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.client;
+
+
+import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+
+/**
+ * A KerberosAuthenticator subclass that falls back to
+ * {@link HttpPseudoAuthenticator}.
+ */
+public class HttpKerberosAuthenticator extends KerberosAuthenticator {
+
+ /**
+ * Returns the fallback authenticator if the server does not use
+ * Kerberos SPNEGO HTTP authentication.
+ *
+ * @return a {@link HttpPseudoAuthenticator} instance.
+ */
+ @Override
+ protected Authenticator getFallBackAuthenticator() {
+ return new HttpPseudoAuthenticator();
+ }
+}
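A minimal sketch of how this authenticator would typically be plugged into hadoop-auth's AuthenticatedURL; the URL below is an illustrative placeholder:

    // sketch only: negotiate SPNEGO, falling back to pseudo authentication if unsupported
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    URL url = new URL("http://httpfs-host:14000/?op=GETHOMEDIR");
    HttpURLConnection conn =
        new AuthenticatedURL(new HttpKerberosAuthenticator()).openConnection(url, token);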
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpPseudoAuthenticator.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpPseudoAuthenticator.java
new file mode 100644
index 0000000000..9ac75a0aec
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpPseudoAuthenticator.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.http.client;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
+
+import java.io.IOException;
+
+/**
+ * A PseudoAuthenticator subclass that uses FileSystemAccess's
+ * UserGroupInformation to obtain the client user name (the UGI's login user).
+ */
+public class HttpPseudoAuthenticator extends PseudoAuthenticator {
+
+ /**
+ * Return the client user name.
+ *
+ * @return the client user name.
+ */
+ @Override
+ protected String getUserName() {
+ try {
+ return UserGroupInformation.getLoginUser().getUserName();
+ } catch (IOException ex) {
+ throw new SecurityException("Could not obtain current user, " + ex.getMessage(), ex);
+ }
+ }
+}
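For context, PseudoAuthenticator authenticates by appending the value returned by getUserName() as a user.name query parameter, so a pseudo-authenticated request looks roughly like this (host and path are illustrative):

    GET http://httpfs-host:14000/tmp/data?op=GETFILESTATUS&user.name=alice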
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/AuthFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/AuthFilter.java
new file mode 100644
index 0000000000..cc33e0af2c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/AuthFilter.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+
+import javax.servlet.FilterConfig;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Subclass of Alfredo's AuthenticationFilter that obtains its configuration
+ * from HttpFSServer's server configuration.
+ */
+public class AuthFilter extends AuthenticationFilter {
+ private static final String CONF_PREFIX = "httpfs.authentication.";
+
+ /**
+ * Returns the Alfredo configuration from HttpFSServer's configuration.
+ *
+ * It returns all HttpFSServer's configuration properties prefixed with
+ * httpfs.authentication. The httpfs.authentication
+ * prefix is removed from the returned property names.
+ *
+ * @param configPrefix parameter not used.
+ * @param filterConfig parameter not used.
+ *
+ * @return Alfredo configuration read from HttpFSServer's configuration.
+ */
+ @Override
+ protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) {
+ Properties props = new Properties();
+ Configuration conf = HttpFSServerWebApp.get().getConfig();
+
+ props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
+ for (Map.Entry<String, String> entry : conf) {
+ String name = entry.getKey();
+ if (name.startsWith(CONF_PREFIX)) {
+ String value = conf.get(name);
+ name = name.substring(CONF_PREFIX.length());
+ props.setProperty(name, value);
+ }
+ }
+ return props;
+ }
+
+
+}
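As a concrete illustration, an httpfs-site.xml entry such as the following (the property name comes from this patch's httpfs-default.xml) reaches Alfredo as the property type, with the httpfs.authentication. prefix stripped by the loop above:

    <property>
      <name>httpfs.authentication.type</name>
      <value>simple</value>
    </property>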
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
new file mode 100644
index 0000000000..b0d8a944da
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -0,0 +1,717 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.GlobFilter;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.lib.service.FileSystemAccess;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * FileSystem operation executors used by {@link HttpFSServer}.
+ */
+public class FSOperations {
+
+ /**
+ * Converts a Unix permission octal or symbolic representation
+ * (e.g. 655 or -rwxr--r--) into a FileSystemAccess permission.
+ *
+ * @param str Unix permission octal or symbolic representation.
+ *
+ * @return the FileSystemAccess permission. If the given string was
+ * 'default', it returns FsPermission.getDefault().
+ */
+ private static FsPermission getPermission(String str) {
+ FsPermission permission;
+ if (str.equals(HttpFSFileSystem.DEFAULT_PERMISSION)) {
+ permission = FsPermission.getDefault();
+ } else if (str.length() == 3) {
+ permission = new FsPermission(Short.parseShort(str, 8));
+ } else {
+ permission = FsPermission.valueOf(str);
+ }
+ return permission;
+ }
+
+ @SuppressWarnings({"unchecked", "deprecation"})
+ private static Map fileStatusToJSONRaw(FileStatus status, boolean emptyPathSuffix) {
+ Map json = new LinkedHashMap();
+ json.put(HttpFSFileSystem.PATH_SUFFIX_JSON, (emptyPathSuffix) ? "" : status.getPath().getName());
+ json.put(HttpFSFileSystem.TYPE_JSON, HttpFSFileSystem.FILE_TYPE.getType(status).toString());
+ json.put(HttpFSFileSystem.LENGTH_JSON, status.getLen());
+ json.put(HttpFSFileSystem.OWNER_JSON, status.getOwner());
+ json.put(HttpFSFileSystem.GROUP_JSON, status.getGroup());
+ json.put(HttpFSFileSystem.PERMISSION_JSON, HttpFSFileSystem.permissionToString(status.getPermission()));
+ json.put(HttpFSFileSystem.ACCESS_TIME_JSON, status.getAccessTime());
+ json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON, status.getModificationTime());
+ json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, status.getBlockSize());
+ json.put(HttpFSFileSystem.REPLICATION_JSON, status.getReplication());
+ return json;
+ }
+
+ /**
+ * Converts a FileSystemAccess FileStatus object into a JSON
+ * object.
+ *
+ * @param status FileSystemAccess file status.
+ *
+ * @return The JSON representation of the file status.
+ */
+ @SuppressWarnings({"unchecked", "deprecation"})
+ private static Map fileStatusToJSON(FileStatus status) {
+ Map json = new LinkedHashMap();
+ json.put(HttpFSFileSystem.FILE_STATUS_JSON, fileStatusToJSONRaw(status, true));
+ return json;
+ }
+
+ /**
+ * Converts a FileChecksum object into a JSON object.
+ *
+ * @param checksum file checksum.
+ *
+ * @return The JSON representation of the file checksum.
+ */
+ @SuppressWarnings({"unchecked"})
+ private static Map fileChecksumToJSON(FileChecksum checksum) {
+ Map json = new LinkedHashMap();
+ json.put(HttpFSFileSystem.CHECKSUM_ALGORITHM_JSON, checksum.getAlgorithmName());
+ json.put(HttpFSFileSystem.CHECKSUM_BYTES_JSON,
+ org.apache.hadoop.util.StringUtils.byteToHexString(checksum.getBytes()));
+ json.put(HttpFSFileSystem.CHECKSUM_LENGTH_JSON, checksum.getLength());
+ Map response = new LinkedHashMap();
+ response.put(HttpFSFileSystem.FILE_CHECKSUM_JSON, json);
+ return response;
+ }
+
+ /**
+ * Converts a ContentSummary object into a JSON object.
+ *
+ * @param contentSummary the content summary
+ *
+ * @return The JSON representation of the content summary.
+ */
+ @SuppressWarnings({"unchecked"})
+ private static Map contentSummaryToJSON(ContentSummary contentSummary) {
+ Map json = new LinkedHashMap();
+ json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON, contentSummary.getDirectoryCount());
+ json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON, contentSummary.getFileCount());
+ json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON, contentSummary.getLength());
+ json.put(HttpFSFileSystem.CONTENT_SUMMARY_QUOTA_JSON, contentSummary.getQuota());
+ json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_CONSUMED_JSON, contentSummary.getSpaceConsumed());
+ json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_QUOTA_JSON, contentSummary.getSpaceQuota());
+ Map response = new LinkedHashMap();
+ response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
+ return response;
+ }
+
+ /**
+ * Converts a FileSystemAccess FileStatus array into a JSON array
+ * object.
+ *
+ * @param status FileSystemAccess file status array.
+ *
+ * @return The JSON representation of the file status array.
+ */
+ @SuppressWarnings("unchecked")
+ private static Map fileStatusToJSON(FileStatus[] status) {
+ JSONArray json = new JSONArray();
+ if (status != null) {
+ for (FileStatus s : status) {
+ json.add(fileStatusToJSONRaw(s, false));
+ }
+ }
+ Map response = new LinkedHashMap();
+ Map temp = new LinkedHashMap();
+ temp.put(HttpFSFileSystem.FILE_STATUS_JSON, json);
+ response.put(HttpFSFileSystem.FILE_STATUSES_JSON, temp);
+ return response;
+ }
+
+ /**
+ * Converts an object into a JSON Map with one key-value entry.
+ *
+ * It assumes the given value is either a JSON primitive type or a
+ * JsonAware instance.
+ *
+ * @param name name for the key of the entry.
+ * @param value for the value of the entry.
+ *
+ * @return the JSON representation of the key-value pair.
+ */
+ @SuppressWarnings("unchecked")
+ private static JSONObject toJSON(String name, Object value) {
+ JSONObject json = new JSONObject();
+ json.put(name, value);
+ return json;
+ }
+
+ /**
+ * Executor that performs an append FileSystemAccess file system operation.
+ */
+ public static class FSAppend implements FileSystemAccess.FileSystemExecutor<Void> {
+ private InputStream is;
+ private Path path;
+
+ /**
+ * Creates an Append executor.
+ *
+ * @param is input stream to append.
+ * @param path path of the file to append.
+ */
+ public FSAppend(InputStream is, String path) {
+ this.is = is;
+ this.path = new Path(path);
+ }
+
+ /**
+ * Executes the filesystem operation.
+ *
+ * @param fs filesystem instance to use.
+ *
+ * @return void.
+ *
+ * @throws IOException thrown if an IO error occurred.
+ */
+ @Override
+ public Void execute(FileSystem fs) throws IOException {
+ int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
+ OutputStream os = fs.append(path, bufferSize);
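+ // copyBytes with close=true closes both streams; the explicit close below is redundant but harmless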
+ IOUtils.copyBytes(is, os, bufferSize, true);
+ os.close();
+ return null;
+ }
+
+ }
+
+ /**
+ * Executor that performs a content-summary FileSystemAccess file system operation.
+ */
+ public static class FSContentSummary implements FileSystemAccess.FileSystemExecutor<Map>