diff --git a/.gitignore b/.gitignore index f223254b16..93e755ce9c 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ .settings target hadoop-hdfs-project/hadoop-hdfs/downloads +hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml new file mode 100644 index 0000000000..79bad49122 --- /dev/null +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml @@ -0,0 +1,60 @@ + + + + hadoop-httpfs-dist + + dir + + false + + + + ${basedir}/src/main/conf + /etc/hadoop + + * + + + + + ${basedir} + / + + *.txt + + + + ${basedir}/src/main/sbin + /sbin + + * + + 0755 + + + ${basedir}/src/main/libexec + /libexec + + * + + 0755 + + + + ${project.build.directory}/site + /share/doc/hadoop/httpfs + + + diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index daa82b4a8e..ab8bf4819d 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -264,6 +264,11 @@ hadoop-auth compile + + com.googlecode.json-simple + json-simple + compile + diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index 3c4bfc46a6..ed6b729a93 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -112,6 +112,7 @@ run cd hadoop-${project.version} run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* . run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* . + run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* . run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* . COMMON_LIB=share/hadoop/common/lib MODULES=../../../../modules diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/README.txt b/hadoop-hdfs-project/hadoop-hdfs-httpfs/README.txt new file mode 100644 index 0000000000..c2f4d64e20 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/README.txt @@ -0,0 +1,17 @@ +----------------------------------------------------------------------------- +HttpFS - Hadoop HDFS over HTTP + +HttpFS is a server that provides a REST HTTP gateway to HDFS with full +filesystem read & write capabilities. + +HttpFS can be used to transfer data between clusters running different +versions of Hadoop (overcoming RPC versioning issues), for example using +Hadoop DistCp. + +HttpFS can be used to access data in HDFS on a cluster behind a firewall +(the HttpFS server acts as a gateway and is the only system that is allowed +to cross the firewall into the cluster). + +HttpFS can be used to access data in HDFS using HTTP utilities (such as curl +and wget) and HTTP libraries from languages other than Java (such as Perl).
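As a purely illustrative sketch (not part of this patch), the Java fragment below shows how a client could reach an HttpFS server through the standard Hadoop FileSystem API using the HttpFSFileSystem class added by this change. The host name, the example path, and the fs.webhdfs.impl scheme mapping are assumptions; port 14000 matches the default HTTPFS_HTTP_PORT in httpfs-env.sh further down. Under the covers the client issues requests against URLs of the form http://host:port/webhdfs/v1/<path>?op=..., per the SERVICE_PREFIX and OP_PARAM constants defined in HttpFSFileSystem.

  // Sketch only: host, path and the scheme-to-class mapping are assumptions,
  // not something this patch configures by itself.
  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class HttpFSClientExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Assumed wiring: bind the webhdfs:// scheme to the HttpFS client file system.
      conf.set("fs.webhdfs.impl", "org.apache.hadoop.fs.http.client.HttpFSFileSystem");
      FileSystem fs = FileSystem.get(new URI("webhdfs://httpfs-host.example.com:14000"), conf);
      // List a directory through the HttpFS REST gateway (LISTSTATUS operation).
      for (FileStatus status : fs.listStatus(new Path("/user/foo"))) {
        System.out.println(status.getPath() + "\t" + status.getLen());
      }
      fs.close();
    }
  }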
+----------------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml new file mode 100644 index 0000000000..8ae1563541 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -0,0 +1,530 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-project + 0.24.0-SNAPSHOT + ../../hadoop-project + + org.apache.hadoop + hadoop-hdfs-httpfs + 0.24.0-SNAPSHOT + war + + Apache Hadoop HttpFS + Apache Hadoop HttpFS + + + 6.0.32 + REPO NOT AVAIL + REPO NOT AVAIL + REVISION NOT AVAIL + yyyy-MM-dd'T'HH:mm:ssZ + ${maven.build.timestamp} + + ${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/httpfs/tomcat + + + + + + junit + junit + test + + + org.mockito + mockito-all + test + + + com.sun.jersey + jersey-server + compile + + + javax.servlet + servlet-api + provided + + + javax.servlet.jsp + jsp-api + provided + + + commons-codec + commons-codec + compile + + + org.jdom + jdom + compile + + + com.googlecode.json-simple + json-simple + compile + + + org.apache.hadoop + hadoop-common + compile + + + javax.xml.stream + stax-api + + + commons-cli + commons-cli + + + commons-httpclient + commons-httpclient + + + tomcat + jasper-compiler + + + tomcat + jasper-runtime + + + javax.servlet + servlet-api + + + javax.servlet + jsp-api + + + javax.servlet.jsp + jsp-api + + + org.mortbay.jetty + jetty + + + org.mortbay.jetty + jetty-util + + + org.mortbay.jetty + jsp-api-2.1 + + + org.mortbay.jetty + servlet-api-2.5 + + + net.java.dev.jets3t + jets3t + + + hsqldb + hsqldb + + + org.eclipse.jdt + core + + + commons-el + commons-el + + + + + org.apache.hadoop + hadoop-hdfs + compile + + + commons-cli + commons-cli + + + commons-httpclient + commons-httpclient + + + tomcat + jasper-compiler + + + tomcat + jasper-runtime + + + javax.servlet + servlet-api + + + javax.servlet + jsp-api + + + javax.servlet.jsp + jsp-api + + + org.mortbay.jetty + jetty + + + org.mortbay.jetty + jetty-util + + + org.mortbay.jetty + jsp-api-2.1 + + + org.mortbay.jetty + servlet-api-2.5 + + + net.java.dev.jets3t + jets3t + + + hsqldb + hsqldb + + + org.eclipse.jdt + core + + + commons-el + commons-el + + + + + org.apache.hadoop + hadoop-common + test + test-jar + + + org.apache.hadoop + hadoop-hdfs + test + test-jar + + + log4j + log4j + compile + + + org.slf4j + slf4j-api + compile + + + org.slf4j + slf4j-log4j12 + compile + + + + + + + src/main/resources + true + + httpfs.properties + + + + src/main/resources + false + + httpfs.properties + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + 1 + + + + org.apache.maven.plugins + maven-javadoc-plugin + + + + javadoc + + site + + true + true + false + ${maven.compile.source} + ${maven.compile.encoding} + + + HttpFs API + * + + + + + + + + org.apache.maven.plugins + maven-project-info-reports-plugin + + + + false + + + dependencies + + site + + + + + org.apache.rat + apache-rat-plugin + + + + + + + org.apache.maven.plugins + maven-antrun-plugin + + + create-web-xmls + generate-test-resources + + run + + + + + + + + + + + + + site + site + + run + + + + + + + + + + + org.apache.maven.plugins + maven-war-plugin + + + default-war + package + + war + + + webhdfs + ${project.build.directory}/webhdfs + + + + + + + + + + docs + + false + + + + + org.apache.maven.plugins + maven-site-plugin + + + docs + prepare-package + + site + + + + + + + + + + dist + + false + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + 
org.apache.hadoop + hadoop-assemblies + ${project.version} + + + + + dist + package + + single + + + ${project.artifactId}-${project.version} + false + false + + hadoop-httpfs-dist + + + + + + + + org.apache.maven.plugins + maven-antrun-plugin + + + dist + + run + + package + + + + + + + + + + + which cygpath 2> /dev/null + if [ $? = 1 ]; then + BUILD_DIR="${project.build.directory}" + else + BUILD_DIR=`cygpath --unix '${project.build.directory}'` + fi + cd $BUILD_DIR/tomcat.exp + tar xzf ${basedir}/downloads/tomcat.tar.gz + + + + + + + + + + + + + + + + + + + + + + + tar + package + + run + + + + + + + which cygpath 2> /dev/null + if [ $? = 1 ]; then + BUILD_DIR="${project.build.directory}" + else + BUILD_DIR=`cygpath --unix '${project.build.directory}'` + fi + cd $BUILD_DIR + tar czf ${project.artifactId}-${project.version}.tar.gz ${project.artifactId}-${project.version} + + + + + + + + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh new file mode 100644 index 0000000000..84c67b790a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. See accompanying LICENSE file. +# + +# Set httpfs specific environment variables here. + +# Settings for the Embedded Tomcat that runs HttpFS +# Java System properties for HttpFS should be specified in this variable +# +# export CATALINA_OPTS= + +# HttpFS logs directory +# +# export HTTPFS_LOG=${HTTPFS_HOME}/logs + +# HttpFS temporary directory +# +# export HTTPFS_TEMP=${HTTPFS_HOME}/temp + +# The HTTP port used by HttpFS +# +# export HTTPFS_HTTP_PORT=14000 + +# The Admin port used by HttpFS +# +# export HTTPFS_ADMIN_PORT=`expr ${HTTPFS_HTTP_PORT} + 1` + +# The hostname HttpFS server runs on +# +# export HTTPFS_HTTP_HOSTNAME=`hostname -f` diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-log4j.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-log4j.properties new file mode 100644 index 0000000000..284a81924c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-log4j.properties @@ -0,0 +1,35 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. See accompanying LICENSE file. 
+# + +# If the Java System property 'httpfs.log.dir' is not defined at HttpFSServer start up time +# Setup sets its value to '${httpfs.home}/logs' + +log4j.appender.httpfs=org.apache.log4j.DailyRollingFileAppender +log4j.appender.httpfs.DatePattern='.'yyyy-MM-dd +log4j.appender.httpfs.File=${httpfs.log.dir}/httpfs.log +log4j.appender.httpfs.Append=true +log4j.appender.httpfs.layout=org.apache.log4j.PatternLayout +log4j.appender.httpfs.layout.ConversionPattern=%d{ISO8601} %5p %c{1} [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n + +log4j.appender.httpfsaudit=org.apache.log4j.DailyRollingFileAppender +log4j.appender.httpfsaudit.DatePattern='.'yyyy-MM-dd +log4j.appender.httpfsaudit.File=${httpfs.log.dir}/httpfs-audit.log +log4j.appender.httpfsaudit.Append=true +log4j.appender.httpfsaudit.layout=org.apache.log4j.PatternLayout +log4j.appender.httpfsaudit.layout.ConversionPattern=%d{ISO8601} %5p [%X{hostname}][%X{user}:%X{doAs}] %X{op} %m%n + +log4j.logger.httpfsaudit=INFO, httpfsaudit + +log4j.logger.org.apache.hadoop.fs.http.server=INFO, httpfs +log4j.logger.org.apache.hadoop.lib=INFO, httpfs diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-site.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-site.xml new file mode 100644 index 0000000000..4a718e1668 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-site.xml @@ -0,0 +1,17 @@ + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java new file mode 100644 index 0000000000..520c7325fa --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -0,0 +1,863 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.http.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PositionedReadable; +import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; +import org.apache.hadoop.security.authentication.client.Authenticator; +import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; +import org.json.simple.parser.JSONParser; +import org.json.simple.parser.ParseException; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.FileNotFoundException; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.lang.reflect.Constructor; +import java.net.HttpURLConnection; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.net.URLEncoder; +import java.text.MessageFormat; +import java.util.HashMap; +import java.util.Map; + +/** + * HttpFSServer implementation of the FileSystemAccess FileSystem. + *
<p/>
+ * This implementation allows a user to access HDFS over HTTP via a HttpFSServer server. + */ +public class HttpFSFileSystem extends FileSystem { + + public static final String SERVICE_NAME = "/webhdfs"; + + public static final String SERVICE_VERSION = "/v1"; + + public static final String SERVICE_PREFIX = SERVICE_NAME + SERVICE_VERSION; + + public static final String OP_PARAM = "op"; + public static final String DO_AS_PARAM = "doas"; + public static final String OVERWRITE_PARAM = "overwrite"; + public static final String REPLICATION_PARAM = "replication"; + public static final String BLOCKSIZE_PARAM = "blocksize"; + public static final String PERMISSION_PARAM = "permission"; + public static final String DESTINATION_PARAM = "destination"; + public static final String RECURSIVE_PARAM = "recursive"; + public static final String OWNER_PARAM = "owner"; + public static final String GROUP_PARAM = "group"; + public static final String MODIFICATION_TIME_PARAM = "modificationtime"; + public static final String ACCESS_TIME_PARAM = "accesstime"; + public static final String RENEWER_PARAM = "renewer"; + + public static final String DEFAULT_PERMISSION = "default"; + + public static final String RENAME_JSON = "boolean"; + + public static final String DELETE_JSON = "boolean"; + + public static final String MKDIRS_JSON = "boolean"; + + public static final String HOME_DIR_JSON = "Path"; + + public static final String SET_REPLICATION_JSON = "boolean"; + + public static enum FILE_TYPE { + FILE, DIRECTORY, SYMLINK; + + public static FILE_TYPE getType(FileStatus fileStatus) { + if (fileStatus.isFile()) { + return FILE; + } + if (fileStatus.isDirectory()) { + return DIRECTORY; + } + if (fileStatus.isSymlink()) { + return SYMLINK; + } + throw new IllegalArgumentException("Could not determine filetype for: " + + fileStatus.getPath()); + } + } + + public static final String FILE_STATUSES_JSON = "FileStatuses"; + public static final String FILE_STATUS_JSON = "FileStatus"; + public static final String PATH_SUFFIX_JSON = "pathSuffix"; + public static final String TYPE_JSON = "type"; + public static final String LENGTH_JSON = "length"; + public static final String OWNER_JSON = "owner"; + public static final String GROUP_JSON = "group"; + public static final String PERMISSION_JSON = "permission"; + public static final String ACCESS_TIME_JSON = "accessTime"; + public static final String MODIFICATION_TIME_JSON = "modificationTime"; + public static final String BLOCK_SIZE_JSON = "blockSize"; + public static final String REPLICATION_JSON = "replication"; + + public static final String FILE_CHECKSUM_JSON = "FileChecksum"; + public static final String CHECKSUM_ALGORITHM_JSON = "algorithm"; + public static final String CHECKSUM_BYTES_JSON = "bytes"; + public static final String CHECKSUM_LENGTH_JSON = "length"; + + public static final String CONTENT_SUMMARY_JSON = "ContentSummary"; + public static final String CONTENT_SUMMARY_DIRECTORY_COUNT_JSON = "directoryCount"; + public static final String CONTENT_SUMMARY_FILE_COUNT_JSON = "fileCount"; + public static final String CONTENT_SUMMARY_LENGTH_JSON = "length"; + public static final String CONTENT_SUMMARY_QUOTA_JSON = "quota"; + public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed"; + public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota"; + + public static final String DELEGATION_TOKEN_JSON = "Token"; + public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString"; + + public static final String ERROR_JSON = 
"RemoteException"; + public static final String ERROR_EXCEPTION_JSON = "exception"; + public static final String ERROR_CLASSNAME_JSON = "javaClassName"; + public static final String ERROR_MESSAGE_JSON = "message"; + + public static final int HTTP_TEMPORARY_REDIRECT = 307; + + + /** + * Get operations. + */ + public enum GetOpValues { + OPEN, GETFILESTATUS, LISTSTATUS, GETHOMEDIR, GETCONTENTSUMMARY, GETFILECHECKSUM, + GETDELEGATIONTOKEN, GETFILEBLOCKLOCATIONS, INSTRUMENTATION + } + + /** + * Post operations. + */ + public static enum PostOpValues { + APPEND + } + + /** + * Put operations. + */ + public static enum PutOpValues { + CREATE, MKDIRS, RENAME, SETOWNER, SETPERMISSION, SETREPLICATION, SETTIMES, + RENEWDELEGATIONTOKEN, CANCELDELEGATIONTOKEN + } + + /** + * Delete operations. + */ + public static enum DeleteOpValues { + DELETE + } + + private static final String HTTP_GET = "GET"; + private static final String HTTP_PUT = "PUT"; + private static final String HTTP_POST = "POST"; + private static final String HTTP_DELETE = "DELETE"; + + private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token(); + private URI uri; + private Path workingDir; + private String doAs; + + /** + * Convenience method that creates a HttpURLConnection for the + * HttpFSServer file system operations. + *
<p/>
+ * This method performs and injects any needed authentication credentials + * via the {@link #getConnection(URL, String)} method + * + * @param method the HTTP method. + * @param params the query string parameters. + * @param path the file path + * @param makeQualified if the path should be 'makeQualified' + * + * @return a HttpURLConnection for the HttpFSServer server, + * authenticated and ready to use for the specified path and file system operation. + * + * @throws IOException thrown if an IO error occurs. + */ + private HttpURLConnection getConnection(String method, Map params, + Path path, boolean makeQualified) throws IOException { + params.put(DO_AS_PARAM, doAs); + if (makeQualified) { + path = makeQualified(path); + } + URI uri = path.toUri(); + StringBuilder sb = new StringBuilder(); + sb.append(uri.getScheme()).append("://").append(uri.getAuthority()). + append(SERVICE_PREFIX).append(uri.getPath()); + + String separator = "?"; + for (Map.Entry entry : params.entrySet()) { + sb.append(separator).append(entry.getKey()).append("="). + append(URLEncoder.encode(entry.getValue(), "UTF8")); + separator = "&"; + } + URL url = new URL(sb.toString()); + return getConnection(url, method); + } + + /** + * Convenience method that creates a HttpURLConnection for the specified URL. + *
<p/>
+ * This methods performs and injects any needed authentication credentials. + * + * @param url url to connect to. + * @param method the HTTP method. + * + * @return a HttpURLConnection for the HttpFSServer server, authenticated and ready to use for + * the specified path and file system operation. + * + * @throws IOException thrown if an IO error occurrs. + */ + private HttpURLConnection getConnection(URL url, String method) throws IOException { + Class klass = + getConf().getClass("httpfs.authenticator.class", HttpKerberosAuthenticator.class, Authenticator.class); + Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf()); + try { + HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken); + conn.setRequestMethod(method); + if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) { + conn.setDoOutput(true); + } + return conn; + } catch (Exception ex) { + throw new IOException(ex); + } + } + + /** + * Convenience method that JSON Parses the InputStream of a HttpURLConnection. + * + * @param conn the HttpURLConnection. + * + * @return the parsed JSON object. + * + * @throws IOException thrown if the InputStream could not be JSON parsed. + */ + private static Object jsonParse(HttpURLConnection conn) throws IOException { + try { + JSONParser parser = new JSONParser(); + return parser.parse(new InputStreamReader(conn.getInputStream())); + } catch (ParseException ex) { + throw new IOException("JSON parser error, " + ex.getMessage(), ex); + } + } + + /** + * Validates the status of an HttpURLConnection against an expected HTTP + * status code. If the current status code is not the expected one it throws an exception + * with a detail message using Server side error messages if available. + * + * @param conn the HttpURLConnection. + * @param expected the expected HTTP status code. + * + * @throws IOException thrown if the current status code does not match the expected one. + */ + private static void validateResponse(HttpURLConnection conn, int expected) throws IOException { + int status = conn.getResponseCode(); + if (status != expected) { + try { + JSONObject json = (JSONObject) jsonParse(conn); + json = (JSONObject) json.get(ERROR_JSON); + String message = (String) json.get(ERROR_MESSAGE_JSON); + String exception = (String) json.get(ERROR_EXCEPTION_JSON); + String className = (String) json.get(ERROR_CLASSNAME_JSON); + + try { + ClassLoader cl = HttpFSFileSystem.class.getClassLoader(); + Class klass = cl.loadClass(className); + Constructor constr = klass.getConstructor(String.class); + throw (IOException) constr.newInstance(message); + } catch (IOException ex) { + throw ex; + } catch (Exception ex) { + throw new IOException(MessageFormat.format("{0} - {1}", exception, message)); + } + } catch (IOException ex) { + if (ex.getCause() instanceof IOException) { + throw (IOException) ex.getCause(); + } + throw new IOException(MessageFormat.format("HTTP status [{0}], {1}", status, conn.getResponseMessage())); + } + } + } + + /** + * Called after a new FileSystem instance is constructed. + * + * @param name a uri whose authority section names the host, port, etc. 
for this FileSystem + * @param conf the configuration + */ + @Override + public void initialize(URI name, Configuration conf) throws IOException { + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + doAs = ugi.getUserName(); + super.initialize(name, conf); + try { + uri = new URI(name.getScheme() + "://" + name.getHost() + ":" + name.getPort()); + } catch (URISyntaxException ex) { + throw new IOException(ex); + } + } + + /** + * Returns a URI whose scheme and authority identify this FileSystem. + * + * @return the URI whose scheme and authority identify this FileSystem. + */ + @Override + public URI getUri() { + return uri; + } + + /** + * HttpFSServer subclass of the FSDataInputStream. + *
<p/>
+ * This implementation does not support the + * PositionReadable and Seekable methods. + */ + private static class HttpFSDataInputStream extends FilterInputStream implements Seekable, PositionedReadable { + + protected HttpFSDataInputStream(InputStream in, int bufferSize) { + super(new BufferedInputStream(in, bufferSize)); + } + + @Override + public int read(long position, byte[] buffer, int offset, int length) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void readFully(long position, byte[] buffer) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void seek(long pos) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public long getPos() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + throw new UnsupportedOperationException(); + } + } + + /** + * Opens an FSDataInputStream at the indicated Path. + *
<p/>
+ * IMPORTANT: the returned stream does not support the + * PositionedReadable and Seekable methods. + * + * @param f the file name to open + * @param bufferSize the size of the buffer to be used. + */ + @Override + public FSDataInputStream open(Path f, int bufferSize) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, GetOpValues.OPEN.toString()); + HttpURLConnection conn = getConnection(HTTP_GET, params, f, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + return new FSDataInputStream(new HttpFSDataInputStream(conn.getInputStream(), bufferSize)); + } + + /** + * HttpFSServer subclass of the FSDataOutputStream. + *
<p/>
+ * This implementation closes the underlying HTTP connection validating the Http connection status + * at closing time. + */ + private static class HttpFSDataOutputStream extends FSDataOutputStream { + private HttpURLConnection conn; + private int closeStatus; + + public HttpFSDataOutputStream(HttpURLConnection conn, OutputStream out, int closeStatus, Statistics stats) + throws IOException { + super(out, stats); + this.conn = conn; + this.closeStatus = closeStatus; + } + + @Override + public void close() throws IOException { + try { + super.close(); + } finally { + validateResponse(conn, closeStatus); + } + } + + } + + /** + * Converts a FsPermission to a Unix octal representation. + * + * @param p the permission. + * + * @return the Unix string symbolic reprentation. + */ + public static String permissionToString(FsPermission p) { + return (p == null) ? DEFAULT_PERMISSION : Integer.toString(p.toShort(), 8); + } + + /* + * Common handling for uploading data for create and append operations. + */ + private FSDataOutputStream uploadData(String method, Path f, Map params, + int bufferSize, int expectedStatus) throws IOException { + HttpURLConnection conn = getConnection(method, params, f, true); + conn.setInstanceFollowRedirects(false); + boolean exceptionAlreadyHandled = false; + try { + if (conn.getResponseCode() == HTTP_TEMPORARY_REDIRECT) { + exceptionAlreadyHandled = true; + String location = conn.getHeaderField("Location"); + if (location != null) { + conn = getConnection(new URL(location), method); + conn.setRequestProperty("Content-Type", "application/octet-stream"); + try { + OutputStream os = new BufferedOutputStream(conn.getOutputStream(), bufferSize); + return new HttpFSDataOutputStream(conn, os, expectedStatus, statistics); + } catch (IOException ex) { + validateResponse(conn, expectedStatus); + throw ex; + } + } else { + validateResponse(conn, HTTP_TEMPORARY_REDIRECT); + throw new IOException("Missing HTTP 'Location' header for [" + conn.getURL() + "]"); + } + } else { + throw new IOException( + MessageFormat.format("Expected HTTP status was [307], received [{0}]", + conn.getResponseCode())); + } + } catch (IOException ex) { + if (exceptionAlreadyHandled) { + throw ex; + } else { + validateResponse(conn, HTTP_TEMPORARY_REDIRECT); + throw ex; + } + } + } + + + /** + * Opens an FSDataOutputStream at the indicated Path with write-progress + * reporting. + *
<p/>
+ * IMPORTANT: The Progressable parameter is not used. + * + * @param f the file name to open. + * @param permission file permission. + * @param overwrite if a file with this name already exists, then if true, + * the file will be overwritten, and if false an error will be thrown. + * @param bufferSize the size of the buffer to be used. + * @param replication required block replication for the file. + * @param blockSize block size. + * @param progress progressable. + * + * @throws IOException + * @see #setPermission(Path, FsPermission) + */ + @Override + public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, + short replication, long blockSize, Progressable progress) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, PutOpValues.CREATE.toString()); + params.put(OVERWRITE_PARAM, Boolean.toString(overwrite)); + params.put(REPLICATION_PARAM, Short.toString(replication)); + params.put(BLOCKSIZE_PARAM, Long.toString(blockSize)); + params.put(PERMISSION_PARAM, permissionToString(permission)); + return uploadData(HTTP_PUT, f, params, bufferSize, HttpURLConnection.HTTP_CREATED); + } + + + /** + * Append to an existing file (optional operation). + *
<p/>
+ * IMPORTANT: The Progressable parameter is not used. + * + * @param f the existing file to be appended. + * @param bufferSize the size of the buffer to be used. + * @param progress for reporting progress if it is not null. + * + * @throws IOException + */ + @Override + public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, PostOpValues.APPEND.toString()); + return uploadData(HTTP_POST, f, params, bufferSize, HttpURLConnection.HTTP_OK); + } + + /** + * Renames Path src to Path dst. Can take place on local fs + * or remote DFS. + */ + @Override + public boolean rename(Path src, Path dst) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, PutOpValues.RENAME.toString()); + params.put(DESTINATION_PARAM, dst.toString()); + HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) jsonParse(conn); + return (Boolean) json.get(RENAME_JSON); + } + + /** + * Delete a file. + * + * @deprecated Use delete(Path, boolean) instead + */ + @SuppressWarnings({"deprecation"}) + @Deprecated + @Override + public boolean delete(Path f) throws IOException { + return delete(f, false); + } + + /** + * Delete a file. + * + * @param f the path to delete. + * @param recursive if path is a directory and set to + * true, the directory is deleted else throws an exception. In + * case of a file the recursive can be set to either true or false. + * + * @return true if delete is successful else false. + * + * @throws IOException + */ + @Override + public boolean delete(Path f, boolean recursive) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, DeleteOpValues.DELETE.toString()); + params.put(RECURSIVE_PARAM, Boolean.toString(recursive)); + HttpURLConnection conn = getConnection(HTTP_DELETE, params, f, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) jsonParse(conn); + return (Boolean) json.get(DELETE_JSON); + } + + /** + * List the statuses of the files/directories in the given path if the path is + * a directory. + * + * @param f given path + * + * @return the statuses of the files/directories in the given patch + * + * @throws IOException + */ + @Override + public FileStatus[] listStatus(Path f) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, GetOpValues.LISTSTATUS.toString()); + HttpURLConnection conn = getConnection(HTTP_GET, params, f, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) jsonParse(conn); + json = (JSONObject) json.get(FILE_STATUSES_JSON); + JSONArray jsonArray = (JSONArray) json.get(FILE_STATUS_JSON); + FileStatus[] array = new FileStatus[jsonArray.size()]; + f = makeQualified(f); + for (int i = 0; i < jsonArray.size(); i++) { + array[i] = createFileStatus(f, (JSONObject) jsonArray.get(i)); + } + return array; + } + + /** + * Set the current working directory for the given file system. All relative + * paths will be resolved relative to it. + * + * @param newDir new directory. 
+ */ + @Override + public void setWorkingDirectory(Path newDir) { + workingDir = newDir; + } + + /** + * Get the current working directory for the given file system + * + * @return the directory pathname + */ + @Override + public Path getWorkingDirectory() { + if (workingDir == null) { + workingDir = getHomeDirectory(); + } + return workingDir; + } + + /** + * Make the given file and all non-existent parents into + * directories. Has the semantics of Unix 'mkdir -p'. + * Existence of the directory hierarchy is not an error. + */ + @Override + public boolean mkdirs(Path f, FsPermission permission) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, PutOpValues.MKDIRS.toString()); + params.put(PERMISSION_PARAM, permissionToString(permission)); + HttpURLConnection conn = getConnection(HTTP_PUT, params, f, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) jsonParse(conn); + return (Boolean) json.get(MKDIRS_JSON); + } + + /** + * Return a file status object that represents the path. + * + * @param f The path we want information from + * + * @return a FileStatus object + * + * @throws FileNotFoundException when the path does not exist; + * IOException see specific implementation + */ + @Override + public FileStatus getFileStatus(Path f) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, GetOpValues.GETFILESTATUS.toString()); + HttpURLConnection conn = getConnection(HTTP_GET, params, f, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) jsonParse(conn); + json = (JSONObject) json.get(FILE_STATUS_JSON); + f = makeQualified(f); + return createFileStatus(f, json); + } + + /** + * Return the current user's home directory in this filesystem. + * The default implementation returns "/user/$USER/". + */ + @Override + public Path getHomeDirectory() { + Map params = new HashMap(); + params.put(OP_PARAM, GetOpValues.GETHOMEDIR.toString()); + try { + HttpURLConnection conn = getConnection(HTTP_GET, params, new Path(getUri().toString(), "/"), false); + validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) jsonParse(conn); + return new Path((String) json.get(HOME_DIR_JSON)); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + + /** + * Set owner of a path (i.e. a file or a directory). + * The parameters username and groupname cannot both be null. + * + * @param p The path + * @param username If it is null, the original username remains unchanged. + * @param groupname If it is null, the original groupname remains unchanged. + */ + @Override + public void setOwner(Path p, String username, String groupname) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, PutOpValues.SETOWNER.toString()); + params.put(OWNER_PARAM, username); + params.put(GROUP_PARAM, groupname); + HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + } + + /** + * Set permission of a path. + * + * @param p path. + * @param permission permission. 
+ */ + @Override + public void setPermission(Path p, FsPermission permission) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, PutOpValues.SETPERMISSION.toString()); + params.put(PERMISSION_PARAM, permissionToString(permission)); + HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + } + + /** + * Set access time of a file + * + * @param p The path + * @param mtime Set the modification time of this file. + * The number of milliseconds since Jan 1, 1970. + * A value of -1 means that this call should not set modification time. + * @param atime Set the access time of this file. + * The number of milliseconds since Jan 1, 1970. + * A value of -1 means that this call should not set access time. + */ + @Override + public void setTimes(Path p, long mtime, long atime) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, PutOpValues.SETTIMES.toString()); + params.put(MODIFICATION_TIME_PARAM, Long.toString(mtime)); + params.put(ACCESS_TIME_PARAM, Long.toString(atime)); + HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + } + + /** + * Set replication for an existing file. + * + * @param src file name + * @param replication new replication + * + * @return true if successful; + * false if file does not exist or is a directory + * + * @throws IOException + */ + @Override + public boolean setReplication(Path src, short replication) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, PutOpValues.SETREPLICATION.toString()); + params.put(REPLICATION_PARAM, Short.toString(replication)); + HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) jsonParse(conn); + return (Boolean) json.get(SET_REPLICATION_JSON); + } + + /** + * Creates a FileStatus object using a JSON file-status payload + * received from a HttpFSServer server. + * + * @param json a JSON file-status payload received from a HttpFSServer server + * + * @return the corresponding FileStatus + */ + private FileStatus createFileStatus(Path parent, JSONObject json) { + String pathSuffix = (String) json.get(PATH_SUFFIX_JSON); + Path path = (pathSuffix.equals("")) ? 
parent : new Path(parent, pathSuffix); + FILE_TYPE type = FILE_TYPE.valueOf((String) json.get(TYPE_JSON)); + long len = (Long) json.get(LENGTH_JSON); + String owner = (String) json.get(OWNER_JSON); + String group = (String) json.get(GROUP_JSON); + FsPermission permission = + new FsPermission(Short.parseShort((String) json.get(PERMISSION_JSON), 8)); + long aTime = (Long) json.get(ACCESS_TIME_JSON); + long mTime = (Long) json.get(MODIFICATION_TIME_JSON); + long blockSize = (Long) json.get(BLOCK_SIZE_JSON); + short replication = ((Long) json.get(REPLICATION_JSON)).shortValue(); + FileStatus fileStatus = null; + + switch (type) { + case FILE: + case DIRECTORY: + fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY), + replication, blockSize, mTime, aTime, + permission, owner, group, path); + break; + case SYMLINK: + Path symLink = null; + fileStatus = new FileStatus(len, false, + replication, blockSize, mTime, aTime, + permission, owner, group, symLink, + path); + } + return fileStatus; + } + + @Override + public ContentSummary getContentSummary(Path f) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, GetOpValues.GETCONTENTSUMMARY.toString()); + HttpURLConnection conn = getConnection(HTTP_GET, params, f, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON); + return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON), + (Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON), + (Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON), + (Long) json.get(CONTENT_SUMMARY_QUOTA_JSON), + (Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON), + (Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON) + ); + } + + @Override + public FileChecksum getFileChecksum(Path f) throws IOException { + Map params = new HashMap(); + params.put(OP_PARAM, GetOpValues.GETFILECHECKSUM.toString()); + HttpURLConnection conn = getConnection(HTTP_GET, params, f, true); + validateResponse(conn, HttpURLConnection.HTTP_OK); + final JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON); + return new FileChecksum() { + @Override + public String getAlgorithmName() { + return (String) json.get(CHECKSUM_ALGORITHM_JSON); + } + + @Override + public int getLength() { + return ((Long) json.get(CHECKSUM_LENGTH_JSON)).intValue(); + } + + @Override + public byte[] getBytes() { + return StringUtils.hexStringToByte((String) json.get(CHECKSUM_BYTES_JSON)); + } + + @Override + public void write(DataOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void readFields(DataInput in) throws IOException { + throw new UnsupportedOperationException(); + } + }; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpKerberosAuthenticator.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpKerberosAuthenticator.java new file mode 100644 index 0000000000..8f781bbb76 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpKerberosAuthenticator.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.http.client; + + +import org.apache.hadoop.security.authentication.client.Authenticator; +import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; + +/** + * A KerberosAuthenticator subclass that fallback to + * {@link HttpPseudoAuthenticator}. + */ +public class HttpKerberosAuthenticator extends KerberosAuthenticator { + + /** + * Returns the fallback authenticator if the server does not use + * Kerberos SPNEGO HTTP authentication. + * + * @return a {@link HttpPseudoAuthenticator} instance. + */ + @Override + protected Authenticator getFallBackAuthenticator() { + return new HttpPseudoAuthenticator(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpPseudoAuthenticator.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpPseudoAuthenticator.java new file mode 100644 index 0000000000..9ac75a0aec --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpPseudoAuthenticator.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.http.client; + +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.PseudoAuthenticator; + +import java.io.IOException; + +/** + * A PseudoAuthenticator subclass that uses FileSystemAccess's + * UserGroupInformation to obtain the client user name (the UGI's login user). + */ +public class HttpPseudoAuthenticator extends PseudoAuthenticator { + + /** + * Return the client user name. + * + * @return the client user name. 
+ */ + @Override + protected String getUserName() { + try { + return UserGroupInformation.getLoginUser().getUserName(); + } catch (IOException ex) { + throw new SecurityException("Could not obtain current user, " + ex.getMessage(), ex); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/AuthFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/AuthFilter.java new file mode 100644 index 0000000000..cc33e0af2c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/AuthFilter.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.http.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; + +import javax.servlet.FilterConfig; +import java.util.Map; +import java.util.Properties; + +/** + * Subclass of Alfredo's AuthenticationFilter that obtains its configuration + * from HttpFSServer's server configuration. + */ +public class AuthFilter extends AuthenticationFilter { + private static final String CONF_PREFIX = "httpfs.authentication."; + + /** + * Returns the Alfredo configuration from HttpFSServer's configuration. + *
<p/>
+ * It returns all HttpFSServer's configuration properties prefixed with + * httpfs.authentication. The httpfs.authentication + * prefix is removed from the returned property names. + * + * @param configPrefix parameter not used. + * @param filterConfig parameter not used. + * + * @return Alfredo configuration read from HttpFSServer's configuration. + */ + @Override + protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) { + Properties props = new Properties(); + Configuration conf = HttpFSServerWebApp.get().getConfig(); + + props.setProperty(AuthenticationFilter.COOKIE_PATH, "/"); + for (Map.Entry entry : conf) { + String name = entry.getKey(); + if (name.startsWith(CONF_PREFIX)) { + String value = conf.get(name); + name = name.substring(CONF_PREFIX.length()); + props.setProperty(name, value); + } + } + return props; + } + + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java new file mode 100644 index 0000000000..b0d8a944da --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java @@ -0,0 +1,717 @@ +package org.apache.hadoop.fs.http.server; + +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.GlobFilter; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.http.client.HttpFSFileSystem; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.lib.service.FileSystemAccess; +import org.json.simple.JSONArray; +import org.json.simple.JSONObject; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * FileSystem operation executors used by {@link HttpFSServer}. + */ +public class FSOperations { + + /** + * Converts a Unix permission octal & symbolic representation + * (i.e. 655 or -rwxr--r--) into a FileSystemAccess permission. + * + * @param str Unix permission symbolic representation. + * + * @return the FileSystemAccess permission. If the given string was + * 'default', it returns FsPermission.getDefault(). + */ + private static FsPermission getPermission(String str) { + FsPermission permission; + if (str.equals(HttpFSFileSystem.DEFAULT_PERMISSION)) { + permission = FsPermission.getDefault(); + } else if (str.length() == 3) { + permission = new FsPermission(Short.parseShort(str, 8)); + } else { + permission = FsPermission.valueOf(str); + } + return permission; + } + + @SuppressWarnings({"unchecked", "deprecation"}) + private static Map fileStatusToJSONRaw(FileStatus status, boolean emptyPathSuffix) { + Map json = new LinkedHashMap(); + json.put(HttpFSFileSystem.PATH_SUFFIX_JSON, (emptyPathSuffix) ? 
"" : status.getPath().getName()); + json.put(HttpFSFileSystem.TYPE_JSON, HttpFSFileSystem.FILE_TYPE.getType(status).toString()); + json.put(HttpFSFileSystem.LENGTH_JSON, status.getLen()); + json.put(HttpFSFileSystem.OWNER_JSON, status.getOwner()); + json.put(HttpFSFileSystem.GROUP_JSON, status.getGroup()); + json.put(HttpFSFileSystem.PERMISSION_JSON, HttpFSFileSystem.permissionToString(status.getPermission())); + json.put(HttpFSFileSystem.ACCESS_TIME_JSON, status.getAccessTime()); + json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON, status.getModificationTime()); + json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, status.getBlockSize()); + json.put(HttpFSFileSystem.REPLICATION_JSON, status.getReplication()); + return json; + } + + /** + * Converts a FileSystemAccess FileStatus object into a JSON + * object. + * + * @param status FileSystemAccess file status. + * + * @return The JSON representation of the file status. + */ + @SuppressWarnings({"unchecked", "deprecation"}) + private static Map fileStatusToJSON(FileStatus status) { + Map json = new LinkedHashMap(); + json.put(HttpFSFileSystem.FILE_STATUS_JSON, fileStatusToJSONRaw(status, true)); + return json; + } + + /** + * Converts a FileChecksum object into a JSON array + * object. + * + * @param checksum file checksum. + * + * @return The JSON representation of the file checksum. + */ + @SuppressWarnings({"unchecked"}) + private static Map fileChecksumToJSON(FileChecksum checksum) { + Map json = new LinkedHashMap(); + json.put(HttpFSFileSystem.CHECKSUM_ALGORITHM_JSON, checksum.getAlgorithmName()); + json.put(HttpFSFileSystem.CHECKSUM_BYTES_JSON, + org.apache.hadoop.util.StringUtils.byteToHexString(checksum.getBytes())); + json.put(HttpFSFileSystem.CHECKSUM_LENGTH_JSON, checksum.getLength()); + Map response = new LinkedHashMap(); + response.put(HttpFSFileSystem.FILE_CHECKSUM_JSON, json); + return response; + } + + /** + * Converts a ContentSummary object into a JSON array + * object. + * + * @param contentSummary the content summary + * + * @return The JSON representation of the content summary. + */ + @SuppressWarnings({"unchecked"}) + private static Map contentSummaryToJSON(ContentSummary contentSummary) { + Map json = new LinkedHashMap(); + json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON, contentSummary.getDirectoryCount()); + json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON, contentSummary.getFileCount()); + json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON, contentSummary.getLength()); + json.put(HttpFSFileSystem.CONTENT_SUMMARY_QUOTA_JSON, contentSummary.getQuota()); + json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_CONSUMED_JSON, contentSummary.getSpaceConsumed()); + json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_QUOTA_JSON, contentSummary.getSpaceQuota()); + Map response = new LinkedHashMap(); + response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json); + return response; + } + + /** + * Converts a FileSystemAccess FileStatus array into a JSON array + * object. + * + * @param status FileSystemAccess file status array. + * SCHEME://HOST:PORT in the file status. + * + * @return The JSON representation of the file status array. 
+ */ + @SuppressWarnings("unchecked") + private static Map fileStatusToJSON(FileStatus[] status) { + JSONArray json = new JSONArray(); + if (status != null) { + for (FileStatus s : status) { + json.add(fileStatusToJSONRaw(s, false)); + } + } + Map response = new LinkedHashMap(); + Map temp = new LinkedHashMap(); + temp.put(HttpFSFileSystem.FILE_STATUS_JSON, json); + response.put(HttpFSFileSystem.FILE_STATUSES_JSON, temp); + return response; + } + + /** + * Converts an object into a Json Map with one key-value entry. + *
<p/>
+ * It assumes the given value is either a JSON primitive type or a + * JsonAware instance. + * + * @param name name for the key of the entry. + * @param value for the value of the entry. + * + * @return the JSON representation of the key-value pair. + */ + @SuppressWarnings("unchecked") + private static JSONObject toJSON(String name, Object value) { + JSONObject json = new JSONObject(); + json.put(name, value); + return json; + } + + /** + * Executor that performs an append FileSystemAccess files system operation. + */ + public static class FSAppend implements FileSystemAccess.FileSystemExecutor { + private InputStream is; + private Path path; + + /** + * Creates an Append executor. + * + * @param is input stream to append. + * @param path path of the file to append. + */ + public FSAppend(InputStream is, String path) { + this.is = is; + this.path = new Path(path); + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return void. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public Void execute(FileSystem fs) throws IOException { + int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096); + OutputStream os = fs.append(path, bufferSize); + IOUtils.copyBytes(is, os, bufferSize, true); + os.close(); + return null; + } + + } + + /** + * Executor that performs a content-summary FileSystemAccess files system operation. + */ + public static class FSContentSummary implements FileSystemAccess.FileSystemExecutor { + private Path path; + + /** + * Creates a content-summary executor. + * + * @param path the path to retrieve the content-summary. + */ + public FSContentSummary(String path) { + this.path = new Path(path); + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return a Map object (JSON friendly) with the content-summary. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public Map execute(FileSystem fs) throws IOException { + ContentSummary contentSummary = fs.getContentSummary(path); + return contentSummaryToJSON(contentSummary); + } + + } + + /** + * Executor that performs a create FileSystemAccess files system operation. + */ + public static class FSCreate implements FileSystemAccess.FileSystemExecutor { + private InputStream is; + private Path path; + private String permission; + private boolean override; + private short replication; + private long blockSize; + + /** + * Creates a Create executor. + * + * @param is input stream to for the file to create. + * @param path path of the file to create. + * @param perm permission for the file. + * @param override if the file should be overriden if it already exist. + * @param repl the replication factor for the file. + * @param blockSize the block size for the file. + */ + public FSCreate(InputStream is, String path, String perm, boolean override, short repl, long blockSize) { + this.is = is; + this.path = new Path(path); + this.permission = perm; + this.override = override; + this.replication = repl; + this.blockSize = blockSize; + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return The URI of the created file. + * + * @throws IOException thrown if an IO error occured. 
+ */ + @Override + public Void execute(FileSystem fs) throws IOException { + if (replication == -1) { + replication = (short) fs.getConf().getInt("dfs.replication", 3); + } + if (blockSize == -1) { + blockSize = fs.getConf().getInt("dfs.block.size", 67108864); + } + FsPermission fsPermission = getPermission(permission); + int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096); + OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null); + IOUtils.copyBytes(is, os, bufferSize, true); + os.close(); + return null; + } + + } + + /** + * Executor that performs a delete FileSystemAccess files system operation. + */ + public static class FSDelete implements FileSystemAccess.FileSystemExecutor { + private Path path; + private boolean recursive; + + /** + * Creates a Delete executor. + * + * @param path path to delete. + * @param recursive if the delete should be recursive or not. + */ + public FSDelete(String path, boolean recursive) { + this.path = new Path(path); + this.recursive = recursive; + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return true if the delete operation was successful, + * false otherwise. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public JSONObject execute(FileSystem fs) throws IOException { + boolean deleted = fs.delete(path, recursive); + return toJSON(HttpFSFileSystem.DELETE_JSON.toLowerCase(), deleted); + } + + } + + /** + * Executor that performs a file-checksum FileSystemAccess files system operation. + */ + public static class FSFileChecksum implements FileSystemAccess.FileSystemExecutor { + private Path path; + + /** + * Creates a file-checksum executor. + * + * @param path the path to retrieve the checksum. + */ + public FSFileChecksum(String path) { + this.path = new Path(path); + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return a Map object (JSON friendly) with the file checksum. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public Map execute(FileSystem fs) throws IOException { + FileChecksum checksum = fs.getFileChecksum(path); + return fileChecksumToJSON(checksum); + } + + } + + /** + * Executor that performs a file-status FileSystemAccess files system operation. + */ + public static class FSFileStatus implements FileSystemAccess.FileSystemExecutor { + private Path path; + + /** + * Creates a file-status executor. + * + * @param path the path to retrieve the status. + */ + public FSFileStatus(String path) { + this.path = new Path(path); + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return a Map object (JSON friendly) with the file status. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public Map execute(FileSystem fs) throws IOException { + FileStatus status = fs.getFileStatus(path); + return fileStatusToJSON(status); + } + + } + + /** + * Executor that performs a home-dir FileSystemAccess files system operation. + */ + public static class FSHomeDir implements FileSystemAccess.FileSystemExecutor { + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return a JSON object with the user home directory. + * + * @throws IOException thrown if an IO error occured. 
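The executors above all follow one pattern: capture the request parameters in the constructor, perform a single HDFS call inside execute(FileSystem), and return either nothing or a JSON-friendly value. A minimal sketch of a new executor in that style; the FSExists name, the "exists" JSON key, and the <JSONObject> type argument (the flattened hunk above does not show the generic parameters) are illustrative assumptions, not part of this patch:

package org.apache.hadoop.fs.http.server;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.json.simple.JSONObject;

import java.io.IOException;

/** Hypothetical executor that reports whether a path exists, mirroring FSDelete/FSFileStatus. */
public class FSExists implements FileSystemAccess.FileSystemExecutor<JSONObject> {
  private final Path path;

  public FSExists(String path) {
    this.path = new Path(path);
  }

  @Override
  @SuppressWarnings("unchecked")
  public JSONObject execute(FileSystem fs) throws IOException {
    JSONObject json = new JSONObject();
    json.put("exists", fs.exists(path)); // "exists" is an invented key for illustration
    return json;
  }
}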
+ */ + @Override + @SuppressWarnings("unchecked") + public JSONObject execute(FileSystem fs) throws IOException { + Path homeDir = fs.getHomeDirectory(); + JSONObject json = new JSONObject(); + json.put(HttpFSFileSystem.HOME_DIR_JSON, homeDir.toUri().getPath()); + return json; + } + + } + + /** + * Executor that performs a list-status FileSystemAccess files system operation. + */ + public static class FSListStatus implements FileSystemAccess.FileSystemExecutor, PathFilter { + private Path path; + private PathFilter filter; + + /** + * Creates a list-status executor. + * + * @param path the directory to retrieve the status of its contents. + * @param filter glob filter to use. + * + * @throws IOException thrown if the filter expression is incorrect. + */ + public FSListStatus(String path, String filter) throws IOException { + this.path = new Path(path); + this.filter = (filter == null) ? this : new GlobFilter(filter); + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return a Map with the file status of the directory + * contents. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public Map execute(FileSystem fs) throws IOException { + FileStatus[] status = fs.listStatus(path, filter); + return fileStatusToJSON(status); + } + + @Override + public boolean accept(Path path) { + return true; + } + + } + + /** + * Executor that performs a mkdirs FileSystemAccess files system operation. + */ + public static class FSMkdirs implements FileSystemAccess.FileSystemExecutor { + + private Path path; + private String permission; + + /** + * Creates a mkdirs executor. + * + * @param path directory path to create. + * @param permission permission to use. + */ + public FSMkdirs(String path, String permission) { + this.path = new Path(path); + this.permission = permission; + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return true if the mkdirs operation was successful, + * false otherwise. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public JSONObject execute(FileSystem fs) throws IOException { + FsPermission fsPermission = getPermission(permission); + boolean mkdirs = fs.mkdirs(path, fsPermission); + return toJSON(HttpFSFileSystem.MKDIRS_JSON, mkdirs); + } + + } + + /** + * Executor that performs a open FileSystemAccess files system operation. + */ + public static class FSOpen implements FileSystemAccess.FileSystemExecutor { + private Path path; + + /** + * Creates a open executor. + * + * @param path file to open. + */ + public FSOpen(String path) { + this.path = new Path(path); + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return The inputstream of the file. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public InputStream execute(FileSystem fs) throws IOException { + int bufferSize = HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096); + return fs.open(path, bufferSize); + } + + } + + /** + * Executor that performs a rename FileSystemAccess files system operation. + */ + public static class FSRename implements FileSystemAccess.FileSystemExecutor { + private Path path; + private Path toPath; + + /** + * Creates a rename executor. + * + * @param path path to rename. + * @param toPath new name. 
+ */ + public FSRename(String path, String toPath) { + this.path = new Path(path); + this.toPath = new Path(toPath); + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return true if the rename operation was successful, + * false otherwise. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public JSONObject execute(FileSystem fs) throws IOException { + boolean renamed = fs.rename(path, toPath); + return toJSON(HttpFSFileSystem.RENAME_JSON, renamed); + } + + } + + /** + * Executor that performs a set-owner FileSystemAccess files system operation. + */ + public static class FSSetOwner implements FileSystemAccess.FileSystemExecutor { + private Path path; + private String owner; + private String group; + + /** + * Creates a set-owner executor. + * + * @param path the path to set the owner. + * @param owner owner to set. + * @param group group to set. + */ + public FSSetOwner(String path, String owner, String group) { + this.path = new Path(path); + this.owner = owner; + this.group = group; + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return void. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public Void execute(FileSystem fs) throws IOException { + fs.setOwner(path, owner, group); + return null; + } + + } + + /** + * Executor that performs a set-permission FileSystemAccess files system operation. + */ + public static class FSSetPermission implements FileSystemAccess.FileSystemExecutor { + + private Path path; + private String permission; + + /** + * Creates a set-permission executor. + * + * @param path path to set the permission. + * @param permission permission to set. + */ + public FSSetPermission(String path, String permission) { + this.path = new Path(path); + this.permission = permission; + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return void. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public Void execute(FileSystem fs) throws IOException { + FsPermission fsPermission = getPermission(permission); + fs.setPermission(path, fsPermission); + return null; + } + + } + + /** + * Executor that performs a set-replication FileSystemAccess files system operation. + */ + public static class FSSetReplication implements FileSystemAccess.FileSystemExecutor { + private Path path; + private short replication; + + /** + * Creates a set-replication executor. + * + * @param path path to set the replication factor. + * @param replication replication factor to set. + */ + public FSSetReplication(String path, short replication) { + this.path = new Path(path); + this.replication = replication; + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return true if the replication value was set, + * false otherwise. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + @SuppressWarnings("unchecked") + public JSONObject execute(FileSystem fs) throws IOException { + boolean ret = fs.setReplication(path, replication); + JSONObject json = new JSONObject(); + json.put(HttpFSFileSystem.SET_REPLICATION_JSON, ret); + return json; + } + + } + + /** + * Executor that performs a set-times FileSystemAccess files system operation. 
+ */ + public static class FSSetTimes implements FileSystemAccess.FileSystemExecutor { + private Path path; + private long mTime; + private long aTime; + + /** + * Creates a set-times executor. + * + * @param path path to set the times. + * @param mTime modified time to set. + * @param aTime access time to set. + */ + public FSSetTimes(String path, long mTime, long aTime) { + this.path = new Path(path); + this.mTime = mTime; + this.aTime = aTime; + } + + /** + * Executes the filesystem operation. + * + * @param fs filesystem instance to use. + * + * @return void. + * + * @throws IOException thrown if an IO error occured. + */ + @Override + public Void execute(FileSystem fs) throws IOException { + fs.setTimes(path, mTime, aTime); + return null; + } + + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java new file mode 100644 index 0000000000..26dff496dd --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.http.server; + +import org.apache.hadoop.lib.service.FileSystemAccessException; +import org.apache.hadoop.lib.wsrs.ExceptionProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.MDC; + +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.Provider; +import java.io.FileNotFoundException; +import java.io.IOException; + +/** + * JAX-RS ExceptionMapper implementation that maps HttpFSServer's + * exceptions to HTTP status codes. + */ +@Provider +public class HttpFSExceptionProvider extends ExceptionProvider { + private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit"); + private static Logger LOG = LoggerFactory.getLogger(HttpFSExceptionProvider.class); + + /** + * Maps different exceptions thrown by HttpFSServer to HTTP status codes. + *

+ * <ul>
+ *   <li>SecurityException : HTTP UNAUTHORIZED</li>
+ *   <li>FileNotFoundException : HTTP NOT_FOUND</li>
+ *   <li>IOException : HTTP INTERNAL_SERVER_ERROR</li>
+ *   <li>UnsupportedOperationException : HTTP BAD_REQUEST</li>
+ *   <li>all other exceptions : HTTP INTERNAL_SERVER_ERROR</li>
+ * </ul>
+ * + * @param throwable exception thrown. + * + * @return mapped HTTP status code + */ + @Override + public Response toResponse(Throwable throwable) { + Response.Status status; + if (throwable instanceof FileSystemAccessException) { + throwable = throwable.getCause(); + } + if (throwable instanceof SecurityException) { + status = Response.Status.UNAUTHORIZED; + } else if (throwable instanceof FileNotFoundException) { + status = Response.Status.NOT_FOUND; + } else if (throwable instanceof IOException) { + status = Response.Status.INTERNAL_SERVER_ERROR; + } else if (throwable instanceof UnsupportedOperationException) { + status = Response.Status.BAD_REQUEST; + } else { + status = Response.Status.INTERNAL_SERVER_ERROR; + } + return createResponse(status, throwable); + } + + /** + * Logs the HTTP status code and exception in HttpFSServer's log. + * + * @param status HTTP status code. + * @param throwable exception thrown. + */ + @Override + protected void log(Response.Status status, Throwable throwable) { + String method = MDC.get("method"); + String path = MDC.get("path"); + String message = getOneLineMessage(throwable); + AUDIT_LOG.warn("FAILED [{}:{}] response [{}] {}", new Object[]{method, path, status, message}); + LOG.warn("[{}:{}] response [{}] {}", new Object[]{method, path, status, message, throwable}); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java new file mode 100644 index 0000000000..e24ebd54cf --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java @@ -0,0 +1,536 @@ +package org.apache.hadoop.fs.http.server; + +import org.apache.hadoop.fs.http.client.HttpFSFileSystem; +import org.apache.hadoop.lib.wsrs.BooleanParam; +import org.apache.hadoop.lib.wsrs.EnumParam; +import org.apache.hadoop.lib.wsrs.LongParam; +import org.apache.hadoop.lib.wsrs.ShortParam; +import org.apache.hadoop.lib.wsrs.StringParam; +import org.apache.hadoop.lib.wsrs.UserProvider; +import org.slf4j.MDC; + +import java.util.regex.Pattern; + +/** + * HttpFS HTTP Parameters used by {@link HttpFSServer}. + */ +public class HttpFSParams { + + /** + * To avoid instantiation. + */ + private HttpFSParams() { + } + + /** + * Class for access-time parameter. + */ + public static class AccessTimeParam extends LongParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = "-1"; + + /** + * Constructor. + * + * @param str parameter value. + */ + public AccessTimeParam(String str) { + super(NAME, str); + } + } + + /** + * Class for block-size parameter. + */ + public static class BlockSizeParam extends LongParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = "-1"; + + /** + * Constructor. + * + * @param str parameter value. + */ + public BlockSizeParam(String str) { + super(NAME, str); + } + } + + /** + * Class for data parameter. + */ + public static class DataParam extends BooleanParam { + + /** + * Parameter name. + */ + public static final String NAME = "data"; + + /** + * Default parameter value. + */ + public static final String DEFAULT = "false"; + + /** + * Constructor. 
+ * + * @param str parameter value. + */ + public DataParam(String str) { + super(NAME, str); + } + } + + /** + * Class for DELETE operation parameter. + */ + public static class DeleteOpParam extends EnumParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.OP_PARAM; + + /** + * Constructor. + * + * @param str parameter value. + */ + public DeleteOpParam(String str) { + super(NAME, str, HttpFSFileSystem.DeleteOpValues.class); + } + } + + /** + * Class for delete's recursive parameter. + */ + public static class DeleteRecursiveParam extends BooleanParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = "false"; + + /** + * Constructor. + * + * @param str parameter value. + */ + public DeleteRecursiveParam(String str) { + super(NAME, str); + } + } + + /** + * Class for do-as parameter. + */ + public static class DoAsParam extends StringParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.DO_AS_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = ""; + + /** + * Constructor. + * + * @param str parameter value. + */ + public DoAsParam(String str) { + super(NAME, str, UserProvider.USER_PATTERN); + } + + /** + * Delegates to parent and then adds do-as user to + * MDC context for logging purposes. + * + * @param name parameter name. + * @param str parameter value. + * + * @return parsed parameter + */ + @Override + public String parseParam(String name, String str) { + String doAs = super.parseParam(name, str); + MDC.put(NAME, (doAs != null) ? doAs : "-"); + return doAs; + } + } + + /** + * Class for filter parameter. + */ + public static class FilterParam extends StringParam { + + /** + * Parameter name. + */ + public static final String NAME = "filter"; + + /** + * Default parameter value. + */ + public static final String DEFAULT = ""; + + /** + * Constructor. + * + * @param expr parameter value. + */ + public FilterParam(String expr) { + super(NAME, expr); + } + + } + + /** + * Class for path parameter. + */ + public static class FsPathParam extends StringParam { + + /** + * Constructor. + * + * @param path parameter value. + */ + public FsPathParam(String path) { + super("path", path); + } + + /** + * Makes the path absolute adding '/' to it. + *

+ * This is required because JAX-RS resolution of paths does not add + * the root '/'. + * + * @returns absolute path. + */ + public void makeAbsolute() { + String path = value(); + path = "/" + ((path != null) ? path : ""); + setValue(path); + } + + } + + /** + * Class for GET operation parameter. + */ + public static class GetOpParam extends EnumParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.OP_PARAM; + + /** + * Constructor. + * + * @param str parameter value. + */ + public GetOpParam(String str) { + super(NAME, str, HttpFSFileSystem.GetOpValues.class); + } + } + + /** + * Class for group parameter. + */ + public static class GroupParam extends StringParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.GROUP_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = ""; + + /** + * Constructor. + * + * @param str parameter value. + */ + public GroupParam(String str) { + super(NAME, str, UserProvider.USER_PATTERN); + } + + } + + /** + * Class for len parameter. + */ + public static class LenParam extends LongParam { + + /** + * Parameter name. + */ + public static final String NAME = "len"; + + /** + * Default parameter value. + */ + public static final String DEFAULT = "-1"; + + /** + * Constructor. + * + * @param str parameter value. + */ + public LenParam(String str) { + super(NAME, str); + } + } + + /** + * Class for modified-time parameter. + */ + public static class ModifiedTimeParam extends LongParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = "-1"; + + /** + * Constructor. + * + * @param str parameter value. + */ + public ModifiedTimeParam(String str) { + super(NAME, str); + } + } + + /** + * Class for offset parameter. + */ + public static class OffsetParam extends LongParam { + + /** + * Parameter name. + */ + public static final String NAME = "offset"; + + /** + * Default parameter value. + */ + public static final String DEFAULT = "0"; + + /** + * Constructor. + * + * @param str parameter value. + */ + public OffsetParam(String str) { + super(NAME, str); + } + } + + /** + * Class for overwrite parameter. + */ + public static class OverwriteParam extends BooleanParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = "true"; + + /** + * Constructor. + * + * @param str parameter value. + */ + public OverwriteParam(String str) { + super(NAME, str); + } + } + + /** + * Class for owner parameter. + */ + public static class OwnerParam extends StringParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.OWNER_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = ""; + + /** + * Constructor. + * + * @param str parameter value. + */ + public OwnerParam(String str) { + super(NAME, str, UserProvider.USER_PATTERN); + } + + } + + /** + * Class for permission parameter. + */ + public static class PermissionParam extends StringParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = HttpFSFileSystem.DEFAULT_PERMISSION; + + + /** + * Symbolic Unix permissions regular expression pattern. 
+ */ + private static final Pattern PERMISSION_PATTERN = + Pattern.compile(DEFAULT + "|(-[-r][-w][-x][-r][-w][-x][-r][-w][-x])" + "|[0-7][0-7][0-7]"); + + /** + * Constructor. + * + * @param permission parameter value. + */ + public PermissionParam(String permission) { + super(NAME, permission.toLowerCase(), PERMISSION_PATTERN); + } + + } + + /** + * Class for POST operation parameter. + */ + public static class PostOpParam extends EnumParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.OP_PARAM; + + /** + * Constructor. + * + * @param str parameter value. + */ + public PostOpParam(String str) { + super(NAME, str, HttpFSFileSystem.PostOpValues.class); + } + } + + /** + * Class for PUT operation parameter. + */ + public static class PutOpParam extends EnumParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.OP_PARAM; + + /** + * Constructor. + * + * @param str parameter value. + */ + public PutOpParam(String str) { + super(NAME, str, HttpFSFileSystem.PutOpValues.class); + } + } + + /** + * Class for replication parameter. + */ + public static class ReplicationParam extends ShortParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = "-1"; + + /** + * Constructor. + * + * @param str parameter value. + */ + public ReplicationParam(String str) { + super(NAME, str); + } + } + + /** + * Class for to-path parameter. + */ + public static class ToPathParam extends StringParam { + + /** + * Parameter name. + */ + public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM; + + /** + * Default parameter value. + */ + public static final String DEFAULT = ""; + + /** + * Constructor. + * + * @param path parameter value. + */ + public ToPathParam(String path) { + super(NAME, path); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.java new file mode 100644 index 0000000000..7d2711d2be --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSReleaseFilter.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.http.server; + +import org.apache.hadoop.lib.service.FileSystemAccess; +import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter; + +/** + * Filter that releases FileSystemAccess filesystem instances upon HTTP request + * completion. 
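The PERMISSION_PATTERN above accepts the default marker, a symbolic mode such as -rwxr-x---, or a three-digit octal mode, and PermissionParam lower-cases its input before matching. A standalone check of that expression; it assumes HttpFSFileSystem.DEFAULT_PERMISSION resolves to the literal string "default", which this hunk does not show:

import java.util.regex.Pattern;

public class PermissionPatternCheck {
  public static void main(String[] args) {
    // Same shape as PermissionParam.PERMISSION_PATTERN, with the default marker
    // spelled out under the assumption that DEFAULT_PERMISSION is "default".
    Pattern p = Pattern.compile(
        "default" + "|(-[-r][-w][-x][-r][-w][-x][-r][-w][-x])" + "|[0-7][0-7][0-7]");

    System.out.println(p.matcher("755").matches());        // true: octal mode
    System.out.println(p.matcher("-rwxr-x---").matches()); // true: symbolic mode
    System.out.println(p.matcher("default").matches());    // true: default marker
    System.out.println(p.matcher("0755").matches());       // false: four digits are rejected
  }
}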
+ */ +public class HttpFSReleaseFilter extends FileSystemReleaseFilter { + + /** + * Returns the {@link FileSystemAccess} service to return the FileSystemAccess filesystem + * instance to. + * + * @return the FileSystemAccess service. + */ + @Override + protected FileSystemAccess getFileSystemAccess() { + return HttpFSServerWebApp.get().get(FileSystemAccess.class); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java new file mode 100644 index 0000000000..aebaed8d1d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -0,0 +1,604 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.http.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.http.client.HttpFSFileSystem; +import org.apache.hadoop.fs.http.server.HttpFSParams.AccessTimeParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.BlockSizeParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.DataParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteOpParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteRecursiveParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.DoAsParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.FilterParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.FsPathParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.GetOpParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.GroupParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.LenParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.ModifiedTimeParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.OffsetParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.OverwriteParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.OwnerParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.PermissionParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.PostOpParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.PutOpParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.ReplicationParam; +import org.apache.hadoop.fs.http.server.HttpFSParams.ToPathParam; +import org.apache.hadoop.lib.service.FileSystemAccess; +import org.apache.hadoop.lib.service.FileSystemAccessException; +import org.apache.hadoop.lib.service.Groups; +import org.apache.hadoop.lib.service.Instrumentation; +import org.apache.hadoop.lib.service.ProxyUser; +import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter; +import org.apache.hadoop.lib.servlet.HostnameFilter; 
+import org.apache.hadoop.lib.wsrs.InputStreamEntity; +import org.json.simple.JSONObject; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.MDC; + +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriBuilder; +import javax.ws.rs.core.UriInfo; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.security.AccessControlException; +import java.security.Principal; +import java.text.MessageFormat; +import java.util.List; +import java.util.Map; + +/** + * Main class of HttpFSServer server. + *

+ * The HttpFSServer class uses Jersey JAX-RS to binds HTTP requests to the + * different operations. + */ +@Path(HttpFSFileSystem.SERVICE_VERSION) +public class HttpFSServer { + private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit"); + + /** + * Special binding for '/' as it is not handled by the wildcard binding. + * + * @param user principal making the request. + * @param op GET operation, default value is {@link HttpFSFileSystem.GetOpValues#OPEN}. + * @param filter Glob filter, default value is none. Used only if the + * operation is {@link HttpFSFileSystem.GetOpValues#LISTSTATUS} + * @param doAs user being impersonated, defualt value is none. It can be used + * only if the current user is a HttpFSServer proxyuser. + * + * @return the request response + * + * @throws IOException thrown if an IO error occurred. Thrown exceptions are + * handled by {@link HttpFSExceptionProvider}. + * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown + * exceptions are handled by {@link HttpFSExceptionProvider}. + */ + @GET + @Path("/") + @Produces(MediaType.APPLICATION_JSON) + public Response root(@Context Principal user, + @QueryParam(GetOpParam.NAME) GetOpParam op, + @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter, + @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs) + throws IOException, FileSystemAccessException { + return get(user, new FsPathParam(""), op, new OffsetParam(OffsetParam.DEFAULT), + new LenParam(LenParam.DEFAULT), filter, doAs, + new OverwriteParam(OverwriteParam.DEFAULT), + new BlockSizeParam(BlockSizeParam.DEFAULT), + new PermissionParam(PermissionParam.DEFAULT), + new ReplicationParam(ReplicationParam.DEFAULT)); + } + + /** + * Resolves the effective user that will be used to request a FileSystemAccess filesystem. + *

+ * If the doAs-user is NULL or the same as the user, it returns the user. + *

+ * Otherwise it uses proxyuser rules (see {@link ProxyUser}) to determine if the + * current user can impersonate the doAs-user. + *

+ * If the current user cannot impersonate the doAs-user an + * AccessControlException will be thrown. + * + * @param user principal for whom the filesystem instance is. + * @param doAs do-as user, if any. + * + * @return the effective user. + * + * @throws IOException thrown if an IO error occurrs. + * @throws AccessControlException thrown if the current user cannot impersonate + * the doAs-user. + */ + private String getEffectiveUser(Principal user, String doAs) throws IOException { + String effectiveUser = user.getName(); + if (doAs != null && !doAs.equals(user.getName())) { + ProxyUser proxyUser = HttpFSServerWebApp.get().get(ProxyUser.class); + proxyUser.validate(user.getName(), HostnameFilter.get(), doAs); + effectiveUser = doAs; + AUDIT_LOG.info("Proxy user [{}] DoAs user [{}]", user.getName(), doAs); + } + return effectiveUser; + } + + /** + * Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem for the effective + * user. + * + * @param user principal making the request. + * @param doAs do-as user, if any. + * @param executor FileSystemExecutor to execute. + * + * @return FileSystemExecutor response + * + * @throws IOException thrown if an IO error occurrs. + * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown + * exceptions are handled by {@link HttpFSExceptionProvider}. + */ + private T fsExecute(Principal user, String doAs, FileSystemAccess.FileSystemExecutor executor) + throws IOException, FileSystemAccessException { + String hadoopUser = getEffectiveUser(user, doAs); + FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class); + Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration(); + return fsAccess.execute(hadoopUser, conf, executor); + } + + /** + * Returns a filesystem instance. The fileystem instance is wired for release at the completion of + * the current Servlet request via the {@link FileSystemReleaseFilter}. + *

+ * If a do-as user is specified, the current user must be a valid proxyuser, otherwise an + * AccessControlException will be thrown. + * + * @param user principal for whom the filesystem instance is. + * @param doAs do-as user, if any. + * + * @return a filesystem for the specified user or do-as user. + * + * @throws IOException thrown if an IO error occurred. Thrown exceptions are + * handled by {@link HttpFSExceptionProvider}. + * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown + * exceptions are handled by {@link HttpFSExceptionProvider}. + */ + private FileSystem createFileSystem(Principal user, String doAs) throws IOException, FileSystemAccessException { + String hadoopUser = getEffectiveUser(user, doAs); + FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class); + Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration(); + FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf); + FileSystemReleaseFilter.setFileSystem(fs); + return fs; + } + + /** + * Binding to handle all GET requests, supported operations are + * {@link HttpFSFileSystem.GetOpValues}. + *
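The GET binding described here dispatches on the op query parameter and returns either an octet stream (OPEN) or a JSON document. A client-side sketch of issuing such a request; the host, the port 14000, the /webhdfs/v1 prefix (the resource is mounted at HttpFSFileSystem.SERVICE_VERSION, whose value is outside this hunk) and the user.name pseudo-authentication parameter are all assumptions:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class HttpFSGetStatusExample {
  public static void main(String[] args) throws Exception {
    // GETFILESTATUS on /tmp, authenticating as "hdfs" via the assumed pseudo-auth parameter.
    URL url = new URL("http://httpfs.example.com:14000/webhdfs/v1/tmp"
        + "?op=GETFILESTATUS&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");

    System.out.println("HTTP " + conn.getResponseCode());
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON produced by FSOperations.fileStatusToJSON
      }
    }
  }
}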

+ * The {@link HttpFSFileSystem.GetOpValues#INSTRUMENTATION} operation is available only + * to users that are in HttpFSServer's admin group (see {@link HttpFSServer}. It returns + * HttpFSServer instrumentation data. The specified path must be '/'. + * + * @param user principal making the request. + * @param path path for the GET request. + * @param op GET operation, default value is {@link HttpFSFileSystem.GetOpValues#OPEN}. + * @param offset of the file being fetch, used only with + * {@link HttpFSFileSystem.GetOpValues#OPEN} operations. + * @param len amounts of bytes, used only with {@link HttpFSFileSystem.GetOpValues#OPEN} + * operations. + * @param filter Glob filter, default value is none. Used only if the + * operation is {@link HttpFSFileSystem.GetOpValues#LISTSTATUS} + * @param doAs user being impersonated, defualt value is none. It can be used + * only if the current user is a HttpFSServer proxyuser. + * @param override, default is true. Used only for + * {@link HttpFSFileSystem.PutOpValues#CREATE} operations. + * @param blockSize block size to set, used only by + * {@link HttpFSFileSystem.PutOpValues#CREATE} operations. + * @param permission permission to set, used only by + * {@link HttpFSFileSystem.PutOpValues#SETPERMISSION}. + * @param replication replication factor to set, used only by + * {@link HttpFSFileSystem.PutOpValues#SETREPLICATION}. + * + * @return the request response. + * + * @throws IOException thrown if an IO error occurred. Thrown exceptions are + * handled by {@link HttpFSExceptionProvider}. + * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown + * exceptions are handled by {@link HttpFSExceptionProvider}. + */ + @GET + @Path("{path:.*}") + @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON}) + public Response get(@Context Principal user, + @PathParam("path") @DefaultValue("") FsPathParam path, + @QueryParam(GetOpParam.NAME) GetOpParam op, + @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT) OffsetParam offset, + @QueryParam(LenParam.NAME) @DefaultValue(LenParam.DEFAULT) LenParam len, + @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter, + @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs, + + //these params are only for createHandle operation acceptance purposes + @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override, + @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize, + @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT) + PermissionParam permission, + @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT) + ReplicationParam replication + ) + throws IOException, FileSystemAccessException { + Response response = null; + if (op == null) { + throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", GetOpParam.NAME)); + } else { + path.makeAbsolute(); + MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name()); + switch (op.value()) { + case OPEN: { + //Invoking the command directly using an unmanaged FileSystem that is released by the + //FileSystemReleaseFilter + FSOperations.FSOpen command = new FSOperations.FSOpen(path.value()); + FileSystem fs = createFileSystem(user, doAs.value()); + InputStream is = command.execute(fs); + AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[]{path, offset, len}); + InputStreamEntity entity = new InputStreamEntity(is, 
offset.value(), len.value()); + response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build(); + break; + } + case GETFILESTATUS: { + FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path.value()); + Map json = fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}]", path); + response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); + break; + } + case LISTSTATUS: { + FSOperations.FSListStatus command = new FSOperations.FSListStatus(path.value(), filter.value()); + Map json = fsExecute(user, doAs.value(), command); + if (filter.value() == null) { + AUDIT_LOG.info("[{}]", path); + } else { + AUDIT_LOG.info("[{}] filter [{}]", path, filter.value()); + } + response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); + break; + } + case GETHOMEDIR: { + FSOperations.FSHomeDir command = new FSOperations.FSHomeDir(); + JSONObject json = fsExecute(user, doAs.value(), command); + AUDIT_LOG.info(""); + response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); + break; + } + case INSTRUMENTATION: { + if (!path.value().equals("/")) { + throw new UnsupportedOperationException( + MessageFormat.format("Invalid path for {0}={1}, must be '/'", + GetOpParam.NAME, HttpFSFileSystem.GetOpValues.INSTRUMENTATION)); + } + Groups groups = HttpFSServerWebApp.get().get(Groups.class); + List userGroups = groups.getGroups(user.getName()); + if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) { + throw new AccessControlException("User not in HttpFSServer admin group"); + } + Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class); + Map snapshot = instrumentation.getSnapshot(); + response = Response.ok(snapshot).build(); + break; + } + case GETCONTENTSUMMARY: { + FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path.value()); + Map json = fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}]", path); + response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); + break; + } + case GETFILECHECKSUM: { + FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path.value()); + Map json = fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}]", path); + response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); + break; + } + case GETDELEGATIONTOKEN: { + response = Response.status(Response.Status.BAD_REQUEST).build(); + break; + } + case GETFILEBLOCKLOCATIONS: { + response = Response.status(Response.Status.BAD_REQUEST).build(); + break; + } + } + return response; + } + } + + /** + * Creates the URL for an upload operation (create or append). + * + * @param uriInfo uri info of the request. + * @param uploadOperation operation for the upload URL. + * + * @return the URI for uploading data. + */ + protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum uploadOperation) { + UriBuilder uriBuilder = uriInfo.getRequestUriBuilder(); + uriBuilder = uriBuilder.replaceQueryParam(PutOpParam.NAME, uploadOperation). + queryParam(DataParam.NAME, Boolean.TRUE); + return uriBuilder.build(null); + } + + /** + * Binding to handle all DELETE requests. + * + * @param user principal making the request. + * @param path path for the DELETE request. + * @param op DELETE operation, default value is {@link HttpFSFileSystem.DeleteOpValues#DELETE}. + * @param recursive indicates if the delete is recursive, default is false + * @param doAs user being impersonated, defualt value is none. 
It can be used + * only if the current user is a HttpFSServer proxyuser. + * + * @return the request response. + * + * @throws IOException thrown if an IO error occurred. Thrown exceptions are + * handled by {@link HttpFSExceptionProvider}. + * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown + * exceptions are handled by {@link HttpFSExceptionProvider}. + */ + @DELETE + @Path("{path:.*}") + @Produces(MediaType.APPLICATION_JSON) + public Response delete(@Context Principal user, + @PathParam("path") FsPathParam path, + @QueryParam(DeleteOpParam.NAME) DeleteOpParam op, + @QueryParam(DeleteRecursiveParam.NAME) @DefaultValue(DeleteRecursiveParam.DEFAULT) + DeleteRecursiveParam recursive, + @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs) + throws IOException, FileSystemAccessException { + Response response = null; + if (op == null) { + throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", DeleteOpParam.NAME)); + } + switch (op.value()) { + case DELETE: { + path.makeAbsolute(); + MDC.put(HttpFSFileSystem.OP_PARAM, "DELETE"); + AUDIT_LOG.info("[{}] recursive [{}]", path, recursive); + FSOperations.FSDelete command = new FSOperations.FSDelete(path.value(), recursive.value()); + JSONObject json = fsExecute(user, doAs.value(), command); + response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); + break; + } + } + return response; + } + + + /** + * Binding to handle all PUT requests, supported operations are + * {@link HttpFSFileSystem.PutOpValues}. + * + * @param is request input stream, used only for + * {@link HttpFSFileSystem.PostOpValues#APPEND} operations. + * @param user principal making the request. + * @param uriInfo the request uriInfo. + * @param path path for the PUT request. + * @param op PUT operation, no default value. + * @param toPath new path, used only for + * {@link HttpFSFileSystem.PutOpValues#RENAME} operations. + * {@link HttpFSFileSystem.PutOpValues#SETTIMES}. + * @param owner owner to set, used only for + * {@link HttpFSFileSystem.PutOpValues#SETOWNER} operations. + * @param group group to set, used only for + * {@link HttpFSFileSystem.PutOpValues#SETOWNER} operations. + * @param override, default is true. Used only for + * {@link HttpFSFileSystem.PutOpValues#CREATE} operations. + * @param blockSize block size to set, used only by + * {@link HttpFSFileSystem.PutOpValues#CREATE} operations. + * @param permission permission to set, used only by + * {@link HttpFSFileSystem.PutOpValues#SETPERMISSION}. + * @param replication replication factor to set, used only by + * {@link HttpFSFileSystem.PutOpValues#SETREPLICATION}. + * @param modifiedTime modified time, in seconds since EPOC, used only by + * {@link HttpFSFileSystem.PutOpValues#SETTIMES}. + * @param accessTime accessed time, in seconds since EPOC, used only by + * {@link HttpFSFileSystem.PutOpValues#SETTIMES}. + * @param hasData indicates if the append request is uploading data or not + * (just getting the handle). + * @param doAs user being impersonated, defualt value is none. It can be used + * only if the current user is a HttpFSServer proxyuser. + * + * @return the request response. + * + * @throws IOException thrown if an IO error occurred. Thrown exceptions are + * handled by {@link HttpFSExceptionProvider}. + * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown + * exceptions are handled by {@link HttpFSExceptionProvider}. 
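For CREATE (and, on the POST side, APPEND) a request that carries no data parameter is answered with a temporary redirect whose Location adds data=true, so a client writes a file in two steps against the handlers below. A sketch of that handshake; the host, port and path prefix are the same assumptions as in the earlier GET example:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class HttpFSTwoStepCreate {
  public static void main(String[] args) throws Exception {
    // Step 1: request the upload handle; the server replies with a 307 redirect.
    URL handleUrl = new URL("http://httpfs.example.com:14000/webhdfs/v1/tmp/hello.txt"
        + "?op=CREATE&user.name=hdfs");
    HttpURLConnection handle = (HttpURLConnection) handleUrl.openConnection();
    handle.setRequestMethod("PUT");
    handle.setInstanceFollowRedirects(false); // inspect the redirect instead of following it
    String location = handle.getHeaderField("Location");
    System.out.println("redirected to: " + location);

    // Step 2: PUT the file body against the redirect location (which carries data=true).
    HttpURLConnection upload = (HttpURLConnection) new URL(location).openConnection();
    upload.setRequestMethod("PUT");
    upload.setDoOutput(true);
    upload.setRequestProperty("Content-Type", "application/octet-stream");
    try (OutputStream os = upload.getOutputStream()) {
      os.write("hello httpfs\n".getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + upload.getResponseCode()); // 201 Created on success
  }
}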
+ */ + @PUT + @Path("{path:.*}") + @Consumes({"*/*"}) + @Produces({MediaType.APPLICATION_JSON}) + public Response put(InputStream is, + @Context Principal user, + @Context UriInfo uriInfo, + @PathParam("path") FsPathParam path, + @QueryParam(PutOpParam.NAME) PutOpParam op, + @QueryParam(ToPathParam.NAME) @DefaultValue(ToPathParam.DEFAULT) ToPathParam toPath, + @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT) OwnerParam owner, + @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT) GroupParam group, + @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override, + @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize, + @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT) + PermissionParam permission, + @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT) + ReplicationParam replication, + @QueryParam(ModifiedTimeParam.NAME) @DefaultValue(ModifiedTimeParam.DEFAULT) + ModifiedTimeParam modifiedTime, + @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT) + AccessTimeParam accessTime, + @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData, + @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs) + throws IOException, FileSystemAccessException { + Response response = null; + if (op == null) { + throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PutOpParam.NAME)); + } + path.makeAbsolute(); + MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name()); + switch (op.value()) { + case CREATE: { + if (!hasData.value()) { + response = Response.temporaryRedirect( + createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PutOpValues.CREATE)).build(); + } else { + FSOperations.FSCreate + command = new FSOperations.FSCreate(is, path.value(), permission.value(), override.value(), + replication.value(), blockSize.value()); + fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}] permission [{}] override [{}] replication [{}] blockSize [{}]", + new Object[]{path, permission, override, replication, blockSize}); + response = Response.status(Response.Status.CREATED).build(); + } + break; + } + case MKDIRS: { + FSOperations.FSMkdirs command = new FSOperations.FSMkdirs(path.value(), permission.value()); + JSONObject json = fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}] permission [{}]", path, permission.value()); + response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); + break; + } + case RENAME: { + FSOperations.FSRename command = new FSOperations.FSRename(path.value(), toPath.value()); + JSONObject json = fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}] to [{}]", path, toPath); + response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); + break; + } + case SETOWNER: { + FSOperations.FSSetOwner command = new FSOperations.FSSetOwner(path.value(), owner.value(), group.value()); + fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner.value() + ":" + group.value()); + response = Response.ok().build(); + break; + } + case SETPERMISSION: { + FSOperations.FSSetPermission command = new FSOperations.FSSetPermission(path.value(), permission.value()); + fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}] to [{}]", path, permission.value()); + response = Response.ok().build(); + break; + } + case SETREPLICATION: { + FSOperations.FSSetReplication command = new 
FSOperations.FSSetReplication(path.value(), replication.value()); + JSONObject json = fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}] to [{}]", path, replication.value()); + response = Response.ok(json).build(); + break; + } + case SETTIMES: { + FSOperations.FSSetTimes + command = new FSOperations.FSSetTimes(path.value(), modifiedTime.value(), accessTime.value()); + fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}] to (M/A)[{}]", path, modifiedTime.value() + ":" + accessTime.value()); + response = Response.ok().build(); + break; + } + case RENEWDELEGATIONTOKEN: { + response = Response.status(Response.Status.BAD_REQUEST).build(); + break; + } + case CANCELDELEGATIONTOKEN: { + response = Response.status(Response.Status.BAD_REQUEST).build(); + break; + } + } + return response; + } + + /** + * Binding to handle all OPST requests, supported operations are + * {@link HttpFSFileSystem.PostOpValues}. + * + * @param is request input stream, used only for + * {@link HttpFSFileSystem.PostOpValues#APPEND} operations. + * @param user principal making the request. + * @param uriInfo the request uriInfo. + * @param path path for the POST request. + * @param op POST operation, default is {@link HttpFSFileSystem.PostOpValues#APPEND}. + * @param hasData indicates if the append request is uploading data or not (just getting the handle). + * @param doAs user being impersonated, defualt value is none. It can be used + * only if the current user is a HttpFSServer proxyuser. + * + * @return the request response. + * + * @throws IOException thrown if an IO error occurred. Thrown exceptions are + * handled by {@link HttpFSExceptionProvider}. + * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown + * exceptions are handled by {@link HttpFSExceptionProvider}. 
+ */ + @POST + @Path("{path:.*}") + @Consumes({"*/*"}) + @Produces({MediaType.APPLICATION_JSON}) + public Response post(InputStream is, + @Context Principal user, + @Context UriInfo uriInfo, + @PathParam("path") FsPathParam path, + @QueryParam(PostOpParam.NAME) PostOpParam op, + @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData, + @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs) + throws IOException, FileSystemAccessException { + Response response = null; + if (op == null) { + throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PostOpParam.NAME)); + } + path.makeAbsolute(); + MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name()); + switch (op.value()) { + case APPEND: { + if (!hasData.value()) { + response = Response.temporaryRedirect( + createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PostOpValues.APPEND)).build(); + } else { + FSOperations.FSAppend command = new FSOperations.FSAppend(is, path.value()); + fsExecute(user, doAs.value(), command); + AUDIT_LOG.info("[{}]", path); + response = Response.ok().type(MediaType.APPLICATION_JSON).build(); + } + break; + } + } + return response; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java new file mode 100644 index 0000000000..9e1609ed6b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServerWebApp.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.http.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.lib.server.ServerException; +import org.apache.hadoop.lib.service.FileSystemAccess; +import org.apache.hadoop.lib.servlet.ServerWebApp; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +/** + * Bootstrap class that manages the initialization and destruction of the + * HttpFSServer server, it is a javax.servlet.ServletContextListener + * implementation that is wired in HttpFSServer's WAR WEB-INF/web.xml. + *

+ * It provides access to the server context via the singleton {@link #get}. + *
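A short sketch of that lookup pattern, assuming the web app has already been initialized by the servlet container; it mirrors what HttpFSReleaseFilter and HttpFSServer do earlier in this patch:

import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.lib.service.FileSystemAccess;

public class ServerContextLookup {
  /** Valid only after the HttpFSServerWebApp singleton has been initialized. */
  public static void printDefaultFileSystem() {
    // Resolve the shared FileSystemAccess service through the server singleton.
    FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);

    // Read the Namenode URI from the service's default configuration.
    System.out.println("default FS: "
        + fsAccess.getDefaultConfiguration().get("fs.default.name"));
  }
}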

+ * All the configuration is loaded from configuration properties prefixed + * with httpfs.. + */ +public class HttpFSServerWebApp extends ServerWebApp { + private static final Logger LOG = LoggerFactory.getLogger(HttpFSServerWebApp.class); + + /** + * Server name and prefix for all configuration properties. + */ + public static final String NAME = "httpfs"; + + /** + * Configuration property that defines HttpFSServer admin group. + */ + public static final String CONF_ADMIN_GROUP = "admin.group"; + + private static HttpFSServerWebApp SERVER; + + private String adminGroup; + + /** + * Default constructor. + * + * @throws IOException thrown if the home/conf/log/temp directory paths + * could not be resolved. + */ + public HttpFSServerWebApp() throws IOException { + super(NAME); + } + + /** + * Constructor used for testing purposes. + */ + protected HttpFSServerWebApp(String homeDir, String configDir, String logDir, String tempDir, + Configuration config) { + super(NAME, homeDir, configDir, logDir, tempDir, config); + } + + /** + * Constructor used for testing purposes. + */ + public HttpFSServerWebApp(String homeDir, Configuration config) { + super(NAME, homeDir, config); + } + + /** + * Initializes the HttpFSServer server, loads configuration and required services. + * + * @throws ServerException thrown if HttpFSServer server could not be initialized. + */ + @Override + public void init() throws ServerException { + super.init(); + if (SERVER != null) { + throw new RuntimeException("HttpFSServer server already initialized"); + } + SERVER = this; + adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin"); + LOG.info("Connects to Namenode [{}]", + get().get(FileSystemAccess.class).getDefaultConfiguration().get("fs.default.name")); + } + + /** + * Shutdowns all running services. + */ + @Override + public void destroy() { + SERVER = null; + super.destroy(); + } + + /** + * Returns HttpFSServer server singleton, configuration and services are accessible through it. + * + * @return the HttpFSServer server singleton. + */ + public static HttpFSServerWebApp get() { + return SERVER; + } + + /** + * Returns HttpFSServer admin group. + * + * @return httpfs admin group. + */ + public String getAdminGroup() { + return adminGroup; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/RunnableCallable.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/RunnableCallable.java new file mode 100644 index 0000000000..319b414d4b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/RunnableCallable.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.lang; + +import org.apache.hadoop.lib.util.Check; + +import java.util.concurrent.Callable; + +/** + * Adapter class that allows Runnables and Callables to + * be treated as the other. + */ +public class RunnableCallable implements Callable, Runnable { + private Runnable runnable; + private Callable callable; + + /** + * Constructor that takes a runnable. + * + * @param runnable runnable. + */ + public RunnableCallable(Runnable runnable) { + this.runnable = Check.notNull(runnable, "runnable"); + } + + /** + * Constructor that takes a callable. + * + * @param callable callable. + */ + public RunnableCallable(Callable callable) { + this.callable = Check.notNull(callable, "callable"); + } + + /** + * Invokes the wrapped callable/runnable as a callable. + * + * @return void + * + * @throws Exception thrown by the wrapped callable/runnable invocation. + */ + @Override + public Void call() throws Exception { + if (runnable != null) { + runnable.run(); + } else { + callable.call(); + } + return null; + } + + /** + * Invokes the wrapped callable/runnable as a runnable. + * + * @return void + * + * @throws Exception thrown by the wrapped callable/runnable invocation. + */ + @Override + public void run() { + if (runnable != null) { + runnable.run(); + } else { + try { + callable.call(); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + } + + /** + * Returns the class name of the wrapper callable/runnable. + * + * @return the class name of the wrapper callable/runnable. + */ + public String toString() { + return (runnable != null) ? runnable.getClass().getSimpleName() : callable.getClass().getSimpleName(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java new file mode 100644 index 0000000000..1fb2fb7766 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/lang/XException.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.lang; + +import org.apache.hadoop.lib.util.Check; + +import java.text.MessageFormat; + +/** + * Generic exception that requires error codes and uses the a message + * template from the error code. + */ +public class XException extends Exception { + + /** + * Interface to define error codes. + */ + public static interface ERROR { + + /** + * Returns the template for the error. + * + * @return the template for the error, the template must be in JDK + * MessageFormat syntax (using {#} positional parameters). 
+ */ + public String getTemplate(); + + } + + private ERROR error; + + /** + * Private constructor used by the public constructors. + * + * @param error error code. + * @param message error message. + * @param cause exception cause if any. + */ + private XException(ERROR error, String message, Throwable cause) { + super(message, cause); + this.error = error; + } + + /** + * Creates an XException using another XException as cause. + *

+ * The error code and error message are extracted from the cause. + * + * @param cause exception cause. + */ + public XException(XException cause) { + this(cause.getError(), cause.getMessage(), cause); + } + + /** + * Creates an XException using the specified error code. The exception + * message is resolved using the error code template and the passed + * parameters. + * + * @param error error code for the XException. + * @param params parameters to use when creating the error message + * with the error code template. + */ + @SuppressWarnings({"ThrowableResultOfMethodCallIgnored"}) + public XException(ERROR error, Object... params) { + this(Check.notNull(error, "error"), format(error, params), getCause(params)); + } + + /** + * Returns the error code of the exception. + * + * @return the error code of the exception. + */ + public ERROR getError() { + return error; + } + + /** + * Creates a message using a error message template and arguments. + *
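A rough usage sketch (the MyError enum and its message are hypothetical, for illustration): an error-code enum implements ERROR and the exception message is resolved from its MessageFormat template.

    public enum MyError implements XException.ERROR {
      E01("File [{0}] not found");                  // hypothetical error code

      private final String template;
      MyError(String template) { this.template = template; }

      @Override
      public String getTemplate() { return template; }
    }

    // Resolves to the message "E01: File [foo.txt] not found"
    throw new XException(MyError.E01, "foo.txt");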

+ * The template must be in JDK MessageFormat syntax + * (using {#} positional parameters). + * + * @param error error code, to get the template from. + * @param args arguments to use for creating the message. + * + * @return the resolved error message. + */ + private static String format(ERROR error, Object... args) { + String template = error.getTemplate(); + if (template == null) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < args.length; i++) { + sb.append(" {").append(i).append("}"); + } + template = sb.deleteCharAt(0).toString(); + } + return error + ": " + MessageFormat.format(template, args); + } + + /** + * Returns the last parameter if it is an instance of Throwable, + * otherwise returns NULL. + * + * @param params parameters to look for a cause. + * + * @return the last parameter if it is an instance of Throwable, + * otherwise NULL. + */ + private static Throwable getCause(Object... params) { + Throwable throwable = null; + if (params != null && params.length > 0 && params[params.length - 1] instanceof Throwable) { + throwable = (Throwable) params[params.length - 1]; + } + return throwable; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java new file mode 100644 index 0000000000..f93a30321e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/BaseService.java @@ -0,0 +1,178 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.lib.util.ConfigurationUtils; + +import java.util.Map; + +/** + * Convenience class implementing the {@link Service} interface. + */ +public abstract class BaseService implements Service { + private String prefix; + private Server server; + private Configuration serviceConfig; + + /** + * Service constructor. + * + * @param prefix service prefix. + */ + public BaseService(String prefix) { + this.prefix = prefix; + } + + /** + * Initializes the service. + *

+ * It collects all service properties (properties having the + * #SERVER#.#SERVICE#. prefix). The property names are then + * trimmed from the #SERVER#.#SERVICE#. prefix. + *
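For example (property name hypothetical), for a server named httpfs and a service created with the prefix scheduler:

    // server configuration:  httpfs.scheduler.threads = 5
    // seen by the service:   getServiceConfig().get("threads")   // -> "5"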

+ * After collecting the service properties it delegates to the + * {@link #init()} method. + * + * @param server the server initializing the service, give access to the + * server context. + * + * @throws ServiceException thrown if the service could not be initialized. + */ + @Override + public final void init(Server server) throws ServiceException { + this.server = server; + String servicePrefix = getPrefixedName(""); + serviceConfig = new Configuration(false); + for (Map.Entry entry : ConfigurationUtils.resolve(server.getConfig())) { + String key = entry.getKey(); + if (key.startsWith(servicePrefix)) { + serviceConfig.set(key.substring(servicePrefix.length()), entry.getValue()); + } + } + init(); + } + + + /** + * Post initializes the service. This method is called by the + * {@link Server} after all services of the server have been initialized. + *

+ * This method does a NOP. + * + * @throws ServiceException thrown if the service could not be + * post-initialized. + */ + @Override + public void postInit() throws ServiceException { + } + + /** + * Destroys the service. This method is called once, when the + * {@link Server} owning the service is being destroyed. + *

+ * This method does a NOP. + */ + @Override + public void destroy() { + } + + /** + * Returns the service dependencies of this service. The service will be + * instantiated only if all the service dependencies are already initialized. + *

+ * This method returns an empty array (size 0) + * + * @return an empty array (size 0). + */ + @Override + public Class[] getServiceDependencies() { + return new Class[0]; + } + + /** + * Notification callback when the server changes its status. + *

+ * This method does a NOP. + * + * @param oldStatus old server status. + * @param newStatus new server status. + * + * @throws ServiceException thrown if the service could not process the status change. + */ + @Override + public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException { + } + + /** + * Returns the service prefix. + * + * @return the service prefix. + */ + protected String getPrefix() { + return prefix; + } + + /** + * Returns the server owning the service. + * + * @return the server owning the service. + */ + protected Server getServer() { + return server; + } + + /** + * Returns the full prefixed name of a service property. + * + * @param name of the property. + * + * @return prefixed name of the property. + */ + protected String getPrefixedName(String name) { + return server.getPrefixedName(prefix + "." + name); + } + + /** + * Returns the service configuration properties. Property + * names have the service prefix trimmed off. + *

+ * The service configuration properties are all properties + * with names starting with #SERVER#.#SERVICE#. + * in the server configuration. + * + * @return the service configuration properties with names + * trimmed off from their #SERVER#.#SERVICE#. + * prefix. + */ + protected Configuration getServiceConfig() { + return serviceConfig; + } + + /** + * Initializes the service. + *

+ * This method is called by {@link #init(Server)} after all service properties + * (properties prefixed with + * + * @throws ServiceException thrown if the service could not be initialized. + */ + protected abstract void init() throws ServiceException; + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java new file mode 100644 index 0000000000..2243324052 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java @@ -0,0 +1,766 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.lib.util.Check; +import org.apache.hadoop.lib.util.ConfigurationUtils; +import org.apache.log4j.LogManager; +import org.apache.log4j.PropertyConfigurator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +/** + * A Server class provides standard configuration, logging and {@link Service} + * lifecyle management. + *

+ * A Server normally has a home directory, a configuration directory, a temp + * directory and a log directory. + *

+ * The Server configuration is loaded from two overlaid files, + * #SERVER#-default.xml and #SERVER#-site.xml. The + * default file is loaded from the classpath, the site file is loaded from the + * configuration directory. + *

+ * The Server collects all configuration properties prefixed with + * #SERVER#. The property names are then trimmed from the + * #SERVER# prefix. + *
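A lookup sketch (property name hypothetical); inside the server, prefixed properties are typically read through the getPrefixedName helper defined below:

    // httpfs-site.xml:  httpfs.buffer.size = 8192   (overrides the classpath default)
    String bufferSize = getConfig().get(getPrefixedName("buffer.size"), "4096");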

+ * The Server log configuration is loaded from the + * #SERVICE#-log4j.properties file in the configuration directory. + *

+ * The lifecycle of the server is defined by the {@link Server.Status} enum. + * When a server is created its status is UNDEF; while being initialized it is + * BOOTING; once initialization completes it transitions, by default, to NORMAL. + * The #SERVER#.startup.status configuration property can be used + * to specify a different startup status (NORMAL, ADMIN or HALTED). + *
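For instance (value shown for illustration), a deployment could boot straight into administrative mode with:

    // #SERVER#-site.xml, for a server named "httpfs":
    //   httpfs.startup.status = ADMIN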

+ * Service classes are defined in the #SERVER#.services and + * #SERVER#.services.ext properties. They are loaded in order + * (services first, then services.ext). + *

+ * Before initializing the services, they are traversed and duplicate service + * interfaces are removed from the service list. The last service using a given + * interface wins (this enables a simple override mechanism). + *

+ * After the services have been resolved by interface de-duplication they are + * initialized in order. Once all services are initialized they are + * post-initialized (this enables late/conditional service bindings). + *
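A configuration sketch of the override (the custom class is hypothetical; the other two are added by this patch):

    // httpfs.services     = org.apache.hadoop.lib.service.instrumentation.InstrumentationService,
    //                       org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
    // httpfs.services.ext = com.example.CustomFileSystemAccessService
    //
    // Both FileSystemAccess implementations report the same service interface,
    // so the services.ext entry wins and replaces the default one.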

+ */ +public class Server { + private Logger log; + + /** + * Server property name that defines the service classes. + */ + public static final String CONF_SERVICES = "services"; + + /** + * Server property name that defines the service extension classes. + */ + public static final String CONF_SERVICES_EXT = "services.ext"; + + /** + * Server property name that defines server startup status. + */ + public static final String CONF_STARTUP_STATUS = "startup.status"; + + /** + * Enumeration that defines the server status. + */ + public enum Status { + UNDEF(false, false), + BOOTING(false, true), + HALTED(true, true), + ADMIN(true, true), + NORMAL(true, true), + SHUTTING_DOWN(false, true), + SHUTDOWN(false, false); + + private boolean settable; + private boolean operational; + + /** + * Status constructor. + * + * @param settable indicates if the status is settable. + * @param operational indicates if the server is operational + * when in this status. + */ + private Status(boolean settable, boolean operational) { + this.settable = settable; + this.operational = operational; + } + + /** + * Returns if this server status is operational. + * + * @return if this server status is operational. + */ + public boolean isOperational() { + return operational; + } + } + + /** + * Name of the log4j configuration file the Server will load from the + * classpath if the #SERVER#-log4j.properties is not defined + * in the server configuration directory. + */ + public static final String DEFAULT_LOG4J_PROPERTIES = "default-log4j.properties"; + + private Status status; + private String name; + private String homeDir; + private String configDir; + private String logDir; + private String tempDir; + private Configuration config; + private Map services = new LinkedHashMap(); + + /** + * Creates a server instance. + *

+ * The config, log and temp directories are all under the specified home directory. + * + * @param name server name. + * @param homeDir server home directory. + */ + public Server(String name, String homeDir) { + this(name, homeDir, null); + } + + /** + * Creates a server instance. + * + * @param name server name. + * @param homeDir server home directory. + * @param configDir config directory. + * @param logDir log directory. + * @param tempDir temp directory. + */ + public Server(String name, String homeDir, String configDir, String logDir, String tempDir) { + this(name, homeDir, configDir, logDir, tempDir, null); + } + + /** + * Creates a server instance. + *

+ * The config, log and temp directories are all under the specified home directory. + *

+ * It uses the provided configuration instead of loading it from the config dir. + * + * @param name server name. + * @param homeDir server home directory. + * @param config server configuration. + */ + public Server(String name, String homeDir, Configuration config) { + this(name, homeDir, homeDir + "/conf", homeDir + "/log", homeDir + "/temp", config); + } + + /** + * Creates a server instance. + *
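A minimal bootstrap sketch for this constructor (name and home directory are hypothetical; it assumes the home, log and temp directories exist and a myserver.properties resource is on the classpath):

    Configuration conf = new Configuration(false);
    conf.set("myserver.services", "");                  // no services, for brevity
    Server server = new Server("myserver", "/var/myserver", conf);
    server.init();                                      // conf/log/temp resolved under the home dir
    server.destroy();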

+ * It uses the provided configuration instead loading it from the config dir. + * + * @param name server name. + * @param homeDir server home directory. + * @param configDir config directory. + * @param logDir log directory. + * @param tempDir temp directory. + * @param config server configuration. + */ + public Server(String name, String homeDir, String configDir, String logDir, String tempDir, Configuration config) { + this.name = Check.notEmpty(name, "name").trim().toLowerCase(); + this.homeDir = Check.notEmpty(homeDir, "homeDir"); + this.configDir = Check.notEmpty(configDir, "configDir"); + this.logDir = Check.notEmpty(logDir, "logDir"); + this.tempDir = Check.notEmpty(tempDir, "tempDir"); + checkAbsolutePath(homeDir, "homeDir"); + checkAbsolutePath(configDir, "configDir"); + checkAbsolutePath(logDir, "logDir"); + checkAbsolutePath(tempDir, "tempDir"); + if (config != null) { + this.config = new Configuration(false); + ConfigurationUtils.copy(config, this.config); + } + status = Status.UNDEF; + } + + /** + * Validates that the specified value is an absolute path (starts with '/'). + * + * @param value value to verify it is an absolute path. + * @param name name to use in the exception if the value is not an absolute + * path. + * + * @return the value. + * + * @throws IllegalArgumentException thrown if the value is not an absolute + * path. + */ + private String checkAbsolutePath(String value, String name) { + if (!value.startsWith("/")) { + throw new IllegalArgumentException( + MessageFormat.format("[{0}] must be an absolute path [{1}]", name, value)); + } + return value; + } + + /** + * Returns the current server status. + * + * @return the current server status. + */ + public Status getStatus() { + return status; + } + + /** + * Sets a new server status. + *

+ * The status must be settable. + *
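For example (sketch), an operator hook could flip a running server into admin mode:

    server.setStatus(Server.Status.ADMIN);   // ADMIN is a settable, operational status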

+ * All services will be notified o the status change via the + * {@link Service#serverStatusChange(Status, Status)} method. If a service + * throws an exception during the notification, the server will be destroyed. + * + * @param status status to set. + * + * @throws ServerException thrown if the service has been destroy because of + * a failed notification to a service. + */ + public void setStatus(Status status) throws ServerException { + Check.notNull(status, "status"); + if (status.settable) { + if (status != this.status) { + Status oldStatus = this.status; + this.status = status; + for (Service service : services.values()) { + try { + service.serverStatusChange(oldStatus, status); + } catch (Exception ex) { + log.error("Service [{}] exception during status change to [{}] -server shutting down-, {}", + new Object[]{service.getInterface().getSimpleName(), status, ex.getMessage(), ex}); + destroy(); + throw new ServerException(ServerException.ERROR.S11, service.getInterface().getSimpleName(), + status, ex.getMessage(), ex); + } + } + } + } else { + throw new IllegalArgumentException("Status [" + status + " is not settable"); + } + } + + /** + * Verifies the server is operational. + * + * @throws IllegalStateException thrown if the server is not operational. + */ + protected void ensureOperational() { + if (!getStatus().isOperational()) { + throw new IllegalStateException("Server is not running"); + } + } + + /** + * Convenience method that returns a resource as inputstream from the + * classpath. + *

+ * It first attempts to use the Thread's context classloader and if it is not + * set it falls back to the classloader of the Server class. + * + * @param name resource to retrieve. + * + * @return inputstream with the resource, NULL if the resource does not + * exist. + */ + static InputStream getResource(String name) { + Check.notEmpty(name, "name"); + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + if (cl == null) { + cl = Server.class.getClassLoader(); + } + return cl.getResourceAsStream(name); + } + + /** + * Initializes the Server. + *

+ * The initialization steps are: + *

    + *
+ *   - It verifies the service home and temp directories exist
+ *   - Loads the Server #SERVER#-default.xml configuration file from the classpath
+ *   - Initializes log4j logging. If the #SERVER#-log4j.properties file does not exist in the config directory it loads default-log4j.properties from the classpath
+ *   - Loads the #SERVER#-site.xml file from the server config directory and merges it with the default configuration
+ *   - Loads the services
+ *   - Initializes the services
+ *   - Post-initializes the services
+ *   - Sets the server startup status
  • + * + * @throws ServerException thrown if the server could not be initialized. + */ + public void init() throws ServerException { + if (status != Status.UNDEF) { + throw new IllegalStateException("Server already initialized"); + } + status = Status.BOOTING; + verifyDir(homeDir); + verifyDir(tempDir); + Properties serverInfo = new Properties(); + try { + InputStream is = getResource(name + ".properties"); + serverInfo.load(is); + is.close(); + } catch (IOException ex) { + throw new RuntimeException("Could not load server information file: " + name + ".properties"); + } + initLog(); + log.info("++++++++++++++++++++++++++++++++++++++++++++++++++++++"); + log.info("Server [{}] starting", name); + log.info(" Built information:"); + log.info(" Version : {}", serverInfo.getProperty(name + ".version", "undef")); + log.info(" Source Repository : {}", serverInfo.getProperty(name + ".source.repository", "undef")); + log.info(" Source Revision : {}", serverInfo.getProperty(name + ".source.revision", "undef")); + log.info(" Built by : {}", serverInfo.getProperty(name + ".build.username", "undef")); + log.info(" Built timestamp : {}", serverInfo.getProperty(name + ".build.timestamp", "undef")); + log.info(" Runtime information:"); + log.info(" Home dir: {}", homeDir); + log.info(" Config dir: {}", (config == null) ? configDir : "-"); + log.info(" Log dir: {}", logDir); + log.info(" Temp dir: {}", tempDir); + initConfig(); + log.debug("Loading services"); + List list = loadServices(); + try { + log.debug("Initializing services"); + initServices(list); + log.info("Services initialized"); + } catch (ServerException ex) { + log.error("Services initialization failure, destroying initialized services"); + destroyServices(); + throw ex; + } + Status status = Status.valueOf(getConfig().get(getPrefixedName(CONF_STARTUP_STATUS), Status.NORMAL.toString())); + setStatus(status); + log.info("Server [{}] started!, status [{}]", name, status); + } + + /** + * Verifies the specified directory exists. + * + * @param dir directory to verify it exists. + * + * @throws ServerException thrown if the directory does not exist or it the + * path it is not a directory. + */ + private void verifyDir(String dir) throws ServerException { + File file = new File(dir); + if (!file.exists()) { + throw new ServerException(ServerException.ERROR.S01, dir); + } + if (!file.isDirectory()) { + throw new ServerException(ServerException.ERROR.S02, dir); + } + } + + /** + * Initializes Log4j logging. + * + * @throws ServerException thrown if Log4j could not be initialized. + */ + protected void initLog() throws ServerException { + verifyDir(logDir); + LogManager.resetConfiguration(); + File log4jFile = new File(configDir, name + "-log4j.properties"); + if (log4jFile.exists()) { + PropertyConfigurator.configureAndWatch(log4jFile.toString(), 10 * 1000); //every 10 secs + log = LoggerFactory.getLogger(Server.class); + } else { + Properties props = new Properties(); + try { + InputStream is = getResource(DEFAULT_LOG4J_PROPERTIES); + props.load(is); + } catch (IOException ex) { + throw new ServerException(ServerException.ERROR.S03, DEFAULT_LOG4J_PROPERTIES, ex.getMessage(), ex); + } + PropertyConfigurator.configure(props); + log = LoggerFactory.getLogger(Server.class); + log.warn("Log4j [{}] configuration file not found, using default configuration from classpath", log4jFile); + } + } + + /** + * Loads and inializes the server configuration. + * + * @throws ServerException thrown if the configuration could not be loaded/initialized. 
+ */ + protected void initConfig() throws ServerException { + verifyDir(configDir); + File file = new File(configDir); + Configuration defaultConf; + String defaultConfig = name + "-default.xml"; + ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + InputStream inputStream = classLoader.getResourceAsStream(defaultConfig); + if (inputStream == null) { + log.warn("Default configuration file not available in classpath [{}]", defaultConfig); + defaultConf = new Configuration(false); + } else { + try { + defaultConf = new Configuration(false); + ConfigurationUtils.load(defaultConf, inputStream); + } catch (Exception ex) { + throw new ServerException(ServerException.ERROR.S03, defaultConfig, ex.getMessage(), ex); + } + } + + if (config == null) { + Configuration siteConf; + File siteFile = new File(file, name + "-site.xml"); + if (!siteFile.exists()) { + log.warn("Site configuration file [{}] not found in config directory", siteFile); + siteConf = new Configuration(false); + } else { + if (!siteFile.isFile()) { + throw new ServerException(ServerException.ERROR.S05, siteFile.getAbsolutePath()); + } + try { + log.debug("Loading site configuration from [{}]", siteFile); + inputStream = new FileInputStream(siteFile); + siteConf = new Configuration(false); + ConfigurationUtils.load(siteConf, inputStream); + } catch (IOException ex) { + throw new ServerException(ServerException.ERROR.S06, siteFile, ex.getMessage(), ex); + } + } + + config = new Configuration(false); + ConfigurationUtils.copy(siteConf, config); + } + + ConfigurationUtils.injectDefaults(defaultConf, config); + + for (String name : System.getProperties().stringPropertyNames()) { + String value = System.getProperty(name); + if (name.startsWith(getPrefix() + ".")) { + config.set(name, value); + if (name.endsWith(".password") || name.endsWith(".secret")) { + value = "*MASKED*"; + } + log.info("System property sets {}: {}", name, value); + } + } + + log.debug("Loaded Configuration:"); + log.debug("------------------------------------------------------"); + for (Map.Entry entry : config) { + String name = entry.getKey(); + String value = config.get(entry.getKey()); + if (name.endsWith(".password") || name.endsWith(".secret")) { + value = "*MASKED*"; + } + log.debug(" {}: {}", entry.getKey(), value); + } + log.debug("------------------------------------------------------"); + } + + /** + * Loads the specified services. + * + * @param classes services classes to load. + * @param list list of loaded service in order of appearance in the + * configuration. + * + * @throws ServerException thrown if a service class could not be loaded. + */ + private void loadServices(Class[] classes, List list) throws ServerException { + for (Class klass : classes) { + try { + Service service = (Service) klass.newInstance(); + log.debug("Loading service [{}] implementation [{}]", service.getInterface(), + service.getClass()); + if (!service.getInterface().isInstance(service)) { + throw new ServerException(ServerException.ERROR.S04, klass, service.getInterface().getName()); + } + list.add(service); + } catch (ServerException ex) { + throw ex; + } catch (Exception ex) { + throw new ServerException(ServerException.ERROR.S07, klass, ex.getMessage(), ex); + } + } + } + + /** + * Loads services defined in services and + * services.ext and de-dups them. + * + * @return List of final services to initialize. + * + * @throws ServerException throw if the services could not be loaded. 
+ */ + protected List loadServices() throws ServerException { + try { + Map map = new LinkedHashMap(); + Class[] classes = getConfig().getClasses(getPrefixedName(CONF_SERVICES)); + Class[] classesExt = getConfig().getClasses(getPrefixedName(CONF_SERVICES_EXT)); + List list = new ArrayList(); + loadServices(classes, list); + loadServices(classesExt, list); + + //removing duplicate services, strategy: last one wins + for (Service service : list) { + if (map.containsKey(service.getInterface())) { + log.debug("Replacing service [{}] implementation [{}]", service.getInterface(), + service.getClass()); + } + map.put(service.getInterface(), service); + } + list = new ArrayList(); + for (Map.Entry entry : map.entrySet()) { + list.add(entry.getValue()); + } + return list; + } catch (RuntimeException ex) { + throw new ServerException(ServerException.ERROR.S08, ex.getMessage(), ex); + } + } + + /** + * Initializes the list of services. + * + * @param services services to initialized, it must be a de-dupped list of + * services. + * + * @throws ServerException thrown if the services could not be initialized. + */ + protected void initServices(List services) throws ServerException { + for (Service service : services) { + log.debug("Initializing service [{}]", service.getInterface()); + checkServiceDependencies(service); + service.init(this); + this.services.put(service.getInterface(), service); + } + for (Service service : services) { + service.postInit(); + } + } + + /** + * Checks if all service dependencies of a service are available. + * + * @param service service to check if all its dependencies are available. + * + * @throws ServerException thrown if a service dependency is missing. + */ + protected void checkServiceDependencies(Service service) throws ServerException { + if (service.getServiceDependencies() != null) { + for (Class dependency : service.getServiceDependencies()) { + if (services.get(dependency) == null) { + throw new ServerException(ServerException.ERROR.S10, service.getClass(), dependency); + } + } + } + } + + /** + * Destroys the server services. + */ + protected void destroyServices() { + List list = new ArrayList(services.values()); + Collections.reverse(list); + for (Service service : list) { + try { + log.debug("Destroying service [{}]", service.getInterface()); + service.destroy(); + } catch (Throwable ex) { + log.error("Could not destroy service [{}], {}", + new Object[]{service.getInterface(), ex.getMessage(), ex}); + } + } + log.info("Services destroyed"); + } + + /** + * Destroys the server. + *

    + * All services are destroyed in reverse order of initialization, then the + * Log4j framework is shutdown. + */ + public void destroy() { + ensureOperational(); + destroyServices(); + log.info("Server [{}] shutdown!", name); + log.info("======================================================"); + if (!Boolean.getBoolean("test.circus")) { + LogManager.shutdown(); + } + status = Status.SHUTDOWN; + } + + /** + * Returns the name of the server. + * + * @return the server name. + */ + public String getName() { + return name; + } + + /** + * Returns the server prefix for server configuration properties. + *

    + * By default it is the server name. + * + * @return the prefix for server configuration properties. + */ + public String getPrefix() { + return getName(); + } + + /** + * Returns the prefixed name of a server property. + * + * @param name of the property. + * + * @return prefixed name of the property. + */ + public String getPrefixedName(String name) { + return getPrefix() + "." + Check.notEmpty(name, "name"); + } + + /** + * Returns the server home dir. + * + * @return the server home dir. + */ + public String getHomeDir() { + return homeDir; + } + + /** + * Returns the server config dir. + * + * @return the server config dir. + */ + public String getConfigDir() { + return configDir; + } + + /** + * Returns the server log dir. + * + * @return the server log dir. + */ + public String getLogDir() { + return logDir; + } + + /** + * Returns the server temp dir. + * + * @return the server temp dir. + */ + public String getTempDir() { + return tempDir; + } + + /** + * Returns the server configuration. + * + * @return + */ + public Configuration getConfig() { + return config; + + } + + /** + * Returns the {@link Service} associated to the specified interface. + * + * @param serviceKlass service interface. + * + * @return the service implementation. + */ + @SuppressWarnings("unchecked") + public T get(Class serviceKlass) { + ensureOperational(); + Check.notNull(serviceKlass, "serviceKlass"); + return (T) services.get(serviceKlass); + } + + /** + * Adds a service programmatically. + *

    + * If a service with the same interface exists, it will be destroyed and + * removed before the given one is initialized and added. + *
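Usage sketch (the service class is hypothetical):

    // Swap in a different Scheduler implementation at runtime; any existing
    // service registered under the same interface is destroyed first.
    server.setService(MyCustomSchedulerService.class);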

    + * If an exception is thrown the server is destroyed. + * + * @param klass service class to add. + * + * @throws ServerException throw if the service could not initialized/added + * to the server. + */ + public void setService(Class klass) throws ServerException { + ensureOperational(); + Check.notNull(klass, "serviceKlass"); + if (getStatus() == Status.SHUTTING_DOWN) { + throw new IllegalStateException("Server shutting down"); + } + try { + Service newService = klass.newInstance(); + Service oldService = services.get(newService.getInterface()); + if (oldService != null) { + try { + oldService.destroy(); + } catch (Throwable ex) { + log.error("Could not destroy service [{}], {}", + new Object[]{oldService.getInterface(), ex.getMessage(), ex}); + } + } + newService.init(this); + services.put(newService.getInterface(), newService); + } catch (Exception ex) { + log.error("Could not set service [{}] programmatically -server shutting down-, {}", klass, ex); + destroy(); + throw new ServerException(ServerException.ERROR.S09, klass, ex.getMessage(), ex); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java new file mode 100644 index 0000000000..2330dcb30c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.server; + +import org.apache.hadoop.lib.lang.XException; + +/** + * Exception thrown by the {@link Server} class. + */ +public class ServerException extends XException { + + /** + * Error codes use by the {@link Server} class. + */ + public static enum ERROR implements XException.ERROR { + S01("Dir [{0}] does not exist"), + S02("[{0}] is not a directory"), + S03("Could not load file from classpath [{0}], {1}"), + S04("Service [{0}] does not implement declared interface [{1}]"), + S05("[{0}] is not a file"), + S06("Could not load file [{0}], {1}"), + S07("Could not instanciate service class [{0}], {1}"), + S08("Could not load service classes, {0}"), + S09("Could not set service [{0}] programmatically -server shutting down-, {1}"), + S10("Service [{0}] requires service [{1}]"), + S11("Service [{0}] exception during status change to [{1}] -server shutting down-, {2}"); + + private String msg; + + /** + * Constructor for the error code enum. + * + * @param msg message template. + */ + private ERROR(String msg) { + this.msg = msg; + } + + /** + * Returns the message template for the error code. + * + * @return the message template for the error code. 
+ */ + @Override + public String getTemplate() { + return msg; + } + } + + /** + * Constructor for sub-classes. + * + * @param error error code for the XException. + * @param params parameters to use when creating the error message + * with the error code template. + */ + protected ServerException(XException.ERROR error, Object... params) { + super(error, params); + } + + /** + * Creates an server exception using the specified error code. + * The exception message is resolved using the error code template + * and the passed parameters. + * + * @param error error code for the XException. + * @param params parameters to use when creating the error message + * with the error code template. + */ + public ServerException(ERROR error, Object... params) { + super(error, params); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Service.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Service.java new file mode 100644 index 0000000000..9bea5ce6db --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Service.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.server; + +/** + * Service interface for components to be managed by the {@link Server} class. + */ +public interface Service { + + /** + * Initializes the service. This method is called once, when the + * {@link Server} owning the service is being initialized. + * + * @param server the server initializing the service, give access to the + * server context. + * + * @throws ServiceException thrown if the service could not be initialized. + */ + public void init(Server server) throws ServiceException; + + /** + * Post initializes the service. This method is called by the + * {@link Server} after all services of the server have been initialized. + * + * @throws ServiceException thrown if the service could not be + * post-initialized. + */ + public void postInit() throws ServiceException; + + /** + * Destroy the services. This method is called once, when the + * {@link Server} owning the service is being destroyed. + */ + public void destroy(); + + /** + * Returns the service dependencies of this service. The service will be + * instantiated only if all the service dependencies are already initialized. + * + * @return the service dependencies. + */ + public Class[] getServiceDependencies(); + + /** + * Returns the interface implemented by this service. This interface is used + * the {@link Server} when the {@link Server#get(Class)} method is used to + * retrieve a service. + * + * @return the interface that identifies the service. 
+ */ + public Class getInterface(); + + /** + * Notification callback when the server changes its status. + * + * @param oldStatus old server status. + * @param newStatus new server status. + * + * @throws ServiceException thrown if the service could not process the status change. + */ + public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException; + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServiceException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServiceException.java new file mode 100644 index 0000000000..de8ac3ee30 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServiceException.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.server; + +import org.apache.hadoop.lib.lang.XException; + +/** + * Exception thrown by {@link Service} implementations. + */ +public class ServiceException extends ServerException { + + /** + * Creates an service exception using the specified error code. + * The exception message is resolved using the error code template + * and the passed parameters. + * + * @param error error code for the XException. + * @param params parameters to use when creating the error message + * with the error code template. + */ + public ServiceException(XException.ERROR error, Object... params) { + super(error, params); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java new file mode 100644 index 0000000000..7984761d54 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccess.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.service; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; + +import java.io.IOException; + +public interface FileSystemAccess { + + public interface FileSystemExecutor { + + public T execute(FileSystem fs) throws IOException; + } + + public T execute(String user, Configuration conf, FileSystemExecutor executor) throws + FileSystemAccessException; + + public FileSystem createFileSystem(String user, Configuration conf) throws IOException, FileSystemAccessException; + + public void releaseFileSystem(FileSystem fs) throws IOException; + + public Configuration getDefaultConfiguration(); + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java new file mode 100644 index 0000000000..8a0ba3caa0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/FileSystemAccessException.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.service; + +import org.apache.hadoop.lib.lang.XException; + +public class FileSystemAccessException extends XException { + + public enum ERROR implements XException.ERROR { + H01("Service property [{0}] not defined"), + H02("Kerberos initialization failed, {0}"), + H03("FileSystemExecutor error, {0}"), + H04("JobClientExecutor error, {0}"), + H05("[{0}] validation failed, {1}"), + H06("Property [{0}] not defined in configuration object"), + H07("[{0}] not healthy, {1}"), + H08(""), + H09("Invalid FileSystemAccess security mode [{0}]"); + + private String template; + + ERROR(String template) { + this.template = template; + } + + @Override + public String getTemplate() { + return template; + } + } + + public FileSystemAccessException(ERROR error, Object... params) { + super(error, params); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java new file mode 100644 index 0000000000..e83b05916a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Groups.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.service; + +import java.io.IOException; +import java.util.List; + +public interface Groups { + + public List getGroups(String user) throws IOException; + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Instrumentation.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Instrumentation.java new file mode 100644 index 0000000000..df326f758b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Instrumentation.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.service; + +import java.util.Map; + +public interface Instrumentation { + + public interface Cron { + + public Cron start(); + + public Cron stop(); + } + + public interface Variable { + + T getValue(); + } + + public Cron createCron(); + + public void incr(String group, String name, long count); + + public void addCron(String group, String name, Cron cron); + + public void addVariable(String group, String name, Variable variable); + + //sampling happens once a second + public void addSampler(String group, String name, int samplingSize, Variable variable); + + public Map> getSnapshot(); + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/ProxyUser.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/ProxyUser.java new file mode 100644 index 0000000000..60e93a6bfe --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/ProxyUser.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.service; + +import java.io.IOException; +import java.security.AccessControlException; + +public interface ProxyUser { + + public void validate(String proxyUser, String proxyHost, String doAsUser) throws IOException, AccessControlException; + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Scheduler.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Scheduler.java new file mode 100644 index 0000000000..14abc1c687 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/Scheduler.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.service; + +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +public interface Scheduler { + + public abstract void schedule(Callable callable, long delay, long interval, TimeUnit unit); + + public abstract void schedule(Runnable runnable, long delay, long interval, TimeUnit unit); + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java new file mode 100644 index 0000000000..f1a9ac055d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java @@ -0,0 +1,278 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.service.hadoop; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.lib.server.BaseService; +import org.apache.hadoop.lib.server.ServiceException; +import org.apache.hadoop.lib.service.FileSystemAccess; +import org.apache.hadoop.lib.service.FileSystemAccessException; +import org.apache.hadoop.lib.service.Instrumentation; +import org.apache.hadoop.lib.util.Check; +import org.apache.hadoop.lib.util.ConfigurationUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.VersionInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URI; +import java.security.PrivilegedExceptionAction; +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +public class FileSystemAccessService extends BaseService implements FileSystemAccess { + private static final Logger LOG = LoggerFactory.getLogger(FileSystemAccessService.class); + + public static final String PREFIX = "hadoop"; + + private static final String INSTRUMENTATION_GROUP = "hadoop"; + + public static final String AUTHENTICATION_TYPE = "authentication.type"; + public static final String KERBEROS_KEYTAB = "authentication.kerberos.keytab"; + public static final String KERBEROS_PRINCIPAL = "authentication.kerberos.principal"; + + public static final String NAME_NODE_WHITELIST = "name.node.whitelist"; + + private static final String HADOOP_CONF_PREFIX = "conf:"; + + private static final String NAME_NODE_PROPERTY = "fs.default.name"; + + public FileSystemAccessService() { + super(PREFIX); + } + + private Collection nameNodeWhitelist; + + Configuration serviceHadoopConf; + + private AtomicInteger unmanagedFileSystems = new AtomicInteger(); + + @Override + protected void init() throws ServiceException { + LOG.info("Using FileSystemAccess JARs version [{}]", VersionInfo.getVersion()); + String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim(); + if (security.equals("kerberos")) { + String defaultName = getServer().getName(); + String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab"; + keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim(); + if (keytab.length() == 0) { + throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_KEYTAB); + } + String principal = defaultName + "/localhost@LOCALHOST"; + principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim(); + if (principal.length() == 0) { + throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL); + } + Configuration conf = new Configuration(); + conf.set("hadoop.security.authentication", "kerberos"); + UserGroupInformation.setConfiguration(conf); + try { + UserGroupInformation.loginUserFromKeytab(principal, keytab); + } catch (IOException ex) { + throw new ServiceException(FileSystemAccessException.ERROR.H02, ex.getMessage(), ex); + } + LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab); + } else if (security.equals("simple")) { + Configuration conf = new Configuration(); + conf.set("hadoop.security.authentication", "simple"); + UserGroupInformation.setConfiguration(conf); + LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name")); + } else { + throw new 
ServiceException(FileSystemAccessException.ERROR.H09, security); + } + + serviceHadoopConf = new Configuration(false); + for (Map.Entry entry : getServiceConfig()) { + String name = (String) entry.getKey(); + if (name.startsWith(HADOOP_CONF_PREFIX)) { + name = name.substring(HADOOP_CONF_PREFIX.length()); + String value = (String) entry.getValue(); + serviceHadoopConf.set(name, value); + + } + } + setRequiredServiceHadoopConf(serviceHadoopConf); + + LOG.debug("FileSystemAccess default configuration:"); + for (Map.Entry entry : serviceHadoopConf) { + LOG.debug(" {} = {}", entry.getKey(), entry.getValue()); + } + + nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST)); + } + + @Override + public void postInit() throws ServiceException { + super.postInit(); + Instrumentation instrumentation = getServer().get(Instrumentation.class); + instrumentation.addVariable(INSTRUMENTATION_GROUP, "unmanaged.fs", new Instrumentation.Variable() { + @Override + public Integer getValue() { + return unmanagedFileSystems.get(); + } + }); + instrumentation.addSampler(INSTRUMENTATION_GROUP, "unmanaged.fs", 60, new Instrumentation.Variable() { + @Override + public Long getValue() { + return (long) unmanagedFileSystems.get(); + } + }); + } + + private Set toLowerCase(Collection collection) { + Set set = new HashSet(); + for (String value : collection) { + set.add(value.toLowerCase()); + } + return set; + } + + @Override + public Class getInterface() { + return FileSystemAccess.class; + } + + @Override + public Class[] getServiceDependencies() { + return new Class[]{Instrumentation.class}; + } + + protected UserGroupInformation getUGI(String user) throws IOException { + return UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser()); + } + + protected void setRequiredServiceHadoopConf(Configuration conf) { + conf.set("fs.hdfs.impl.disable.cache", "true"); + } + + protected Configuration createHadoopConf(Configuration conf) { + Configuration hadoopConf = new Configuration(); + ConfigurationUtils.copy(serviceHadoopConf, hadoopConf); + ConfigurationUtils.copy(conf, hadoopConf); + return hadoopConf; + } + + protected Configuration createNameNodeConf(Configuration conf) { + return createHadoopConf(conf); + } + + protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException { + return FileSystem.get(namenodeConf); + } + + protected void closeFileSystem(FileSystem fs) throws IOException { + fs.close(); + } + + protected void validateNamenode(String namenode) throws FileSystemAccessException { + if (nameNodeWhitelist.size() > 0 && !nameNodeWhitelist.contains("*")) { + if (!nameNodeWhitelist.contains(namenode.toLowerCase())) { + throw new FileSystemAccessException(FileSystemAccessException.ERROR.H05, namenode, "not in whitelist"); + } + } + } + + protected void checkNameNodeHealth(FileSystem fileSystem) throws FileSystemAccessException { + } + + @Override + public T execute(String user, final Configuration conf, final FileSystemExecutor executor) + throws FileSystemAccessException { + Check.notEmpty(user, "user"); + Check.notNull(conf, "conf"); + Check.notNull(executor, "executor"); + if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) { + throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06, NAME_NODE_PROPERTY); + } + try { + validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority()); + UserGroupInformation ugi = getUGI(user); + return ugi.doAs(new 
PrivilegedExceptionAction() { + public T run() throws Exception { + Configuration namenodeConf = createNameNodeConf(conf); + FileSystem fs = createFileSystem(namenodeConf); + Instrumentation instrumentation = getServer().get(Instrumentation.class); + Instrumentation.Cron cron = instrumentation.createCron(); + try { + checkNameNodeHealth(fs); + cron.start(); + return executor.execute(fs); + } finally { + cron.stop(); + instrumentation.addCron(INSTRUMENTATION_GROUP, executor.getClass().getSimpleName(), cron); + closeFileSystem(fs); + } + } + }); + } catch (FileSystemAccessException ex) { + throw ex; + } catch (Exception ex) { + throw new FileSystemAccessException(FileSystemAccessException.ERROR.H03, ex); + } + } + + public FileSystem createFileSystemInternal(String user, final Configuration conf) + throws IOException, FileSystemAccessException { + Check.notEmpty(user, "user"); + Check.notNull(conf, "conf"); + try { + validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority()); + UserGroupInformation ugi = getUGI(user); + return ugi.doAs(new PrivilegedExceptionAction() { + public FileSystem run() throws Exception { + Configuration namenodeConf = createNameNodeConf(conf); + return createFileSystem(namenodeConf); + } + }); + } catch (IOException ex) { + throw ex; + } catch (FileSystemAccessException ex) { + throw ex; + } catch (Exception ex) { + throw new FileSystemAccessException(FileSystemAccessException.ERROR.H08, ex.getMessage(), ex); + } + } + + @Override + public FileSystem createFileSystem(String user, final Configuration conf) throws IOException, + FileSystemAccessException { + unmanagedFileSystems.incrementAndGet(); + return createFileSystemInternal(user, conf); + } + + @Override + public void releaseFileSystem(FileSystem fs) throws IOException { + unmanagedFileSystems.decrementAndGet(); + closeFileSystem(fs); + } + + + @Override + public Configuration getDefaultConfiguration() { + Configuration conf = new Configuration(false); + ConfigurationUtils.copy(serviceHadoopConf, conf); + return conf; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java new file mode 100644 index 0000000000..cc6d31090f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/instrumentation/InstrumentationService.java @@ -0,0 +1,403 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.service.instrumentation; + +import org.apache.hadoop.lib.server.BaseService; +import org.apache.hadoop.lib.server.ServiceException; +import org.apache.hadoop.lib.service.Instrumentation; +import org.apache.hadoop.lib.service.Scheduler; +import org.json.simple.JSONAware; +import org.json.simple.JSONObject; +import org.json.simple.JSONStreamAware; + +import java.io.IOException; +import java.io.Writer; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +public class InstrumentationService extends BaseService implements Instrumentation { + public static final String PREFIX = "instrumentation"; + public static final String CONF_TIMERS_SIZE = "timers.size"; + + private int timersSize; + private Lock counterLock; + private Lock timerLock; + private Lock variableLock; + private Lock samplerLock; + private Map> counters; + private Map> timers; + private Map> variables; + private Map> samplers; + private List samplersList; + private Map> all; + + public InstrumentationService() { + super(PREFIX); + } + + @Override + @SuppressWarnings("unchecked") + public void init() throws ServiceException { + timersSize = getServiceConfig().getInt(CONF_TIMERS_SIZE, 10); + counterLock = new ReentrantLock(); + timerLock = new ReentrantLock(); + variableLock = new ReentrantLock(); + samplerLock = new ReentrantLock(); + Map jvmVariables = new ConcurrentHashMap(); + counters = new ConcurrentHashMap>(); + timers = new ConcurrentHashMap>(); + variables = new ConcurrentHashMap>(); + samplers = new ConcurrentHashMap>(); + samplersList = new ArrayList(); + all = new LinkedHashMap>(); + all.put("os-env", System.getenv()); + all.put("sys-props", (Map) (Map) System.getProperties()); + all.put("jvm", jvmVariables); + all.put("counters", (Map) counters); + all.put("timers", (Map) timers); + all.put("variables", (Map) variables); + all.put("samplers", (Map) samplers); + + jvmVariables.put("free.memory", new VariableHolder(new Instrumentation.Variable() { + public Long getValue() { + return Runtime.getRuntime().freeMemory(); + } + })); + jvmVariables.put("max.memory", new VariableHolder(new Instrumentation.Variable() { + public Long getValue() { + return Runtime.getRuntime().maxMemory(); + } + })); + jvmVariables.put("total.memory", new VariableHolder(new Instrumentation.Variable() { + public Long getValue() { + return Runtime.getRuntime().totalMemory(); + } + })); + } + + @Override + public void postInit() throws ServiceException { + Scheduler scheduler = getServer().get(Scheduler.class); + if (scheduler != null) { + scheduler.schedule(new SamplersRunnable(), 0, 1, TimeUnit.SECONDS); + } + } + + @Override + public Class getInterface() { + return Instrumentation.class; + } + + @SuppressWarnings("unchecked") + private T getToAdd(String group, String name, Class klass, Lock lock, Map> map) { + boolean locked = false; + try { + Map groupMap = map.get(group); + if (groupMap == null) { + lock.lock(); + locked = true; + groupMap = map.get(group); + if (groupMap == null) { + groupMap = new ConcurrentHashMap(); + map.put(group, groupMap); + } + } + T element = groupMap.get(name); + if (element == null) { + if (!locked) { + lock.lock(); + locked = true; + } + element = groupMap.get(name); + if (element == null) { + try { 
+ if (klass == Timer.class) { + element = (T) new Timer(timersSize); + } else { + element = klass.newInstance(); + } + } catch (Exception ex) { + throw new RuntimeException(ex); + } + groupMap.put(name, element); + } + } + return element; + } finally { + if (locked) { + lock.unlock(); + } + } + } + + static class Cron implements Instrumentation.Cron { + long start; + long lapStart; + long own; + long total; + + public Cron start() { + if (total != 0) { + throw new IllegalStateException("Cron already used"); + } + if (start == 0) { + start = System.currentTimeMillis(); + lapStart = start; + } else if (lapStart == 0) { + lapStart = System.currentTimeMillis(); + } + return this; + } + + public Cron stop() { + if (total != 0) { + throw new IllegalStateException("Cron already used"); + } + if (lapStart > 0) { + own += System.currentTimeMillis() - lapStart; + lapStart = 0; + } + return this; + } + + void end() { + stop(); + total = System.currentTimeMillis() - start; + } + + } + + static class Timer implements JSONAware, JSONStreamAware { + static final int LAST_TOTAL = 0; + static final int LAST_OWN = 1; + static final int AVG_TOTAL = 2; + static final int AVG_OWN = 3; + + Lock lock = new ReentrantLock(); + private long[] own; + private long[] total; + private int last; + private boolean full; + private int size; + + public Timer(int size) { + this.size = size; + own = new long[size]; + total = new long[size]; + for (int i = 0; i < size; i++) { + own[i] = -1; + total[i] = -1; + } + last = -1; + } + + long[] getValues() { + lock.lock(); + try { + long[] values = new long[4]; + values[LAST_TOTAL] = total[last]; + values[LAST_OWN] = own[last]; + int limit = (full) ? size : (last + 1); + for (int i = 0; i < limit; i++) { + values[AVG_TOTAL] += total[i]; + values[AVG_OWN] += own[i]; + } + values[AVG_TOTAL] = values[AVG_TOTAL] / limit; + values[AVG_OWN] = values[AVG_OWN] / limit; + return values; + } finally { + lock.unlock(); + } + } + + void addCron(Cron cron) { + cron.end(); + lock.lock(); + try { + last = (last + 1) % size; + full = full || last == (size - 1); + total[last] = cron.total; + own[last] = cron.own; + } finally { + lock.unlock(); + } + } + + @SuppressWarnings("unchecked") + private JSONObject getJSON() { + long[] values = getValues(); + JSONObject json = new JSONObject(); + json.put("lastTotal", values[0]); + json.put("lastOwn", values[1]); + json.put("avgTotal", values[2]); + json.put("avgOwn", values[3]); + return json; + } + + @Override + public String toJSONString() { + return getJSON().toJSONString(); + } + + @Override + public void writeJSONString(Writer out) throws IOException { + getJSON().writeJSONString(out); + } + + } + + @Override + public Cron createCron() { + return new Cron(); + } + + @Override + public void incr(String group, String name, long count) { + AtomicLong counter = getToAdd(group, name, AtomicLong.class, counterLock, counters); + counter.addAndGet(count); + } + + @Override + public void addCron(String group, String name, Instrumentation.Cron cron) { + Timer timer = getToAdd(group, name, Timer.class, timerLock, timers); + timer.addCron((Cron) cron); + } + + static class VariableHolder implements JSONAware, JSONStreamAware { + Variable var; + + public VariableHolder() { + } + + public VariableHolder(Variable var) { + this.var = var; + } + + @SuppressWarnings("unchecked") + private JSONObject getJSON() { + JSONObject json = new JSONObject(); + json.put("value", var.getValue()); + return json; + } + + @Override + public String toJSONString() { + return 
getJSON().toJSONString(); + } + + @Override + public void writeJSONString(Writer out) throws IOException { + out.write(toJSONString()); + } + + } + + @Override + public void addVariable(String group, String name, Variable variable) { + VariableHolder holder = getToAdd(group, name, VariableHolder.class, variableLock, variables); + holder.var = variable; + } + + static class Sampler implements JSONAware, JSONStreamAware { + Variable variable; + long[] values; + private AtomicLong sum; + private int last; + private boolean full; + + void init(int size, Variable variable) { + this.variable = variable; + values = new long[size]; + sum = new AtomicLong(); + last = 0; + } + + void sample() { + int index = last; + long valueGoingOut = values[last]; + full = full || last == (values.length - 1); + last = (last + 1) % values.length; + values[index] = variable.getValue(); + sum.addAndGet(-valueGoingOut + values[index]); + } + + double getRate() { + return ((double) sum.get()) / ((full) ? values.length : ((last == 0) ? 1 : last)); + } + + @SuppressWarnings("unchecked") + private JSONObject getJSON() { + JSONObject json = new JSONObject(); + json.put("sampler", getRate()); + json.put("size", (full) ? values.length : last); + return json; + } + + @Override + public String toJSONString() { + return getJSON().toJSONString(); + } + + @Override + public void writeJSONString(Writer out) throws IOException { + out.write(toJSONString()); + } + } + + @Override + public void addSampler(String group, String name, int samplingSize, Variable variable) { + Sampler sampler = getToAdd(group, name, Sampler.class, samplerLock, samplers); + samplerLock.lock(); + try { + sampler.init(samplingSize, variable); + samplersList.add(sampler); + } finally { + samplerLock.unlock(); + } + } + + class SamplersRunnable implements Runnable { + + @Override + public void run() { + samplerLock.lock(); + try { + for (Sampler sampler : samplersList) { + sampler.sample(); + } + } finally { + samplerLock.unlock(); + } + } + } + + @Override + public Map> getSnapshot() { + return all; + } + + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java new file mode 100644 index 0000000000..2f6d837172 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/scheduler/SchedulerService.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.service.scheduler; + +import org.apache.hadoop.lib.lang.RunnableCallable; +import org.apache.hadoop.lib.server.BaseService; +import org.apache.hadoop.lib.server.Server; +import org.apache.hadoop.lib.server.ServiceException; +import org.apache.hadoop.lib.service.Instrumentation; +import org.apache.hadoop.lib.service.Scheduler; +import org.apache.hadoop.lib.util.Check; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.text.MessageFormat; +import java.util.concurrent.Callable; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +public class SchedulerService extends BaseService implements Scheduler { + private static final Logger LOG = LoggerFactory.getLogger(SchedulerService.class); + + private static final String INST_GROUP = "scheduler"; + + public static final String PREFIX = "scheduler"; + + public static final String CONF_THREADS = "threads"; + + private ScheduledExecutorService scheduler; + + public SchedulerService() { + super(PREFIX); + } + + @Override + public void init() throws ServiceException { + int threads = getServiceConfig().getInt(CONF_THREADS, 5); + scheduler = new ScheduledThreadPoolExecutor(threads); + LOG.debug("Scheduler started"); + } + + @Override + public void destroy() { + try { + long limit = System.currentTimeMillis() + 30 * 1000; + scheduler.shutdownNow(); + while (!scheduler.awaitTermination(1000, TimeUnit.MILLISECONDS)) { + LOG.debug("Waiting for scheduler to shutdown"); + if (System.currentTimeMillis() > limit) { + LOG.warn("Gave up waiting for scheduler to shutdown"); + break; + } + } + if (scheduler.isTerminated()) { + LOG.debug("Scheduler shutdown"); + } + } catch (InterruptedException ex) { + LOG.warn(ex.getMessage(), ex); + } + } + + @Override + public Class[] getServiceDependencies() { + return new Class[]{Instrumentation.class}; + } + + @Override + public Class getInterface() { + return Scheduler.class; + } + + @Override + public void schedule(final Callable callable, long delay, long interval, TimeUnit unit) { + Check.notNull(callable, "callable"); + if (!scheduler.isShutdown()) { + LOG.debug("Scheduling callable [{}], interval [{}] seconds, delay [{}] in [{}]", + new Object[]{callable, delay, interval, unit}); + Runnable r = new Runnable() { + public void run() { + String instrName = callable.getClass().getSimpleName(); + Instrumentation instr = getServer().get(Instrumentation.class); + if (getServer().getStatus() == Server.Status.HALTED) { + LOG.debug("Skipping [{}], server status [{}]", callable, getServer().getStatus()); + instr.incr(INST_GROUP, instrName + ".skips", 1); + } else { + LOG.debug("Executing [{}]", callable); + instr.incr(INST_GROUP, instrName + ".execs", 1); + Instrumentation.Cron cron = instr.createCron().start(); + try { + callable.call(); + } catch (Exception ex) { + instr.incr(INST_GROUP, instrName + ".fails", 1); + LOG.error("Error executing [{}], {}", new Object[]{callable, ex.getMessage(), ex}); + } finally { + instr.addCron(INST_GROUP, instrName, cron.stop()); + } + } + } + }; + scheduler.scheduleWithFixedDelay(r, delay, interval, unit); + } else { + throw new IllegalStateException( + MessageFormat.format("Scheduler shutting down, ignoring scheduling of [{}]", callable)); + } + } + + @Override + public void schedule(Runnable runnable, long delay, long interval, TimeUnit unit) { + schedule((Callable) new RunnableCallable(runnable), delay, interval, unit); + } + +} 
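A minimal usage sketch for the Scheduler service defined in this patch (not part of the patch itself): it mirrors the lookup that InstrumentationService.postInit() performs and the fixed-delay semantics of SchedulerService above. The HeartbeatService class, its "heartbeat" prefix, and the 60-second interval are illustrative assumptions; BaseService, getServer().get(Scheduler.class), and Scheduler.schedule(...) are taken from the code in this patch.

package org.apache.hadoop.lib.service.example;  // hypothetical package for the sketch

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Scheduler;

public class HeartbeatService extends BaseService {

  public HeartbeatService() {
    super("heartbeat");  // hypothetical service prefix
  }

  @Override
  protected void init() throws ServiceException {
    // nothing to initialize in this sketch
  }

  @Override
  public void postInit() throws ServiceException {
    super.postInit();
    // Same lookup pattern as InstrumentationService.postInit(): the Scheduler
    // service may be absent, so guard against null before scheduling.
    Scheduler scheduler = getServer().get(Scheduler.class);
    if (scheduler != null) {
      // Zero initial delay, then every 60 seconds; SchedulerService executes
      // this with fixed-delay semantics (scheduleWithFixedDelay).
      scheduler.schedule(new Runnable() {
        @Override
        public void run() {
          // periodic work goes here
        }
      }, 0, 60, TimeUnit.SECONDS);
    }
  }

  @Override
  public Class getInterface() {
    return HeartbeatService.class;
  }
}

In the patch itself, InstrumentationService registers its SamplersRunnable this way, with a one-second interval.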
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java new file mode 100644 index 0000000000..d1e75fc510 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/GroupsService.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.service.security; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.lib.server.BaseService; +import org.apache.hadoop.lib.server.ServiceException; +import org.apache.hadoop.lib.service.Groups; +import org.apache.hadoop.lib.util.ConfigurationUtils; + +import java.io.IOException; +import java.util.List; + +public class GroupsService extends BaseService implements Groups { + private static final String PREFIX = "groups"; + + private org.apache.hadoop.security.Groups hGroups; + + public GroupsService() { + super(PREFIX); + } + + @Override + protected void init() throws ServiceException { + Configuration hConf = new Configuration(false); + ConfigurationUtils.copy(getServiceConfig(), hConf); + hGroups = new org.apache.hadoop.security.Groups(hConf); + } + + @Override + public Class getInterface() { + return Groups.class; + } + + @Override + public List getGroups(String user) throws IOException { + return hGroups.getGroups(user); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/ProxyUserService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/ProxyUserService.java new file mode 100644 index 0000000000..3eefd3657c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/ProxyUserService.java @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.service.security; + +import org.apache.hadoop.lib.lang.XException; +import org.apache.hadoop.lib.server.BaseService; +import org.apache.hadoop.lib.server.ServiceException; +import org.apache.hadoop.lib.service.Groups; +import org.apache.hadoop.lib.service.ProxyUser; +import org.apache.hadoop.lib.util.Check; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetAddress; +import java.security.AccessControlException; +import java.text.MessageFormat; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class ProxyUserService extends BaseService implements ProxyUser { + private static Logger LOG = LoggerFactory.getLogger(ProxyUserService.class); + + public enum ERROR implements XException.ERROR { + PRXU01("Could not normalize host name [{0}], {1}"), + PRXU02("Missing [{0}] property"); + + private String template; + + ERROR(String template) { + this.template = template; + } + + @Override + public String getTemplate() { + return template; + } + } + + private static final String PREFIX = "proxyuser"; + private static final String GROUPS = ".groups"; + private static final String HOSTS = ".hosts"; + + private Map> proxyUserHosts = new HashMap>(); + private Map> proxyUserGroups = new HashMap>(); + + public ProxyUserService() { + super(PREFIX); + } + + @Override + public Class getInterface() { + return ProxyUser.class; + } + + @Override + public Class[] getServiceDependencies() { + return new Class[]{Groups.class}; + } + + @Override + protected void init() throws ServiceException { + for (Map.Entry entry : getServiceConfig()) { + String key = entry.getKey(); + if (key.endsWith(GROUPS)) { + String proxyUser = key.substring(0, key.lastIndexOf(GROUPS)); + if (getServiceConfig().get(proxyUser + HOSTS) == null) { + throw new ServiceException(ERROR.PRXU02, getPrefixedName(proxyUser + HOSTS)); + } + String value = entry.getValue().trim(); + LOG.info("Loading proxyuser settings [{}]=[{}]", key, value); + Set values = null; + if (!value.equals("*")) { + values = new HashSet(Arrays.asList(value.split(","))); + } + proxyUserGroups.put(proxyUser, values); + } + if (key.endsWith(HOSTS)) { + String proxyUser = key.substring(0, key.lastIndexOf(HOSTS)); + if (getServiceConfig().get(proxyUser + GROUPS) == null) { + throw new ServiceException(ERROR.PRXU02, getPrefixedName(proxyUser + GROUPS)); + } + String value = entry.getValue().trim(); + LOG.info("Loading proxyuser settings [{}]=[{}]", key, value); + Set values = null; + if (!value.equals("*")) { + String[] hosts = value.split(","); + for (int i = 0; i < hosts.length; i++) { + String originalName = hosts[i]; + try { + hosts[i] = normalizeHostname(originalName); + } catch (Exception ex) { + throw new ServiceException(ERROR.PRXU01, originalName, ex.getMessage(), ex); + } + LOG.info(" Hostname, original [{}], normalized [{}]", originalName, hosts[i]); + } + values = new HashSet(Arrays.asList(hosts)); + } + proxyUserHosts.put(proxyUser, values); + } + } + } + + @Override + public void validate(String proxyUser, String proxyHost, String doAsUser) throws IOException, + AccessControlException { + Check.notEmpty(proxyUser, "proxyUser"); + Check.notEmpty(proxyHost, "proxyHost"); + Check.notEmpty(doAsUser, "doAsUser"); + LOG.debug("Authorization check proxyuser [{}] host [{}] doAs [{}]", + new Object[]{proxyUser, proxyHost, doAsUser}); + if 
(proxyUserHosts.containsKey(proxyUser)) { + proxyHost = normalizeHostname(proxyHost); + validateRequestorHost(proxyUser, proxyHost, proxyUserHosts.get(proxyUser)); + validateGroup(proxyUser, doAsUser, proxyUserGroups.get(proxyUser)); + } else { + throw new AccessControlException(MessageFormat.format("User [{0}] not defined as proxyuser", proxyUser)); + } + } + + private void validateRequestorHost(String proxyUser, String hostname, Set validHosts) + throws IOException, AccessControlException { + if (validHosts != null) { + if (!validHosts.contains(hostname) && !validHosts.contains(normalizeHostname(hostname))) { + throw new AccessControlException(MessageFormat.format("Unauthorized host [{0}] for proxyuser [{1}]", + hostname, proxyUser)); + } + } + } + + private void validateGroup(String proxyUser, String user, Set validGroups) throws IOException, + AccessControlException { + if (validGroups != null) { + List userGroups = getServer().get(Groups.class).getGroups(user); + for (String g : validGroups) { + if (userGroups.contains(g)) { + return; + } + } + throw new AccessControlException( + MessageFormat.format("Unauthorized proxyuser [{0}] for user [{1}], not in proxyuser groups", + proxyUser, user)); + } + } + + private String normalizeHostname(String name) { + try { + InetAddress address = InetAddress.getByName(name); + return address.getCanonicalHostName(); + } catch (IOException ex) { + throw new AccessControlException(MessageFormat.format("Could not resolve host [{0}], {1}", name, + ex.getMessage())); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java new file mode 100644 index 0000000000..ab61fbbb89 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/FileSystemReleaseFilter.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.servlet; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.lib.service.FileSystemAccess; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import java.io.IOException; + +/** + * The FileSystemReleaseFilter releases back to the + * {@link FileSystemAccess} service a FileSystem instance. + *

    + * This filter is useful in situations where a servlet request + * is streaming out HDFS data and the corresponding filesystem + * instance has to be closed after the streaming completes. + */ +public abstract class FileSystemReleaseFilter implements Filter { + private static final ThreadLocal FILE_SYSTEM_TL = new ThreadLocal(); + + /** + * Initializes the filter. + *

    + * This implementation is a NOP. + * + * @param filterConfig filter configuration. + * + * @throws ServletException thrown if the filter could not be initialized. + */ + @Override + public void init(FilterConfig filterConfig) throws ServletException { + } + + /** + * It delegates the incoming request to the FilterChain, and + * at its completion (in a finally block) releases the filesystem instance + * back to the {@link FileSystemAccess} service. + * + * @param servletRequest servlet request. + * @param servletResponse servlet response. + * @param filterChain filter chain. + * + * @throws IOException thrown if an IO error occurs. + * @throws ServletException thrown if a servlet error occurs. + */ + @Override + public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) + throws IOException, ServletException { + try { + filterChain.doFilter(servletRequest, servletResponse); + } finally { + FileSystem fs = FILE_SYSTEM_TL.get(); + if (fs != null) { + FILE_SYSTEM_TL.remove(); + getFileSystemAccess().releaseFileSystem(fs); + } + } + } + + /** + * Destroys the filter. + *

    + * This implementation is a NOP. + */ + @Override + public void destroy() { + } + + /** + * Static method that sets the FileSystem to release back to + * the {@link FileSystemAccess} service on servlet request completion. + * + * @param fs fileystem instance. + */ + public static void setFileSystem(FileSystem fs) { + FILE_SYSTEM_TL.set(fs); + } + + /** + * Abstract method to be implemetned by concrete implementations of the + * filter that return the {@link FileSystemAccess} service to which the filesystem + * will be returned to. + * + * @return the FileSystemAccess service. + */ + protected abstract FileSystemAccess getFileSystemAccess(); + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java new file mode 100644 index 0000000000..cb9f550291 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.servlet; + + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import java.io.IOException; +import java.net.InetAddress; + +/** + * Filter that resolves the requester hostname. + */ +public class HostnameFilter implements Filter { + static final ThreadLocal HOSTNAME_TL = new ThreadLocal(); + + /** + * Initializes the filter. + *

    + * This implementation is a NOP. + * + * @param config filter configuration. + * + * @throws ServletException thrown if the filter could not be initialized. + */ + @Override + public void init(FilterConfig config) throws ServletException { + } + + /** + * Resolves the requester hostname and delegates the request to the chain. + *

    + * The requester hostname is available via the {@link #get} method. + * + * @param request servlet request. + * @param response servlet response. + * @param chain filter chain. + * + * @throws IOException thrown if an IO error occurs. + * @throws ServletException thrown if a servlet error occurs. + */ + @Override + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + try { + String hostname = InetAddress.getByName(request.getRemoteAddr()).getCanonicalHostName(); + HOSTNAME_TL.set(hostname); + chain.doFilter(request, response); + } finally { + HOSTNAME_TL.remove(); + } + } + + /** + * Returns the requester hostname. + * + * @return the requester hostname. + */ + public static String get() { + return HOSTNAME_TL.get(); + } + + /** + * Destroys the filter. + *

    + * This implementation is a NOP. + */ + @Override + public void destroy() { + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java new file mode 100644 index 0000000000..b8daf1b179 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/MDCFilter.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.servlet; + +import org.slf4j.MDC; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import java.io.IOException; +import java.security.Principal; + +/** + * Filter that sets request contextual information for the slf4j MDC. + *

    + * It sets the following values:
    + *   - hostname: if the {@link HostnameFilter} is present and configured
    + *     before this filter
    + *   - user: the HttpServletRequest.getUserPrincipal().getName()
    + *   - method: the HTTP method of the request (GET, POST, ...)
    + *   - path: the path of the request URL
    + */ +public class MDCFilter implements Filter { + + /** + * Initializes the filter. + *

    + * This implementation is a NOP. + * + * @param config filter configuration. + * + * @throws ServletException thrown if the filter could not be initialized. + */ + @Override + public void init(FilterConfig config) throws ServletException { + } + + /** + * Sets the slf4j MDC and delegates the request to the chain. + * + * @param request servlet request. + * @param response servlet response. + * @param chain filter chain. + * + * @throws IOException thrown if an IO error occurs. + * @throws ServletException thrown if a servlet error occurs. + */ + @Override + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + try { + MDC.clear(); + String hostname = HostnameFilter.get(); + if (hostname != null) { + MDC.put("hostname", HostnameFilter.get()); + } + Principal principal = ((HttpServletRequest) request).getUserPrincipal(); + String user = (principal != null) ? principal.getName() : null; + if (user != null) { + MDC.put("user", user); + } + MDC.put("method", ((HttpServletRequest) request).getMethod()); + MDC.put("path", ((HttpServletRequest) request).getPathInfo()); + chain.doFilter(request, response); + } finally { + MDC.clear(); + } + } + + /** + * Destroys the filter. + *

    + * This implementation is a NOP. + */ + @Override + public void destroy() { + } +} + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java new file mode 100644 index 0000000000..77e31456f7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/ServerWebApp.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.servlet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.lib.server.Server; +import org.apache.hadoop.lib.server.ServerException; + +import javax.servlet.ServletContextEvent; +import javax.servlet.ServletContextListener; +import java.text.MessageFormat; + +/** + * {@link Server} subclass that implements ServletContextListener + * and uses its lifecycle to start and stop the server. + */ +public abstract class ServerWebApp extends Server implements ServletContextListener { + + private static final String HOME_DIR = ".home.dir"; + private static final String CONFIG_DIR = ".config.dir"; + private static final String LOG_DIR = ".log.dir"; + private static final String TEMP_DIR = ".temp.dir"; + + private static ThreadLocal HOME_DIR_TL = new ThreadLocal(); + + /** + * Method for testing purposes. + */ + public static void setHomeDirForCurrentThread(String homeDir) { + HOME_DIR_TL.set(homeDir); + } + + /** + * Constructor for testing purposes. + */ + protected ServerWebApp(String name, String homeDir, String configDir, String logDir, String tempDir, + Configuration config) { + super(name, homeDir, configDir, logDir, tempDir, config); + } + + /** + * Constructor for testing purposes. + */ + protected ServerWebApp(String name, String homeDir, Configuration config) { + super(name, homeDir, config); + } + + /** + * Constructor. Subclasses must have a default constructor specifying + * the server name. + *

    + * The server name is used to resolve the Java System properties that define + * the server home, config, log and temp directories. + *

    + * The home directory is looked in the Java System property + * #SERVER_NAME#.home.dir. + *

    + * The config directory is looked in the Java System property + * #SERVER_NAME#.config.dir, if not defined it resolves to + * the #SERVER_HOME_DIR#/conf directory. + *

    + * The log directory is looked in the Java System property + * #SERVER_NAME#.log.dir, if not defined it resolves to + * the #SERVER_HOME_DIR#/log directory. + *

    + * The temp directory is looked in the Java System property + * #SERVER_NAME#.temp.dir, if not defined it resolves to + * the #SERVER_HOME_DIR#/temp directory. + * + * @param name server name. + */ + public ServerWebApp(String name) { + super(name, getHomeDir(name), + getDir(name, CONFIG_DIR, getHomeDir(name) + "/conf"), + getDir(name, LOG_DIR, getHomeDir(name) + "/log"), + getDir(name, TEMP_DIR, getHomeDir(name) + "/temp"), null); + } + + /** + * Returns the server home directory. + *

    + * It is looked up in the Java System property + * #SERVER_NAME#.home.dir. + * + * @param name the server home directory. + * + * @return the server home directory. + */ + static String getHomeDir(String name) { + String homeDir = HOME_DIR_TL.get(); + if (homeDir == null) { + String sysProp = name + HOME_DIR; + homeDir = System.getProperty(sysProp); + if (homeDir == null) { + throw new IllegalArgumentException(MessageFormat.format("System property [{0}] not defined", sysProp)); + } + } + return homeDir; + } + + /** + * Convenience method that looks for Java System property defining a + * diretory and if not present defaults to the specified directory. + * + * @param name server name, used as prefix of the Java System property. + * @param dirType dir type, use as postfix of the Java System property. + * @param defaultDir the default directory to return if the Java System + * property name + dirType is not defined. + * + * @return the directory defined in the Java System property or the + * the default directory if the Java System property is not defined. + */ + static String getDir(String name, String dirType, String defaultDir) { + String sysProp = name + dirType; + return System.getProperty(sysProp, defaultDir); + } + + /** + * Initializes the ServletContextListener which initializes + * the Server. + * + * @param event servelt context event. + */ + public void contextInitialized(ServletContextEvent event) { + try { + init(); + } catch (ServerException ex) { + event.getServletContext().log("ERROR: " + ex.getMessage()); + throw new RuntimeException(ex); + } + } + + /** + * Destroys the ServletContextListener which destroys + * the Server. + * + * @param event servelt context event. + */ + public void contextDestroyed(ServletContextEvent event) { + destroy(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java new file mode 100644 index 0000000000..26ad35570a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/Check.java @@ -0,0 +1,199 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.util; + +import java.text.MessageFormat; +import java.util.List; +import java.util.regex.Pattern; + +/** + * Utility methods to check preconditions. + *

    + * Commonly used for method arguments preconditions. + */ +public class Check { + + /** + * Verifies a variable is not NULL. + * + * @param obj the variable to check. + * @param name the name to use in the exception message. + * + * @return the variable. + * + * @throws IllegalArgumentException if the variable is NULL. + */ + public static T notNull(T obj, String name) { + if (obj == null) { + throw new IllegalArgumentException(name + " cannot be null"); + } + return obj; + } + + /** + * Verifies a list does not have any NULL elements. + * + * @param list the list to check. + * @param name the name to use in the exception message. + * + * @return the list. + * + * @throws IllegalArgumentException if the list has NULL elements. + */ + public static List notNullElements(List list, String name) { + notNull(list, name); + for (int i = 0; i < list.size(); i++) { + notNull(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i)); + } + return list; + } + + /** + * Verifies a string is not NULL and not emtpy + * + * @param str the variable to check. + * @param name the name to use in the exception message. + * + * @return the variable. + * + * @throws IllegalArgumentException if the variable is NULL or empty. + */ + public static String notEmpty(String str, String name) { + if (str == null) { + throw new IllegalArgumentException(name + " cannot be null"); + } + if (str.length() == 0) { + throw new IllegalArgumentException(name + " cannot be empty"); + } + return str; + } + + /** + * Verifies a string list is not NULL and not emtpy + * + * @param list the list to check. + * @param name the name to use in the exception message. + * + * @return the variable. + * + * @throws IllegalArgumentException if the string list has NULL or empty + * elements. + */ + public static List notEmptyElements(List list, String name) { + notNull(list, name); + for (int i = 0; i < list.size(); i++) { + notEmpty(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i)); + } + return list; + } + + private static final String IDENTIFIER_PATTERN_STR = "[a-zA-z_][a-zA-Z0-9_\\-]*"; + + private static final Pattern IDENTIFIER_PATTERN = Pattern.compile("^" + IDENTIFIER_PATTERN_STR + "$"); + + /** + * Verifies a value is a valid identifier, + * [a-zA-z_][a-zA-Z0-9_\-]*, up to a maximum length. + * + * @param value string to check if it is a valid identifier. + * @param maxLen maximun length. + * @param name the name to use in the exception message. + * + * @return the value. + * + * @throws IllegalArgumentException if the string is not a valid identifier. + */ + public static String validIdentifier(String value, int maxLen, String name) { + Check.notEmpty(value, name); + if (value.length() > maxLen) { + throw new IllegalArgumentException( + MessageFormat.format("[{0}] = [{1}] exceeds max len [{2}]", name, value, maxLen)); + } + if (!IDENTIFIER_PATTERN.matcher(value).find()) { + throw new IllegalArgumentException( + MessageFormat.format("[{0}] = [{1}] must be '{2}'", name, value, IDENTIFIER_PATTERN_STR)); + } + return value; + } + + /** + * Verifies an integer is greater than zero. + * + * @param value integer value. + * @param name the name to use in the exception message. + * + * @return the value. + * + * @throws IllegalArgumentException if the integer is zero or less. + */ + public static int gt0(int value, String name) { + return (int) gt0((long) value, name); + } + + /** + * Verifies an long is greater than zero. + * + * @param value long value. 
+ * @param name the name to use in the exception message. + * + * @return the value. + * + * @throws IllegalArgumentException if the long is zero or less. + */ + public static long gt0(long value, String name) { + if (value <= 0) { + throw new IllegalArgumentException( + MessageFormat.format("parameter [{0}] = [{1}] must be greater than zero", name, value)); + } + return value; + } + + /** + * Verifies an integer is greater or equal to zero. + * + * @param value integer value. + * @param name the name to use in the exception message. + * + * @return the value. + * + * @throws IllegalArgumentException if the integer is greater or equal to zero. + */ + public static int ge0(int value, String name) { + return (int) ge0((long) value, name); + } + + /** + * Verifies an long is greater or equal to zero. + * + * @param value integer value. + * @param name the name to use in the exception message. + * + * @return the value. + * + * @throws IllegalArgumentException if the long is greater or equal to zero. + */ + public static long ge0(long value, String name) { + if (value < 0) { + throw new IllegalArgumentException(MessageFormat.format( + "parameter [{0}] = [{1}] must be greater than or equals zero", name, value)); + } + return value; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java new file mode 100644 index 0000000000..820abf4eb4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/util/ConfigurationUtils.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.util; + +import org.apache.hadoop.conf.Configuration; +import org.w3c.dom.DOMException; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; +import org.w3c.dom.Text; +import org.xml.sax.SAXException; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; + +/** + * Configuration utilities. + */ +public abstract class ConfigurationUtils { + + /** + * Copy configuration key/value pairs from one configuration to another if a property exists in the target, it gets + * replaced. + * + * @param source source configuration. + * @param target target configuration. 
+ */ + public static void copy(Configuration source, Configuration target) { + Check.notNull(source, "source"); + Check.notNull(target, "target"); + for (Map.Entry entry : source) { + target.set(entry.getKey(), entry.getValue()); + } + } + + /** + * Injects configuration key/value pairs from one configuration to another if the key does not exist in the target + * configuration. + * + * @param source source configuration. + * @param target target configuration. + */ + public static void injectDefaults(Configuration source, Configuration target) { + Check.notNull(source, "source"); + Check.notNull(target, "target"); + for (Map.Entry entry : source) { + if (target.get(entry.getKey()) == null) { + target.set(entry.getKey(), entry.getValue()); + } + } + } + + /** + * Returns a new Configuration instance with all inline values resolved. + * + * @return a new Configuration instance with all inline values resolved. + */ + public static Configuration resolve(Configuration conf) { + Configuration resolved = new Configuration(false); + for (Map.Entry entry : conf) { + resolved.set(entry.getKey(), conf.get(entry.getKey())); + } + return resolved; + } + + // Cannibalized from FileSystemAccess Configuration.loadResource(). + + /** + * Create a configuration from an InputStream. + *

    + * ERROR canibalized from Configuration.loadResource(). + * + * @param is inputstream to read the configuration from. + * + * @throws IOException thrown if the configuration could not be read. + */ + public static void load(Configuration conf, InputStream is) throws IOException { + try { + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); + // ignore all comments inside the xml file + docBuilderFactory.setIgnoringComments(true); + DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); + Document doc = builder.parse(is); + parseDocument(conf, doc); + } catch (SAXException e) { + throw new IOException(e); + } catch (ParserConfigurationException e) { + throw new IOException(e); + } + } + + // Canibalized from FileSystemAccess Configuration.loadResource(). + private static void parseDocument(Configuration conf, Document doc) throws IOException { + try { + Element root = doc.getDocumentElement(); + if (!"configuration".equals(root.getTagName())) { + throw new IOException("bad conf file: top-level element not "); + } + NodeList props = root.getChildNodes(); + for (int i = 0; i < props.getLength(); i++) { + Node propNode = props.item(i); + if (!(propNode instanceof Element)) { + continue; + } + Element prop = (Element) propNode; + if (!"property".equals(prop.getTagName())) { + throw new IOException("bad conf file: element not "); + } + NodeList fields = prop.getChildNodes(); + String attr = null; + String value = null; + for (int j = 0; j < fields.getLength(); j++) { + Node fieldNode = fields.item(j); + if (!(fieldNode instanceof Element)) { + continue; + } + Element field = (Element) fieldNode; + if ("name".equals(field.getTagName()) && field.hasChildNodes()) { + attr = ((Text) field.getFirstChild()).getData().trim(); + } + if ("value".equals(field.getTagName()) && field.hasChildNodes()) { + value = ((Text) field.getFirstChild()).getData(); + } + } + + if (attr != null && value != null) { + conf.set(attr, value); + } + } + + } catch (DOMException e) { + throw new IOException(e); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java new file mode 100644 index 0000000000..7bc3a14757 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.wsrs; + +import java.text.MessageFormat; + +public abstract class BooleanParam extends Param { + + public BooleanParam(String name, String str) { + value = parseParam(name, str); + } + + protected Boolean parse(String str) throws Exception { + if (str.equalsIgnoreCase("true")) { + return true; + } + if (str.equalsIgnoreCase("false")) { + return false; + } + throw new IllegalArgumentException(MessageFormat.format("Invalid value [{0}], must be a boolean", str)); + } + + @Override + protected String getDomain() { + return "a boolean"; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java new file mode 100644 index 0000000000..aa9408f32e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +public abstract class ByteParam extends Param { + + public ByteParam(String name, String str) { + value = parseParam(name, str); + } + + protected Byte parse(String str) throws Exception { + return Byte.parseByte(str); + } + + @Override + protected String getDomain() { + return "a byte"; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java new file mode 100644 index 0000000000..ff86406e4a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.wsrs; + +import org.apache.hadoop.util.StringUtils; + +import java.util.Arrays; + +public abstract class EnumParam> extends Param { + Class klass; + + public EnumParam(String label, String str, Class e) { + klass = e; + value = parseParam(label, str); + } + + protected E parse(String str) throws Exception { + return Enum.valueOf(klass, str.toUpperCase()); + } + + @Override + protected String getDomain() { + return StringUtils.join(",", Arrays.asList(klass.getEnumConstants())); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ExceptionProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ExceptionProvider.java new file mode 100644 index 0000000000..9c21c6a536 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ExceptionProvider.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import org.apache.hadoop.fs.http.client.HttpFSFileSystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.ExceptionMapper; +import java.util.LinkedHashMap; +import java.util.Map; + +public class ExceptionProvider implements ExceptionMapper { + private static Logger LOG = LoggerFactory.getLogger(ExceptionProvider.class); + + private static final String ENTER = System.getProperty("line.separator"); + + protected Response createResponse(Response.Status status, Throwable throwable) { + Map json = new LinkedHashMap(); + json.put(HttpFSFileSystem.ERROR_MESSAGE_JSON, getOneLineMessage(throwable)); + json.put(HttpFSFileSystem.ERROR_EXCEPTION_JSON, throwable.getClass().getSimpleName()); + json.put(HttpFSFileSystem.ERROR_CLASSNAME_JSON, throwable.getClass().getName()); + Map response = new LinkedHashMap(); + response.put(HttpFSFileSystem.ERROR_JSON, json); + log(status, throwable); + return Response.status(status).type(MediaType.APPLICATION_JSON).entity(response).build(); + } + + protected String getOneLineMessage(Throwable throwable) { + String message = throwable.getMessage(); + if (message != null) { + int i = message.indexOf(ENTER); + if (i > -1) { + message = message.substring(0, i); + } + } + return message; + } + + protected void log(Response.Status status, Throwable throwable) { + LOG.debug("{}", throwable.getMessage(), throwable); + } + + @Override + public Response toResponse(Throwable throwable) { + return createResponse(Response.Status.BAD_REQUEST, throwable); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java new file mode 100644 index 0000000000..336a62ce9e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import org.apache.hadoop.io.IOUtils; + +import javax.ws.rs.core.StreamingOutput; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +public class InputStreamEntity implements StreamingOutput { + private InputStream is; + private long offset; + private long len; + + public InputStreamEntity(InputStream is, long offset, long len) { + this.is = is; + this.offset = offset; + this.len = len; + } + + public InputStreamEntity(InputStream is) { + this(is, 0, -1); + } + + @Override + public void write(OutputStream os) throws IOException { + is.skip(offset); + if (len == -1) { + IOUtils.copyBytes(is, os, 4096, true); + } else { + IOUtils.copyBytes(is, os, len, true); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java new file mode 100644 index 0000000000..6eddaa2e5f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
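A rough usage sketch for InputStreamEntity from a JAX-RS resource method; the resource class, path, and helper below are invented, only InputStreamEntity itself is part of this patch:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

import org.apache.hadoop.lib.wsrs.InputStreamEntity;

@Path("streaming-sketch")
public class StreamingResourceSketch {

  @GET
  public Response open() {
    InputStream is = openSomeStream();  // stand-in for an HDFS open() call
    long offset = 1024;                 // skip the first 1 KB
    long len = 4096;                    // then copy exactly 4 KB
    // When Jersey writes the response, the entity skips 'offset' bytes and
    // copies 'len' bytes (or everything, when len == -1) to the output stream.
    return Response.ok(new InputStreamEntity(is, offset, len))
        .type(MediaType.APPLICATION_OCTET_STREAM).build();
  }

  private InputStream openSomeStream() {
    return new ByteArrayInputStream(new byte[8192]);
  }
}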
+ */ + +package org.apache.hadoop.lib.wsrs; + +public abstract class IntegerParam extends Param { + + public IntegerParam(String name, String str) { + value = parseParam(name, str); + } + + protected Integer parse(String str) throws Exception { + return Integer.parseInt(str); + } + + @Override + protected String getDomain() { + return "an integer"; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONMapProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONMapProvider.java new file mode 100644 index 0000000000..0ca62fafb0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONMapProvider.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import org.json.simple.JSONObject; + +import javax.ws.rs.Produces; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.MessageBodyWriter; +import javax.ws.rs.ext.Provider; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.lang.annotation.Annotation; +import java.lang.reflect.Type; +import java.util.Map; + +@Provider +@Produces(MediaType.APPLICATION_JSON) +public class JSONMapProvider implements MessageBodyWriter { + private static final String ENTER = System.getProperty("line.separator"); + + @Override + public boolean isWriteable(Class aClass, Type type, Annotation[] annotations, MediaType mediaType) { + return Map.class.isAssignableFrom(aClass); + } + + @Override + public long getSize(Map map, Class aClass, Type type, Annotation[] annotations, MediaType mediaType) { + return -1; + } + + @Override + public void writeTo(Map map, Class aClass, Type type, Annotation[] annotations, + MediaType mediaType, MultivaluedMap stringObjectMultivaluedMap, + OutputStream outputStream) throws IOException, WebApplicationException { + Writer writer = new OutputStreamWriter(outputStream); + JSONObject.writeJSONString(map, writer); + writer.write(ENTER); + writer.flush(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONProvider.java new file mode 100644 index 0000000000..34aa2f9a08 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/JSONProvider.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
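With JSONMapProvider (and the JSONProvider that follows) registered, resource methods can return plain java.util.Map instances, or json-simple objects, and have them serialized to JSON automatically. A hypothetical resource, for illustration only; the path and payload values are made up:

import java.util.LinkedHashMap;
import java.util.Map;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

@Path("status-sketch")
public class StatusResourceSketch {

  @GET
  @Produces(MediaType.APPLICATION_JSON)
  public Map getStatus() {
    // JSONMapProvider matches because Map.class.isAssignableFrom(LinkedHashMap.class).
    Map json = new LinkedHashMap();
    json.put("homeDir", "/user/someuser");
    json.put("version", "0.24.0-SNAPSHOT");
    return json;  // written as {"homeDir":"/user/someuser","version":"0.24.0-SNAPSHOT"}
  }
}

The ExceptionProvider shown earlier produces its error bodies the same way, as a map nested under the error key before it is serialized by this provider.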
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import org.json.simple.JSONStreamAware; + +import javax.ws.rs.Produces; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.MessageBodyWriter; +import javax.ws.rs.ext.Provider; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.lang.annotation.Annotation; +import java.lang.reflect.Type; + +@Provider +@Produces(MediaType.APPLICATION_JSON) +public class JSONProvider implements MessageBodyWriter { + private static final String ENTER = System.getProperty("line.separator"); + + @Override + public boolean isWriteable(Class aClass, Type type, Annotation[] annotations, MediaType mediaType) { + return JSONStreamAware.class.isAssignableFrom(aClass); + } + + @Override + public long getSize(JSONStreamAware jsonStreamAware, Class aClass, Type type, Annotation[] annotations, + MediaType mediaType) { + return -1; + } + + @Override + public void writeTo(JSONStreamAware jsonStreamAware, Class aClass, Type type, Annotation[] annotations, + MediaType mediaType, MultivaluedMap stringObjectMultivaluedMap, + OutputStream outputStream) throws IOException, WebApplicationException { + Writer writer = new OutputStreamWriter(outputStream); + jsonStreamAware.writeJSONString(writer); + writer.write(ENTER); + writer.flush(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java new file mode 100644 index 0000000000..354a550d7b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.wsrs; + +public abstract class LongParam extends Param { + + public LongParam(String name, String str) { + value = parseParam(name, str); + } + + protected Long parse(String str) throws Exception { + return Long.parseLong(str); + } + + @Override + protected String getDomain() { + return "a long"; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java new file mode 100644 index 0000000000..68a41d5151 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import org.apache.hadoop.lib.util.Check; + +import java.text.MessageFormat; + +public abstract class Param { + protected T value; + + public T parseParam(String name, String str) { + Check.notNull(name, "name"); + try { + return (str != null && str.trim().length() > 0) ? parse(str) : null; + } catch (Exception ex) { + throw new IllegalArgumentException( + MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]", + name, str, getDomain())); + } + } + + public T value() { + return value; + } + + protected void setValue(T value) { + this.value = value; + } + + protected abstract String getDomain(); + + protected abstract T parse(String str) throws Exception; + + public String toString() { + return value.toString(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java new file mode 100644 index 0000000000..a3995baa61 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
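To make the Param contract concrete, a sketch of a subclass and its parsing behavior; the OverwriteParam name is invented, only BooleanParam and Param come from this patch:

import org.apache.hadoop.lib.wsrs.BooleanParam;

public class OverwriteParamSketch extends BooleanParam {

  public static final String NAME = "overwrite";  // illustrative parameter name

  public OverwriteParamSketch(String str) {
    super(NAME, str);
  }

  public static void main(String[] args) {
    // A parseable value is converted, case-insensitively.
    System.out.println(new OverwriteParamSketch("TRUE").value());  // true
    // A missing or blank value parses to null rather than failing.
    System.out.println(new OverwriteParamSketch("").value());      // null
    try {
      new OverwriteParamSketch("maybe");
    } catch (IllegalArgumentException ex) {
      // "Parameter [overwrite], invalid value [maybe], value must be [a boolean]"
      System.out.println(ex.getMessage());
    }
  }
}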
+ */ + +package org.apache.hadoop.lib.wsrs; + +public abstract class ShortParam extends Param { + + public ShortParam(String name, String str) { + value = parseParam(name, str); + } + + protected Short parse(String str) throws Exception { + return Short.parseShort(str); + } + + @Override + protected String getDomain() { + return "a short"; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java new file mode 100644 index 0000000000..4b3a9274fe --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import org.apache.hadoop.lib.util.Check; + +import java.text.MessageFormat; +import java.util.regex.Pattern; + +public abstract class StringParam extends Param { + private Pattern pattern; + + public StringParam(String name, String str) { + this(name, str, null); + } + + public StringParam(String name, String str, Pattern pattern) { + this.pattern = pattern; + value = parseParam(name, str); + } + + public String parseParam(String name, String str) { + String ret = null; + Check.notNull(name, "name"); + try { + if (str != null) { + str = str.trim(); + if (str.length() > 0) { + return parse(str); + } + } + } catch (Exception ex) { + throw new IllegalArgumentException( + MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]", + name, str, getDomain())); + } + return ret; + } + + protected String parse(String str) throws Exception { + if (pattern != null) { + if (!pattern.matcher(str).matches()) { + throw new IllegalArgumentException("Invalid value"); + } + } + return str; + } + + @Override + protected String getDomain() { + return (pattern == null) ? "a string" : pattern.pattern(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java new file mode 100644 index 0000000000..b4c0e9a9d9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/UserProvider.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import com.sun.jersey.api.core.HttpContext; +import com.sun.jersey.core.spi.component.ComponentContext; +import com.sun.jersey.core.spi.component.ComponentScope; +import com.sun.jersey.server.impl.inject.AbstractHttpContextInjectable; +import com.sun.jersey.spi.inject.Injectable; +import com.sun.jersey.spi.inject.InjectableProvider; +import org.slf4j.MDC; + +import javax.ws.rs.core.Context; +import javax.ws.rs.ext.Provider; +import java.lang.reflect.Type; +import java.security.Principal; +import java.util.regex.Pattern; + +@Provider +public class UserProvider extends AbstractHttpContextInjectable implements + InjectableProvider { + + public static final String USER_NAME_PARAM = "user.name"; + + public static final Pattern USER_PATTERN = Pattern.compile("[_a-zA-Z0-9]+"); + + private static class UserParam extends StringParam { + + public UserParam(String user) { + super(USER_NAME_PARAM, user, USER_PATTERN); + } + } + + @Override + public Principal getValue(HttpContext httpContext) { + Principal principal = httpContext.getRequest().getUserPrincipal(); + if (principal == null) { + final String user = httpContext.getRequest().getQueryParameters().getFirst(USER_NAME_PARAM); + if (user != null) { + principal = new Principal() { + @Override + public String getName() { + return new UserParam(user).value(); + } + }; + } + } + if (principal != null) { + MDC.put("user", principal.getName()); + } + return principal; + } + + @Override + public ComponentScope getScope() { + return ComponentScope.PerRequest; + } + + @Override + public Injectable getInjectable(ComponentContext componentContext, Context context, Type type) { + return (type.equals(Principal.class)) ? this : null; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/httpfs-config.sh b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/httpfs-config.sh new file mode 100644 index 0000000000..a72d62927c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/httpfs-config.sh @@ -0,0 +1,167 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
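A sketch of what UserProvider enables on the resource side: once the provider is registered, a method can declare a java.security.Principal parameter and receive either the container's authenticated principal or one synthesized from the user.name query parameter. The resource below is hypothetical:

import java.security.Principal;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;

@Path("whoami-sketch")
public class WhoAmIResourceSketch {

  @GET
  public Response whoami(@Context Principal user) {
    // For an unauthenticated request to ...?user.name=babu this returns "babu",
    // provided the name matches UserProvider.USER_PATTERN ([_a-zA-Z0-9]+);
    // UserProvider also puts the name into the logging MDC under "user".
    String name = (user != null) ? user.getName() : "anonymous";
    return Response.ok(name).build();
  }
}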
+# + +# resolve links - $0 may be a softlink +PRG="${0}" + +while [ -h "${PRG}" ]; do + ls=`ls -ld "${PRG}"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "${PRG}"`/"$link" + fi +done + +BASEDIR=`dirname ${PRG}` +BASEDIR=`cd ${BASEDIR}/..;pwd` + + +function print() { + if [ "${HTTPFS_SILENT}" != "true" ]; then + echo "$@" + fi +} + +# if HTTPFS_HOME is already set warn it will be ignored +# +if [ "${HTTPFS_HOME}" != "" ]; then + echo "WARNING: current setting of HTTPFS_HOME ignored" +fi + +print + +# setting HTTPFS_HOME to the installation dir, it cannot be changed +# +export HTTPFS_HOME=${BASEDIR} +httpfs_home=${HTTPFS_HOME} +print "Setting HTTPFS_HOME: ${HTTPFS_HOME}" + +# if the installation has a env file, source it +# this is for native packages installations +# +if [ -e "${HTTPFS_HOME}/bin/httpfs-env.sh" ]; then + print "Sourcing: ${HTTPFS_HOME}/bin/httpfs-env.sh" + source ${HTTPFS_HOME}/bin/HTTPFS-env.sh + grep "^ *export " ${HTTPFS_HOME}/bin/httpfs-env.sh | sed 's/ *export/ setting/' +fi + +# verify that the sourced env file didn't change HTTPFS_HOME +# if so, warn and revert +# +if [ "${HTTPFS_HOME}" != "${httpfs_home}" ]; then + print "WARN: HTTPFS_HOME resetting to ''${HTTPFS_HOME}'' ignored" + export HTTPFS_HOME=${httpfs_home} + print " using HTTPFS_HOME: ${HTTPFS_HOME}" +fi + +if [ "${HTTPFS_CONFIG}" = "" ]; then + export HTTPFS_CONFIG=${HTTPFS_HOME}/etc/hadoop + print "Setting HTTPFS_CONFIG: ${HTTPFS_CONFIG}" +else + print "Using HTTPFS_CONFIG: ${HTTPFS_CONFIG}" +fi +httpfs_config=${HTTPFS_CONFIG} + +# if the configuration dir has a env file, source it +# +if [ -e "${HTTPFS_CONFIG}/httpfs-env.sh" ]; then + print "Sourcing: ${HTTPFS_CONFIG}/httpfs-env.sh" + source ${HTTPFS_CONFIG}/httpfs-env.sh + grep "^ *export " ${HTTPFS_CONFIG}/httpfs-env.sh | sed 's/ *export/ setting/' +fi + +# verify that the sourced env file didn't change HTTPFS_HOME +# if so, warn and revert +# +if [ "${HTTPFS_HOME}" != "${httpfs_home}" ]; then + echo "WARN: HTTPFS_HOME resetting to ''${HTTPFS_HOME}'' ignored" + export HTTPFS_HOME=${httpfs_home} +fi + +# verify that the sourced env file didn't change HTTPFS_CONFIG +# if so, warn and revert +# +if [ "${HTTPFS_CONFIG}" != "${httpfs_config}" ]; then + echo "WARN: HTTPFS_CONFIG resetting to ''${HTTPFS_CONFIG}'' ignored" + export HTTPFS_CONFIG=${httpfs_config} +fi + +if [ "${HTTPFS_LOG}" = "" ]; then + export HTTPFS_LOG=${HTTPFS_HOME}/logs + print "Setting HTTPFS_LOG: ${HTTPFS_LOG}" +else + print "Using HTTPFS_LOG: ${HTTPFS_LOG}" +fi + +if [ ! -f ${HTTPFS_LOG} ]; then + mkdir -p ${HTTPFS_LOG} +fi + +if [ "${HTTPFS_TEMP}" = "" ]; then + export HTTPFS_TEMP=${HTTPFS_HOME}/temp + print "Setting HTTPFS_TEMP: ${HTTPFS_TEMP}" +else + print "Using HTTPFS_TEMP: ${HTTPFS_TEMP}" +fi + +if [ ! 
-f ${HTTPFS_TEMP} ]; then + mkdir -p ${HTTPFS_TEMP} +fi + +if [ "${HTTPFS_HTTP_PORT}" = "" ]; then + export HTTPFS_HTTP_PORT=14000 + print "Setting HTTPFS_HTTP_PORT: ${HTTPFS_HTTP_PORT}" +else + print "Using HTTPFS_HTTP_PORT: ${HTTPFS_HTTP_PORT}" +fi + +if [ "${HTTPFS_ADMIN_PORT}" = "" ]; then + export HTTPFS_ADMIN_PORT=`expr $HTTPFS_HTTP_PORT + 1` + print "Setting HTTPFS_ADMIN_PORT: ${HTTPFS_ADMIN_PORT}" +else + print "Using HTTPFS_ADMIN_PORT: ${HTTPFS_ADMIN_PORT}" +fi + +if [ "${HTTPFS_HTTP_HOSTNAME}" = "" ]; then + export HTTPFS_HTTP_HOSTNAME=`hostname -f` + print "Setting HTTPFS_HTTP_HOSTNAME: ${HTTPFS_HTTP_HOSTNAME}" +else + print "Using HTTPFS_HTTP_HOSTNAME: ${HTTPFS_HTTP_HOSTNAME}" +fi + +if [ "${CATALINA_BASE}" = "" ]; then + export CATALINA_BASE=${HTTPFS_HOME}/share/hadoop/httpfs/tomcat + print "Setting CATALINA_BASE: ${CATALINA_BASE}" +else + print "Using CATALINA_BASE: ${CATALINA_BASE}" +fi + +if [ "${CATALINA_OUT}" = "" ]; then + export CATALINA_OUT=${HTTPFS_LOG}/httpfs-catalina.out + print "Setting CATALINA_OUT: ${CATALINA_OUT}" +else + print "Using CATALINA_OUT: ${CATALINA_OUT}" +fi + +if [ "${CATALINA_PID}" = "" ]; then + export CATALINA_PID=/tmp/httpfs.pid + print "Setting CATALINA_PID: ${CATALINA_PID}" +else + print "Using CATALINA_PID: ${CATALINA_PID}" +fi + +print diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/default-log4j.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/default-log4j.properties new file mode 100644 index 0000000000..a0c65275e0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/default-log4j.properties @@ -0,0 +1,20 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.Target=System.err +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n +log4j.rootLogger=INFO, console + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml new file mode 100644 index 0000000000..6fac2651f5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml @@ -0,0 +1,204 @@ + + + + + + + + + httpfs.buffer.size + 4096 + + The buffer size used by a read/write request when streaming data from/to + HDFS. + + + + + + + httpfs.services + + org.apache.hadoop.lib.service.instrumentation.InstrumentationService, + org.apache.hadoop.lib.service.scheduler.SchedulerService, + org.apache.hadoop.lib.service.security.GroupsService, + org.apache.hadoop.lib.service.security.ProxyUserService, + org.apache.hadoop.lib.service.hadoop.FileSystemAccessService + + + Services used by the httpfs server. + + + + + + + kerberos.realm + LOCALHOST + + Kerberos realm, used only if Kerberos authentication is used between + the clients and httpfs or between HttpFS and HDFS. 
+ + This property is only used to resolve other properties within this + configuration file. + + + + + + + httpfs.hostname + ${httpfs.http.hostname} + + Property used to synthesize the HTTP Kerberos principal used by httpfs. + + This property is only used to resolve other properties within this + configuration file. + + + + + httpfs.authentication.type + simple + + Defines the authentication mechanism used by httpfs for its HTTP clients. + + Valid values are 'simple' and 'kerberos'. + + If using 'simple' HTTP clients must specify the username with the + 'user.name' query string parameter. + + If using 'kerberos' HTTP clients must use HTTP SPNEGO. + + + + + httpfs.authentication.kerberos.principal + HTTP/${httpfs.hostname}@${kerberos.realm} + + The HTTP Kerberos principal used by HttpFS in the HTTP endpoint. + + The HTTP Kerberos principal MUST start with 'HTTP/' per the Kerberos + HTTP SPNEGO specification. + + + + + httpfs.authentication.kerberos.keytab + ${user.home}/httpfs.keytab + + The Kerberos keytab file with the credentials for the + HTTP Kerberos principal used by httpfs in the HTTP endpoint. + + + + + + + httpfs.proxyuser.#USER#.hosts + * + + List of hosts the '#USER#' user is allowed to perform 'doAs' + operations. + + The '#USER#' must be replaced with the username of the user who is + allowed to perform 'doAs' operations. + + The value can be the '*' wildcard or a list of hostnames. + + For multiple users copy this property and replace the user name + in the property name. + + + + + httpfs.proxyuser.#USER#.groups + * + + List of groups the '#USER#' user is allowed to impersonate users + from to perform 'doAs' operations. + + The '#USER#' must be replaced with the username of the user who is + allowed to perform 'doAs' operations. + + The value can be the '*' wildcard or a list of groups. + + For multiple users copy this property and replace the user name + in the property name. + + + + + + + namenode.hostname + localhost + + The HDFS Namenode host the httpfs server connects to perform file + system operations. + + This property is only used to resolve other properties within this + configuration file. + + + + + httpfs.hadoop.conf:fs.default.name + hdfs://${namenode.hostname}:8020 + + The HDFS Namenode URI the httpfs server connects to perform file + system operations. + + + + + + + httpfs.hadoop.authentication.type + simple + + Defines the authentication mechanism used by httpfs to connect to + the HDFS Namenode. + + Valid values are 'simple' and 'kerberos'. + + + + + httpfs.hadoop.authentication.kerberos.keytab + ${user.home}/httpfs.keytab + + The Kerberos keytab file with the credentials for the + Kerberos principal used by httpfs to connect to the HDFS Namenode. + + + + + httpfs.hadoop.authentication.kerberos.principal + ${user.name}/${httpfs.hostname}@${kerberos.realm} + + The Kerberos principal used by httpfs to connect to the HDFS Namenode. + + + + + httpfs.hadoop.conf:dfs.namenode.kerberos.principal + hdfs/${namenode.hostname}@${kerberos.realm} + + The HDFS Namenode Kerberos principal. + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs.properties new file mode 100644 index 0000000000..164896e1f0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs.properties @@ -0,0 +1,21 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
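To make the proxy-user properties concrete, a client-side sketch; the host, user names, and the 'doas' impersonation parameter are assumptions for the sake of the example (only 'user.name' and op=homedir appear elsewhere in this patch):

import java.net.HttpURLConnection;
import java.net.URL;

public class ProxyUserCallSketch {
  public static void main(String[] args) throws Exception {
    // 'httpfsclient' stands in for the #USER# placeholder; it asks HttpFS to
    // perform the operation on behalf of 'babu'. The request is rejected
    // unless httpfs.proxyuser.httpfsclient.hosts/groups permit it.
    URL url = new URL("http://httpfs-host:14000/webhdfs/v1"
        + "?op=homedir&user.name=httpfsclient&doas=babu");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    System.out.println(conn.getResponseCode());
  }
}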
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +httpfs.version=${project.version} + +httpfs.source.repository=${httpfs.source.repository} +httpfs.source.revision=${httpfs.source.revision} + +httpfs.build.username=${user.name} +httpfs.build.timestamp=${httpfs.build.timestamp} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh new file mode 100644 index 0000000000..6566ab25c2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# resolve links - $0 may be a softlink +PRG="${0}" + +while [ -h "${PRG}" ]; do + ls=`ls -ld "${PRG}"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "${PRG}"`/"$link" + fi +done + +BASEDIR=`dirname ${PRG}` +BASEDIR=`cd ${BASEDIR}/..;pwd` + +source ${BASEDIR}/libexec/httpfs-config.sh + +# The Java System property 'httpfs.http.port' it is not used by HttpFS, +# it is used in Tomcat's server.xml configuration file +# +print "Using CATALINA_OPTS: ${CATALINA_OPTS}" + +catalina_opts="-Dhttpfs.home.dir=${HTTPFS_HOME}"; +catalina_opts="${catalina_opts} -Dhttpfs.config.dir=${HTTPFS_CONFIG}"; +catalina_opts="${catalina_opts} -Dhttpfs.log.dir=${HTTPFS_LOG}"; +catalina_opts="${catalina_opts} -Dhttpfs.temp.dir=${HTTPFS_TEMP}"; +catalina_opts="${catalina_opts} -Dhttpfs.admin.port=${HTTPFS_ADMIN_PORT}"; +catalina_opts="${catalina_opts} -Dhttpfs.http.port=${HTTPFS_HTTP_PORT}"; +catalina_opts="${catalina_opts} -Dhttpfs.http.hostname=${HTTPFS_HTTP_HOSTNAME}"; + +print "Adding to CATALINA_OPTS: ${catalina_opts}" + +export CATALINA_OPTS="${CATALINA_OPTS} ${catalina_opts}" + +# A bug in catalina.sh script does not use CATALINA_OPTS for stopping the server +# +if [ "${1}" = "stop" ]; then + export JAVA_OPTS=${CATALINA_OPTS} +fi + +if [ "${HTTPFS_SILENT}" != "true" ]; then + ${BASEDIR}/share/hadoop/httpfs/tomcat/bin/catalina.sh "$@" +else + ${BASEDIR}/share/hadoop/httpfs/tomcat/bin/catalina.sh "$@" > /dev/null +fi + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/WEB-INF/web.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/WEB-INF/web.xml new file mode 100644 index 0000000000..9d0ae0db4c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/WEB-INF/web.xml @@ -0,0 +1,16 @@ + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html new file mode 100644 index 0000000000..2f9aa7a463 --- 
/dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/ROOT/index.html @@ -0,0 +1,21 @@ + + + + +HttpFs service, service base URL at /webhdfs/v1. + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties new file mode 100644 index 0000000000..294ef741d6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/logging.properties @@ -0,0 +1,67 @@ +# +# All Rights Reserved. +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +handlers = 1catalina.org.apache.juli.FileHandler, 2localhost.org.apache.juli.FileHandler, 3manager.org.apache.juli.FileHandler, 4host-manager.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler + +.handlers = 1catalina.org.apache.juli.FileHandler, java.util.logging.ConsoleHandler + +############################################################ +# Handler specific properties. +# Describes specific configuration info for Handlers. +############################################################ + +1catalina.org.apache.juli.FileHandler.level = FINE +1catalina.org.apache.juli.FileHandler.directory = ${httpfs.log.dir} +1catalina.org.apache.juli.FileHandler.prefix = httpfs-catalina. + +2localhost.org.apache.juli.FileHandler.level = FINE +2localhost.org.apache.juli.FileHandler.directory = ${httpfs.log.dir} +2localhost.org.apache.juli.FileHandler.prefix = httpfs-localhost. + +3manager.org.apache.juli.FileHandler.level = FINE +3manager.org.apache.juli.FileHandler.directory = ${httpfs.log.dir} +3manager.org.apache.juli.FileHandler.prefix = httpfs-manager. + +4host-manager.org.apache.juli.FileHandler.level = FINE +4host-manager.org.apache.juli.FileHandler.directory = ${httpfs.log.dir} +4host-manager.org.apache.juli.FileHandler.prefix = httpfs-host-manager. + +java.util.logging.ConsoleHandler.level = FINE +java.util.logging.ConsoleHandler.formatter = java.util.logging.SimpleFormatter + + +############################################################ +# Facility specific properties. +# Provides extra control for each logger. 
+############################################################ + +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.FileHandler + +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].level = INFO +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].handlers = 3manager.org.apache.juli.FileHandler + +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].level = INFO +org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].handlers = 4host-manager.org.apache.juli.FileHandler + +# For example, set the com.xyz.foo logger to only log SEVERE +# messages: +#org.apache.catalina.startup.ContextConfig.level = FINE +#org.apache.catalina.startup.HostConfig.level = FINE +#org.apache.catalina.session.ManagerBase.level = FINE +#org.apache.catalina.core.AprLifecycleListener.level=FINE diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml new file mode 100644 index 0000000000..a425bdd2b3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/tomcat/server.xml @@ -0,0 +1,150 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml new file mode 100644 index 0000000000..3ba374e369 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/webapp/WEB-INF/web.xml @@ -0,0 +1,88 @@ + + + + + + org.apache.hadoop.fs.http.server.HttpFSServerWebApp + + + + webservices-driver + com.sun.jersey.spi.container.servlet.ServletContainer + + com.sun.jersey.config.property.packages + org.apache.hadoop.fs.http.server,org.apache.hadoop.lib.wsrs + + + + + 1 + + + + webservices-driver + /* + + + + authFilter + org.apache.hadoop.fs.http.server.AuthFilter + + + + MDCFilter + org.apache.hadoop.lib.servlet.MDCFilter + + + + hostnameFilter + org.apache.hadoop.lib.servlet.HostnameFilter + + + + fsReleaseFilter + org.apache.hadoop.fs.http.server.HttpFSReleaseFilter + + + + authFilter + * + + + + MDCFilter + * + + + + hostnameFilter + * + + + + fsReleaseFilter + * + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm new file mode 100644 index 0000000000..26891721b8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/ServerSetup.apt.vm @@ -0,0 +1,121 @@ +~~ Licensed under the Apache License, Version 2.0 (the "License"); +~~ you may not use this file except in compliance with the License. +~~ You may obtain a copy of the License at +~~ +~~ http://www.apache.org/licenses/LICENSE-2.0 +~~ +~~ Unless required by applicable law or agreed to in writing, software +~~ distributed under the License is distributed on an "AS IS" BASIS, +~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +~~ See the License for the specific language governing permissions and +~~ limitations under the License. 
+ + --- + Hadoop HDFS over HTTP ${project.version} - Server Setup + --- + --- + ${maven.build.timestamp} + +Hadoop HDFS over HTTP ${project.version} - Server Setup + + \[ {{{./index.html}Go Back}} \] + + This page explains how to quickly set up HttpFS with pseudo authentication + against a Hadoop cluster that also uses pseudo authentication. + +* Requirements + + * Java 6+ + + * Maven 3+ + +* Install HttpFS + ++---+ +~ $ tar xzf httpfs-${project.version}.tar.gz ++---+ + +* Configure HttpFS + + Edit the <<>> file and + set the <<>> property to the HDFS + Namenode URI. For example: + ++---+ +httpfs.fsAccess.conf:fs.default.name=hdfs://localhost:8021 ++---+ + +* Configure Hadoop + + Edit Hadoop <<>> and define the Unix user that will + run the HttpFS server as a proxyuser. For example: + ++---+ + ... + + fsAccess.proxyuser.#HTTPFSUSER#.hosts + httpfs-host.foo.com + + + fsAccess.proxyuser.#HTTPFSUSER#.groups + * + + ... ++---+ + + IMPORTANT: Replace <<<#HTTPFSUSER#>>> with the Unix user that will + start the HttpFS server. + +* Restart Hadoop + + You need to restart Hadoop for the proxyuser configuration to become + active. + +* Start/Stop HttpFS + + To start/stop HttpFS, use HttpFS's bin/httpfs.sh script. For example: + ++---+ +httpfs-${project.version} $ bin/httpfs.sh start ++---+ + + NOTE: Invoking the script without any parameters lists all possible + parameters (start, stop, run, etc.). The <<>> script is a wrapper + for Tomcat's <<>> script that sets the environment variables + and Java System properties required to run the HttpFS server. + +* Test HttpFS is working + ++---+ +~ $ curl -i "http://:14000?user.name=babu&op=homedir" +HTTP/1.1 200 OK +Content-Type: application/json +Transfer-Encoding: chunked + +{"homeDir":"http:\/\/:14000\/user\/babu"} ++---+ + +* Embedded Tomcat Configuration + + To configure the embedded Tomcat, go to the <<>>. + + HttpFS preconfigures the HTTP and Admin ports in Tomcat's <<>> to + 14000 and 14001. + + Tomcat logs are also preconfigured to go to HttpFS's <<>> directory. + + The following environment variables (which can be set in HttpFS's + <<>> script) can be used to alter those values: + + * HTTPFS_HTTP_PORT + + * HTTPFS_ADMIN_PORT + + * HTTPFS_LOG + +* HttpFS Configuration + + HttpFS supports the following {{{./httpfs-default.html}configuration properties}} + in HttpFS's <<>> configuration file. + + \[ {{{./index.html}Go Back}} \] diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm new file mode 100644 index 0000000000..30417423cd --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/UsingHttpTools.apt.vm @@ -0,0 +1,91 @@ +~~ Licensed under the Apache License, Version 2.0 (the "License"); +~~ you may not use this file except in compliance with the License. +~~ You may obtain a copy of the License at +~~ +~~ http://www.apache.org/licenses/LICENSE-2.0 +~~ +~~ Unless required by applicable law or agreed to in writing, software +~~ distributed under the License is distributed on an "AS IS" BASIS, +~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +~~ See the License for the specific language governing permissions and +~~ limitations under the License.
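The server set up above can also be reached from Java through the HttpFS client FileSystem, mirroring the wiring used by the module's tests; the host, port, and path below are placeholders:

+---+
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HttpFSClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Bind the http:// scheme to the HttpFS client implementation.
    conf.set("fs.http.impl", "org.apache.hadoop.fs.http.client.HttpFSFileSystem");
    FileSystem fs = FileSystem.get(new URI("http://httpfs-host:14000"), conf);
    System.out.println(fs.getFileStatus(new Path("/user/babu")));
    fs.close();
  }
}
+---+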
+ + --- + Hadoop HDFS over HTTP ${project.version} - Using HTTP Tools + --- + --- + ${maven.build.timestamp} + +Hadoop HDFS over HTTP ${project.version} - Using HTTP Tools + + \[ {{{./index.html}Go Back}} \] + +* Security + + Out of the box, HttpFS supports both pseudo authentication and Kerberos HTTP + SPNEGO authentication. + +** Pseudo Authentication + + With pseudo authentication the user name must be specified in the + <<>> query string parameter of an HttpFS URL. + For example: + ++---+ +$ curl "http://:14000/webhdfs/v1?op=homedir&user.name=babu" ++---+ + +** Kerberos HTTP SPNEGO Authentication + + Kerberos HTTP SPNEGO authentication requires a tool or library supporting + the Kerberos HTTP SPNEGO protocol. + + IMPORTANT: If using <<>>, the <<>> version being used must support + GSS (<<>> prints out 'GSS' if it supports it). + + For example: + ++---+ +$ kinit +Please enter the password for tucu@LOCALHOST: +$ curl --negotiate -u foo "http://:14000/webhdfs/v1?op=homedir" +Enter host password for user 'foo': ++---+ + + NOTE: the <<<-u USER>>> option is required by the <<<--negotiate>>> option but it is + not used. Use any value as <<>> and when asked for the password press + [ENTER] as the password value is ignored. + +** {Remembering Who I Am} (Establishing an Authenticated Session) + + As with most authentication mechanisms, Hadoop HTTP authentication authenticates + users once and issues a short-lived authentication token to be presented in + subsequent requests. This authentication token is a signed HTTP Cookie. + + When using tools like <<>>, the authentication token must be stored on + the first request doing authentication, and submitted in subsequent requests. + To do this with curl, the <<<-b>>> and <<<-c>>> options to save and send HTTP + Cookies must be used. + + For example, the first request doing authentication should save the received + HTTP Cookies. + + Using Pseudo Authentication: + ++---+ +$ curl -c ~/.httpfsauth "http://:14000/webhdfs/v1?op=homedir&user.name=babu" ++---+ + + Using Kerberos HTTP SPNEGO authentication: + ++---+ +$ curl --negotiate -u foo -c ~/.httpfsauth "http://:14000/webhdfs/v1?op=homedir" ++---+ + + Then, subsequent requests forward the previously received HTTP Cookie: + ++---+ +$ curl -b ~/.httpfsauth "http://:14000/webhdfs/v1?op=liststatus" ++---+ + + \[ {{{./index.html}Go Back}} \] diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm new file mode 100644 index 0000000000..2f8dd5b9cc --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/apt/index.apt.vm @@ -0,0 +1,88 @@ +~~ Licensed under the Apache License, Version 2.0 (the "License"); +~~ you may not use this file except in compliance with the License. +~~ You may obtain a copy of the License at +~~ +~~ http://www.apache.org/licenses/LICENSE-2.0 +~~ +~~ Unless required by applicable law or agreed to in writing, software +~~ distributed under the License is distributed on an "AS IS" BASIS, +~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +~~ See the License for the specific language governing permissions and +~~ limitations under the License. + + --- + Hadoop HDFS over HTTP - Documentation Sets ${project.version} + --- + --- + ${maven.build.timestamp} + +Hadoop HDFS over HTTP - Documentation Sets ${project.version} + + HttpFS is a server that provides a REST HTTP gateway supporting all HDFS + File System operations (read and write), and it is interoperable with the + <> REST HTTP API.
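The curl -c/-b cookie handling described above can also be done programmatically. A sketch using hadoop-auth's AuthenticatedURL, which is assumed to be on the classpath (it is not part of this patch); the host and user are placeholders:

+---+
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

public class AuthTokenSketch {
  public static void main(String[] args) throws Exception {
    // The token plays the role of the ~/.httpfsauth cookie jar.
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    AuthenticatedURL aUrl = new AuthenticatedURL();

    // First call authenticates and fills 'token' (like curl -c).
    URL homedir = new URL("http://httpfs-host:14000/webhdfs/v1?op=homedir&user.name=babu");
    HttpURLConnection conn = aUrl.openConnection(homedir, token);
    System.out.println(conn.getResponseCode());

    // Later calls reuse the same token (like curl -b).
    URL list = new URL("http://httpfs-host:14000/webhdfs/v1?op=liststatus&user.name=babu");
    conn = aUrl.openConnection(list, token);
    System.out.println(conn.getResponseCode());
  }
}
+---+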
+ + HttpFS can be used to transfer data between clusters running different + versions of Hadoop (overcoming RPC versioning issues), for example using + Hadoop DistCP. + + HttpFS can be used to access data in HDFS on a cluster behind a firewall + (the HttpFS server acts as a gateway and is the only system that is allowed + to cross the firewall into the cluster). + + HttpFS can be used to access data in HDFS using HTTP utilities (such as curl + and wget) and HTTP libraries from languages other than Java (such as Perl). + + The <> client FileSystem implementation can be used to access HttpFS + using the Hadoop filesystem command line tool (<<>>) as well as + from Java applications using the Hadoop FileSystem Java API. + + HttpFS has built-in security supporting Hadoop pseudo authentication, + HTTP SPNEGO Kerberos, and other pluggable authentication mechanisms. It also + provides Hadoop proxy user support. + +* How Does HttpFS Work? + + HttpFS is a separate service from the Hadoop NameNode. + + HttpFS itself is a Java web application and it runs using a preconfigured Tomcat + bundled with the HttpFS binary distribution. + + HttpFS HTTP web-service API calls are HTTP REST calls that map to an HDFS file + system operation. For example, using the <<>> Unix command: + + * <<<$ curl http://httpfs-host:14000/webhdfs/v1/user/foo/README.txt>>> returns + the contents of the HDFS <<>> file. + + * <<<$ curl http://httpfs-host:14000/webhdfs/v1/user/foo?op=list>>> returns the + contents of the HDFS <<>> directory in JSON format. + + * <<<$ curl -X POST http://httpfs-host:14000/webhdfs/v1/user/foo/bar?op=mkdirs>>> + creates the HDFS <<>> directory. + +* How Do HttpFS and Hadoop HDFS Proxy Differ? + + HttpFS was inspired by Hadoop HDFS proxy. + + HttpFS can be seen as a full rewrite of Hadoop HDFS proxy. + + Hadoop HDFS proxy provides a subset of file system operations (read only), while + HttpFS provides support for all file system operations. + + HttpFS uses a clean HTTP REST API, making its use with HTTP tools more + intuitive. + + HttpFS supports Hadoop pseudo authentication, Kerberos SPNEGO authentication + and Hadoop proxy users. Hadoop HDFS proxy did not. + +* User and Developer Documentation + + * {{{./ServerSetup.html}HttpFS Server Setup}} + + * {{{./UsingHttpTools.html}Using HTTP Tools}} + +* Current Limitations + + <<>> + operations are not supported. + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/configuration.xsl b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/configuration.xsl new file mode 100644 index 0000000000..8f2ae9bcbb --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/configuration.xsl @@ -0,0 +1,49 @@ + + + + + + + +

    Configuration Properties

    + + + + + + + + + + + + + +
    name value description
    + + + + + + + +
    + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml new file mode 100644 index 0000000000..d6424ebc2f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/site.xml @@ -0,0 +1,34 @@ + + + + + + + +   + + + + org.apache.maven.skins + maven-stylus-skin + 1.2 + + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java new file mode 100644 index 0000000000..48bc7240d5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java @@ -0,0 +1,485 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.http.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.http.server.HttpFSServerWebApp; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.HFSTestCase; +import org.apache.hadoop.test.HadoopUsersConfTestHelper; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.apache.hadoop.test.TestHdfs; +import org.apache.hadoop.test.TestHdfsHelper; +import org.apache.hadoop.test.TestJetty; +import org.apache.hadoop.test.TestJettyHelper; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.webapp.WebAppContext; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URL; +import java.security.PrivilegedExceptionAction; +import java.util.Arrays; +import java.util.Collection; + +@RunWith(value = Parameterized.class) +public class TestHttpFSFileSystem extends HFSTestCase { + + private void createHttpFSServer() throws Exception { + File homeDir = TestDirHelper.getTestDir(); + Assert.assertTrue(new File(homeDir, "conf").mkdir()); + Assert.assertTrue(new File(homeDir, "log").mkdir()); + Assert.assertTrue(new File(homeDir, "temp").mkdir()); + HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath()); + + String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name"); + Configuration conf = new 
Configuration(false); + conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName); + conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", HadoopUsersConfTestHelper + .getHadoopProxyUserGroups()); + conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", HadoopUsersConfTestHelper + .getHadoopProxyUserHosts()); + File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml"); + OutputStream os = new FileOutputStream(hoopSite); + conf.writeXml(os); + os.close(); + + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + URL url = cl.getResource("webapp"); + WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs"); + Server server = TestJettyHelper.getJettyServer(); + server.addHandler(context); + server.start(); + } + + protected FileSystem getHttpFileSystem() throws Exception { + Configuration conf = new Configuration(); + conf.set("fs.http.impl", HttpFSFileSystem.class.getName()); + return FileSystem.get(TestJettyHelper.getJettyURL().toURI(), conf); + } + + protected void testGet() throws Exception { + FileSystem fs = getHttpFileSystem(); + Assert.assertNotNull(fs); + Assert.assertEquals(fs.getUri(), TestJettyHelper.getJettyURL().toURI()); + fs.close(); + } + + private void testOpen() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + OutputStream os = fs.create(path); + os.write(1); + os.close(); + fs.close(); + fs = getHttpFileSystem(); + InputStream is = fs.open(new Path(path.toUri().getPath())); + Assert.assertEquals(is.read(), 1); + is.close(); + fs.close(); + } + + private void testCreate(Path path, boolean override) throws Exception { + FileSystem fs = getHttpFileSystem(); + FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE); + OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024, + (short) 2, 100 * 1024 * 1024, null); + os.write(1); + os.close(); + fs.close(); + + fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + FileStatus status = fs.getFileStatus(path); + Assert.assertEquals(status.getReplication(), 2); + Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024); + Assert.assertEquals(status.getPermission(), permission); + InputStream is = fs.open(path); + Assert.assertEquals(is.read(), 1); + is.close(); + fs.close(); + } + + private void testCreate() throws Exception { + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + testCreate(path, false); + testCreate(path, true); + try { + testCreate(path, false); + Assert.fail(); + } catch (IOException ex) { + + } catch (Exception ex) { + Assert.fail(); + } + } + + private void testAppend() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + OutputStream os = fs.create(path); + os.write(1); + os.close(); + fs.close(); + fs = getHttpFileSystem(); + os = fs.append(new Path(path.toUri().getPath())); + os.write(2); + os.close(); + fs.close(); + fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + InputStream is = fs.open(path); + Assert.assertEquals(is.read(), 1); + Assert.assertEquals(is.read(), 2); + Assert.assertEquals(is.read(), -1); + is.close(); + fs.close(); + } + + private void testRename() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), 
"foo"); + fs.mkdirs(path); + fs.close(); + fs = getHttpFileSystem(); + Path oldPath = new Path(path.toUri().getPath()); + Path newPath = new Path(path.getParent(), "bar"); + fs.rename(oldPath, newPath); + fs.close(); + fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Assert.assertFalse(fs.exists(oldPath)); + Assert.assertTrue(fs.exists(newPath)); + fs.close(); + } + + private void testDelete() throws Exception { + Path foo = new Path(TestHdfsHelper.getHdfsTestDir(), "foo"); + Path bar = new Path(TestHdfsHelper.getHdfsTestDir(), "bar"); + Path foe = new Path(TestHdfsHelper.getHdfsTestDir(), "foe"); + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + fs.mkdirs(foo); + fs.mkdirs(new Path(bar, "a")); + fs.mkdirs(foe); + + FileSystem hoopFs = getHttpFileSystem(); + Assert.assertTrue(hoopFs.delete(new Path(foo.toUri().getPath()), false)); + Assert.assertFalse(fs.exists(foo)); + try { + hoopFs.delete(new Path(bar.toUri().getPath()), false); + Assert.fail(); + } catch (IOException ex) { + } catch (Exception ex) { + Assert.fail(); + } + Assert.assertTrue(fs.exists(bar)); + Assert.assertTrue(hoopFs.delete(new Path(bar.toUri().getPath()), true)); + Assert.assertFalse(fs.exists(bar)); + + Assert.assertTrue(fs.exists(foe)); + Assert.assertTrue(hoopFs.delete(foe, true)); + Assert.assertFalse(fs.exists(foe)); + + hoopFs.close(); + fs.close(); + } + + private void testListStatus() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + OutputStream os = fs.create(path); + os.write(1); + os.close(); + FileStatus status1 = fs.getFileStatus(path); + fs.close(); + + fs = getHttpFileSystem(); + FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath())); + fs.close(); + + Assert.assertEquals(status2.getPermission(), status1.getPermission()); + Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath()); + Assert.assertEquals(status2.getReplication(), status1.getReplication()); + Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize()); + Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime()); + Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime()); + Assert.assertEquals(status2.getOwner(), status1.getOwner()); + Assert.assertEquals(status2.getGroup(), status1.getGroup()); + Assert.assertEquals(status2.getLen(), status1.getLen()); + + FileStatus[] stati = fs.listStatus(path.getParent()); + Assert.assertEquals(stati.length, 1); + Assert.assertEquals(stati[0].getPath().getName(), path.getName()); + } + + private void testWorkingdirectory() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path workingDir = fs.getWorkingDirectory(); + fs.close(); + + fs = getHttpFileSystem(); + Path hoopWorkingDir = fs.getWorkingDirectory(); + fs.close(); + Assert.assertEquals(hoopWorkingDir.toUri().getPath(), workingDir.toUri().getPath()); + + fs = getHttpFileSystem(); + fs.setWorkingDirectory(new Path("/tmp")); + workingDir = fs.getWorkingDirectory(); + fs.close(); + Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath()); + } + + private void testMkdirs() throws Exception { + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo"); + FileSystem fs = getHttpFileSystem(); + fs.mkdirs(path); + fs.close(); + fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Assert.assertTrue(fs.exists(path)); + fs.close(); + } + + private void testSetTimes() throws 
Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + OutputStream os = fs.create(path); + os.write(1); + os.close(); + FileStatus status1 = fs.getFileStatus(path); + fs.close(); + long at = status1.getAccessTime(); + long mt = status1.getModificationTime(); + + fs = getHttpFileSystem(); + fs.setTimes(path, mt + 10, at + 20); + fs.close(); + + fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + status1 = fs.getFileStatus(path); + fs.close(); + long atNew = status1.getAccessTime(); + long mtNew = status1.getModificationTime(); + Assert.assertEquals(mtNew, mt + 10); + Assert.assertEquals(atNew, at + 20); + } + + private void testSetPermission() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + OutputStream os = fs.create(path); + os.write(1); + os.close(); + fs.close(); + + fs = getHttpFileSystem(); + FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE); + fs.setPermission(path, permission1); + fs.close(); + + fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + FileStatus status1 = fs.getFileStatus(path); + fs.close(); + FsPermission permission2 = status1.getPermission(); + Assert.assertEquals(permission2, permission1); + } + + private void testSetOwner() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + OutputStream os = fs.create(path); + os.write(1); + os.close(); + fs.close(); + + fs = getHttpFileSystem(); + String user = HadoopUsersConfTestHelper.getHadoopUsers()[1]; + String group = HadoopUsersConfTestHelper.getHadoopUserGroups(user)[0]; + fs.setOwner(path, user, group); + fs.close(); + + fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + FileStatus status1 = fs.getFileStatus(path); + fs.close(); + Assert.assertEquals(status1.getOwner(), user); + Assert.assertEquals(status1.getGroup(), group); + } + + private void testSetReplication() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + OutputStream os = fs.create(path); + os.write(1); + os.close(); + fs.close(); + fs.setReplication(path, (short) 2); + + fs = getHttpFileSystem(); + fs.setReplication(path, (short) 1); + fs.close(); + + fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + FileStatus status1 = fs.getFileStatus(path); + fs.close(); + Assert.assertEquals(status1.getReplication(), (short) 1); + } + + private void testChecksum() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + OutputStream os = fs.create(path); + os.write(1); + os.close(); + FileChecksum hdfsChecksum = fs.getFileChecksum(path); + fs.close(); + fs = getHttpFileSystem(); + FileChecksum httpChecksum = fs.getFileChecksum(path); + fs.close(); + Assert.assertEquals(httpChecksum.getAlgorithmName(), hdfsChecksum.getAlgorithmName()); + Assert.assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength()); + Assert.assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes()); + } + + private void testContentSummary() throws Exception { + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + Path path = new Path(TestHdfsHelper.getHdfsTestDir(), "foo.txt"); + OutputStream os = fs.create(path); + os.write(1); + 
os.close(); + ContentSummary hdfsContentSummary = fs.getContentSummary(path); + fs.close(); + fs = getHttpFileSystem(); + ContentSummary httpContentSummary = fs.getContentSummary(path); + fs.close(); + Assert.assertEquals(httpContentSummary.getDirectoryCount(), hdfsContentSummary.getDirectoryCount()); + Assert.assertEquals(httpContentSummary.getFileCount(), hdfsContentSummary.getFileCount()); + Assert.assertEquals(httpContentSummary.getLength(), hdfsContentSummary.getLength()); + Assert.assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota()); + Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed()); + Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota()); + } + + protected enum Operation { + GET, OPEN, CREATE, APPEND, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS, + SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY + } + + private void operation(Operation op) throws Exception { + switch (op) { + case GET: + testGet(); + break; + case OPEN: + testOpen(); + break; + case CREATE: + testCreate(); + break; + case APPEND: + testAppend(); + break; + case RENAME: + testRename(); + break; + case DELETE: + testDelete(); + break; + case LIST_STATUS: + testListStatus(); + break; + case WORKING_DIRECTORY: + testWorkingdirectory(); + break; + case MKDIRS: + testMkdirs(); + break; + case SET_TIMES: + testSetTimes(); + break; + case SET_PERMISSION: + testSetPermission(); + break; + case SET_OWNER: + testSetOwner(); + break; + case SET_REPLICATION: + testSetReplication(); + break; + case CHECKSUM: + testChecksum(); + break; + case CONTENT_SUMMARY: + testContentSummary(); + break; + } + } + + @Parameterized.Parameters + public static Collection operations() { + Object[][] ops = new Object[Operation.values().length][]; + for (int i = 0; i < Operation.values().length; i++) { + ops[i] = new Object[]{Operation.values()[i]}; + } + return Arrays.asList(ops); + } + + private Operation operation; + + public TestHttpFSFileSystem(Operation operation) { + this.operation = operation; + } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testOperation() throws Exception { + createHttpFSServer(); + operation(operation); + } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testOperationDoAs() throws Exception { + createHttpFSServer(); + UserGroupInformation ugi = UserGroupInformation.createProxyUser(HadoopUsersConfTestHelper.getHadoopUsers()[0], + UserGroupInformation.getCurrentUser()); + ugi.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + operation(operation); + return null; + } + }); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java new file mode 100644 index 0000000000..7c5b94c7c7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestWebhdfsFileSystem.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.http.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.test.TestJettyHelper; +import org.junit.Assert; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.net.URI; + +@RunWith(value = Parameterized.class) +public class TestWebhdfsFileSystem extends TestHttpFSFileSystem { + + public TestWebhdfsFileSystem(TestHttpFSFileSystem.Operation operation) { + super(operation); + } + + @Override + protected FileSystem getHttpFileSystem() throws Exception { + Configuration conf = new Configuration(); + conf.set("fs.webhdfs.impl", WebHdfsFileSystem.class.getName()); + URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority()); + return FileSystem.get(uri, conf); + } + + @Override + protected void testGet() throws Exception { + FileSystem fs = getHttpFileSystem(); + Assert.assertNotNull(fs); + URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority()); + Assert.assertEquals(fs.getUri(), uri); + fs.close(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java new file mode 100644 index 0000000000..3247c14fb4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java @@ -0,0 +1,164 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs.http.server; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.HFSTestCase; +import org.apache.hadoop.test.HadoopUsersConfTestHelper; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.apache.hadoop.test.TestHdfs; +import org.apache.hadoop.test.TestHdfsHelper; +import org.apache.hadoop.test.TestJetty; +import org.apache.hadoop.test.TestJettyHelper; +import org.junit.Test; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.webapp.WebAppContext; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileOutputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.net.URL; +import java.text.MessageFormat; + +public class TestHttpFSServer extends HFSTestCase { + + @Test + @TestDir + @TestJetty + public void server() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration hoopConf = new Configuration(false); + HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, hoopConf); + server.init(); + server.destroy(); + } + + private void createHttpFSServer() throws Exception { + File homeDir = TestDirHelper.getTestDir(); + Assert.assertTrue(new File(homeDir, "conf").mkdir()); + Assert.assertTrue(new File(homeDir, "log").mkdir()); + Assert.assertTrue(new File(homeDir, "temp").mkdir()); + HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath()); + + String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name"); + Configuration conf = new Configuration(false); + conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName); + File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml"); + OutputStream os = new FileOutputStream(hoopSite); + conf.writeXml(os); + os.close(); + + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + URL url = cl.getResource("webapp"); + WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs"); + Server server = TestJettyHelper.getJettyServer(); + server.addHandler(context); + server.start(); + } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void instrumentation() throws Exception { + createHttpFSServer(); + + URL url = new URL(TestJettyHelper.getJettyURL(), + MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody")); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED); + + url = new URL(TestJettyHelper.getJettyURL(), + MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "root")); + conn = (HttpURLConnection) url.openConnection(); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); + String line = reader.readLine(); + reader.close(); + Assert.assertTrue(line.contains("\"counters\":{")); + + url = new URL(TestJettyHelper.getJettyURL(), + MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation", "root")); + conn = (HttpURLConnection) url.openConnection(); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST); + } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testHdfsAccess() throws Exception { + createHttpFSServer(); + + String user = 
HadoopUsersConfTestHelper.getHadoopUsers()[0]; + URL url = new URL(TestJettyHelper.getJettyURL(), + MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user)); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); + reader.readLine(); + reader.close(); + } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testGlobFilter() throws Exception { + createHttpFSServer(); + + FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); + fs.mkdirs(new Path("/tmp")); + fs.create(new Path("/tmp/foo.txt")).close(); + + String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + URL url = new URL(TestJettyHelper.getJettyURL(), + MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user)); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); + reader.readLine(); + reader.close(); + } + + @Test + @TestDir + @TestJetty + @TestHdfs + public void testPutNoOperation() throws Exception { + createHttpFSServer(); + + String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; + URL url = new URL(TestJettyHelper.getJettyURL(), + MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user)); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setDoInput(true); + conn.setDoOutput(true); + conn.setRequestMethod("PUT"); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java new file mode 100644 index 0000000000..6079cf256f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestRunnableCallable.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.lang; + + +import junit.framework.Assert; +import org.apache.hadoop.test.HTestCase; +import org.junit.Test; + +import java.util.concurrent.Callable; + +public class TestRunnableCallable extends HTestCase { + + public static class R implements Runnable { + boolean RUN; + + @Override + public void run() { + RUN = true; + } + } + + public static class C implements Callable { + boolean RUN; + + @Override + public Object call() throws Exception { + RUN = true; + return null; + } + } + + public static class CEx implements Callable { + + @Override + public Object call() throws Exception { + throw new Exception(); + } + } + + @Test + public void runnable() throws Exception { + R r = new R(); + RunnableCallable rc = new RunnableCallable(r); + rc.run(); + Assert.assertTrue(r.RUN); + + r = new R(); + rc = new RunnableCallable(r); + rc.call(); + Assert.assertTrue(r.RUN); + + Assert.assertEquals(rc.toString(), "R"); + } + + @Test + public void callable() throws Exception { + C c = new C(); + RunnableCallable rc = new RunnableCallable(c); + rc.run(); + Assert.assertTrue(c.RUN); + + c = new C(); + rc = new RunnableCallable(c); + rc.call(); + Assert.assertTrue(c.RUN); + + Assert.assertEquals(rc.toString(), "C"); + } + + @Test(expected = RuntimeException.class) + public void callableExRun() throws Exception { + CEx c = new CEx(); + RunnableCallable rc = new RunnableCallable(c); + rc.run(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java new file mode 100644 index 0000000000..0feca3044b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.lang; + + +import junit.framework.Assert; +import org.apache.hadoop.test.HTestCase; +import org.junit.Test; + +public class TestXException extends HTestCase { + + public static enum TestERROR implements XException.ERROR { + TC; + + @Override + public String getTemplate() { + return "{0}"; + } + } + + @Test + public void testXException() throws Exception { + XException ex = new XException(TestERROR.TC); + Assert.assertEquals(ex.getError(), TestERROR.TC); + Assert.assertEquals(ex.getMessage(), "TC: {0}"); + Assert.assertNull(ex.getCause()); + + ex = new XException(TestERROR.TC, "msg"); + Assert.assertEquals(ex.getError(), TestERROR.TC); + Assert.assertEquals(ex.getMessage(), "TC: msg"); + Assert.assertNull(ex.getCause()); + + Exception cause = new Exception(); + ex = new XException(TestERROR.TC, cause); + Assert.assertEquals(ex.getError(), TestERROR.TC); + Assert.assertEquals(ex.getMessage(), "TC: " + cause.toString()); + Assert.assertEquals(ex.getCause(), cause); + + XException xcause = ex; + ex = new XException(xcause); + Assert.assertEquals(ex.getError(), TestERROR.TC); + Assert.assertEquals(ex.getMessage(), xcause.getMessage()); + Assert.assertEquals(ex.getCause(), xcause); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java new file mode 100644 index 0000000000..037fd63a4c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestBaseService.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.server; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.test.HTestCase; +import org.junit.Test; +import org.mockito.Mockito; + +public class TestBaseService extends HTestCase { + + public static class MyService extends BaseService { + static Boolean INIT; + + public MyService() { + super("myservice"); + } + + @Override + protected void init() throws ServiceException { + INIT = true; + } + + @Override + public Class getInterface() { + return null; + } + } + + @Test + public void baseService() throws Exception { + BaseService service = new MyService(); + Assert.assertNull(service.getInterface()); + Assert.assertEquals(service.getPrefix(), "myservice"); + Assert.assertEquals(service.getServiceDependencies().length, 0); + + Server server = Mockito.mock(Server.class); + Configuration conf = new Configuration(false); + conf.set("server.myservice.foo", "FOO"); + conf.set("server.myservice1.bar", "BAR"); + Mockito.when(server.getConfig()).thenReturn(conf); + Mockito.when(server.getPrefixedName("myservice.foo")).thenReturn("server.myservice.foo"); + Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice."); + + service.init(server); + Assert.assertEquals(service.getPrefixedName("foo"), "server.myservice.foo"); + Assert.assertEquals(service.getServiceConfig().size(), 1); + Assert.assertEquals(service.getServiceConfig().get("foo"), "FOO"); + Assert.assertTrue(MyService.INIT); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java new file mode 100644 index 0000000000..efd366b22d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServer.java @@ -0,0 +1,790 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.server; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.lib.lang.XException; +import org.apache.hadoop.test.HTestCase; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.apache.hadoop.test.TestException; +import org.apache.hadoop.util.StringUtils; +import org.junit.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.Writer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class TestServer extends HTestCase { + + @Test + @TestDir + public void constructorsGetters() throws Exception { + Server server = new Server("server", "/a", "/b", "/c", "/d", new Configuration(false)); + Assert.assertEquals(server.getHomeDir(), "/a"); + Assert.assertEquals(server.getConfigDir(), "/b"); + Assert.assertEquals(server.getLogDir(), "/c"); + Assert.assertEquals(server.getTempDir(), "/d"); + Assert.assertEquals(server.getName(), "server"); + Assert.assertEquals(server.getPrefix(), "server"); + Assert.assertEquals(server.getPrefixedName("name"), "server.name"); + Assert.assertNotNull(server.getConfig()); + + server = new Server("server", "/a", "/b", "/c", "/d"); + Assert.assertEquals(server.getHomeDir(), "/a"); + Assert.assertEquals(server.getConfigDir(), "/b"); + Assert.assertEquals(server.getLogDir(), "/c"); + Assert.assertEquals(server.getTempDir(), "/d"); + Assert.assertEquals(server.getName(), "server"); + Assert.assertEquals(server.getPrefix(), "server"); + Assert.assertEquals(server.getPrefixedName("name"), "server.name"); + Assert.assertNull(server.getConfig()); + + server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false)); + Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath()); + Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf"); + Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log"); + Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp"); + Assert.assertEquals(server.getName(), "server"); + Assert.assertEquals(server.getPrefix(), "server"); + Assert.assertEquals(server.getPrefixedName("name"), "server.name"); + Assert.assertNotNull(server.getConfig()); + + server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath()); + Assert.assertEquals(server.getHomeDir(), TestDirHelper.getTestDir().getAbsolutePath()); + Assert.assertEquals(server.getConfigDir(), TestDirHelper.getTestDir() + "/conf"); + Assert.assertEquals(server.getLogDir(), TestDirHelper.getTestDir() + "/log"); + Assert.assertEquals(server.getTempDir(), TestDirHelper.getTestDir() + "/temp"); + Assert.assertEquals(server.getName(), "server"); + Assert.assertEquals(server.getPrefix(), "server"); + Assert.assertEquals(server.getPrefixedName("name"), "server.name"); + Assert.assertNull(server.getConfig()); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S01.*") + @TestDir + public void initNoHomeDir() throws Exception { + File homeDir = new File(TestDirHelper.getTestDir(), "home"); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = new Server("server", homeDir.getAbsolutePath(), conf); + server.init(); + } + + @Test + @TestException(exception = 
ServerException.class, msgRegExp = "S02.*") + @TestDir + public void initHomeDirNotDir() throws Exception { + File homeDir = new File(TestDirHelper.getTestDir(), "home"); + new FileOutputStream(homeDir).close(); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = new Server("server", homeDir.getAbsolutePath(), conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S01.*") + @TestDir + public void initNoConfigDir() throws Exception { + File homeDir = new File(TestDirHelper.getTestDir(), "home"); + Assert.assertTrue(homeDir.mkdir()); + Assert.assertTrue(new File(homeDir, "log").mkdir()); + Assert.assertTrue(new File(homeDir, "temp").mkdir()); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = new Server("server", homeDir.getAbsolutePath(), conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S02.*") + @TestDir + public void initConfigDirNotDir() throws Exception { + File homeDir = new File(TestDirHelper.getTestDir(), "home"); + Assert.assertTrue(homeDir.mkdir()); + Assert.assertTrue(new File(homeDir, "log").mkdir()); + Assert.assertTrue(new File(homeDir, "temp").mkdir()); + File configDir = new File(homeDir, "conf"); + new FileOutputStream(configDir).close(); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = new Server("server", homeDir.getAbsolutePath(), conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S01.*") + @TestDir + public void initNoLogDir() throws Exception { + File homeDir = new File(TestDirHelper.getTestDir(), "home"); + Assert.assertTrue(homeDir.mkdir()); + Assert.assertTrue(new File(homeDir, "conf").mkdir()); + Assert.assertTrue(new File(homeDir, "temp").mkdir()); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = new Server("server", homeDir.getAbsolutePath(), conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S02.*") + @TestDir + public void initLogDirNotDir() throws Exception { + File homeDir = new File(TestDirHelper.getTestDir(), "home"); + Assert.assertTrue(homeDir.mkdir()); + Assert.assertTrue(new File(homeDir, "conf").mkdir()); + Assert.assertTrue(new File(homeDir, "temp").mkdir()); + File logDir = new File(homeDir, "log"); + new FileOutputStream(logDir).close(); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = new Server("server", homeDir.getAbsolutePath(), conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S01.*") + @TestDir + public void initNoTempDir() throws Exception { + File homeDir = new File(TestDirHelper.getTestDir(), "home"); + Assert.assertTrue(homeDir.mkdir()); + Assert.assertTrue(new File(homeDir, "conf").mkdir()); + Assert.assertTrue(new File(homeDir, "log").mkdir()); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = new Server("server", homeDir.getAbsolutePath(), conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S02.*") + @TestDir + public void initTempDirNotDir() throws Exception { + File homeDir = 
new File(TestDirHelper.getTestDir(), "home"); + Assert.assertTrue(homeDir.mkdir()); + Assert.assertTrue(new File(homeDir, "conf").mkdir()); + Assert.assertTrue(new File(homeDir, "log").mkdir()); + File tempDir = new File(homeDir, "temp"); + new FileOutputStream(tempDir).close(); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = new Server("server", homeDir.getAbsolutePath(), conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S05.*") + @TestDir + public void siteFileNotAFile() throws Exception { + String homeDir = TestDirHelper.getTestDir().getAbsolutePath(); + File siteFile = new File(homeDir, "server-site.xml"); + Assert.assertTrue(siteFile.mkdir()); + Server server = new Server("server", homeDir, homeDir, homeDir, homeDir); + server.init(); + } + + private Server createServer(Configuration conf) { + return new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), + TestDirHelper.getTestDir().getAbsolutePath(), + TestDirHelper.getTestDir().getAbsolutePath(), TestDirHelper.getTestDir().getAbsolutePath(), conf); + } + + @Test + @TestDir + public void log4jFile() throws Exception { + InputStream is = Server.getResource("default-log4j.properties"); + OutputStream os = new FileOutputStream(new File(TestDirHelper.getTestDir(), "server-log4j.properties")); + IOUtils.copyBytes(is, os, 1024, true); + Configuration conf = new Configuration(false); + Server server = createServer(conf); + server.init(); + } + + public static class LifeCycleService extends BaseService { + + public LifeCycleService() { + super("lifecycle"); + } + + @Override + protected void init() throws ServiceException { + Assert.assertEquals(getServer().getStatus(), Server.Status.BOOTING); + } + + @Override + public void destroy() { + Assert.assertEquals(getServer().getStatus(), Server.Status.SHUTTING_DOWN); + super.destroy(); + } + + @Override + public Class getInterface() { + return LifeCycleService.class; + } + } + + @Test + @TestDir + public void lifeCycle() throws Exception { + Configuration conf = new Configuration(false); + conf.set("server.services", LifeCycleService.class.getName()); + Server server = createServer(conf); + Assert.assertEquals(server.getStatus(), Server.Status.UNDEF); + server.init(); + Assert.assertNotNull(server.get(LifeCycleService.class)); + Assert.assertEquals(server.getStatus(), Server.Status.NORMAL); + server.destroy(); + Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN); + } + + @Test + @TestDir + public void startWithStatusNotNormal() throws Exception { + Configuration conf = new Configuration(false); + conf.set("server.startup.status", "ADMIN"); + Server server = createServer(conf); + server.init(); + Assert.assertEquals(server.getStatus(), Server.Status.ADMIN); + server.destroy(); + } + + @Test(expected = IllegalArgumentException.class) + @TestDir + public void nonSeteableStatus() throws Exception { + Configuration conf = new Configuration(false); + Server server = createServer(conf); + server.init(); + server.setStatus(Server.Status.SHUTDOWN); + } + + public static class TestService implements Service { + static List LIFECYCLE = new ArrayList(); + + @Override + public void init(Server server) throws ServiceException { + LIFECYCLE.add("init"); + } + + @Override + public void postInit() throws ServiceException { + LIFECYCLE.add("postInit"); + } + + @Override + public void destroy() { + LIFECYCLE.add("destroy"); + } + + @Override + public Class[] 
getServiceDependencies() { + return new Class[0]; + } + + @Override + public Class getInterface() { + return TestService.class; + } + + @Override + public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException { + LIFECYCLE.add("serverStatusChange"); + } + } + + public static class TestServiceExceptionOnStatusChange extends TestService { + + @Override + public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException { + throw new RuntimeException(); + } + } + + @Test + @TestDir + public void changeStatus() throws Exception { + TestService.LIFECYCLE.clear(); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = createServer(conf); + server.init(); + server.setStatus(Server.Status.ADMIN); + Assert.assertTrue(TestService.LIFECYCLE.contains("serverStatusChange")); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S11.*") + @TestDir + public void changeStatusServiceException() throws Exception { + TestService.LIFECYCLE.clear(); + Configuration conf = new Configuration(false); + conf.set("server.services", TestServiceExceptionOnStatusChange.class.getName()); + Server server = createServer(conf); + server.init(); + } + + @Test + @TestDir + public void setSameStatus() throws Exception { + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = createServer(conf); + server.init(); + TestService.LIFECYCLE.clear(); + server.setStatus(server.getStatus()); + Assert.assertFalse(TestService.LIFECYCLE.contains("serverStatusChange")); + } + + @Test + @TestDir + public void serviceLifeCycle() throws Exception { + TestService.LIFECYCLE.clear(); + Configuration conf = new Configuration(false); + conf.set("server.services", TestService.class.getName()); + Server server = createServer(conf); + server.init(); + Assert.assertNotNull(server.get(TestService.class)); + server.destroy(); + Assert.assertEquals(TestService.LIFECYCLE, Arrays.asList("init", "postInit", "serverStatusChange", "destroy")); + } + + @Test + @TestDir + public void loadingDefaultConfig() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Server server = new Server("testserver", dir, dir, dir, dir); + server.init(); + Assert.assertEquals(server.getConfig().get("testserver.a"), "default"); + } + + @Test + @TestDir + public void loadingSiteConfig() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + File configFile = new File(dir, "testserver-site.xml"); + Writer w = new FileWriter(configFile); + w.write("testserver.asite"); + w.close(); + Server server = new Server("testserver", dir, dir, dir, dir); + server.init(); + Assert.assertEquals(server.getConfig().get("testserver.a"), "site"); + } + + @Test + @TestDir + public void loadingSysPropConfig() throws Exception { + try { + System.setProperty("testserver.a", "sysprop"); + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + File configFile = new File(dir, "testserver-site.xml"); + Writer w = new FileWriter(configFile); + w.write("testserver.asite"); + w.close(); + Server server = new Server("testserver", dir, dir, dir, dir); + server.init(); + Assert.assertEquals(server.getConfig().get("testserver.a"), "sysprop"); + } finally { + System.getProperties().remove("testserver.a"); + } + } + + @Test(expected = IllegalStateException.class) + @TestDir + public void illegalState1() 
throws Exception { + Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false)); + server.destroy(); + } + + @Test(expected = IllegalStateException.class) + @TestDir + public void illegalState2() throws Exception { + Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false)); + server.get(Object.class); + } + + @Test(expected = IllegalStateException.class) + @TestDir + public void illegalState3() throws Exception { + Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false)); + server.setService(null); + } + + @Test(expected = IllegalStateException.class) + @TestDir + public void illegalState4() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Server server = new Server("server", dir, dir, dir, dir, new Configuration(false)); + server.init(); + server.init(); + } + + private static List ORDER = new ArrayList(); + + public abstract static class MyService implements Service, XException.ERROR { + private String id; + private Class serviceInterface; + private Class[] dependencies; + private boolean failOnInit; + private boolean failOnDestroy; + + protected MyService(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit, + boolean failOnDestroy) { + this.id = id; + this.serviceInterface = serviceInterface; + this.dependencies = dependencies; + this.failOnInit = failOnInit; + this.failOnDestroy = failOnDestroy; + } + + + @Override + public void init(Server server) throws ServiceException { + ORDER.add(id + ".init"); + if (failOnInit) { + throw new ServiceException(this); + } + } + + @Override + public void postInit() throws ServiceException { + ORDER.add(id + ".postInit"); + } + + @Override + public String getTemplate() { + return ""; + } + + @Override + public void destroy() { + ORDER.add(id + ".destroy"); + if (failOnDestroy) { + throw new RuntimeException(); + } + } + + @Override + public Class[] getServiceDependencies() { + return dependencies; + } + + @Override + public Class getInterface() { + return serviceInterface; + } + + @Override + public void serverStatusChange(Server.Status oldStatus, Server.Status newStatus) throws ServiceException { + } + } + + public static class MyService1 extends MyService { + + public MyService1() { + super("s1", MyService1.class, null, false, false); + } + + protected MyService1(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit, + boolean failOnDestroy) { + super(id, serviceInterface, dependencies, failOnInit, failOnDestroy); + } + } + + public static class MyService2 extends MyService { + public MyService2() { + super("s2", MyService2.class, null, true, false); + } + } + + + public static class MyService3 extends MyService { + public MyService3() { + super("s3", MyService3.class, null, false, false); + } + } + + public static class MyService1a extends MyService1 { + public MyService1a() { + super("s1a", MyService1.class, null, false, false); + } + } + + public static class MyService4 extends MyService1 { + + public MyService4() { + super("s4a", String.class, null, false, false); + } + } + + public static class MyService5 extends MyService { + + public MyService5() { + super("s5", MyService5.class, null, false, true); + } + + protected MyService5(String id, Class serviceInterface, Class[] dependencies, boolean failOnInit, + boolean failOnDestroy) { + super(id, serviceInterface, dependencies, failOnInit, failOnDestroy); + } + } + + 
public static class MyService5a extends MyService5 { + + public MyService5a() { + super("s5a", MyService5.class, null, false, false); + } + } + + public static class MyService6 extends MyService { + + public MyService6() { + super("s6", MyService6.class, new Class[]{MyService1.class}, false, false); + } + } + + public static class MyService7 extends MyService { + + @SuppressWarnings({"UnusedParameters"}) + public MyService7(String foo) { + super("s6", MyService7.class, new Class[]{MyService1.class}, false, false); + } + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S08.*") + @TestDir + public void invalidSservice() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", "foo"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S07.*") + @TestDir + public void serviceWithNoDefaultConstructor() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", MyService7.class.getName()); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S04.*") + @TestDir + public void serviceNotImplementingServiceInterface() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", MyService4.class.getName()); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestException(exception = ServerException.class, msgRegExp = "S10.*") + @TestDir + public void serviceWithMissingDependency() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + String services = StringUtils.join(",", Arrays.asList(MyService3.class.getName(), MyService6.class.getName()) + ); + conf.set("server.services", services); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestDir + public void services() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf; + Server server; + + // no services + ORDER.clear(); + conf = new Configuration(false); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + Assert.assertEquals(ORDER.size(), 0); + + // 2 services init/destroy + ORDER.clear(); + String services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName()) + ); + conf = new Configuration(false); + conf.set("server.services", services); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class); + Assert.assertEquals(server.get(MyService3.class).getInterface(), MyService3.class); + Assert.assertEquals(ORDER.size(), 4); + Assert.assertEquals(ORDER.get(0), "s1.init"); + Assert.assertEquals(ORDER.get(1), "s3.init"); + Assert.assertEquals(ORDER.get(2), "s1.postInit"); + Assert.assertEquals(ORDER.get(3), "s3.postInit"); + server.destroy(); + Assert.assertEquals(ORDER.size(), 6); + Assert.assertEquals(ORDER.get(4), "s3.destroy"); + Assert.assertEquals(ORDER.get(5), "s1.destroy"); + + // 3 services, 2nd one fails on init + 
ORDER.clear(); + services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService2.class.getName(), + MyService3.class.getName())); + conf = new Configuration(false); + conf.set("server.services", services); + + server = new Server("server", dir, dir, dir, dir, conf); + try { + server.init(); + Assert.fail(); + } catch (ServerException ex) { + Assert.assertEquals(MyService2.class, ex.getError().getClass()); + } catch (Exception ex) { + Assert.fail(); + } + Assert.assertEquals(ORDER.size(), 3); + Assert.assertEquals(ORDER.get(0), "s1.init"); + Assert.assertEquals(ORDER.get(1), "s2.init"); + Assert.assertEquals(ORDER.get(2), "s1.destroy"); + + // 2 services one fails on destroy + ORDER.clear(); + services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService5.class.getName())); + conf = new Configuration(false); + conf.set("server.services", services); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + Assert.assertEquals(ORDER.size(), 4); + Assert.assertEquals(ORDER.get(0), "s1.init"); + Assert.assertEquals(ORDER.get(1), "s5.init"); + Assert.assertEquals(ORDER.get(2), "s1.postInit"); + Assert.assertEquals(ORDER.get(3), "s5.postInit"); + server.destroy(); + Assert.assertEquals(ORDER.size(), 6); + Assert.assertEquals(ORDER.get(4), "s5.destroy"); + Assert.assertEquals(ORDER.get(5), "s1.destroy"); + + + // service override via ext + ORDER.clear(); + services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName())); + String servicesExt = StringUtils.join(",", Arrays.asList(MyService1a.class.getName())); + + conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.services.ext", servicesExt); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + + Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class); + Assert.assertEquals(ORDER.size(), 4); + Assert.assertEquals(ORDER.get(0), "s1a.init"); + Assert.assertEquals(ORDER.get(1), "s3.init"); + Assert.assertEquals(ORDER.get(2), "s1a.postInit"); + Assert.assertEquals(ORDER.get(3), "s3.postInit"); + server.destroy(); + Assert.assertEquals(ORDER.size(), 6); + Assert.assertEquals(ORDER.get(4), "s3.destroy"); + Assert.assertEquals(ORDER.get(5), "s1a.destroy"); + + // service override via setService + ORDER.clear(); + services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName())); + conf = new Configuration(false); + conf.set("server.services", services); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + + server.setService(MyService1a.class); + Assert.assertEquals(ORDER.size(), 6); + Assert.assertEquals(ORDER.get(4), "s1.destroy"); + Assert.assertEquals(ORDER.get(5), "s1a.init"); + + Assert.assertEquals(server.get(MyService1.class).getClass(), MyService1a.class); + + server.destroy(); + Assert.assertEquals(ORDER.size(), 8); + Assert.assertEquals(ORDER.get(6), "s3.destroy"); + Assert.assertEquals(ORDER.get(7), "s1a.destroy"); + + // service add via setService + ORDER.clear(); + services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName())); + conf = new Configuration(false); + conf.set("server.services", services); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + + server.setService(MyService5.class); + Assert.assertEquals(ORDER.size(), 5); + Assert.assertEquals(ORDER.get(4), "s5.init"); + + 
Assert.assertEquals(server.get(MyService5.class).getClass(), MyService5.class); + + server.destroy(); + Assert.assertEquals(ORDER.size(), 8); + Assert.assertEquals(ORDER.get(5), "s5.destroy"); + Assert.assertEquals(ORDER.get(6), "s3.destroy"); + Assert.assertEquals(ORDER.get(7), "s1.destroy"); + + // service add via setService exception + ORDER.clear(); + services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService3.class.getName())); + conf = new Configuration(false); + conf.set("server.services", services); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + try { + server.setService(MyService7.class); + Assert.fail(); + } catch (ServerException ex) { + Assert.assertEquals(ServerException.ERROR.S09, ex.getError()); + } catch (Exception ex) { + Assert.fail(); + } + Assert.assertEquals(ORDER.size(), 6); + Assert.assertEquals(ORDER.get(4), "s3.destroy"); + Assert.assertEquals(ORDER.get(5), "s1.destroy"); + + // service with dependency + ORDER.clear(); + services = StringUtils.join(",", Arrays.asList(MyService1.class.getName(), MyService6.class.getName())); + conf = new Configuration(false); + conf.set("server.services", services); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + Assert.assertEquals(server.get(MyService1.class).getInterface(), MyService1.class); + Assert.assertEquals(server.get(MyService6.class).getInterface(), MyService6.class); + server.destroy(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java new file mode 100644 index 0000000000..72913eebb5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/server/TestServerConstructor.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.test.HTestCase; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.Arrays; +import java.util.Collection; + +@RunWith(value = Parameterized.class) +public class TestServerConstructor extends HTestCase { + + @Parameterized.Parameters + public static Collection constructorFailParams() { + return Arrays.asList(new Object[][]{ + {null, null, null, null, null, null}, + {"", null, null, null, null, null}, + {null, null, null, null, null, null}, + {"server", null, null, null, null, null}, + {"server", "", null, null, null, null}, + {"server", "foo", null, null, null, null}, + {"server", "/tmp", null, null, null, null}, + {"server", "/tmp", "", null, null, null}, + {"server", "/tmp", "foo", null, null, null}, + {"server", "/tmp", "/tmp", null, null, null}, + {"server", "/tmp", "/tmp", "", null, null}, + {"server", "/tmp", "/tmp", "foo", null, null}, + {"server", "/tmp", "/tmp", "/tmp", null, null}, + {"server", "/tmp", "/tmp", "/tmp", "", null}, + {"server", "/tmp", "/tmp", "/tmp", "foo", null}}); + } + + private String name; + private String homeDir; + private String configDir; + private String logDir; + private String tempDir; + private Configuration conf; + + public TestServerConstructor(String name, String homeDir, String configDir, String logDir, String tempDir, + Configuration conf) { + this.name = name; + this.homeDir = homeDir; + this.configDir = configDir; + this.logDir = logDir; + this.tempDir = tempDir; + this.conf = conf; + } + + + @Test(expected = IllegalArgumentException.class) + public void constructorFail() { + new Server(name, homeDir, configDir, logDir, tempDir, conf); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java new file mode 100644 index 0000000000..84ff45a165 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/hadoop/TestFileSystemAccessService.java @@ -0,0 +1,306 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.service.hadoop; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.lib.server.Server; +import org.apache.hadoop.lib.server.ServiceException; +import org.apache.hadoop.lib.service.FileSystemAccess; +import org.apache.hadoop.lib.service.FileSystemAccessException; +import org.apache.hadoop.lib.service.instrumentation.InstrumentationService; +import org.apache.hadoop.test.HFSTestCase; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.apache.hadoop.test.TestException; +import org.apache.hadoop.test.TestHdfs; +import org.apache.hadoop.test.TestHdfsHelper; +import org.apache.hadoop.util.StringUtils; +import org.junit.Test; + +import java.io.IOException; +import java.util.Arrays; + +public class TestFileSystemAccessService extends HFSTestCase { + + @Test + @TestDir + public void simpleSecurity() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + Assert.assertNotNull(server.get(FileSystemAccess.class)); + server.destroy(); + } + + @Test + @TestException(exception = ServiceException.class, msgRegExp = "H01.*") + @TestDir + public void noKerberosKeytabProperty() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.hadoop.authentication.type", "kerberos"); + conf.set("server.hadoop.authentication.kerberos.keytab", " "); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestException(exception = ServiceException.class, msgRegExp = "H01.*") + @TestDir + public void noKerberosPrincipalProperty() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.hadoop.authentication.type", "kerberos"); + conf.set("server.hadoop.authentication.kerberos.keytab", "/tmp/foo"); + conf.set("server.hadoop.authentication.kerberos.principal", " "); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestException(exception = ServiceException.class, msgRegExp = "H02.*") + @TestDir + public void kerberosInitializationFailure() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.hadoop.authentication.type", "kerberos"); + conf.set("server.hadoop.authentication.kerberos.keytab", "/tmp/foo"); + conf.set("server.hadoop.authentication.kerberos.principal", "foo@FOO"); + 
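+ // Keytab and principal are set but unusable, so server.init() should fail inside
+ // FileSystemAccessService with error H02, matching the @TestException regexp above.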
Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestException(exception = ServiceException.class, msgRegExp = "H09.*") + @TestDir + public void invalidSecurity() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.hadoop.authentication.type", "foo"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestDir + public void serviceHadoopConf() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.hadoop.conf:foo", "FOO"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class); + Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO"); + server.destroy(); + } + + @Test + @TestDir + public void inWhitelists() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class); + fsAccess.validateNamenode("NN"); + server.destroy(); + + conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.hadoop.name.node.whitelist", "*"); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class); + fsAccess.validateNamenode("NN"); + server.destroy(); + + conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.hadoop.name.node.whitelist", "NN"); + server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class); + fsAccess.validateNamenode("NN"); + server.destroy(); + } + + @Test + @TestException(exception = FileSystemAccessException.class, msgRegExp = "H05.*") + @TestDir + public void NameNodeNotinWhitelists() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + conf.set("server.hadoop.name.node.whitelist", "NN"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class); + fsAccess.validateNamenode("NNx"); + } + + @Test + @TestDir + @TestHdfs + public void createFileSystem() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", 
Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + FileSystemAccess hadoop = server.get(FileSystemAccess.class); + FileSystem fs = hadoop.createFileSystem("u", TestHdfsHelper.getHdfsConf()); + Assert.assertNotNull(fs); + fs.mkdirs(new Path("/tmp/foo")); + hadoop.releaseFileSystem(fs); + try { + fs.mkdirs(new Path("/tmp/foo")); + Assert.fail(); + } catch (IOException ex) { + } catch (Exception ex) { + Assert.fail(); + } + server.destroy(); + } + + @Test + @TestDir + @TestHdfs + public void fileSystemExecutor() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + FileSystemAccess hadoop = server.get(FileSystemAccess.class); + + final FileSystem fsa[] = new FileSystem[1]; + + hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor() { + @Override + public Void execute(FileSystem fs) throws IOException { + fs.mkdirs(new Path("/tmp/foo")); + fsa[0] = fs; + return null; + } + }); + try { + fsa[0].mkdirs(new Path("/tmp/foo")); + Assert.fail(); + } catch (IOException ex) { + } catch (Exception ex) { + Assert.fail(); + } + server.destroy(); + } + + @Test + @TestException(exception = FileSystemAccessException.class, msgRegExp = "H06.*") + @TestDir + @TestHdfs + public void fileSystemExecutorNoNameNode() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + FileSystemAccess fsAccess = server.get(FileSystemAccess.class); + + Configuration hdfsConf = TestHdfsHelper.getHdfsConf(); + hdfsConf.set("fs.default.name", ""); + fsAccess.execute("u", hdfsConf, new FileSystemAccess.FileSystemExecutor() { + @Override + public Void execute(FileSystem fs) throws IOException { + return null; + } + }); + } + + @Test + @TestDir + @TestHdfs + public void fileSystemExecutorException() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + FileSystemAccessService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + FileSystemAccess hadoop = server.get(FileSystemAccess.class); + + final FileSystem fsa[] = new FileSystem[1]; + try { + hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor() { + @Override + public Void execute(FileSystem fs) throws IOException { + fsa[0] = fs; + throw new IOException(); + } + }); + Assert.fail(); + } catch (FileSystemAccessException ex) { + Assert.assertEquals(ex.getError(), FileSystemAccessException.ERROR.H03); + } catch (Exception ex) { + Assert.fail(); + } + + try { + 
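+ // After execute() propagated the failure (H03 above), the FileSystem handle captured in
+ // fsa[0] should already be released, so reusing it is expected to raise an IOException.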
fsa[0].mkdirs(new Path("/tmp/foo")); + Assert.fail(); + } catch (IOException ex) { + } catch (Exception ex) { + Assert.fail(); + } + server.destroy(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java new file mode 100644 index 0000000000..7f773c3bf9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/instrumentation/TestInstrumentationService.java @@ -0,0 +1,404 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.service.instrumentation; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.lib.server.Server; +import org.apache.hadoop.lib.service.Instrumentation; +import org.apache.hadoop.lib.service.scheduler.SchedulerService; +import org.apache.hadoop.test.HTestCase; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.apache.hadoop.util.StringUtils; +import org.json.simple.JSONObject; +import org.json.simple.parser.JSONParser; +import org.junit.Test; + +import java.io.StringWriter; +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; + +public class TestInstrumentationService extends HTestCase { + + @Override + protected float getWaitForRatio() { + return 1; + } + + @Test + public void cron() { + InstrumentationService.Cron cron = new InstrumentationService.Cron(); + Assert.assertEquals(cron.start, 0); + Assert.assertEquals(cron.lapStart, 0); + Assert.assertEquals(cron.own, 0); + Assert.assertEquals(cron.total, 0); + long begin = System.currentTimeMillis(); + Assert.assertEquals(cron.start(), cron); + Assert.assertEquals(cron.start(), cron); + Assert.assertEquals(cron.start, begin, 20); + Assert.assertEquals(cron.start, cron.lapStart); + sleep(100); + Assert.assertEquals(cron.stop(), cron); + long end = System.currentTimeMillis(); + long delta = end - begin; + Assert.assertEquals(cron.own, delta, 20); + Assert.assertEquals(cron.total, 0); + Assert.assertEquals(cron.lapStart, 0); + sleep(100); + long reStart = System.currentTimeMillis(); + cron.start(); + Assert.assertEquals(cron.start, begin, 20); + Assert.assertEquals(cron.lapStart, reStart, 20); + sleep(100); + cron.stop(); + long reEnd = System.currentTimeMillis(); + delta += reEnd - reStart; + Assert.assertEquals(cron.own, delta, 20); + Assert.assertEquals(cron.total, 0); + Assert.assertEquals(cron.lapStart, 0); + cron.end(); + Assert.assertEquals(cron.total, reEnd - begin, 20); + + try { + cron.start(); + Assert.fail(); + 
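+ // The Cron was already ended, so start() must throw IllegalStateException (caught below);
+ // reaching fail() would mean it did not.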
} catch (IllegalStateException ex) { + } catch (Exception ex) { + Assert.fail(); + } + + try { + cron.stop(); + Assert.fail(); + } catch (IllegalStateException ex) { + } catch (Exception ex) { + Assert.fail(); + } + } + + @Test + public void timer() throws Exception { + InstrumentationService.Timer timer = new InstrumentationService.Timer(2); + InstrumentationService.Cron cron = new InstrumentationService.Cron(); + + long ownStart; + long ownEnd; + long totalStart; + long totalEnd; + long ownDelta; + long totalDelta; + long avgTotal; + long avgOwn; + + cron.start(); + ownStart = System.currentTimeMillis(); + totalStart = ownStart; + ownDelta = 0; + sleep(100); + + cron.stop(); + ownEnd = System.currentTimeMillis(); + ownDelta += ownEnd - ownStart; + sleep(100); + + cron.start(); + ownStart = System.currentTimeMillis(); + sleep(100); + + cron.stop(); + ownEnd = System.currentTimeMillis(); + ownDelta += ownEnd - ownStart; + totalEnd = ownEnd; + totalDelta = totalEnd - totalStart; + + avgTotal = totalDelta; + avgOwn = ownDelta; + + timer.addCron(cron); + long[] values = timer.getValues(); + Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20); + Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20); + Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20); + Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20); + + cron = new InstrumentationService.Cron(); + + cron.start(); + ownStart = System.currentTimeMillis(); + totalStart = ownStart; + ownDelta = 0; + sleep(200); + + cron.stop(); + ownEnd = System.currentTimeMillis(); + ownDelta += ownEnd - ownStart; + sleep(200); + + cron.start(); + ownStart = System.currentTimeMillis(); + sleep(200); + + cron.stop(); + ownEnd = System.currentTimeMillis(); + ownDelta += ownEnd - ownStart; + totalEnd = ownEnd; + totalDelta = totalEnd - totalStart; + + avgTotal = (avgTotal * 1 + totalDelta) / 2; + avgOwn = (avgOwn * 1 + ownDelta) / 2; + + timer.addCron(cron); + values = timer.getValues(); + Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20); + Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20); + Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20); + Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20); + + avgTotal = totalDelta; + avgOwn = ownDelta; + + cron = new InstrumentationService.Cron(); + + cron.start(); + ownStart = System.currentTimeMillis(); + totalStart = ownStart; + ownDelta = 0; + sleep(300); + + cron.stop(); + ownEnd = System.currentTimeMillis(); + ownDelta += ownEnd - ownStart; + sleep(300); + + cron.start(); + ownStart = System.currentTimeMillis(); + sleep(300); + + cron.stop(); + ownEnd = System.currentTimeMillis(); + ownDelta += ownEnd - ownStart; + totalEnd = ownEnd; + totalDelta = totalEnd - totalStart; + + avgTotal = (avgTotal * 1 + totalDelta) / 2; + avgOwn = (avgOwn * 1 + ownDelta) / 2; + + cron.stop(); + timer.addCron(cron); + values = timer.getValues(); + Assert.assertEquals(values[InstrumentationService.Timer.LAST_TOTAL], totalDelta, 20); + Assert.assertEquals(values[InstrumentationService.Timer.LAST_OWN], ownDelta, 20); + Assert.assertEquals(values[InstrumentationService.Timer.AVG_TOTAL], avgTotal, 20); + Assert.assertEquals(values[InstrumentationService.Timer.AVG_OWN], avgOwn, 20); + + JSONObject json = (JSONObject) new JSONParser().parse(timer.toJSONString()); + Assert.assertEquals(json.size(), 4); 
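+ // The Timer JSON form carries exactly four entries: lastTotal, lastOwn, avgTotal and avgOwn.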
+ Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]); + Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]); + Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]); + Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]); + + StringWriter writer = new StringWriter(); + timer.writeJSONString(writer); + writer.close(); + json = (JSONObject) new JSONParser().parse(writer.toString()); + Assert.assertEquals(json.size(), 4); + Assert.assertEquals(json.get("lastTotal"), values[InstrumentationService.Timer.LAST_TOTAL]); + Assert.assertEquals(json.get("lastOwn"), values[InstrumentationService.Timer.LAST_OWN]); + Assert.assertEquals(json.get("avgTotal"), values[InstrumentationService.Timer.AVG_TOTAL]); + Assert.assertEquals(json.get("avgOwn"), values[InstrumentationService.Timer.AVG_OWN]); + } + + @Test + public void sampler() throws Exception { + final long value[] = new long[1]; + Instrumentation.Variable var = new Instrumentation.Variable() { + @Override + public Long getValue() { + return value[0]; + } + }; + + InstrumentationService.Sampler sampler = new InstrumentationService.Sampler(); + sampler.init(4, var); + Assert.assertEquals(sampler.getRate(), 0f, 0.0001); + sampler.sample(); + Assert.assertEquals(sampler.getRate(), 0f, 0.0001); + value[0] = 1; + sampler.sample(); + Assert.assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001); + value[0] = 2; + sampler.sample(); + Assert.assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001); + value[0] = 3; + sampler.sample(); + Assert.assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001); + value[0] = 4; + sampler.sample(); + Assert.assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001); + + JSONObject json = (JSONObject) new JSONParser().parse(sampler.toJSONString()); + Assert.assertEquals(json.size(), 2); + Assert.assertEquals(json.get("sampler"), sampler.getRate()); + Assert.assertEquals(json.get("size"), 4L); + + StringWriter writer = new StringWriter(); + sampler.writeJSONString(writer); + writer.close(); + json = (JSONObject) new JSONParser().parse(writer.toString()); + Assert.assertEquals(json.size(), 2); + Assert.assertEquals(json.get("sampler"), sampler.getRate()); + Assert.assertEquals(json.get("size"), 4L); + } + + @Test + public void variableHolder() throws Exception { + InstrumentationService.VariableHolder variableHolder = + new InstrumentationService.VariableHolder(); + + variableHolder.var = new Instrumentation.Variable() { + @Override + public String getValue() { + return "foo"; + } + }; + + JSONObject json = (JSONObject) new JSONParser().parse(variableHolder.toJSONString()); + Assert.assertEquals(json.size(), 1); + Assert.assertEquals(json.get("value"), "foo"); + + StringWriter writer = new StringWriter(); + variableHolder.writeJSONString(writer); + writer.close(); + json = (JSONObject) new JSONParser().parse(writer.toString()); + Assert.assertEquals(json.size(), 1); + Assert.assertEquals(json.get("value"), "foo"); + } + + @Test + @TestDir + @SuppressWarnings("unchecked") + public void service() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + + Instrumentation 
instrumentation = server.get(Instrumentation.class); + Assert.assertNotNull(instrumentation); + instrumentation.incr("g", "c", 1); + instrumentation.incr("g", "c", 2); + instrumentation.incr("g", "c1", 2); + + Instrumentation.Cron cron = instrumentation.createCron(); + cron.start(); + sleep(100); + cron.stop(); + instrumentation.addCron("g", "t", cron); + cron = instrumentation.createCron(); + cron.start(); + sleep(200); + cron.stop(); + instrumentation.addCron("g", "t", cron); + + Instrumentation.Variable var = new Instrumentation.Variable() { + @Override + public String getValue() { + return "foo"; + } + }; + instrumentation.addVariable("g", "v", var); + + Instrumentation.Variable varToSample = new Instrumentation.Variable() { + @Override + public Long getValue() { + return 1L; + } + }; + instrumentation.addSampler("g", "s", 10, varToSample); + + Map snapshot = instrumentation.getSnapshot(); + Assert.assertNotNull(snapshot.get("os-env")); + Assert.assertNotNull(snapshot.get("sys-props")); + Assert.assertNotNull(snapshot.get("jvm")); + Assert.assertNotNull(snapshot.get("counters")); + Assert.assertNotNull(snapshot.get("timers")); + Assert.assertNotNull(snapshot.get("variables")); + Assert.assertNotNull(snapshot.get("samplers")); + Assert.assertNotNull(((Map) snapshot.get("os-env")).get("PATH")); + Assert.assertNotNull(((Map) snapshot.get("sys-props")).get("java.version")); + Assert.assertNotNull(((Map) snapshot.get("jvm")).get("free.memory")); + Assert.assertNotNull(((Map) snapshot.get("jvm")).get("max.memory")); + Assert.assertNotNull(((Map) snapshot.get("jvm")).get("total.memory")); + Assert.assertNotNull(((Map>) snapshot.get("counters")).get("g")); + Assert.assertNotNull(((Map>) snapshot.get("timers")).get("g")); + Assert.assertNotNull(((Map>) snapshot.get("variables")).get("g")); + Assert.assertNotNull(((Map>) snapshot.get("samplers")).get("g")); + Assert.assertNotNull(((Map>) snapshot.get("counters")).get("g").get("c")); + Assert.assertNotNull(((Map>) snapshot.get("counters")).get("g").get("c1")); + Assert.assertNotNull(((Map>) snapshot.get("timers")).get("g").get("t")); + Assert.assertNotNull(((Map>) snapshot.get("variables")).get("g").get("v")); + Assert.assertNotNull(((Map>) snapshot.get("samplers")).get("g").get("s")); + + StringWriter writer = new StringWriter(); + JSONObject.writeJSONString(snapshot, writer); + writer.close(); + server.destroy(); + } + + @Test + @TestDir + @SuppressWarnings("unchecked") + public void sampling() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + SchedulerService.class.getName())); + Configuration conf = new Configuration(false); + conf.set("server.services", services); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + Instrumentation instrumentation = server.get(Instrumentation.class); + + final AtomicInteger count = new AtomicInteger(); + + Instrumentation.Variable varToSample = new Instrumentation.Variable() { + @Override + public Long getValue() { + return (long) count.incrementAndGet(); + } + }; + instrumentation.addSampler("g", "s", 10, varToSample); + + sleep(2000); + int i = count.get(); + Assert.assertTrue(i > 0); + + Map> snapshot = instrumentation.getSnapshot(); + Map> samplers = (Map>) snapshot.get("samplers"); + InstrumentationService.Sampler sampler = (InstrumentationService.Sampler) samplers.get("g").get("s"); + Assert.assertTrue(sampler.getRate() > 0); + + 
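+ // The assertions above rely on SchedulerService polling the sampler in the background:
+ // after roughly two seconds both the sampled counter and the computed rate exceed zero.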
server.destroy(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java new file mode 100644 index 0000000000..5e4a982b63 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/scheduler/TestSchedulerService.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.service.scheduler; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.lib.server.Server; +import org.apache.hadoop.lib.service.Scheduler; +import org.apache.hadoop.lib.service.instrumentation.InstrumentationService; +import org.apache.hadoop.test.HTestCase; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.apache.hadoop.util.StringUtils; +import org.junit.Test; + +import java.util.Arrays; + +public class TestSchedulerService extends HTestCase { + + @Test + @TestDir + public void service() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), + SchedulerService.class.getName()))); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + Assert.assertNotNull(server.get(Scheduler.class)); + server.destroy(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java new file mode 100644 index 0000000000..bb4a29cae2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestGroupsService.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.service.security; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.lib.server.Server; +import org.apache.hadoop.lib.service.Groups; +import org.apache.hadoop.test.HTestCase; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.apache.hadoop.util.StringUtils; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +public class TestGroupsService extends HTestCase { + + @Test + @TestDir + public void service() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName()))); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + Groups groups = server.get(Groups.class); + Assert.assertNotNull(groups); + List g = groups.getGroups(System.getProperty("user.name")); + Assert.assertNotSame(g.size(), 0); + server.destroy(); + } + + @Test(expected = RuntimeException.class) + @TestDir + public void invalidGroupsMapping() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName()))); + conf.set("server.groups.hadoop.security.group.mapping", String.class.getName()); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java new file mode 100644 index 0000000000..3d4115e8c7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestProxyUserService.java @@ -0,0 +1,225 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.service.security; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.lib.server.Server; +import org.apache.hadoop.lib.server.ServiceException; +import org.apache.hadoop.lib.service.Groups; +import org.apache.hadoop.lib.service.ProxyUser; +import org.apache.hadoop.test.HTestCase; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.apache.hadoop.test.TestException; +import org.apache.hadoop.util.StringUtils; +import org.junit.Test; + +import java.security.AccessControlException; +import java.util.Arrays; +import java.util.List; + +public class TestProxyUserService extends HTestCase { + + @Test + @TestDir + public void service() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + ProxyUser proxyUser = server.get(ProxyUser.class); + Assert.assertNotNull(proxyUser); + server.destroy(); + } + + @Test + @TestException(exception = ServiceException.class, msgRegExp = "PRXU02.*") + @TestDir + public void wrongConfigGroups() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.hosts", "*"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestException(exception = ServiceException.class, msgRegExp = "PRXU01.*") + @TestDir + public void wrongHost() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.hosts", "otherhost"); + conf.set("server.proxyuser.foo.groups", "*"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestException(exception = ServiceException.class, msgRegExp = "PRXU02.*") + @TestDir + public void wrongConfigHosts() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.groups", "*"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + } + + @Test + @TestDir + public void validateAnyHostAnyUser() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.hosts", "*"); + conf.set("server.proxyuser.foo.groups", "*"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + ProxyUser proxyUser = server.get(ProxyUser.class); + Assert.assertNotNull(proxyUser); + proxyUser.validate("foo", "localhost", "bar"); + server.destroy(); + } + + 
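+ // The remaining cases exercise ProxyUserService purely through configuration, e.g.:
+ //   server.proxyuser.foo.hosts  = "localhost" or "*"
+ //   server.proxyuser.foo.groups = a group name or "*"
+ // validate(proxyUser, host, doAsUser) throws AccessControlException when the proxy user is
+ // not configured, or when the host or the doAs user's group is not allowed.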
@Test(expected = AccessControlException.class) + @TestDir + public void invalidProxyUser() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.hosts", "*"); + conf.set("server.proxyuser.foo.groups", "*"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + ProxyUser proxyUser = server.get(ProxyUser.class); + Assert.assertNotNull(proxyUser); + proxyUser.validate("bar", "localhost", "foo"); + server.destroy(); + } + + @Test + @TestDir + public void validateHost() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.hosts", "localhost"); + conf.set("server.proxyuser.foo.groups", "*"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + ProxyUser proxyUser = server.get(ProxyUser.class); + Assert.assertNotNull(proxyUser); + proxyUser.validate("foo", "localhost", "bar"); + server.destroy(); + } + + private String getGroup() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName()))); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + Groups groups = server.get(Groups.class); + List g = groups.getGroups(System.getProperty("user.name")); + server.destroy(); + return g.get(0); + } + + @Test + @TestDir + public void validateGroup() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.hosts", "*"); + conf.set("server.proxyuser.foo.groups", getGroup()); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + ProxyUser proxyUser = server.get(ProxyUser.class); + Assert.assertNotNull(proxyUser); + proxyUser.validate("foo", "localhost", System.getProperty("user.name")); + server.destroy(); + } + + + @Test(expected = AccessControlException.class) + @TestDir + public void unknownHost() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.hosts", "localhost"); + conf.set("server.proxyuser.foo.groups", "*"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + ProxyUser proxyUser = server.get(ProxyUser.class); + Assert.assertNotNull(proxyUser); + proxyUser.validate("foo", "unknownhost.bar.foo", "bar"); + server.destroy(); + } + + @Test(expected = AccessControlException.class) + @TestDir + public void invalidHost() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", 
Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.hosts", "localhost"); + conf.set("server.proxyuser.foo.groups", "*"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + ProxyUser proxyUser = server.get(ProxyUser.class); + Assert.assertNotNull(proxyUser); + proxyUser.validate("foo", "www.yahoo.com", "bar"); + server.destroy(); + } + + @Test(expected = AccessControlException.class) + @TestDir + public void invalidGroup() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + Configuration conf = new Configuration(false); + conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(), + ProxyUserService.class.getName()))); + conf.set("server.proxyuser.foo.hosts", "localhost"); + conf.set("server.proxyuser.foo.groups", "nobody"); + Server server = new Server("server", dir, dir, dir, dir, conf); + server.init(); + ProxyUser proxyUser = server.get(ProxyUser.class); + Assert.assertNotNull(proxyUser); + proxyUser.validate("foo", "localhost", System.getProperty("user.name")); + server.destroy(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java new file mode 100644 index 0000000000..f30ab0c0cd --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.servlet; + +import junit.framework.Assert; +import org.apache.hadoop.test.HTestCase; +import org.junit.Test; +import org.mockito.Mockito; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + + +public class TestHostnameFilter extends HTestCase { + + @Test + public void hostname() throws Exception { + ServletRequest request = Mockito.mock(ServletRequest.class); + Mockito.when(request.getRemoteAddr()).thenReturn("localhost"); + + ServletResponse response = Mockito.mock(ServletResponse.class); + + final AtomicBoolean invoked = new AtomicBoolean(); + + FilterChain chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) + throws IOException, ServletException { + Assert.assertEquals(HostnameFilter.get(), "localhost"); + invoked.set(true); + } + }; + + Filter filter = new HostnameFilter(); + filter.init(null); + Assert.assertNull(HostnameFilter.get()); + filter.doFilter(request, response, chain); + Assert.assertTrue(invoked.get()); + Assert.assertNull(HostnameFilter.get()); + filter.destroy(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java new file mode 100644 index 0000000000..216af5fa50 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestMDCFilter.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.servlet; + +import junit.framework.Assert; +import org.apache.hadoop.test.HTestCase; +import org.junit.Test; +import org.mockito.Mockito; +import org.slf4j.MDC; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import java.io.IOException; +import java.security.Principal; +import java.util.concurrent.atomic.AtomicBoolean; + + +public class TestMDCFilter extends HTestCase { + + @Test + public void mdc() throws Exception { + HttpServletRequest request = Mockito.mock(HttpServletRequest.class); + Mockito.when(request.getUserPrincipal()).thenReturn(null); + Mockito.when(request.getMethod()).thenReturn("METHOD"); + Mockito.when(request.getPathInfo()).thenReturn("/pathinfo"); + + ServletResponse response = Mockito.mock(ServletResponse.class); + + final AtomicBoolean invoked = new AtomicBoolean(); + + FilterChain chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) + throws IOException, ServletException { + Assert.assertEquals(MDC.get("hostname"), null); + Assert.assertEquals(MDC.get("user"), null); + Assert.assertEquals(MDC.get("method"), "METHOD"); + Assert.assertEquals(MDC.get("path"), "/pathinfo"); + invoked.set(true); + } + }; + + MDC.clear(); + Filter filter = new MDCFilter(); + filter.init(null); + + filter.doFilter(request, response, chain); + Assert.assertTrue(invoked.get()); + Assert.assertNull(MDC.get("hostname")); + Assert.assertNull(MDC.get("user")); + Assert.assertNull(MDC.get("method")); + Assert.assertNull(MDC.get("path")); + + Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() { + @Override + public String getName() { + return "name"; + } + }); + + invoked.set(false); + chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) + throws IOException, ServletException { + Assert.assertEquals(MDC.get("hostname"), null); + Assert.assertEquals(MDC.get("user"), "name"); + Assert.assertEquals(MDC.get("method"), "METHOD"); + Assert.assertEquals(MDC.get("path"), "/pathinfo"); + invoked.set(true); + } + }; + filter.doFilter(request, response, chain); + Assert.assertTrue(invoked.get()); + + HostnameFilter.HOSTNAME_TL.set("HOST"); + + invoked.set(false); + chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) + throws IOException, ServletException { + Assert.assertEquals(MDC.get("hostname"), "HOST"); + Assert.assertEquals(MDC.get("user"), "name"); + Assert.assertEquals(MDC.get("method"), "METHOD"); + Assert.assertEquals(MDC.get("path"), "/pathinfo"); + invoked.set(true); + } + }; + filter.doFilter(request, response, chain); + Assert.assertTrue(invoked.get()); + + HostnameFilter.HOSTNAME_TL.remove(); + + filter.destroy(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java new file mode 100644 index 0000000000..380fa3e081 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestServerWebApp.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.servlet; + +import junit.framework.Assert; +import org.apache.hadoop.lib.server.Server; +import org.apache.hadoop.test.HTestCase; +import org.apache.hadoop.test.TestDir; +import org.apache.hadoop.test.TestDirHelper; +import org.junit.Test; + +public class TestServerWebApp extends HTestCase { + + @Test(expected = IllegalArgumentException.class) + public void getHomeDirNotDef() { + ServerWebApp.getHomeDir("TestServerWebApp00"); + } + + @Test + public void getHomeDir() { + System.setProperty("TestServerWebApp0.home.dir", "/tmp"); + Assert.assertEquals(ServerWebApp.getHomeDir("TestServerWebApp0"), "/tmp"); + Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmp/log"); + System.setProperty("TestServerWebApp0.log.dir", "/tmplog"); + Assert.assertEquals(ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"), "/tmplog"); + } + + @Test + @TestDir + public void lifecycle() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + System.setProperty("TestServerWebApp1.home.dir", dir); + System.setProperty("TestServerWebApp1.config.dir", dir); + System.setProperty("TestServerWebApp1.log.dir", dir); + System.setProperty("TestServerWebApp1.temp.dir", dir); + ServerWebApp server = new ServerWebApp("TestServerWebApp1") { + }; + + Assert.assertEquals(server.getStatus(), Server.Status.UNDEF); + server.contextInitialized(null); + Assert.assertEquals(server.getStatus(), Server.Status.NORMAL); + server.contextDestroyed(null); + Assert.assertEquals(server.getStatus(), Server.Status.SHUTDOWN); + } + + @Test(expected = RuntimeException.class) + @TestDir + public void failedInit() throws Exception { + String dir = TestDirHelper.getTestDir().getAbsolutePath(); + System.setProperty("TestServerWebApp2.home.dir", dir); + System.setProperty("TestServerWebApp2.config.dir", dir); + System.setProperty("TestServerWebApp2.log.dir", dir); + System.setProperty("TestServerWebApp2.temp.dir", dir); + System.setProperty("testserverwebapp2.services", "FOO"); + ServerWebApp server = new ServerWebApp("TestServerWebApp2") { + }; + + server.contextInitialized(null); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java new file mode 100644 index 0000000000..532ad369de --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestCheck.java @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.util; + + +import junit.framework.Assert; +import org.apache.hadoop.test.HTestCase; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; + +public class TestCheck extends HTestCase { + + @Test + public void notNullNotNull() { + Assert.assertEquals(Check.notNull("value", "name"), "value"); + } + + @Test(expected = IllegalArgumentException.class) + public void notNullNull() { + Check.notNull(null, "name"); + } + + @Test + public void notNullElementsNotNull() { + Check.notNullElements(new ArrayList(), "name"); + Check.notNullElements(Arrays.asList("a"), "name"); + } + + @Test(expected = IllegalArgumentException.class) + public void notNullElementsNullList() { + Check.notNullElements(null, "name"); + } + + @Test(expected = IllegalArgumentException.class) + public void notNullElementsNullElements() { + Check.notNullElements(Arrays.asList("a", "", null), "name"); + } + + @Test + public void notEmptyElementsNotNull() { + Check.notEmptyElements(new ArrayList(), "name"); + Check.notEmptyElements(Arrays.asList("a"), "name"); + } + + @Test(expected = IllegalArgumentException.class) + public void notEmptyElementsNullList() { + Check.notEmptyElements(null, "name"); + } + + @Test(expected = IllegalArgumentException.class) + public void notEmptyElementsNullElements() { + Check.notEmptyElements(Arrays.asList("a", null), "name"); + } + + + @Test(expected = IllegalArgumentException.class) + public void notEmptyElementsEmptyElements() { + Check.notEmptyElements(Arrays.asList("a", ""), "name"); + } + + + @Test + public void notEmptyNotEmtpy() { + Assert.assertEquals(Check.notEmpty("value", "name"), "value"); + } + + @Test(expected = IllegalArgumentException.class) + public void notEmptyNull() { + Check.notEmpty(null, "name"); + } + + @Test(expected = IllegalArgumentException.class) + public void notEmptyEmpty() { + Check.notEmpty("", "name"); + } + + @Test + public void validIdentifierValid() throws Exception { + Assert.assertEquals(Check.validIdentifier("a", 1, ""), "a"); + Assert.assertEquals(Check.validIdentifier("a1", 2, ""), "a1"); + Assert.assertEquals(Check.validIdentifier("a_", 3, ""), "a_"); + Assert.assertEquals(Check.validIdentifier("_", 1, ""), "_"); + } + + @Test(expected = IllegalArgumentException.class) + public void validIdentifierInvalid1() throws Exception { + Check.validIdentifier("!", 1, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void validIdentifierInvalid2() throws Exception { + Check.validIdentifier("a1", 1, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void validIdentifierInvalid3() throws Exception { + Check.validIdentifier("1", 1, ""); + } + + @Test + public void checkGTZeroGreater() { + Assert.assertEquals(Check.gt0(120, "test"), 120); + } + + @Test(expected = IllegalArgumentException.class) + public void checkGTZeroZero() { + Check.gt0(0, "test"); + } + + @Test(expected = 
IllegalArgumentException.class) + public void checkGTZeroLessThanZero() { + Check.gt0(-1, "test"); + } + + @Test + public void checkGEZero() { + Assert.assertEquals(Check.ge0(120, "test"), 120); + Assert.assertEquals(Check.ge0(0, "test"), 0); + } + + @Test(expected = IllegalArgumentException.class) + public void checkGELessThanZero() { + Check.ge0(-1, "test"); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java new file mode 100644 index 0000000000..48b5f9155f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/util/TestConfigurationUtils.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.util; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; + +public class TestConfigurationUtils { + + @Test + public void constructors() throws Exception { + Configuration conf = new Configuration(false); + Assert.assertEquals(conf.size(), 0); + + byte[] bytes = "aA".getBytes(); + InputStream is = new ByteArrayInputStream(bytes); + conf = new Configuration(false); + ConfigurationUtils.load(conf, is); + Assert.assertEquals(conf.size(), 1); + Assert.assertEquals(conf.get("a"), "A"); + } + + + @Test(expected = IOException.class) + public void constructorsFail3() throws Exception { + InputStream is = new ByteArrayInputStream("".getBytes()); + Configuration conf = new Configuration(false); + ConfigurationUtils.load(conf, is); + } + + @Test + public void copy() throws Exception { + Configuration srcConf = new Configuration(false); + Configuration targetConf = new Configuration(false); + + srcConf.set("testParameter1", "valueFromSource"); + srcConf.set("testParameter2", "valueFromSource"); + + targetConf.set("testParameter2", "valueFromTarget"); + targetConf.set("testParameter3", "valueFromTarget"); + + ConfigurationUtils.copy(srcConf, targetConf); + + Assert.assertEquals("valueFromSource", targetConf.get("testParameter1")); + Assert.assertEquals("valueFromSource", targetConf.get("testParameter2")); + Assert.assertEquals("valueFromTarget", targetConf.get("testParameter3")); + } + + @Test + public void injectDefaults() throws Exception { + Configuration srcConf = new Configuration(false); + Configuration targetConf = new Configuration(false); + + srcConf.set("testParameter1", "valueFromSource"); + srcConf.set("testParameter2", "valueFromSource"); + + targetConf.set("testParameter2", "originalValueFromTarget"); + 
targetConf.set("testParameter3", "originalValueFromTarget"); + + ConfigurationUtils.injectDefaults(srcConf, targetConf); + + Assert.assertEquals("valueFromSource", targetConf.get("testParameter1")); + Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter2")); + Assert.assertEquals("originalValueFromTarget", targetConf.get("testParameter3")); + + Assert.assertEquals("valueFromSource", srcConf.get("testParameter1")); + Assert.assertEquals("valueFromSource", srcConf.get("testParameter2")); + Assert.assertNull(srcConf.get("testParameter3")); + } + + + @Test + public void resolve() { + Configuration conf = new Configuration(false); + conf.set("a", "A"); + conf.set("b", "${a}"); + Assert.assertEquals(conf.getRaw("a"), "A"); + Assert.assertEquals(conf.getRaw("b"), "${a}"); + conf = ConfigurationUtils.resolve(conf); + Assert.assertEquals(conf.getRaw("a"), "A"); + Assert.assertEquals(conf.getRaw("b"), "A"); + } + + @Test + public void testVarResolutionAndSysProps() { + String userName = System.getProperty("user.name"); + Configuration conf = new Configuration(false); + conf.set("a", "A"); + conf.set("b", "${a}"); + conf.set("c", "${user.name}"); + conf.set("d", "${aaa}"); + Assert.assertEquals(conf.getRaw("a"), "A"); + Assert.assertEquals(conf.getRaw("b"), "${a}"); + Assert.assertEquals(conf.getRaw("c"), "${user.name}"); + Assert.assertEquals(conf.get("a"), "A"); + Assert.assertEquals(conf.get("b"), "A"); + Assert.assertEquals(conf.get("c"), userName); + Assert.assertEquals(conf.get("d"), "${aaa}"); + + conf.set("user.name", "foo"); + Assert.assertEquals(conf.get("user.name"), "foo"); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java new file mode 100644 index 0000000000..b1b140d7cd --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
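The copy and injectDefaults tests above pin down the two merge behaviours of ConfigurationUtils: copy lets the source overwrite keys already present in the target, while injectDefaults only fills in keys the target does not define. A condensed sketch of that difference (key and value names are illustrative):

    Configuration src = new Configuration(false);
    Configuration target = new Configuration(false);
    src.set("k", "fromSource");
    target.set("k", "fromTarget");

    ConfigurationUtils.injectDefaults(src, target);
    // target.get("k") is still "fromTarget": existing target values win

    ConfigurationUtils.copy(src, target);
    // target.get("k") is now "fromSource": the source overwrites the target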
+ */ + +package org.apache.hadoop.lib.wsrs; + +import junit.framework.Assert; +import org.junit.Test; + +public class TestBooleanParam { + + @Test + public void param() throws Exception { + BooleanParam param = new BooleanParam("p", "true") { + }; + Assert.assertEquals(param.getDomain(), "a boolean"); + Assert.assertEquals(param.value(), Boolean.TRUE); + Assert.assertEquals(param.toString(), "true"); + param = new BooleanParam("p", "false") { + }; + Assert.assertEquals(param.value(), Boolean.FALSE); + param = new BooleanParam("p", null) { + }; + Assert.assertEquals(param.value(), null); + param = new BooleanParam("p", "") { + }; + Assert.assertEquals(param.value(), null); + } + + @Test(expected = IllegalArgumentException.class) + public void invalid() throws Exception { + new BooleanParam("p", "x") { + }; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java new file mode 100644 index 0000000000..6b1a5ef64c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + + +import junit.framework.Assert; +import org.junit.Test; + +public class TestByteParam { + + @Test + public void param() throws Exception { + ByteParam param = new ByteParam("p", "1") { + }; + Assert.assertEquals(param.getDomain(), "a byte"); + Assert.assertEquals(param.value(), new Byte((byte) 1)); + Assert.assertEquals(param.toString(), "1"); + param = new ByteParam("p", null) { + }; + Assert.assertEquals(param.value(), null); + param = new ByteParam("p", "") { + }; + Assert.assertEquals(param.value(), null); + } + + @Test(expected = IllegalArgumentException.class) + public void invalid1() throws Exception { + new ByteParam("p", "x") { + }; + } + + @Test(expected = IllegalArgumentException.class) + public void invalid2() throws Exception { + new ByteParam("p", "256") { + }; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java new file mode 100644 index 0000000000..bb37f75f37 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + + +import junit.framework.Assert; +import org.junit.Test; + +public class TestEnumParam { + + public static enum ENUM { + FOO, BAR + } + + @Test + public void param() throws Exception { + EnumParam param = new EnumParam("p", "FOO", ENUM.class) { + }; + Assert.assertEquals(param.getDomain(), "FOO,BAR"); + Assert.assertEquals(param.value(), ENUM.FOO); + Assert.assertEquals(param.toString(), "FOO"); + param = new EnumParam("p", null, ENUM.class) { + }; + Assert.assertEquals(param.value(), null); + param = new EnumParam("p", "", ENUM.class) { + }; + Assert.assertEquals(param.value(), null); + } + + @Test(expected = IllegalArgumentException.class) + public void invalid1() throws Exception { + new EnumParam("p", "x", ENUM.class) { + }; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java new file mode 100644 index 0000000000..c3e0200d6e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestInputStreamEntity.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.wsrs; + +import junit.framework.Assert; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.InputStream; + +public class TestInputStreamEntity { + + @Test + public void test() throws Exception { + InputStream is = new ByteArrayInputStream("abc".getBytes()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + InputStreamEntity i = new InputStreamEntity(is); + i.write(baos); + baos.close(); + Assert.assertEquals(new String(baos.toByteArray()), "abc"); + + is = new ByteArrayInputStream("abc".getBytes()); + baos = new ByteArrayOutputStream(); + i = new InputStreamEntity(is, 1, 1); + i.write(baos); + baos.close(); + Assert.assertEquals(baos.toByteArray()[0], 'b'); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java new file mode 100644 index 0000000000..634dbe7c2a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import junit.framework.Assert; +import org.junit.Test; + +public class TestIntegerParam { + + @Test + public void param() throws Exception { + IntegerParam param = new IntegerParam("p", "1") { + }; + Assert.assertEquals(param.getDomain(), "an integer"); + Assert.assertEquals(param.value(), new Integer(1)); + Assert.assertEquals(param.toString(), "1"); + param = new IntegerParam("p", null) { + }; + Assert.assertEquals(param.value(), null); + param = new IntegerParam("p", "") { + }; + Assert.assertEquals(param.value(), null); + } + + @Test(expected = IllegalArgumentException.class) + public void invalid1() throws Exception { + new IntegerParam("p", "x") { + }; + } + + @Test(expected = IllegalArgumentException.class) + public void invalid2() throws Exception { + new IntegerParam("p", "" + Long.MAX_VALUE) { + }; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java new file mode 100644 index 0000000000..afb07572e7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONMapProvider.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import junit.framework.Assert; +import org.json.simple.JSONObject; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; +import java.util.Map; + +public class TestJSONMapProvider { + + @Test + @SuppressWarnings("unchecked") + public void test() throws Exception { + JSONMapProvider p = new JSONMapProvider(); + Assert.assertTrue(p.isWriteable(Map.class, null, null, null)); + Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null)); + Assert.assertEquals(p.getSize(null, null, null, null, null), -1); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + JSONObject json = new JSONObject(); + json.put("a", "A"); + p.writeTo(json, JSONObject.class, null, null, null, null, baos); + baos.close(); + Assert.assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java new file mode 100644 index 0000000000..a9ac9a2d74 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestJSONProvider.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
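TestJSONMapProvider above and TestJSONProvider below cover the two writers that let resource methods return json-simple objects directly; the tests drive them through the isWriteable/getSize/writeTo contract. A hedged sketch of the intended use, assuming the providers are registered with the JAX-RS application (the resource method is hypothetical):

    @GET
    @Produces(MediaType.APPLICATION_JSON)
    public Map getStatus() {
      JSONObject json = new JSONObject();
      json.put("status", "OK");
      return json;  // serialized to {"status":"OK"} by the JSON providers
    }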
+ */ + +package org.apache.hadoop.lib.wsrs; + +import junit.framework.Assert; +import org.json.simple.JSONObject; +import org.junit.Test; + +import java.io.ByteArrayOutputStream; + +public class TestJSONProvider { + + @Test + @SuppressWarnings("unchecked") + public void test() throws Exception { + JSONProvider p = new JSONProvider(); + Assert.assertTrue(p.isWriteable(JSONObject.class, null, null, null)); + Assert.assertFalse(p.isWriteable(this.getClass(), null, null, null)); + Assert.assertEquals(p.getSize(null, null, null, null, null), -1); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + JSONObject json = new JSONObject(); + json.put("a", "A"); + p.writeTo(json, JSONObject.class, null, null, null, null, baos); + baos.close(); + Assert.assertEquals(new String(baos.toByteArray()).trim(), "{\"a\":\"A\"}"); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java new file mode 100644 index 0000000000..1a7ddd8d35 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + +import junit.framework.Assert; +import org.junit.Test; + +public class TestLongParam { + + @Test + public void param() throws Exception { + LongParam param = new LongParam("p", "1") { + }; + Assert.assertEquals(param.getDomain(), "a long"); + Assert.assertEquals(param.value(), new Long(1)); + Assert.assertEquals(param.toString(), "1"); + param = new LongParam("p", null) { + }; + Assert.assertEquals(param.value(), null); + param = new LongParam("p", "") { + }; + Assert.assertEquals(param.value(), null); + } + + @Test(expected = IllegalArgumentException.class) + public void invalid1() throws Exception { + new LongParam("p", "x") { + }; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java new file mode 100644 index 0000000000..b37bddffe4 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.lib.wsrs; + + +import junit.framework.Assert; +import org.junit.Test; + +public class TestShortParam { + + @Test + public void param() throws Exception { + ShortParam param = new ShortParam("p", "1") { + }; + Assert.assertEquals(param.getDomain(), "a short"); + Assert.assertEquals(param.value(), new Short((short) 1)); + Assert.assertEquals(param.toString(), "1"); + param = new ShortParam("p", null) { + }; + Assert.assertEquals(param.value(), null); + param = new ShortParam("p", "") { + }; + Assert.assertEquals(param.value(), null); + } + + @Test(expected = IllegalArgumentException.class) + public void invalid1() throws Exception { + new ShortParam("p", "x") { + }; + } + + @Test(expected = IllegalArgumentException.class) + public void invalid2() throws Exception { + new ShortParam("p", "" + Integer.MAX_VALUE) { + }; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java new file mode 100644 index 0000000000..feb489e043 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.lib.wsrs; + + +import junit.framework.Assert; +import org.junit.Test; + +import java.util.regex.Pattern; + +public class TestStringParam { + + @Test + public void param() throws Exception { + StringParam param = new StringParam("p", "s") { + }; + Assert.assertEquals(param.getDomain(), "a string"); + Assert.assertEquals(param.value(), "s"); + Assert.assertEquals(param.toString(), "s"); + param = new StringParam("p", null) { + }; + Assert.assertEquals(param.value(), null); + param = new StringParam("p", "") { + }; + Assert.assertEquals(param.value(), null); + + param.setValue("S"); + Assert.assertEquals(param.value(), "S"); + } + + @Test + public void paramRegEx() throws Exception { + StringParam param = new StringParam("p", "Aaa", Pattern.compile("A.*")) { + }; + Assert.assertEquals(param.getDomain(), "A.*"); + Assert.assertEquals(param.value(), "Aaa"); + Assert.assertEquals(param.toString(), "Aaa"); + param = new StringParam("p", null) { + }; + Assert.assertEquals(param.value(), null); + } + + @Test(expected = IllegalArgumentException.class) + public void paramInvalidRegEx() throws Exception { + new StringParam("p", "Baa", Pattern.compile("A.*")) { + }; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java new file mode 100644 index 0000000000..72d79a9392 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestUserProvider.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
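The *Param tests above (boolean, byte, enum, integer, long, short, string) all verify the same contract: a parameter parses its raw query-string value, reports a human-readable domain for error messages, and maps null or the empty string to a null value. A hypothetical concrete subclass, to show the pattern the abstract classes are designed for (the name and parameter are illustrative, not part of this patch):

    // Hypothetical: a 'len' query-string parameter built on LongParam.
    public class LenParam extends LongParam {
      public LenParam(String str) {
        super("len", str);  // parameter name, raw value ("" and null become null)
      }
    }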
+ */ + +package org.apache.hadoop.lib.wsrs; + +import com.sun.jersey.api.core.HttpContext; +import com.sun.jersey.api.core.HttpRequestContext; +import com.sun.jersey.core.spi.component.ComponentScope; +import junit.framework.Assert; +import org.junit.Test; +import org.mockito.Mockito; +import org.slf4j.MDC; + +import javax.ws.rs.core.MultivaluedMap; +import java.security.Principal; + +public class TestUserProvider { + + @Test + @SuppressWarnings("unchecked") + public void noUser() { + MDC.remove("user"); + HttpRequestContext request = Mockito.mock(HttpRequestContext.class); + Mockito.when(request.getUserPrincipal()).thenReturn(null); + MultivaluedMap map = Mockito.mock(MultivaluedMap.class); + Mockito.when(map.getFirst(UserProvider.USER_NAME_PARAM)).thenReturn(null); + Mockito.when(request.getQueryParameters()).thenReturn(map); + HttpContext context = Mockito.mock(HttpContext.class); + Mockito.when(context.getRequest()).thenReturn(request); + UserProvider up = new UserProvider(); + Assert.assertNull(up.getValue(context)); + Assert.assertNull(MDC.get("user")); + } + + @Test + @SuppressWarnings("unchecked") + public void queryStringUser() { + MDC.remove("user"); + HttpRequestContext request = Mockito.mock(HttpRequestContext.class); + Mockito.when(request.getUserPrincipal()).thenReturn(null); + MultivaluedMap map = Mockito.mock(MultivaluedMap.class); + Mockito.when(map.getFirst(UserProvider.USER_NAME_PARAM)).thenReturn("foo"); + Mockito.when(request.getQueryParameters()).thenReturn(map); + HttpContext context = Mockito.mock(HttpContext.class); + Mockito.when(context.getRequest()).thenReturn(request); + UserProvider up = new UserProvider(); + Assert.assertEquals(up.getValue(context).getName(), "foo"); + Assert.assertEquals(MDC.get("user"), "foo"); + } + + @Test + @SuppressWarnings("unchecked") + public void principalUser() { + MDC.remove("user"); + HttpRequestContext request = Mockito.mock(HttpRequestContext.class); + Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() { + @Override + public String getName() { + return "bar"; + } + }); + HttpContext context = Mockito.mock(HttpContext.class); + Mockito.when(context.getRequest()).thenReturn(request); + UserProvider up = new UserProvider(); + Assert.assertEquals(up.getValue(context).getName(), "bar"); + Assert.assertEquals(MDC.get("user"), "bar"); + } + + @Test + public void getters() { + UserProvider up = new UserProvider(); + Assert.assertEquals(up.getScope(), ComponentScope.PerRequest); + Assert.assertEquals(up.getInjectable(null, null, Principal.class), up); + Assert.assertNull(up.getInjectable(null, null, String.class)); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HFSTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HFSTestCase.java new file mode 100644 index 0000000000..a13a9904ef --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HFSTestCase.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import org.junit.Rule; +import org.junit.rules.MethodRule; + +public abstract class HFSTestCase extends HTestCase { + + @Rule + public MethodRule hdfsTestHelper = new TestHdfsHelper(); + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java new file mode 100644 index 0000000000..d5cb2c8d38 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HTestCase.java @@ -0,0 +1,174 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import junit.framework.Assert; +import org.junit.Rule; +import org.junit.rules.MethodRule; + +import java.text.MessageFormat; + +public abstract class HTestCase { + + public static final String TEST_WAITFOR_RATIO_PROP = "test.waitfor.ratio"; + + static { + SysPropsForTestsLoader.init(); + } + + private static float WAITFOR_RATIO_DEFAULT = Float.parseFloat(System.getProperty(TEST_WAITFOR_RATIO_PROP, "1")); + + private float waitForRatio = WAITFOR_RATIO_DEFAULT; + + @Rule + public MethodRule testDir = new TestDirHelper(); + + @Rule + public MethodRule jettyTestHelper = new TestJettyHelper(); + + @Rule + public MethodRule exceptionHelper = new TestExceptionHelper(); + + /** + * Sets the 'wait for ratio' used in the {@link #sleep(long)}, + * {@link #waitFor(int, Predicate)} and + * {@link #waitFor(int, boolean, Predicate)} method for the current + * test class. + *
<p/>
    + * This is useful when running tests on a slow machine for tests + * that are time sensitive. + * + * @param ratio the 'wait for ratio' to set. + */ + protected void setWaitForRatio(float ratio) { + waitForRatio = ratio; + } + + /** + * Returns the 'wait for ratio' used in the {@link #sleep(long)}, + * {@link #waitFor(int, Predicate)} and + * {@link #waitFor(int, boolean, Predicate)} methods for the current + * test class. + *
<p/>
    + * This is useful when running tests on a slow machine for tests + * that are time sensitive. + *
<p/>
    + * The default value is obtained from the Java System property + * test.waitfor.ratio which defaults to 1. + * + * @return the 'wait for ratio' for the current test class. + */ + protected float getWaitForRatio() { + return waitForRatio; + } + + /** + * A predicate 'closure' used by the {@link #waitFor(int, Predicate)} and + * {@link #waitFor(int, boolean, Predicate)} methods. + */ + public static interface Predicate { + + /** + * Perform a predicate evaluation. + * + * @return the boolean result of the evaluation. + * + * @throws Exception thrown if the predicate could not be evaluated. + */ + public boolean evaluate() throws Exception; + + } + + /** + * Makes the current thread sleep for the specified number of milliseconds. + *
<p/>
    + * The sleep time is multiplied by the {@link #getWaitForRatio()}. + * + * @param time the number of milliseconds to sleep. + */ + protected void sleep(long time) { + try { + Thread.sleep((long) (getWaitForRatio() * time)); + } catch (InterruptedException ex) { + System.err.println(MessageFormat.format("Sleep interrupted, {0}", ex.toString())); + } + } + + /** + * Waits up to the specified timeout for the given {@link Predicate} to + * become true, failing the test if the timeout is reached + * and the Predicate is still false. + *
<p/>
    + * The timeout time is multiplied by the {@link #getWaitForRatio()}. + * + * @param timeout the timeout in milliseconds to wait for the predicate. + * @param predicate the predicate to evaluate. + * + * @return the effective wait, in milliseconds until the predicate became + * true. + */ + protected long waitFor(int timeout, Predicate predicate) { + return waitFor(timeout, false, predicate); + } + + /** + * Waits up to the specified timeout for the given {@link Predicate} to + * become true. + *
<p/>
    + * The timeout time is multiplied by the {@link #getWaitForRatio()}. + * + * @param timeout the timeout in milliseconds to wait for the predicate. + * @param failIfTimeout indicates if the test should be failed if the + * predicate times out. + * @param predicate the predicate ot evaluate. + * + * @return the effective wait, in milli-seconds until the predicate become + * true or -1 if the predicate did not evaluate + * to true. + */ + protected long waitFor(int timeout, boolean failIfTimeout, Predicate predicate) { + long started = System.currentTimeMillis(); + long mustEnd = System.currentTimeMillis() + (long) (getWaitForRatio() * timeout); + long lastEcho = 0; + try { + long waiting = mustEnd - System.currentTimeMillis(); + System.out.println(MessageFormat.format("Waiting up to [{0}] msec", waiting)); + boolean eval; + while (!(eval = predicate.evaluate()) && System.currentTimeMillis() < mustEnd) { + if ((System.currentTimeMillis() - lastEcho) > 5000) { + waiting = mustEnd - System.currentTimeMillis(); + System.out.println(MessageFormat.format("Waiting up to [{0}] msec", waiting)); + lastEcho = System.currentTimeMillis(); + } + Thread.sleep(100); + } + if (!eval) { + if (failIfTimeout) { + Assert.fail(MessageFormat.format("Waiting timed out after [{0}] msec", timeout)); + } else { + System.out.println(MessageFormat.format("Waiting timed out after [{0}] msec", timeout)); + } + } + return (eval) ? System.currentTimeMillis() - started : -1; + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + +} + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java new file mode 100644 index 0000000000..398a8853dd --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/HadoopUsersConfTestHelper.java @@ -0,0 +1,177 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; + +import java.util.ArrayList; +import java.util.List; + +/** + * Helper to configure FileSystemAccess user/group and proxyuser + * configuration for testing using Java System properties. + *
<p/>
    + * It uses the {@link SysPropsForTestsLoader} to load Java System + * properties for testing. + */ +public class HadoopUsersConfTestHelper { + + static { + SysPropsForTestsLoader.init(); + } + + public static final String HADOOP_PROXYUSER = "test.hadoop.proxyuser"; + + public static final String HADOOP_PROXYUSER_HOSTS = "test.hadoop.proxyuser.hosts"; + + public static final String HADOOP_PROXYUSER_GROUPS = "test.hadoop.proxyuser.groups"; + + public static final String HADOOP_USER_PREFIX = "test.hadoop.user."; + + /** + * Returns a valid FileSystemAccess proxyuser for the FileSystemAccess cluster. + *
<p/>
    + * The user is read from the Java System property + * test.hadoop.proxyuser which defaults to the current user + * (Java System property user.name). + *
<p/>
    + * This property should be set in the test.properties file. + *
<p/>
    + * When running the FileSystemAccess minicluster, this property is used to configure the minicluster. + *
<p/>
    + * When using an external FileSystemAccess cluster, it is expected this property is set to + * a valid proxy user. + * + * @return a valid FileSystemAccess proxyuser for the FileSystemAccess cluster. + */ + public static String getHadoopProxyUser() { + return System.getProperty(HADOOP_PROXYUSER, System.getProperty("user.name")); + } + + /** + * Returns the hosts for the FileSystemAccess proxyuser settings. + *
<p/>
    + * The hosts are read from the Java System property + * test.hadoop.proxyuser.hosts which defaults to *. + *
<p/>
    + * This property should be set in the test.properties file. + *
<p/>
    + * This property is ONLY used when running the FileSystemAccess minicluster, where it is used to + * configure the minicluster. + *
<p/>
    + * When using an external FileSystemAccess cluster this property is ignored. + * + * @return the hosts for the FileSystemAccess proxyuser settings. + */ + public static String getHadoopProxyUserHosts() { + return System.getProperty(HADOOP_PROXYUSER_HOSTS, "*"); + } + + /** + * Returns the groups for the FileSystemAccess proxyuser settings. + *
<p/>
    + * The groups are read from the Java System property + * test.hadoop.proxyuser.groups which defaults to *. + *
<p/>
    + * This property should be set in the test.properties file. + *
<p/>
    + * This property is ONLY used when running the FileSystemAccess minicluster, where it is used to + * configure the minicluster. + *
<p/>
    + * When using an external FileSystemAccess cluster this property is ignored. + * + * @return the groups for the FileSystemAccess proxyuser settings. + */ + public static String getHadoopProxyUserGroups() { + return System.getProperty(HADOOP_PROXYUSER_GROUPS, "*"); + } + + private static final String[] DEFAULT_USERS = new String[]{"user1", "user2"}; + private static final String[] DEFAULT_USERS_GROUP = new String[]{"group1", "supergroup"}; + + /** + * Returns the FileSystemAccess users to be used for tests. These users are defined + * in the test.properties file in properties of the form + * test.hadoop.user.#USER#=#GROUP1#,#GROUP2#,.... + *
<p/>
    + * These properties are used to configure the FileSystemAccess minicluster user/group + * information. + *
<p/>
    + * When using an external FileSystemAccess cluster these properties should match the + * user/groups settings in the cluster. + * + * @return the FileSystemAccess users used for testing. + */ + public static String[] getHadoopUsers() { + List users = new ArrayList(); + for (String name : System.getProperties().stringPropertyNames()) { + if (name.startsWith(HADOOP_USER_PREFIX)) { + users.add(name.substring(HADOOP_USER_PREFIX.length())); + } + } + return (users.size() != 0) ? users.toArray(new String[users.size()]) : DEFAULT_USERS; + } + + /** + * Returns the groups a FileSystemAccess user belongs to during tests. These users/groups + * are defined in the test.properties file in properties of the + * form test.hadoop.user.#USER#=#GROUP1#,#GROUP2#,.... + *
<p/>
    + * These properties are used to configure the FileSystemAccess minicluster user/group + * information. + *
<p/>
    + * When using an external FileSystemAccess cluster these properties should match the + * user/groups settings in the cluster. + * + * @param user user name to get gropus. + * + * @return the groups of FileSystemAccess users used for testing. + */ + public static String[] getHadoopUserGroups(String user) { + if (getHadoopUsers() == DEFAULT_USERS) { + return DEFAULT_USERS_GROUP; + } else { + String groups = System.getProperty(HADOOP_USER_PREFIX + user); + return (groups != null) ? groups.split(",") : new String[0]; + } + } + + public static Configuration getBaseConf() { + Configuration conf = new Configuration(); + for (String name : System.getProperties().stringPropertyNames()) { + conf.set(name, System.getProperty(name)); + } + return conf; + } + + public static void addUserConf(Configuration conf) { + conf.set("hadoop.security.authentication", "simple"); + conf.set("hadoop.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", + HadoopUsersConfTestHelper.getHadoopProxyUserHosts()); + conf.set("hadoop.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", + HadoopUsersConfTestHelper.getHadoopProxyUserGroups()); + + for (String user : HadoopUsersConfTestHelper.getHadoopUsers()) { + String[] groups = HadoopUsersConfTestHelper.getHadoopUserGroups(user); + UserGroupInformation.createUserForTesting(user, groups); + } + } + + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/SysPropsForTestsLoader.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/SysPropsForTestsLoader.java new file mode 100644 index 0000000000..a974eafbca --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/SysPropsForTestsLoader.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
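HadoopUsersConfTestHelper above and SysPropsForTestsLoader below both take their settings from an optional test.properties file, with Java System properties that are already set taking precedence. A hypothetical test.properties using the keys documented above; the proxyuser, users and groups shown are examples only:

    # Hypothetical test.properties picked up by SysPropsForTestsLoader
    test.hadoop.proxyuser=proxyclient
    test.hadoop.proxyuser.hosts=*
    test.hadoop.proxyuser.groups=*
    test.hadoop.user.user1=group1,supergroup
    test.hadoop.user.user2=group1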
+ */ +package org.apache.hadoop.test; + +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Map; +import java.util.Properties; + +public class SysPropsForTestsLoader { + + public static final String TEST_PROPERTIES_PROP = "test.properties"; + + static { + try { + String testFileName = System.getProperty(TEST_PROPERTIES_PROP, "test.properties"); + File currentDir = new File(testFileName).getAbsoluteFile().getParentFile(); + File testFile = new File(currentDir, testFileName); + while (currentDir != null && !testFile.exists()) { + testFile = new File(testFile.getAbsoluteFile().getParentFile().getParentFile(), testFileName); + currentDir = currentDir.getParentFile(); + if (currentDir != null) { + testFile = new File(currentDir, testFileName); + } + } + + if (testFile.exists()) { + System.out.println(); + System.out.println(">>> " + TEST_PROPERTIES_PROP + " : " + testFile.getAbsolutePath()); + Properties testProperties = new Properties(); + testProperties.load(new FileReader(testFile)); + for (Map.Entry entry : testProperties.entrySet()) { + if (!System.getProperties().containsKey(entry.getKey())) { + System.setProperty((String) entry.getKey(), (String) entry.getValue()); + } + } + } else if (System.getProperty(TEST_PROPERTIES_PROP) != null) { + System.err.println(MessageFormat.format("Specified 'test.properties' file does not exist [{0}]", + System.getProperty(TEST_PROPERTIES_PROP))); + System.exit(-1); + + } else { + System.out.println(">>> " + TEST_PROPERTIES_PROP + " : "); + } + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + + public static void init() { + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDir.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDir.java new file mode 100644 index 0000000000..28ab71d43d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDir.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + + +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +/** + * Annotation for {@link HTestCase} subclasses to indicate that the test method + * requires a test directory in the local file system. + *
<p/>
    + * The test directory location can be retrieve using the + * {@link TestDirHelper#getTestDir()} method. + */ +@Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@Target(java.lang.annotation.ElementType.METHOD) +public @interface TestDir { +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java new file mode 100644 index 0000000000..c3f3d53c38 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestDirHelper.java @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import org.junit.Test; +import org.junit.rules.MethodRule; +import org.junit.runners.model.FrameworkMethod; +import org.junit.runners.model.Statement; + +import java.io.File; +import java.io.IOException; +import java.text.MessageFormat; +import java.util.concurrent.atomic.AtomicInteger; + +public class TestDirHelper implements MethodRule { + + @Test + public void dummy() { + } + + static { + SysPropsForTestsLoader.init(); + } + + public static final String TEST_DIR_PROP = "test.dir"; + static String TEST_DIR_ROOT; + + private static void delete(File file) throws IOException { + if (file.getAbsolutePath().length() < 5) { + throw new IllegalArgumentException( + MessageFormat.format("Path [{0}] is too short, not deleting", file.getAbsolutePath())); + } + if (file.exists()) { + if (file.isDirectory()) { + File[] children = file.listFiles(); + if (children != null) { + for (File child : children) { + delete(child); + } + } + } + if (!file.delete()) { + throw new RuntimeException(MessageFormat.format("Could not delete path [{0}]", file.getAbsolutePath())); + } + } + } + + static { + try { + TEST_DIR_ROOT = System.getProperty(TEST_DIR_PROP, new File("target").getAbsolutePath()); + if (!TEST_DIR_ROOT.startsWith("/")) { + System.err.println(MessageFormat.format("System property [{0}]=[{1}] must be set to an absolute path", + TEST_DIR_PROP, TEST_DIR_ROOT)); + System.exit(-1); + } else if (TEST_DIR_ROOT.length() < 4) { + System.err.println(MessageFormat.format("System property [{0}]=[{1}] must be at least 4 chars", + TEST_DIR_PROP, TEST_DIR_ROOT)); + System.exit(-1); + } + + TEST_DIR_ROOT = new File(TEST_DIR_ROOT, "testdir").getAbsolutePath(); + System.setProperty(TEST_DIR_PROP, TEST_DIR_ROOT); + + File dir = new File(TEST_DIR_ROOT); + delete(dir); + if (!dir.mkdirs()) { + System.err.println(MessageFormat.format("Could not create test dir [{0}]", TEST_DIR_ROOT)); + System.exit(-1); + } + + System.setProperty("test.circus", "true"); + + System.out.println(">>> " + TEST_DIR_PROP + " : " + System.getProperty(TEST_DIR_PROP)); + } catch 
(IOException ex) { + throw new RuntimeException(ex); + } + } + + private static ThreadLocal TEST_DIR_TL = new InheritableThreadLocal(); + + @Override + public Statement apply(final Statement statement, final FrameworkMethod frameworkMethod, final Object o) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + File testDir = null; + TestDir testDirAnnotation = frameworkMethod.getAnnotation(TestDir.class); + if (testDirAnnotation != null) { + testDir = resetTestCaseDir(frameworkMethod.getName()); + } + try { + TEST_DIR_TL.set(testDir); + statement.evaluate(); + } finally { + TEST_DIR_TL.remove(); + } + } + }; + } + + /** + * Returns the local test directory for the current test, only available when the + * test method has been annotated with {@link TestDir}. + * + * @return the test directory for the current test. It is an full/absolute + * File. + */ + public static File getTestDir() { + File testDir = TEST_DIR_TL.get(); + if (testDir == null) { + throw new IllegalStateException("This test does not use @TestDir"); + } + return testDir; + } + + private static AtomicInteger counter = new AtomicInteger(); + + private static File resetTestCaseDir(String testName) { + File dir = new File(TEST_DIR_ROOT); + dir = new File(dir, testName + "-" + counter.getAndIncrement()); + dir = dir.getAbsoluteFile(); + try { + delete(dir); + } catch (IOException ex) { + throw new RuntimeException(MessageFormat.format("Could not delete test dir[{0}], {1}", + dir, ex.getMessage()), ex); + } + if (!dir.mkdirs()) { + throw new RuntimeException(MessageFormat.format("Could not create test dir[{0}]", dir)); + } + return dir; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestException.java new file mode 100644 index 0000000000..cb71f4d678 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestException.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
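TestDirHelper above wires the @TestDir annotation to a fresh per-test scratch directory under ${test.dir}/testdir (target/testdir by default). A minimal sketch of a test relying on it (the test class and file name are illustrative):

    public class TestScratchDir extends HTestCase {

      @Test
      @TestDir
      public void writesIntoScratchDir() throws Exception {
        File dir = TestDirHelper.getTestDir();  // only valid when @TestDir is present
        Assert.assertTrue(new File(dir, "data.txt").createNewFile());
      }
    }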
+ */ +package org.apache.hadoop.test; + + +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +@Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@Target(java.lang.annotation.ElementType.METHOD) +public @interface TestException { + Class exception(); + + String msgRegExp() default ".*"; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java new file mode 100644 index 0000000000..8411db4754 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestExceptionHelper.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import junit.framework.Assert; +import org.junit.Test; +import org.junit.rules.MethodRule; +import org.junit.runners.model.FrameworkMethod; +import org.junit.runners.model.Statement; + +import java.util.regex.Pattern; + +public class TestExceptionHelper implements MethodRule { + + @Test + public void dummy() { + } + + @Override + public Statement apply(final Statement statement, final FrameworkMethod frameworkMethod, final Object o) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + TestException testExceptionAnnotation = frameworkMethod.getAnnotation(TestException.class); + try { + statement.evaluate(); + if (testExceptionAnnotation != null) { + Class klass = testExceptionAnnotation.exception(); + Assert.fail("Expected Exception: " + klass.getSimpleName()); + } + } catch (Throwable ex) { + if (testExceptionAnnotation != null) { + Class klass = testExceptionAnnotation.exception(); + if (klass.isInstance(ex)) { + String regExp = testExceptionAnnotation.msgRegExp(); + Pattern pattern = Pattern.compile(regExp); + if (!pattern.matcher(ex.getMessage()).find()) { + Assert.fail("Expected Exception Message pattern: " + regExp + " got message: " + ex.getMessage()); + } + } else { + Assert.fail("Expected Exception: " + klass.getSimpleName() + " got: " + ex.getClass().getSimpleName()); + } + } else { + throw ex; + } + } + } + }; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java new file mode 100644 index 0000000000..31bef39220 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHFSTestCase.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
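TestExceptionHelper above lets a test declare its expected failure through the @TestException annotation, checking both the exception type and an optional message pattern (msgRegExp, matched with find()); TestHFSTestCase below contains the canonical uses. A compact restatement of the pattern (the test method is illustrative):

    @Test
    @TestException(exception = IllegalArgumentException.class, msgRegExp = ".*missing.*")
    public void rejectsMissingArgument() {
      throw new IllegalArgumentException("argument is missing");
    }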
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.test; + +import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.Test; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.servlet.Context; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.net.URL; + +public class TestHFSTestCase extends HFSTestCase { + + @Test(expected = IllegalStateException.class) + public void testDirNoAnnotation() throws Exception { + TestDirHelper.getTestDir(); + } + + @Test(expected = IllegalStateException.class) + public void testJettyNoAnnotation() throws Exception { + TestJettyHelper.getJettyServer(); + } + + @Test(expected = IllegalStateException.class) + public void testJettyNoAnnotation2() throws Exception { + TestJettyHelper.getJettyURL(); + } + + @Test(expected = IllegalStateException.class) + public void testHdfsNoAnnotation() throws Exception { + TestHdfsHelper.getHdfsConf(); + } + + @Test(expected = IllegalStateException.class) + public void testHdfsNoAnnotation2() throws Exception { + TestHdfsHelper.getHdfsTestDir(); + } + + @Test + @TestDir + public void testDirAnnotation() throws Exception { + Assert.assertNotNull(TestDirHelper.getTestDir()); + } + + @Test + public void waitFor() { + long start = System.currentTimeMillis(); + long waited = waitFor(1000, new Predicate() { + public boolean evaluate() throws Exception { + return true; + } + }); + long end = System.currentTimeMillis(); + Assert.assertEquals(waited, 0, 50); + Assert.assertEquals(end - start - waited, 0, 50); + } + + @Test + public void waitForTimeOutRatio1() { + setWaitForRatio(1); + long start = System.currentTimeMillis(); + long waited = waitFor(200, new Predicate() { + public boolean evaluate() throws Exception { + return false; + } + }); + long end = System.currentTimeMillis(); + Assert.assertEquals(waited, -1); + Assert.assertEquals(end - start, 200, 50); + } + + @Test + public void waitForTimeOutRatio2() { + setWaitForRatio(2); + long start = System.currentTimeMillis(); + long waited = waitFor(200, new Predicate() { + public boolean evaluate() throws Exception { + return false; + } + }); + long end = System.currentTimeMillis(); + Assert.assertEquals(waited, -1); + Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); + } + + @Test + public void sleepRatio1() { + setWaitForRatio(1); + long start = System.currentTimeMillis(); + sleep(100); + long end = System.currentTimeMillis(); + 
Assert.assertEquals(end - start, 100, 50); + } + + @Test + public void sleepRatio2() { + setWaitForRatio(1); + long start = System.currentTimeMillis(); + sleep(100); + long end = System.currentTimeMillis(); + Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio()); + } + + @Test + @TestHdfs + public void testHadoopFileSystem() throws Exception { + Configuration conf = TestHdfsHelper.getHdfsConf(); + FileSystem fs = FileSystem.get(conf); + try { + OutputStream os = fs.create(new Path(TestHdfsHelper.getHdfsTestDir(), "foo")); + os.write(new byte[]{1}); + os.close(); + InputStream is = fs.open(new Path(TestHdfsHelper.getHdfsTestDir(), "foo")); + Assert.assertEquals(is.read(), 1); + Assert.assertEquals(is.read(), -1); + is.close(); + } finally { + fs.close(); + } + } + + public static class MyServlet extends HttpServlet { + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { + resp.getWriter().write("foo"); + } + } + + @Test + @TestJetty + public void testJetty() throws Exception { + Context context = new Context(); + context.setContextPath("/"); + context.addServlet(MyServlet.class, "/bar"); + Server server = TestJettyHelper.getJettyServer(); + server.addHandler(context); + server.start(); + URL url = new URL(TestJettyHelper.getJettyURL(), "/bar"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); + Assert.assertEquals(reader.readLine(), "foo"); + reader.close(); + } + + @Test + @TestException(exception = RuntimeException.class) + public void testException0() { + throw new RuntimeException("foo"); + } + + @Test + @TestException(exception = RuntimeException.class, msgRegExp = ".o.") + public void testException1() { + throw new RuntimeException("foo"); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java new file mode 100644 index 0000000000..1349036f9c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHTestCase.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.test; + +import junit.framework.Assert; +import org.junit.Test; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.servlet.Context; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.HttpURLConnection; +import java.net.URL; + +public class TestHTestCase extends HTestCase { + + @Test(expected = IllegalStateException.class) + public void testDirNoAnnotation() throws Exception { + TestDirHelper.getTestDir(); + } + + @Test(expected = IllegalStateException.class) + public void testJettyNoAnnotation() throws Exception { + TestJettyHelper.getJettyServer(); + } + + @Test(expected = IllegalStateException.class) + public void testJettyNoAnnotation2() throws Exception { + TestJettyHelper.getJettyURL(); + } + + @Test + @TestDir + public void testDirAnnotation() throws Exception { + Assert.assertNotNull(TestDirHelper.getTestDir()); + } + + @Test + public void waitFor() { + long start = System.currentTimeMillis(); + long waited = waitFor(1000, new Predicate() { + public boolean evaluate() throws Exception { + return true; + } + }); + long end = System.currentTimeMillis(); + Assert.assertEquals(waited, 0, 50); + Assert.assertEquals(end - start - waited, 0, 50); + } + + @Test + public void waitForTimeOutRatio1() { + setWaitForRatio(1); + long start = System.currentTimeMillis(); + long waited = waitFor(200, new Predicate() { + public boolean evaluate() throws Exception { + return false; + } + }); + long end = System.currentTimeMillis(); + Assert.assertEquals(waited, -1); + Assert.assertEquals(end - start, 200, 50); + } + + @Test + public void waitForTimeOutRatio2() { + setWaitForRatio(2); + long start = System.currentTimeMillis(); + long waited = waitFor(200, new Predicate() { + public boolean evaluate() throws Exception { + return false; + } + }); + long end = System.currentTimeMillis(); + Assert.assertEquals(waited, -1); + Assert.assertEquals(end - start, 200 * getWaitForRatio(), 50 * getWaitForRatio()); + } + + @Test + public void sleepRatio1() { + setWaitForRatio(1); + long start = System.currentTimeMillis(); + sleep(100); + long end = System.currentTimeMillis(); + Assert.assertEquals(end - start, 100, 50); + } + + @Test + public void sleepRatio2() { + setWaitForRatio(1); + long start = System.currentTimeMillis(); + sleep(100); + long end = System.currentTimeMillis(); + Assert.assertEquals(end - start, 100 * getWaitForRatio(), 50 * getWaitForRatio()); + } + + public static class MyServlet extends HttpServlet { + @Override + protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { + resp.getWriter().write("foo"); + } + } + + @Test + @TestJetty + public void testJetty() throws Exception { + Context context = new Context(); + context.setContextPath("/"); + context.addServlet(MyServlet.class, "/bar"); + Server server = TestJettyHelper.getJettyServer(); + server.addHandler(context); + server.start(); + URL url = new URL(TestJettyHelper.getJettyURL(), "/bar"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK); + BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); + Assert.assertEquals(reader.readLine(), "foo"); + reader.close(); + } + + @Test + 
@TestException(exception = RuntimeException.class) + public void testException0() { + throw new RuntimeException("foo"); + } + + @Test + @TestException(exception = RuntimeException.class, msgRegExp = ".o.") + public void testException1() { + throw new RuntimeException("foo"); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfs.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfs.java new file mode 100644 index 0000000000..d9d165bcfe --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfs.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + + +/** + * Annotation for {@link HTestCase} subclasses to indicate that the test method + * requires a FileSystemAccess cluster. + *
 + * The {@link TestHdfsHelper#getHdfsConf()} returns a FileSystemAccess JobConf preconfigured to connect + * to the FileSystemAccess test minicluster or the FileSystemAccess cluster information. + * + * A HDFS test directory for the test will be created. The HDFS test directory + * location can be retrieved using the {@link TestHdfsHelper#getHdfsTestDir()} method. + *
    + * Refer to the {@link HTestCase} class for details on how to use and configure + * a FileSystemAccess test minicluster or a real FileSystemAccess cluster for the tests. + */ +@Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@Target(java.lang.annotation.ElementType.METHOD) +public @interface TestHdfs { +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java new file mode 100644 index 0000000000..cd030695f9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.Test; +import org.junit.runners.model.FrameworkMethod; +import org.junit.runners.model.Statement; + +import java.io.File; +import java.util.concurrent.atomic.AtomicInteger; + +public class TestHdfsHelper extends TestDirHelper { + + @Test + public void dummy() { + } + + public static final String HADOOP_MINI_HDFS = "test.hadoop.hdfs"; + + private static ThreadLocal HDFS_CONF_TL = new InheritableThreadLocal(); + + private static ThreadLocal HDFS_TEST_DIR_TL = new InheritableThreadLocal(); + + @Override + public Statement apply(Statement statement, FrameworkMethod frameworkMethod, Object o) { + TestHdfs testHdfsAnnotation = frameworkMethod.getAnnotation(TestHdfs.class); + if (testHdfsAnnotation != null) { + statement = new HdfsStatement(statement, frameworkMethod.getName()); + } + return super.apply(statement, frameworkMethod, o); + } + + private static class HdfsStatement extends Statement { + private Statement statement; + private String testName; + + public HdfsStatement(Statement statement, String testName) { + this.statement = statement; + this.testName = testName; + } + + @Override + public void evaluate() throws Throwable { + MiniDFSCluster miniHdfs = null; + Configuration conf = HadoopUsersConfTestHelper.getBaseConf(); + if (Boolean.parseBoolean(System.getProperty(HADOOP_MINI_HDFS, "true"))) { + miniHdfs = startMiniHdfs(conf); + conf = miniHdfs.getConfiguration(0); + } + try { + HDFS_CONF_TL.set(conf); + HDFS_TEST_DIR_TL.set(resetHdfsTestDir(conf)); + statement.evaluate(); + } finally { + HDFS_CONF_TL.remove(); + HDFS_TEST_DIR_TL.remove(); + } + } + + private static AtomicInteger counter = new AtomicInteger(); + + private Path resetHdfsTestDir(Configuration conf) { + + Path testDir = new Path("./" + TEST_DIR_ROOT, 
testName + "-" + counter.getAndIncrement()); + try { + // currentUser + FileSystem fs = FileSystem.get(conf); + fs.delete(testDir, true); + fs.mkdirs(testDir); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + return testDir; + } + } + + /** + * Returns the HDFS test directory for the current test, only available when the + * test method has been annotated with {@link TestHdfs}. + * + * @return the HDFS test directory for the current test. It is an full/absolute + * Path. + */ + public static Path getHdfsTestDir() { + Path testDir = HDFS_TEST_DIR_TL.get(); + if (testDir == null) { + throw new IllegalStateException("This test does not use @TestHdfs"); + } + return testDir; + } + + /** + * Returns a FileSystemAccess JobConf preconfigured with the FileSystemAccess cluster + * settings for testing. This configuration is only available when the test + * method has been annotated with {@link TestHdfs}. Refer to {@link HTestCase} + * header for details) + * + * @return the FileSystemAccess JobConf preconfigured with the FileSystemAccess cluster + * settings for testing + */ + public static Configuration getHdfsConf() { + Configuration conf = HDFS_CONF_TL.get(); + if (conf == null) { + throw new IllegalStateException("This test does not use @TestHdfs"); + } + return new Configuration(conf); + } + + private static MiniDFSCluster MINI_DFS = null; + + private static synchronized MiniDFSCluster startMiniHdfs(Configuration conf) throws Exception { + if (MINI_DFS == null) { + if (System.getProperty("hadoop.log.dir") == null) { + System.setProperty("hadoop.log.dir", new File(TEST_DIR_ROOT, "hadoop-log").getAbsolutePath()); + } + if (System.getProperty("test.build.data") == null) { + System.setProperty("test.build.data", new File(TEST_DIR_ROOT, "hadoop-data").getAbsolutePath()); + } + + conf = new Configuration(conf); + HadoopUsersConfTestHelper.addUserConf(conf); + conf.set("fs.hdfs.impl.disable.cache", "true"); + conf.set("dfs.block.access.token.enable", "false"); + conf.set("dfs.permissions", "true"); + conf.set("hadoop.security.authentication", "simple"); + MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf); + builder.numDataNodes(2); + MiniDFSCluster miniHdfs = builder.build(); + FileSystem fileSystem = miniHdfs.getFileSystem(); + fileSystem.mkdirs(new Path("/tmp")); + fileSystem.mkdirs(new Path("/user")); + fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx")); + fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx")); + MINI_DFS = miniHdfs; + } + return MINI_DFS; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJetty.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJetty.java new file mode 100644 index 0000000000..ebdf293c24 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJetty.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import java.lang.annotation.Retention; +import java.lang.annotation.Target; + +/** + * Annotation for {@link HTestCase} subclasses to indicate that the test method + * requires a Jetty servlet-container. + *
 + * The {@link TestJettyHelper#getJettyServer()} returns a ready-to-configure Jetty + * servlet-container. After registering contexts, servlets and filters, the Jetty + * server must be started (getJettyServer().start()). The Jetty server + * is automatically stopped at the end of the test method invocation. + * + * Use the {@link TestJettyHelper#getJettyURL()} to obtain the base URL + * (schema://host:port) of the Jetty server. + *
    + * Refer to the {@link HTestCase} class for more details. + */ +@Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@Target(java.lang.annotation.ElementType.METHOD) +public @interface TestJetty { +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java new file mode 100644 index 0000000000..1a4f5b215e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestJettyHelper.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import org.junit.Test; +import org.junit.rules.MethodRule; +import org.junit.runners.model.FrameworkMethod; +import org.junit.runners.model.Statement; +import org.mortbay.jetty.Server; + +import java.net.InetAddress; +import java.net.MalformedURLException; +import java.net.ServerSocket; +import java.net.URL; + +public class TestJettyHelper implements MethodRule { + + @Test + public void dummy() { + } + + private static ThreadLocal TEST_SERVLET_TL = new InheritableThreadLocal(); + + @Override + public Statement apply(final Statement statement, final FrameworkMethod frameworkMethod, final Object o) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + Server server = null; + TestJetty testJetty = frameworkMethod.getAnnotation(TestJetty.class); + if (testJetty != null) { + server = createJettyServer(); + } + try { + TEST_SERVLET_TL.set(server); + statement.evaluate(); + } finally { + TEST_SERVLET_TL.remove(); + if (server != null && server.isRunning()) { + try { + server.stop(); + } catch (Exception ex) { + throw new RuntimeException("Could not stop embedded servlet container, " + ex.getMessage(), ex); + } + } + } + } + }; + } + + private Server createJettyServer() { + try { + + String host = InetAddress.getLocalHost().getHostName(); + ServerSocket ss = new ServerSocket(0); + int port = ss.getLocalPort(); + ss.close(); + Server server = new Server(0); + server.getConnectors()[0].setHost(host); + server.getConnectors()[0].setPort(port); + return server; + } catch (Exception ex) { + throw new RuntimeException("Could not stop embedded servlet container, " + ex.getMessage(), ex); + } + } + + /** + * Returns a Jetty server ready to be configured and the started. This server + * is only available when the test method has been annotated with + * {@link TestJetty}. Refer to {@link HTestCase} header for details. + *
    + * Once configured, the Jetty server should be started. The server will be + * automatically stopped when the test method ends. + * + * @return a Jetty server ready to be configured and the started. + */ + public static Server getJettyServer() { + Server server = TEST_SERVLET_TL.get(); + if (server == null) { + throw new IllegalStateException("This test does not use @TestJetty"); + } + return server; + } + + /** + * Returns the base URL (SCHEMA://HOST:PORT) of the test Jetty server + * (see {@link #getJettyServer()}) once started. + * + * @return the base URL (SCHEMA://HOST:PORT) of the test Jetty server. + */ + public static URL getJettyURL() { + Server server = TEST_SERVLET_TL.get(); + if (server == null) { + throw new IllegalStateException("This test does not use @TestJetty"); + } + try { + return new URL("http://" + server.getConnectors()[0].getHost() + ":" + server.getConnectors()[0].getPort()); + } catch (MalformedURLException ex) { + throw new RuntimeException("It should never happen, " + ex.getMessage(), ex); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/TestServerWebApp1.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/TestServerWebApp1.properties new file mode 100644 index 0000000000..84f97bba9e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/TestServerWebApp1.properties @@ -0,0 +1,13 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/TestServerWebApp2.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/TestServerWebApp2.properties new file mode 100644 index 0000000000..33fd831785 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/TestServerWebApp2.properties @@ -0,0 +1,15 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# +# diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/classutils.txt b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/classutils.txt new file mode 100644 index 0000000000..421376db9e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/classutils.txt @@ -0,0 +1 @@ +dummy diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/default-log4j.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/default-log4j.properties new file mode 100644 index 0000000000..75175124c5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/default-log4j.properties @@ -0,0 +1,22 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#log4j.appender.test=org.apache.log4j.varia.NullAppender +#log4j.appender.test=org.apache.log4j.ConsoleAppender +log4j.appender.test=org.apache.log4j.FileAppender +log4j.appender.test.File=${test.dir}/test.log +log4j.appender.test.Append=true +log4j.appender.test.layout=org.apache.log4j.PatternLayout +log4j.appender.test.layout.ConversionPattern=%d{ISO8601} %5p %20c{1}: %4L - %m%n +log4j.rootLogger=ALL, test + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/server.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/server.properties new file mode 100644 index 0000000000..84f97bba9e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/server.properties @@ -0,0 +1,13 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/testserver-default.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/testserver-default.xml new file mode 100644 index 0000000000..423a3c9532 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/testserver-default.xml @@ -0,0 +1,20 @@ + + + + + testserver.a + default + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/testserver.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/testserver.properties new file mode 100644 index 0000000000..84f97bba9e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/testserver.properties @@ -0,0 +1,13 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2dbdf9799c..df736a6d1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -24,6 +24,9 @@ Trunk (unreleased changes) HDFS-2430. The number of failed or low-resource volumes the NN can tolerate should be configurable. (atm) + HDFS-2178. Contributing Hoop to HDFS, replacement for HDFS proxy with + read/write capabilities. (tucu) + IMPROVEMENTS HADOOP-7524 Change RPC to allow multiple protocols including multuple diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml index 2a1c82fa6b..824edc3210 100644 --- a/hadoop-hdfs-project/pom.xml +++ b/hadoop-hdfs-project/pom.xml @@ -29,6 +29,7 @@ hadoop-hdfs + hadoop-hdfs-httpfs diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index ec4edda4f2..19bde74059 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -88,6 +88,12 @@ hadoop-hdfs ${project.version} + + org.apache.hadoop + hadoop-hdfs + ${project.version} + test-jar + org.apache.hadoop hadoop-mapreduce-client-app @@ -428,6 +434,16 @@ commons-daemon ${commons-daemon.version} + + org.jdom + jdom + 1.1 + + + com.googlecode.json-simple + json-simple + 1.1 + @@ -452,7 +468,7 @@ org.apache.maven.plugins maven-surefire-plugin - 2.9 + 2.10 org.apache.maven.plugins
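
Note: the following is a minimal, illustrative sketch (not part of the HDFS-2178 patch) showing how the @TestDir, @TestHdfs, @TestJetty and @TestException annotations added above are meant to be combined with their helper classes in a test. The class name and literal values are hypothetical; all APIs used (HFSTestCase, TestDirHelper, TestHdfsHelper, TestJettyHelper, TestException) are the ones introduced by this patch.

package org.apache.hadoop.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;

import java.io.File;
import java.net.URL;

// Hypothetical test class; it extends HFSTestCase so the TestDirHelper,
// TestHdfsHelper and TestJettyHelper method rules are active for each test.
public class ExampleHttpFSTest extends HFSTestCase {

  @Test
  @TestDir
  public void usesLocalTestDir() throws Exception {
    // A fresh local directory is created for this test method by TestDirHelper.
    File dir = TestDirHelper.getTestDir();
    new File(dir, "scratch").mkdirs();
  }

  @Test
  @TestHdfs
  public void usesMiniHdfs() throws Exception {
    // The configuration points at the MiniDFSCluster started by TestHdfsHelper.
    Configuration conf = TestHdfsHelper.getHdfsConf();
    FileSystem fs = FileSystem.get(conf);
    try {
      fs.mkdirs(new Path(TestHdfsHelper.getHdfsTestDir(), "data"));
    } finally {
      fs.close();
    }
  }

  @Test
  @TestJetty
  public void usesEmbeddedJetty() throws Exception {
    // The server is created by TestJettyHelper but must be started by the test
    // after contexts/servlets are registered; it is stopped automatically afterwards.
    Server server = TestJettyHelper.getJettyServer();
    Context context = new Context();
    context.setContextPath("/");
    server.addHandler(context);
    server.start();
    // Base http://host:port URL of the embedded server.
    URL base = TestJettyHelper.getJettyURL();
  }

  @Test
  @TestException(exception = IllegalArgumentException.class, msgRegExp = "invalid.*")
  public void expectsException() {
    // TestExceptionHelper fails the test unless this exception type is thrown
    // with a message matching the regular expression above.
    throw new IllegalArgumentException("invalid argument");
  }
}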