Merge changes from trunk

Jing Zhao 2014-09-03 15:36:47 -07:00
commit 45d5b13256
257 changed files with 7891 additions and 2781 deletions

.gitignore vendored

@ -9,6 +9,7 @@
.project
.settings
target
build
hadoop-common-project/hadoop-kms/downloads/
hadoop-hdfs-project/hadoop-hdfs/downloads
hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads


@ -70,8 +70,10 @@ fi
ARTIFACTS_DIR="target/artifacts"
# Create staging dir for release artifacts
# mvn clean for sanity
run ${MVN} clean
# Create staging dir for release artifacts
run mkdir -p ${ARTIFACTS_DIR}
# Create RAT report
@ -80,10 +82,17 @@ run ${MVN} apache-rat:check
# Create SRC and BIN tarballs for release,
# Using 'install' goal instead of 'package' so artifacts are available
# in the Maven local cache for the site generation
run ${MVN} install -Pdist,docs,src,native -DskipTests -Dtar
run ${MVN} install -Pdist,src,native -DskipTests -Dtar
# Create site for release
run ${MVN} site site:stage -Pdist -Psrc
run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn
run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce
run cp ./hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html target/staging/hadoop-project/hadoop-project-dist/hadoop-common/
run cp ./hadoop-common-project/hadoop-common/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-common/
run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-hdfs/
run cp ./hadoop-yarn-project/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn/
run cp ./hadoop-mapreduce-project/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce/
run mv target/staging/hadoop-project target/r${HADOOP_VERSION}/
run cd target/
run tar czf hadoop-site-${HADOOP_VERSION}.tar.gz r${HADOOP_VERSION}/*
@ -94,14 +103,19 @@ find . -name rat.txt | xargs -I% cat % > ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSIO
# Stage CHANGES.txt files
run cp ./hadoop-common-project/hadoop-common/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-COMMON-${HADOOP_VERSION}${RC_LABEL}.txt
run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-HDFS--${HADOOP_VERSION}${RC_LABEL}.txt
run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-HDFS-${HADOOP_VERSION}${RC_LABEL}.txt
run cp ./hadoop-mapreduce-project/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-MAPREDUCE-${HADOOP_VERSION}${RC_LABEL}.txt
run cp ./hadoop-yarn-project/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-YARN-${HADOOP_VERSION}${RC_LABEL}.txt
# Stage BIN tarball
# Prepare and stage BIN tarball
run cd hadoop-dist/target/
run tar -xzf hadoop-${HADOOP_VERSION}.tar.gz
run cp -r ../../target/r${HADOOP_VERSION}/* hadoop-${HADOOP_VERSION}/share/doc/hadoop/
run tar -czf hadoop-${HADOOP_VERSION}.tar.gz hadoop-${HADOOP_VERSION}
run cd ../..
run mv hadoop-dist/target/hadoop-${HADOOP_VERSION}.tar.gz ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz
# State SRC tarball
# Stage SRC tarball
run mv hadoop-dist/target/hadoop-${HADOOP_VERSION}-src.tar.gz ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-src.tar.gz
# Stage SITE tarball


@ -16,7 +16,7 @@
ulimit -n 1024
### Setup some variables.
### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process
### BUILD_URL is set by Hudson if it is run by patch process
### Read variables from properties file
bindir=$(dirname $0)
@ -36,7 +36,7 @@ BUILD_NATIVE=true
PS=${PS:-ps}
AWK=${AWK:-awk}
WGET=${WGET:-wget}
SVN=${SVN:-svn}
GIT=${GIT:-git}
GREP=${GREP:-grep}
PATCH=${PATCH:-patch}
DIFF=${DIFF:-diff}
@ -59,13 +59,13 @@ printUsage() {
echo "--mvn-cmd=<cmd> The 'mvn' command to use (default \$MAVEN_HOME/bin/mvn, or 'mvn')"
echo "--ps-cmd=<cmd> The 'ps' command to use (default 'ps')"
echo "--awk-cmd=<cmd> The 'awk' command to use (default 'awk')"
echo "--svn-cmd=<cmd> The 'svn' command to use (default 'svn')"
echo "--git-cmd=<cmd> The 'git' command to use (default 'git')"
echo "--grep-cmd=<cmd> The 'grep' command to use (default 'grep')"
echo "--patch-cmd=<cmd> The 'patch' command to use (default 'patch')"
echo "--diff-cmd=<cmd> The 'diff' command to use (default 'diff')"
echo "--findbugs-home=<path> Findbugs home directory (default FINDBUGS_HOME environment variable)"
echo "--forrest-home=<path> Forrest home directory (default FORREST_HOME environment variable)"
echo "--dirty-workspace Allow the local SVN workspace to have uncommitted changes"
echo "--dirty-workspace Allow the local git workspace to have uncommitted changes"
echo "--run-tests Run all tests below the base directory"
echo "--build-native=<bool> If true, then build native components (default 'true')"
echo
@ -107,8 +107,8 @@ parseArgs() {
--wget-cmd=*)
WGET=${i#*=}
;;
--svn-cmd=*)
SVN=${i#*=}
--git-cmd=*)
GIT=${i#*=}
;;
--grep-cmd=*)
GREP=${i#*=}
@ -197,7 +197,7 @@ checkout () {
echo ""
### When run by a developer, if the workspace contains modifications, do not continue
### unless the --dirty-workspace option was set
status=`$SVN stat --ignore-externals | sed -e '/^X[ ]*/D'`
status=`$GIT status --porcelain`
if [[ $JENKINS == "false" ]] ; then
if [[ "$status" != "" && -z $DIRTY_WORKSPACE ]] ; then
echo "ERROR: can't run in a workspace that contains the following modifications"
@ -207,10 +207,12 @@ checkout () {
echo
else
cd $BASEDIR
$SVN revert -R .
rm -rf `$SVN status --no-ignore`
$SVN update
$GIT reset --hard
$GIT clean -xdf
$GIT checkout trunk
$GIT pull --rebase
fi
GIT_REVISION=`git rev-parse --verify --short HEAD`
return $?
}
@ -229,10 +231,10 @@ downloadPatch () {
echo "$defect patch is being downloaded at `date` from"
echo "$patchURL"
$WGET -q -O $PATCH_DIR/patch $patchURL
VERSION=${SVN_REVISION}_${defect}_PATCH-${patchNum}
VERSION=${GIT_REVISION}_${defect}_PATCH-${patchNum}
JIRA_COMMENT="Here are the results of testing the latest attachment
$patchURL
against trunk revision ${SVN_REVISION}."
against trunk revision ${GIT_REVISION}."
### Copy in any supporting files needed by this process
cp -r $SUPPORT_DIR/lib/* ./lib


@ -23,6 +23,14 @@
</formats>
<includeBaseDirectory>true</includeBaseDirectory>
<fileSets>
<fileSet>
<directory>.</directory>
<includes>
<include>LICENCE.txt</include>
<include>README.txt</include>
<include>NOTICE.txt</include>
</includes>
</fileSet>
<fileSet>
<directory>.</directory>
<useDefaultExcludes>true</useDefaultExcludes>


@ -61,6 +61,16 @@
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.tomcat.embed</groupId>
<artifactId>tomcat-embed-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.tomcat.embed</groupId>
<artifactId>tomcat-embed-logging-juli</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>


@ -519,9 +519,7 @@ public static void createAuthCookie(HttpServletResponse resp, String token,
StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE)
.append("=");
if (token != null && token.length() > 0) {
sb.append("\"")
.append(token)
.append("\"");
sb.append(token);
}
sb.append("; Version=1");


@ -13,7 +13,22 @@
*/
package org.apache.hadoop.security.authentication.client;
import org.apache.catalina.deploy.FilterDef;
import org.apache.catalina.deploy.FilterMap;
import org.apache.catalina.startup.Tomcat;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.http.HttpResponse;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.Credentials;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.params.AuthPolicy;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.impl.auth.SPNegoSchemeFactory;
import org.apache.http.impl.client.SystemDefaultHttpClient;
import org.apache.http.util.EntityUtils;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.FilterHolder;
@ -24,16 +39,19 @@
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.ServerSocket;
import java.net.URL;
import java.security.Principal;
import java.util.Properties;
import org.junit.Assert;
@ -41,10 +59,18 @@ public class AuthenticatorTestCase {
private Server server;
private String host = null;
private int port = -1;
private boolean useTomcat = false;
private Tomcat tomcat = null;
Context context;
private static Properties authenticatorConfig;
public AuthenticatorTestCase() {}
public AuthenticatorTestCase(boolean useTomcat) {
this.useTomcat = useTomcat;
}
protected static void setAuthenticationHandlerConfig(Properties config) {
authenticatorConfig = config;
}
@ -80,7 +106,19 @@ protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws S
}
}
protected int getLocalPort() throws Exception {
ServerSocket ss = new ServerSocket(0);
int ret = ss.getLocalPort();
ss.close();
return ret;
}
protected void start() throws Exception {
if (useTomcat) startTomcat();
else startJetty();
}
protected void startJetty() throws Exception {
server = new Server(0);
context = new Context();
context.setContextPath("/foo");
@ -88,16 +126,42 @@ protected void start() throws Exception {
context.addFilter(new FilterHolder(TestFilter.class), "/*", 0);
context.addServlet(new ServletHolder(TestServlet.class), "/bar");
host = "localhost";
ServerSocket ss = new ServerSocket(0);
port = ss.getLocalPort();
ss.close();
port = getLocalPort();
server.getConnectors()[0].setHost(host);
server.getConnectors()[0].setPort(port);
server.start();
System.out.println("Running embedded servlet container at: http://" + host + ":" + port);
}
protected void startTomcat() throws Exception {
tomcat = new Tomcat();
File base = new File(System.getProperty("java.io.tmpdir"));
org.apache.catalina.Context ctx =
tomcat.addContext("/foo",base.getAbsolutePath());
FilterDef fd = new FilterDef();
fd.setFilterClass(TestFilter.class.getName());
fd.setFilterName("TestFilter");
FilterMap fm = new FilterMap();
fm.setFilterName("TestFilter");
fm.addURLPattern("/*");
fm.addServletName("/bar");
ctx.addFilterDef(fd);
ctx.addFilterMap(fm);
tomcat.addServlet(ctx, "/bar", TestServlet.class.getName());
ctx.addServletMapping("/bar", "/bar");
host = "localhost";
port = getLocalPort();
tomcat.setHostname(host);
tomcat.setPort(port);
tomcat.start();
}
protected void stop() throws Exception {
if (useTomcat) stopTomcat();
else stopJetty();
}
protected void stopJetty() throws Exception {
try {
server.stop();
} catch (Exception e) {
@ -109,6 +173,18 @@ protected void stop() throws Exception {
}
}
protected void stopTomcat() throws Exception {
try {
tomcat.stop();
} catch (Exception e) {
}
try {
tomcat.destroy();
} catch (Exception e) {
}
}
protected String getBaseURL() {
return "http://" + host + ":" + port + "/foo/bar";
}
@ -165,4 +241,57 @@ protected void _testAuthentication(Authenticator authenticator, boolean doPost)
}
}
private SystemDefaultHttpClient getHttpClient() {
final SystemDefaultHttpClient httpClient = new SystemDefaultHttpClient();
httpClient.getAuthSchemes().register(AuthPolicy.SPNEGO, new SPNegoSchemeFactory(true));
Credentials use_jaas_creds = new Credentials() {
public String getPassword() {
return null;
}
public Principal getUserPrincipal() {
return null;
}
};
httpClient.getCredentialsProvider().setCredentials(
AuthScope.ANY, use_jaas_creds);
return httpClient;
}
private void doHttpClientRequest(HttpClient httpClient, HttpUriRequest request) throws Exception {
HttpResponse response = null;
try {
response = httpClient.execute(request);
final int httpStatus = response.getStatusLine().getStatusCode();
Assert.assertEquals(HttpURLConnection.HTTP_OK, httpStatus);
} finally {
if (response != null) EntityUtils.consumeQuietly(response.getEntity());
}
}
protected void _testAuthenticationHttpClient(Authenticator authenticator, boolean doPost) throws Exception {
start();
try {
SystemDefaultHttpClient httpClient = getHttpClient();
doHttpClientRequest(httpClient, new HttpGet(getBaseURL()));
// Always do a GET before POST to trigger the SPNego negotiation
if (doPost) {
HttpPost post = new HttpPost(getBaseURL());
byte [] postBytes = POST.getBytes();
ByteArrayInputStream bis = new ByteArrayInputStream(postBytes);
InputStreamEntity entity = new InputStreamEntity(bis, postBytes.length);
// Important that the entity is not repeatable -- this means if
// we have to renegotiate (e.g. b/c the cookie wasn't handled properly)
// the test will fail.
Assert.assertFalse(entity.isRepeatable());
post.setEntity(entity);
doHttpClientRequest(httpClient, post);
}
} finally {
stop();
}
}
}


@ -20,16 +20,36 @@
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.junit.Assert;
import org.junit.Before;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.junit.runner.RunWith;
import org.junit.Test;
import java.io.File;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Arrays;
import java.util.Collection;
import java.util.Properties;
import java.util.concurrent.Callable;
@RunWith(Parameterized.class)
public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
private boolean useTomcat = false;
public TestKerberosAuthenticator(boolean useTomcat) {
this.useTomcat = useTomcat;
}
@Parameterized.Parameters
public static Collection booleans() {
return Arrays.asList(new Object[][] {
{ false },
{ true }
});
}
@Before
public void setup() throws Exception {
// create keytab
@ -53,7 +73,7 @@ private Properties getAuthenticationHandlerConfiguration() {
@Test(timeout=60000)
public void testFallbacktoPseudoAuthenticator() throws Exception {
AuthenticatorTestCase auth = new AuthenticatorTestCase();
AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
Properties props = new Properties();
props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
@ -63,7 +83,7 @@ public void testFallbacktoPseudoAuthenticator() throws Exception {
@Test(timeout=60000)
public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception {
AuthenticatorTestCase auth = new AuthenticatorTestCase();
AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
Properties props = new Properties();
props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
@ -73,7 +93,7 @@ public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception {
@Test(timeout=60000)
public void testNotAuthenticated() throws Exception {
AuthenticatorTestCase auth = new AuthenticatorTestCase();
AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
auth.start();
try {
@ -89,7 +109,7 @@ public void testNotAuthenticated() throws Exception {
@Test(timeout=60000)
public void testAuthentication() throws Exception {
final AuthenticatorTestCase auth = new AuthenticatorTestCase();
final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
AuthenticatorTestCase.setAuthenticationHandlerConfig(
getAuthenticationHandlerConfiguration());
KerberosTestUtils.doAsClient(new Callable<Void>() {
@ -103,7 +123,7 @@ public Void call() throws Exception {
@Test(timeout=60000)
public void testAuthenticationPost() throws Exception {
final AuthenticatorTestCase auth = new AuthenticatorTestCase();
final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
AuthenticatorTestCase.setAuthenticationHandlerConfig(
getAuthenticationHandlerConfiguration());
KerberosTestUtils.doAsClient(new Callable<Void>() {
@ -114,4 +134,32 @@ public Void call() throws Exception {
}
});
}
@Test(timeout=60000)
public void testAuthenticationHttpClient() throws Exception {
final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
AuthenticatorTestCase.setAuthenticationHandlerConfig(
getAuthenticationHandlerConfiguration());
KerberosTestUtils.doAsClient(new Callable<Void>() {
@Override
public Void call() throws Exception {
auth._testAuthenticationHttpClient(new KerberosAuthenticator(), false);
return null;
}
});
}
@Test(timeout=60000)
public void testAuthenticationHttpClientPost() throws Exception {
final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
AuthenticatorTestCase.setAuthenticationHandlerConfig(
getAuthenticationHandlerConfiguration());
KerberosTestUtils.doAsClient(new Callable<Void>() {
@Override
public Void call() throws Exception {
auth._testAuthenticationHttpClient(new KerberosAuthenticator(), true);
return null;
}
});
}
}


@ -125,6 +125,12 @@ Trunk (Unreleased)
HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
HADOOP-11013. CLASSPATH handling should be consolidated, debuggable (aw)
HADOOP-11041. VersionInfo specifies subversion (Tsuyoshi OZAWA via aw)
HADOOP-10373 create tools/hadoop-amazon for aws/EMR support (stevel)
BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled.
@ -232,9 +238,6 @@ Trunk (Unreleased)
HADOOP-8813. Add InterfaceAudience and InterfaceStability annotations
to RPC Server and Client classes. (Brandon Li via suresh)
HADOOP-8815. RandomDatum needs to override hashCode().
(Brandon Li via suresh)
HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
required context item is not configured
(Brahma Reddy Battula via harsh)
@ -323,62 +326,16 @@ Trunk (Unreleased)
HADOOP-10996. Stop violence in the *_HOME (aw)
HADOOP-10748. HttpServer2 should not load JspServlet. (wheat9)
HADOOP-11033. shell scripts ignore JAVA_HOME on OS X. (aw)
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HADOOP-10734. Implement high-performance secure random number sources.
(Yi Liu via Colin Patrick McCabe)
HADOOP-10603. Crypto input and output streams implementing Hadoop stream
interfaces. (Yi Liu and Charles Lamb)
HADOOP-10628. Javadoc and few code style improvement for Crypto
input and output streams. (Yi Liu via clamb)
HADOOP-10632. Minor improvements to Crypto input and output streams.
(Yi Liu)
HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
HADOOP-10653. Add a new constructor for CryptoInputStream that
receives current position of wrapped stream. (Yi Liu)
HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[].
(wang via yliu)
HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL.
(Yi Liu via cmccabe)
HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
format. (Yi Liu)
HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
JCE if non native support. (Yi Liu)
HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
openssl versions (cmccabe)
HADOOP-10853. Refactor get instance of CryptoCodec and support create via
algorithm/mode/padding. (Yi Liu)
HADOOP-10919. Copy command should preserve raw.* namespace
extended attributes. (clamb)
HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not
loaded. (umamahesh)
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@ -516,6 +473,29 @@ Release 2.6.0 - UNRELEASED
HADOOP-10998. Fix bash tab completion code to work (Jim Hester via aw)
HADOOP-10880. Move HTTP delegation tokens out of URL querystring to
a header. (tucu)
HADOOP-11005. Fix HTTP content type for ReconfigurationServlet.
(Lei Xu via wang)
HADOOP-10814. Update Tomcat version used by HttpFS and KMS to latest
6.x version. (rkanter via tucu)
HADOOP-10994. KeyProviderCryptoExtension should use CryptoCodec for
generation/decryption of keys. (tucu)
HADOOP-11021. Configurable replication factor in the hadoop archive
command. (Zhe Zhang via wang)
HADOOP-11030. Define a variable jackson.version instead of using constant
at multiple places. (Juan Yu via kasha)
HADOOP-10990. Add missed NFSv3 request and response classes (brandonli)
HADOOP-10863. KMS should have a blacklist for decrypting EEKs.
(asuresh via tucu)
OPTIMIZATIONS
HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
@ -568,6 +548,8 @@ Release 2.6.0 - UNRELEASED
schedules incoming calls and multiplexes outgoing calls. (Chris Li via
Arpit Agarwal)
HADOOP-10833. Remove unused cache in UserProvider. (Benoy Antony)
BUG FIXES
HADOOP-10781. Unportable getgrouplist() usage breaks FreeBSD (Dmitry
@ -699,6 +681,68 @@ Release 2.6.0 - UNRELEASED
HADOOP-10989. Work around buggy getgrouplist() implementations on Linux that
return 0 on failure. (cnauroth)
HADOOP-8815. RandomDatum needs to override hashCode().
(Brandon Li via suresh)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HADOOP-10734. Implement high-performance secure random number sources.
(Yi Liu via Colin Patrick McCabe)
HADOOP-10603. Crypto input and output streams implementing Hadoop stream
interfaces. (Yi Liu and Charles Lamb)
HADOOP-10628. Javadoc and few code style improvement for Crypto
input and output streams. (Yi Liu via clamb)
HADOOP-10632. Minor improvements to Crypto input and output streams.
(Yi Liu)
HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
HADOOP-10653. Add a new constructor for CryptoInputStream that
receives current position of wrapped stream. (Yi Liu)
HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[].
(wang via yliu)
HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL.
(Yi Liu via cmccabe)
HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
format. (Yi Liu)
HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
JCE if non native support. (Yi Liu)
HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
openssl versions (cmccabe)
HADOOP-10853. Refactor get instance of CryptoCodec and support create via
algorithm/mode/padding. (Yi Liu)
HADOOP-10919. Copy command should preserve raw.* namespace
extended attributes. (clamb)
HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not
loaded. (umamahesh)
--
HADOOP-10911. hadoop.auth cookie after HADOOP-10710 still not proper
according to RFC2109. (gchanan via tucu)
HADOOP-11036. Add build directory to .gitignore (Tsuyoshi OZAWA via aw)
HADOOP-11012. hadoop fs -text of zero-length file causes EOFException
(Eric Payne via jlowe)
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
@ -706,11 +750,16 @@ Release 2.5.1 - UNRELEASED
NEW FEATURES
IMPROVEMENTS
HADOOP-10956. Fix create-release script to include docs and necessary txt
files. (kasha)
OPTIMIZATIONS
BUG FIXES
HADOOP-11001. Fix test-patch to work with the git repo. (kasha)
Release 2.5.0 - 2014-08-11
INCOMPATIBLE CHANGES


@ -224,6 +224,10 @@
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.htrace</groupId>
<artifactId>htrace-core</artifactId>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>


@ -114,6 +114,7 @@ case ${COMMAND} in
;;
archive)
CLASS=org.apache.hadoop.tools.HadoopArchives
hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
hadoop_add_classpath "${TOOL_PATH}"
;;
checknative)
@ -136,10 +137,12 @@ case ${COMMAND} in
;;
distch)
CLASS=org.apache.hadoop.tools.DistCh
hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
hadoop_add_classpath "${TOOL_PATH}"
;;
distcp)
CLASS=org.apache.hadoop.tools.DistCp
hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
hadoop_add_classpath "${TOOL_PATH}"
;;
fs)
@ -168,11 +171,11 @@ case ${COMMAND} in
esac
# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
hadoop_finalize
export CLASSPATH
hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"


@ -129,6 +129,11 @@ while [[ -z "${_hadoop_common_done}" ]]; do
hadoop_exit_with_usage 1
fi
;;
--debug)
shift
# shellcheck disable=SC2034
HADOOP_SHELL_SCRIPT_DEBUG=true
;;
--help|-help|-h|help|--h|--\?|-\?|\?)
hadoop_exit_with_usage 0
;;


@ -21,6 +21,13 @@ function hadoop_error
echo "$*" 1>&2
}
function hadoop_debug
{
if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
echo "DEBUG: $*" 1>&2
fi
}
function hadoop_bootstrap_init
{
# NOTE: This function is not user replaceable.
@ -62,6 +69,7 @@ function hadoop_bootstrap_init
# defaults
export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
}
function hadoop_find_confdir
@ -80,6 +88,8 @@ function hadoop_find_confdir
conf_dir="etc/hadoop"
fi
export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}"
hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
}
function hadoop_exec_hadoopenv
@ -105,6 +115,7 @@ function hadoop_basic_init
# CLASSPATH initially contains $HADOOP_CONF_DIR
CLASSPATH="${HADOOP_CONF_DIR}"
hadoop_debug "Initial CLASSPATH=${HADOOP_CONF_DIR}"
if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then
@ -116,19 +127,19 @@ function hadoop_basic_init
# define HADOOP_HDFS_HOME
if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
[[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
export HADOOP_HDFS_HOME="${HADOOP_PREFIX}"
fi
# define HADOOP_YARN_HOME
if [[ -z "${HADOOP_YARN_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
[[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
export HADOOP_YARN_HOME="${HADOOP_PREFIX}"
fi
# define HADOOP_MAPRED_HOME
if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
[[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
[[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
fi
@ -274,6 +285,9 @@ function hadoop_add_param
if [[ ! ${!1} =~ $2 ]] ; then
# shellcheck disable=SC2086
eval $1="'${!1} $3'"
hadoop_debug "$1 accepted $3"
else
hadoop_debug "$1 declined $3"
fi
}
@ -283,8 +297,8 @@ function hadoop_add_classpath
# $1 = directory, file, wildcard, whatever to add
# $2 = before or after, which determines where in the
# classpath this object should go. default is after
# return 0 = success
# return 1 = failure (duplicate, doesn't exist, whatever)
# return 0 = success (added or duplicate)
# return 1 = failure (doesn't exist, whatever)
# However, with classpath (& JLP), we can do dedupe
# along with some sanity checking (e.g., missing directories)
@ -295,23 +309,29 @@ function hadoop_add_classpath
if [[ $1 =~ ^.*\*$ ]]; then
local mp=$(dirname "$1")
if [[ ! -d "${mp}" ]]; then
hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
return 1
fi
# no wildcard in the middle, so check existence
# (doesn't matter *what* it is)
elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then
hadoop_debug "Rejected CLASSPATH: $1 (does not exist)"
return 1
fi
if [[ -z "${CLASSPATH}" ]]; then
CLASSPATH=$1
hadoop_debug "Initial CLASSPATH=$1"
elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then
if [[ "$2" = "before" ]]; then
CLASSPATH="$1:${CLASSPATH}"
hadoop_debug "Prepend CLASSPATH: $1"
else
CLASSPATH+=:$1
hadoop_debug "Append CLASSPATH: $1"
fi
else
hadoop_debug "Dupe CLASSPATH: $1"
fi
return 0
}
@ -331,14 +351,20 @@ function hadoop_add_colonpath
if [[ -z "${!1}" ]]; then
# shellcheck disable=SC2086
eval $1="'$2'"
hadoop_debug "Initial colonpath($1): $2"
elif [[ "$3" = "before" ]]; then
# shellcheck disable=SC2086
eval $1="'$2:${!1}'"
hadoop_debug "Prepend colonpath($1): $2"
else
# shellcheck disable=SC2086
eval $1+="'$2'"
hadoop_debug "Append colonpath($1): $2"
fi
return 0
fi
hadoop_debug "Rejected colonpath($1): $2"
return 1
}
function hadoop_add_javalibpath
@ -397,6 +423,7 @@ function hadoop_add_to_classpath_hdfs
function hadoop_add_to_classpath_yarn
{
local i
#
# get all of the yarn jars+config in the path
#
@ -459,7 +486,7 @@ function hadoop_add_to_classpath_userpath
local i
local j
let c=0
if [[ -n "${HADOOP_CLASSPATH}" ]]; then
# I wonder if Java runs on VMS.
for i in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
@ -490,10 +517,12 @@ function hadoop_os_tricks
# examples for OS X and Linux. Vendors, replace this with your special sauce.
case ${HADOOP_OS_TYPE} in
Darwin)
if [[ -x /usr/libexec/java_home ]]; then
export JAVA_HOME="$(/usr/libexec/java_home)"
else
export JAVA_HOME=/Library/Java/Home
if [[ -z "${JAVA_HOME}" ]]; then
if [[ -x /usr/libexec/java_home ]]; then
export JAVA_HOME="$(/usr/libexec/java_home)"
else
export JAVA_HOME=/Library/Java/Home
fi
fi
;;
Linux)
@ -715,6 +744,11 @@ function hadoop_java_exec
local command=$1
local class=$2
shift 2
hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
export CLASSPATH
#shellcheck disable=SC2086
exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
}
@ -727,6 +761,11 @@ function hadoop_start_daemon
local command=$1
local class=$2
shift 2
hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
export CLASSPATH
#shellcheck disable=SC2086
exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
}
@ -807,6 +846,9 @@ function hadoop_start_secure_daemon
# note that shellcheck will throw a
# bogus for-our-use-case 2086 here.
# it doesn't properly support multi-line situations
hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
exec "${jsvc}" \
"-Dproc_${daemonname}" \


@ -23,6 +23,7 @@ this="$bin/$script"
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
# shellcheck disable=SC2034
HADOOP_NEW_CONFIG=true
. "$HADOOP_LIBEXEC_DIR/hadoop-config.sh"
@ -33,10 +34,10 @@ fi
CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
hadoop_add_param HADOOP_OPTS Xmx "$JAVA_HEAP_MAX"
hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
hadoop_finalize
export CLASSPATH
hadoop_java_exec rcc "${CLASS}" "$@"


@ -200,6 +200,7 @@ private void applyChanges(PrintWriter out, Reconfigurable reconf,
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
LOG.info("GET");
resp.setContentType("text/html");
PrintWriter out = resp.getWriter();
Reconfigurable reconf = getReconfigurable(req);
@ -214,6 +215,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
LOG.info("POST");
resp.setContentType("text/html");
PrintWriter out = resp.getWriter();
Reconfigurable reconf = getReconfigurable(req);


@ -24,6 +24,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.PerformanceAdvisory;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -48,7 +49,7 @@ public abstract class CryptoCodec implements Configurable {
*
* @param conf
* the configuration
* @param CipherSuite
* @param cipherSuite
* algorithm/mode/padding
* @return CryptoCodec the codec object. Null value will be returned if no
* crypto codec classes with cipher suite configured.
@ -66,15 +67,18 @@ public static CryptoCodec getInstance(Configuration conf,
CryptoCodec c = ReflectionUtils.newInstance(klass, conf);
if (c.getCipherSuite().getName().equals(cipherSuite.getName())) {
if (codec == null) {
LOG.debug("Using crypto codec {}.", klass.getName());
PerformanceAdvisory.LOG.debug("Using crypto codec {}.",
klass.getName());
codec = c;
}
} else {
LOG.warn("Crypto codec {} doesn't meet the cipher suite {}.",
PerformanceAdvisory.LOG.debug(
"Crypto codec {} doesn't meet the cipher suite {}.",
klass.getName(), cipherSuite.getName());
}
} catch (Exception e) {
LOG.warn("Crypto codec {} is not available.", klass.getName());
PerformanceAdvisory.LOG.debug("Crypto codec {} is not available.",
klass.getName());
}
}
@ -108,7 +112,8 @@ private static List<Class<? extends CryptoCodec>> getCodecClasses(
cipherSuite.getConfigSuffix();
String codecString = conf.get(configName);
if (codecString == null) {
LOG.warn("No crypto codec classes with cipher suite configured.");
PerformanceAdvisory.LOG.debug(
"No crypto codec classes with cipher suite configured.");
return null;
}
for (String c : Splitter.on(',').trimResults().omitEmptyStrings().
@ -117,9 +122,9 @@ private static List<Class<? extends CryptoCodec>> getCodecClasses(
Class<?> cls = conf.getClassByName(c);
result.add(cls.asSubclass(CryptoCodec.class));
} catch (ClassCastException e) {
LOG.warn("Class " + c + " is not a CryptoCodec.");
PerformanceAdvisory.LOG.debug("Class {} is not a CryptoCodec.", c);
} catch (ClassNotFoundException e) {
LOG.warn("Crypto codec " + c + " not found.");
PerformanceAdvisory.LOG.debug("Crypto codec {} not found.", c);
}
}


@ -32,6 +32,7 @@
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.base.Preconditions;
import org.apache.hadoop.util.PerformanceAdvisory;
/**
* OpenSSL cipher using JNI.
@ -82,6 +83,7 @@ static int get(String padding) throws NoSuchPaddingException {
String loadingFailure = null;
try {
if (!NativeCodeLoader.buildSupportsOpenssl()) {
PerformanceAdvisory.LOG.debug("Build does not support openssl");
loadingFailure = "build does not support openssl.";
} else {
initIDs();


@ -108,6 +108,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
super(conf);
this.uri = uri;
path = ProviderUtils.unnestUri(uri);
fs = path.getFileSystem(conf);


@ -56,6 +56,8 @@ public abstract class KeyProvider {
"hadoop.security.key.default.bitlength";
public static final int DEFAULT_BITLENGTH = 128;
private final Configuration conf;
/**
* The combination of both the key version name and the key material.
*/
@ -353,6 +355,24 @@ public String toString() {
}
}
/**
* Constructor.
*
* @param conf configuration for the provider
*/
public KeyProvider(Configuration conf) {
this.conf = new Configuration(conf);
}
/**
* Return the provider configuration.
*
* @return the provider configuration
*/
public Configuration getConf() {
return conf;
}
/**
* A helper function to create an options object.
* @param conf the configuration to use


@ -19,6 +19,7 @@
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
@ -29,6 +30,9 @@
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.Decryptor;
import org.apache.hadoop.crypto.Encryptor;
/**
* A KeyProvider with Cryptographic Extensions specifically for generating
@ -239,18 +243,25 @@ public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
Preconditions.checkNotNull(encryptionKey,
"No KeyVersion exists for key '%s' ", encryptionKeyName);
// Generate random bytes for new key and IV
Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
final byte[] newKey = new byte[encryptionKey.getMaterial().length];
RANDOM.get().nextBytes(newKey);
final byte[] iv = new byte[cipher.getBlockSize()];
RANDOM.get().nextBytes(iv);
cc.generateSecureRandom(newKey);
final byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()];
cc.generateSecureRandom(iv);
// Encryption key IV is derived from new key's IV
final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv);
// Encrypt the new key
cipher.init(Cipher.ENCRYPT_MODE,
new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
new IvParameterSpec(encryptionIV));
final byte[] encryptedKey = cipher.doFinal(newKey);
Encryptor encryptor = cc.createEncryptor();
encryptor.init(encryptionKey.getMaterial(), encryptionIV);
int keyLen = newKey.length;
ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
bbIn.put(newKey);
bbIn.flip();
encryptor.encrypt(bbIn, bbOut);
bbOut.flip();
byte[] encryptedKey = new byte[keyLen];
bbOut.get(encryptedKey);
return new EncryptedKeyVersion(encryptionKeyName,
encryptionKey.getVersionName(), iv,
new KeyVersion(encryptionKey.getName(), EEK, encryptedKey));
@ -274,19 +285,25 @@ public KeyVersion decryptEncryptedKey(
KeyProviderCryptoExtension.EEK,
encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
);
final byte[] encryptionKeyMaterial = encryptionKey.getMaterial();
// Encryption key IV is determined from encrypted key's IV
final byte[] encryptionIV =
EncryptedKeyVersion.deriveIV(encryptedKeyVersion.getEncryptedKeyIv());
// Init the cipher with encryption key parameters
Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
cipher.init(Cipher.DECRYPT_MODE,
new SecretKeySpec(encryptionKeyMaterial, "AES"),
new IvParameterSpec(encryptionIV));
// Decrypt the encrypted key
CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
Decryptor decryptor = cc.createDecryptor();
decryptor.init(encryptionKey.getMaterial(), encryptionIV);
final KeyVersion encryptedKV =
encryptedKeyVersion.getEncryptedKeyVersion();
final byte[] decryptedKey = cipher.doFinal(encryptedKV.getMaterial());
int keyLen = encryptedKV.getMaterial().length;
ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
bbIn.put(encryptedKV.getMaterial());
bbIn.flip();
decryptor.decrypt(bbIn, bbOut);
bbOut.flip();
byte[] decryptedKey = new byte[keyLen];
bbOut.get(decryptedKey);
return new KeyVersion(encryptionKey.getName(), EK, decryptedKey);
}
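Both generateEncryptedKey and decryptEncryptedKey above repeat the same byte[] -> direct ByteBuffer -> byte[] round trip around the codec's Encryptor/Decryptor. The following is a minimal sketch of that shared pattern, using only types already imported in this file; the helper class and method names are hypothetical, not part of the patch.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.security.GeneralSecurityException;
    import org.apache.hadoop.crypto.Encryptor;

    // Hypothetical helper showing the round trip used by both paths above;
    // with the AES-CTR suite the output length equals the input length.
    class CryptoBufferSketch {
      static byte[] roundTrip(Encryptor encryptor, byte[] input)
          throws IOException, GeneralSecurityException {
        ByteBuffer bbIn = ByteBuffer.allocateDirect(input.length);
        ByteBuffer bbOut = ByteBuffer.allocateDirect(input.length);
        bbIn.put(input);
        bbIn.flip();                     // switch bbIn to read mode
        encryptor.encrypt(bbIn, bbOut);  // transformed bytes land in bbOut
        bbOut.flip();                    // switch bbOut to read mode
        byte[] output = new byte[input.length];
        bbOut.get(output);
        return output;
      }
    }

The decrypt path has the same shape, just driven by a Decryptor initialized with the same key material and derived IV.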


@ -40,6 +40,7 @@ public static interface Extension {
private E extension;
public KeyProviderExtension(KeyProvider keyProvider, E extensions) {
super(keyProvider.getConf());
this.keyProvider = keyProvider;
this.extension = extensions;
}


@ -44,7 +44,8 @@ public class UserProvider extends KeyProvider {
private final Credentials credentials;
private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
private UserProvider() throws IOException {
private UserProvider(Configuration conf) throws IOException {
super(conf);
user = UserGroupInformation.getCurrentUser();
credentials = user.getCredentials();
}
@ -145,7 +146,7 @@ public static class Factory extends KeyProviderFactory {
public KeyProvider createProvider(URI providerName,
Configuration conf) throws IOException {
if (SCHEME_NAME.equals(providerName.getScheme())) {
return new UserProvider();
return new UserProvider(conf);
}
return null;
}


@ -283,6 +283,7 @@ public HttpURLConnection configure(HttpURLConnection conn)
}
public KMSClientProvider(URI uri, Configuration conf) throws IOException {
super(conf);
Path path = ProviderUtils.unnestUri(uri);
URL url = path.toUri().toURL();
kmsUrl = createServiceURL(url);


@ -25,6 +25,7 @@
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.base.Preconditions;
import org.apache.hadoop.util.PerformanceAdvisory;
/**
* OpenSSL secure random using JNI.
@ -67,6 +68,8 @@ public static boolean isNativeCodeLoaded() {
public OpensslSecureRandom() {
if (!nativeEnabled) {
PerformanceAdvisory.LOG.debug("Build does not support openssl, " +
"falling back to Java SecureRandom.");
fallback = new java.security.SecureRandom();
}
}


@ -381,7 +381,8 @@ public ChecksumFSOutputSummer(ChecksumFileSystem fs,
long blockSize,
Progressable progress)
throws IOException {
super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
fs.getBytesPerSum()));
int bytesPerSum = fs.getBytesPerSum();
this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize,
replication, blockSize, progress);
@ -405,10 +406,11 @@ public void close() throws IOException {
}
@Override
protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
int ckoff, int cklen)
throws IOException {
datas.write(b, offset, len);
sums.write(checksum);
sums.write(checksum, ckoff, cklen);
}
@Override


@ -337,7 +337,8 @@ public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file,
final short replication, final long blockSize,
final Progressable progress, final ChecksumOpt checksumOpt,
final boolean createParent) throws IOException {
super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
fs.getBytesPerSum()));
// checksumOpt is passed down to the raw fs. Unless it implements
// checksum internally, checksumOpt will be ignored.
@ -370,10 +371,11 @@ public void close() throws IOException {
}
@Override
protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
int ckoff, int cklen)
throws IOException {
datas.write(b, offset, len);
sums.write(checksum);
sums.write(checksum, ckoff, cklen);
}
@Override


@ -18,13 +18,14 @@
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.DataChecksum;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.Checksum;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This is a generic output stream for generating checksums for
* data before it is written to the underlying stream
@ -33,7 +34,7 @@
@InterfaceStability.Unstable
abstract public class FSOutputSummer extends OutputStream {
// data checksum
private Checksum sum;
private final DataChecksum sum;
// internal buffer for storing data before it is checksumed
private byte buf[];
// internal buffer for storing checksum
@ -41,18 +42,24 @@ abstract public class FSOutputSummer extends OutputStream {
// The number of valid bytes in the buffer.
private int count;
protected FSOutputSummer(Checksum sum, int maxChunkSize, int checksumSize) {
// We want this value to be a multiple of 3 because the native code checksums
// 3 chunks simultaneously. The chosen value of 9 strikes a balance between
// limiting the number of JNI calls and flushing to the underlying stream
// relatively frequently.
private static final int BUFFER_NUM_CHUNKS = 9;
protected FSOutputSummer(DataChecksum sum) {
this.sum = sum;
this.buf = new byte[maxChunkSize];
this.checksum = new byte[checksumSize];
this.buf = new byte[sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS];
this.checksum = new byte[sum.getChecksumSize() * BUFFER_NUM_CHUNKS];
this.count = 0;
}
/* write the data chunk in <code>b</code> starting at <code>offset</code> with
* a length of <code>len</code>, and its checksum
* a length of <code>len > 0</code>, and its checksum
*/
protected abstract void writeChunk(byte[] b, int offset, int len, byte[] checksum)
throws IOException;
protected abstract void writeChunk(byte[] b, int bOffset, int bLen,
byte[] checksum, int checksumOffset, int checksumLen) throws IOException;
/**
* Check if the implementing OutputStream is closed and should no longer
@ -66,7 +73,6 @@ protected abstract void writeChunk(byte[] b, int offset, int len, byte[] checksu
/** Write one byte */
@Override
public synchronized void write(int b) throws IOException {
sum.update(b);
buf[count++] = (byte)b;
if(count == buf.length) {
flushBuffer();
@ -111,18 +117,17 @@ public synchronized void write(byte b[], int off, int len)
*/
private int write1(byte b[], int off, int len) throws IOException {
if(count==0 && len>=buf.length) {
// local buffer is empty and user data has one chunk
// checksum and output data
// local buffer is empty and user buffer size >= local buffer size, so
// simply checksum the user buffer and send it directly to the underlying
// stream
final int length = buf.length;
sum.update(b, off, length);
writeChecksumChunk(b, off, length, false);
writeChecksumChunks(b, off, length);
return length;
}
// copy user data to local buffer
int bytesToCopy = buf.length-count;
bytesToCopy = (len<bytesToCopy) ? len : bytesToCopy;
sum.update(b, off, bytesToCopy);
System.arraycopy(b, off, buf, count, bytesToCopy);
count += bytesToCopy;
if (count == buf.length) {
@ -136,22 +141,45 @@ private int write1(byte b[], int off, int len) throws IOException {
* the underlying output stream.
*/
protected synchronized void flushBuffer() throws IOException {
flushBuffer(false);
flushBuffer(false, true);
}
/* Forces any buffered output bytes to be checksumed and written out to
* the underlying output stream. If keep is true, then the state of
* this object remains intact.
/* Forces buffered output bytes to be checksummed and written out to
* the underlying output stream. If there is a trailing partial chunk in the
* buffer,
* 1) flushPartial tells us whether to flush that chunk
* 2) if flushPartial is true, keep tells us whether to keep that chunk in the
* buffer (if flushPartial is false, it is always kept in the buffer)
*
* Returns the number of bytes that were flushed but are still left in the
* buffer (can only be non-zero if keep is true).
*/
protected synchronized void flushBuffer(boolean keep) throws IOException {
if (count != 0) {
int chunkLen = count;
protected synchronized int flushBuffer(boolean keep,
boolean flushPartial) throws IOException {
int bufLen = count;
int partialLen = bufLen % sum.getBytesPerChecksum();
int lenToFlush = flushPartial ? bufLen : bufLen - partialLen;
if (lenToFlush != 0) {
writeChecksumChunks(buf, 0, lenToFlush);
if (!flushPartial || keep) {
count = partialLen;
System.arraycopy(buf, bufLen - count, buf, 0, count);
} else {
count = 0;
writeChecksumChunk(buf, 0, chunkLen, keep);
if (keep) {
count = chunkLen;
}
}
// total bytes left minus unflushed bytes left
return count - (bufLen - lenToFlush);
}
/**
* Checksums all complete data chunks and flushes them to the underlying
* stream. If there is a trailing partial chunk, it is not flushed and is
* maintained in the buffer.
*/
public void flush() throws IOException {
flushBuffer(false, false);
}
/**
@ -161,18 +189,18 @@ protected synchronized int getBufferedDataSize() {
return count;
}
/** Generate checksum for the data chunk and output data chunk & checksum
* to the underlying output stream. If keep is true then keep the
* current checksum intact, do not reset it.
/** Generate checksums for the given data chunks and output chunks & checksums
* to the underlying output stream.
*/
private void writeChecksumChunk(byte b[], int off, int len, boolean keep)
private void writeChecksumChunks(byte b[], int off, int len)
throws IOException {
int tempChecksum = (int)sum.getValue();
if (!keep) {
sum.reset();
sum.calculateChunkedSums(b, off, len, checksum, 0);
for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
int ckOffset = i / sum.getBytesPerChecksum() * sum.getChecksumSize();
writeChunk(b, off + i, chunkLen, checksum, ckOffset,
sum.getChecksumSize());
}
int2byte(tempChecksum, checksum);
writeChunk(b, off, len, checksum);
}
/**
@ -196,9 +224,14 @@ static byte[] int2byte(int integer, byte[] bytes) {
/**
* Resets existing buffer with a new one of the specified size.
*/
protected synchronized void resetChecksumChunk(int size) {
sum.reset();
protected synchronized void setChecksumBufSize(int size) {
this.buf = new byte[size];
this.checksum = new byte[((size - 1) / sum.getBytesPerChecksum() + 1) *
sum.getChecksumSize()];
this.count = 0;
}
protected synchronized void resetChecksumBufSize() {
setChecksumBufSize(sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS);
}
}
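A small runnable illustration of the buffer arithmetic introduced above. The 512-byte chunk size and 4-byte CRC32 checksum size are assumed example values, not taken from the patch.

    // Walks through what flush() -> flushBuffer(false, false) and
    // writeChecksumChunks() do when 1300 bytes are buffered.
    public class FlushArithmeticSketch {
      public static void main(String[] args) {
        int bytesPerChecksum = 512;                   // assumed chunk size
        int checksumSize = 4;                         // assumed CRC32 checksum size
        int bufLen = 1300;                            // bytes buffered when flush() runs
        int partialLen = bufLen % bytesPerChecksum;   // 276: trailing partial chunk
        int lenToFlush = bufLen - partialLen;         // 1024: only full chunks are flushed
        for (int i = 0; i < lenToFlush; i += bytesPerChecksum) {
          int chunkLen = Math.min(bytesPerChecksum, lenToFlush - i);
          int ckOffset = i / bytesPerChecksum * checksumSize;    // 0, then 4
          System.out.println("writeChunk(off=" + i + ", len=" + chunkLen
              + ", ckoff=" + ckOffset + ", cklen=" + checksumSize + ")");
        }
        System.out.println(partialLen + " bytes stay buffered for the next flush or close");
      }
    }

So flush() emits two full 512-byte chunks, each with its own 4-byte slice of the checksum buffer, while the 276-byte partial chunk is kept in the buffer (count becomes 276) until more data arrives or the stream is closed.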


@ -232,6 +232,10 @@ public FileStatus[] glob() throws IOException {
}
}
for (FileStatus child : children) {
if (componentIdx < components.size() - 1) {
// Don't try to recurse into non-directories. See HADOOP-10957.
if (!child.isDirectory()) continue;
}
// Set the child path based on the parent path.
child.setPath(new Path(candidate.getPath(),
child.getPath().getName()));
@ -249,8 +253,8 @@ public FileStatus[] glob() throws IOException {
new Path(candidate.getPath(), component));
if (childStatus != null) {
newCandidates.add(childStatus);
}
}
}
}
}
candidates = newCandidates;
}


@ -18,6 +18,7 @@
package org.apache.hadoop.fs.shell;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.LinkedList;
@ -126,8 +127,17 @@ public static class Text extends Cat {
protected InputStream getInputStream(PathData item) throws IOException {
FSDataInputStream i = (FSDataInputStream)super.getInputStream(item);
// Handle 0 and 1-byte files
short leadBytes;
try {
leadBytes = i.readShort();
} catch (EOFException e) {
i.seek(0);
return i;
}
// Check type of stream first
switch(i.readShort()) {
switch(leadBytes) {
case 0x1f8b: { // RFC 1952
// Must be gzip
i.seek(0);


@ -44,6 +44,7 @@
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import com.google.common.collect.ImmutableMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
@ -415,6 +416,17 @@ private void addManagedListener(Connector connector) {
private static WebAppContext createWebAppContext(String name,
Configuration conf, AccessControlList adminsAcl, final String appDir) {
WebAppContext ctx = new WebAppContext();
ctx.setDefaultsDescriptor(null);
ServletHolder holder = new ServletHolder(new DefaultServlet());
Map<String, String> params = ImmutableMap. <String, String> builder()
.put("acceptRanges", "true")
.put("dirAllowed", "false")
.put("gzip", "true")
.put("useFileMappedBuffer", "true")
.build();
holder.setInitParameters(params);
ctx.setWelcomeFiles(new String[] {"index.html"});
ctx.addServlet(holder, "/");
ctx.setDisplayName(name);
ctx.setContextPath("/");
ctx.setWar(appDir + "/" + name);


@ -37,6 +37,7 @@
import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.PerformanceAdvisory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -196,7 +197,7 @@ public boolean verifyCanMlock() {
// This can happen if the user has an older version of libhadoop.so
// installed - in this case we can continue without native IO
// after warning
LOG.error("Unable to initialize NativeIO libraries", t);
PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
}
}
}
@ -574,7 +575,7 @@ public static boolean access(String path, AccessRight desiredAccess)
// This can happen if the user has an older version of libhadoop.so
// installed - in this case we can continue without native IO
// after warning
LOG.error("Unable to initialize NativeIO libraries", t);
PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
}
}
}
@ -593,7 +594,7 @@ public static boolean access(String path, AccessRight desiredAccess)
// This can happen if the user has an older version of libhadoop.so
// installed - in this case we can continue without native IO
// after warning
LOG.error("Unable to initialize NativeIO libraries", t);
PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
}
}
}


@ -88,6 +88,7 @@
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.htrace.Trace;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@ -694,6 +695,9 @@ private synchronized void setupIOstreams() {
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to "+server);
}
if (Trace.isTracing()) {
Trace.addTimelineAnnotation("IPC client connecting to " + server);
}
short numRetries = 0;
Random rand = null;
while (true) {
@ -758,6 +762,10 @@ public AuthMethod run()
// update last activity time
touch();
if (Trace.isTracing()) {
Trace.addTimelineAnnotation("IPC client connected to " + server);
}
// start the receiver thread after the socket connection has been set
// up
start();


@ -48,6 +48,9 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ProtoUtil;
import org.apache.hadoop.util.Time;
import org.htrace.Sampler;
import org.htrace.Trace;
import org.htrace.TraceScope;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
@ -191,6 +194,16 @@ public Object invoke(Object proxy, Method method, Object[] args)
+ method.getName() + "]");
}
TraceScope traceScope = null;
// if Tracing is on then start a new span for this rpc.
// guard it in the if statement to make sure there isn't
// any extra string manipulation.
if (Trace.isTracing()) {
traceScope = Trace.startSpan(
method.getDeclaringClass().getCanonicalName() +
"." + method.getName());
}
RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
if (LOG.isTraceEnabled()) {
@ -212,8 +225,13 @@ public Object invoke(Object proxy, Method method, Object[] args)
remoteId + ": " + method.getName() +
" {" + e + "}");
}
if (Trace.isTracing()) {
traceScope.getSpan().addTimelineAnnotation(
"Call got exception: " + e.getMessage());
}
throw new ServiceException(e);
} finally {
if (traceScope != null) traceScope.close();
}
if (LOG.isDebugEnabled()) {


@ -79,6 +79,7 @@
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseMessageWrapper;
@ -115,6 +116,10 @@
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.htrace.Span;
import org.htrace.Trace;
import org.htrace.TraceInfo;
import org.htrace.TraceScope;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
@ -506,6 +511,7 @@ public static class Call implements Schedulable {
private ByteBuffer rpcResponse; // the response for this call
private final RPC.RpcKind rpcKind;
private final byte[] clientId;
private final Span traceSpan; // the tracing span on the server side
public Call(int id, int retryCount, Writable param,
Connection connection) {
@ -515,6 +521,11 @@ public Call(int id, int retryCount, Writable param,
public Call(int id, int retryCount, Writable param, Connection connection,
RPC.RpcKind kind, byte[] clientId) {
this(id, retryCount, param, connection, kind, clientId, null);
}
public Call(int id, int retryCount, Writable param, Connection connection,
RPC.RpcKind kind, byte[] clientId, Span span) {
this.callId = id;
this.retryCount = retryCount;
this.rpcRequest = param;
@ -523,6 +534,7 @@ public Call(int id, int retryCount, Writable param, Connection connection,
this.rpcResponse = null;
this.rpcKind = kind;
this.clientId = clientId;
this.traceSpan = span;
}
@Override
@ -1921,9 +1933,18 @@ private void processRpcRequest(RpcRequestHeaderProto header,
RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST, err);
}
Span traceSpan = null;
if (header.hasTraceInfo()) {
// If the incoming RPC included tracing info, always continue the trace
TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
header.getTraceInfo().getParentId());
traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach();
}
Call call = new Call(header.getCallId(), header.getRetryCount(),
rpcRequest, this, ProtoUtil.convert(header.getRpcKind()), header
.getClientId().toByteArray());
rpcRequest, this, ProtoUtil.convert(header.getRpcKind()),
header.getClientId().toByteArray(), traceSpan);
callQueue.put(call); // queue the call; maybe blocked here
incRpcCount(); // Increment the rpc count
}
@ -2067,6 +2088,7 @@ public void run() {
ByteArrayOutputStream buf =
new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
while (running) {
TraceScope traceScope = null;
try {
final Call call = callQueue.take(); // pop the queue; maybe blocked here
if (LOG.isDebugEnabled()) {
@ -2083,6 +2105,10 @@ public void run() {
Writable value = null;
CurCall.set(call);
if (call.traceSpan != null) {
traceScope = Trace.continueSpan(call.traceSpan);
}
try {
// Make the call as the user via Subject.doAs, thus associating
// the call with the Subject
@ -2156,9 +2182,22 @@ public Writable run() throws Exception {
} catch (InterruptedException e) {
if (running) { // unexpected -- log it
LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
if (Trace.isTracing()) {
traceScope.getSpan().addTimelineAnnotation("unexpectedly interrupted: " +
StringUtils.stringifyException(e));
}
}
} catch (Exception e) {
LOG.info(Thread.currentThread().getName() + " caught an exception", e);
if (Trace.isTracing()) {
traceScope.getSpan().addTimelineAnnotation("Exception: " +
StringUtils.stringifyException(e));
}
} finally {
if (traceScope != null) {
traceScope.close();
}
IOUtils.cleanup(LOG, traceScope);
}
}
LOG.debug(Thread.currentThread().getName() + ": exiting");

View File

@ -41,6 +41,8 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.htrace.Trace;
import org.htrace.TraceScope;
/** An RpcEngine implementation for Writable data. */
@InterfaceStability.Evolving
@ -227,9 +229,19 @@ public Object invoke(Object proxy, Method method, Object[] args)
if (LOG.isDebugEnabled()) {
startTime = Time.now();
}
ObjectWritable value = (ObjectWritable)
client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
TraceScope traceScope = null;
if (Trace.isTracing()) {
traceScope = Trace.startSpan(
method.getDeclaringClass().getCanonicalName() +
"." + method.getName());
}
ObjectWritable value;
try {
value = (ObjectWritable)
client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
} finally {
if (traceScope != null) traceScope.close();
}
if (LOG.isDebugEnabled()) {
long callTime = Time.now() - startTime;
LOG.debug("Call: " + method.getName() + " " + callTime);

View File

@ -24,6 +24,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.PerformanceAdvisory;
public class JniBasedUnixGroupsMappingWithFallback implements
GroupMappingServiceProvider {
@ -37,7 +38,7 @@ public JniBasedUnixGroupsMappingWithFallback() {
if (NativeCodeLoader.isNativeCodeLoaded()) {
this.impl = new JniBasedUnixGroupsMapping();
} else {
LOG.debug("Falling back to shell based");
PerformanceAdvisory.LOG.debug("Falling back to shell based");
this.impl = new ShellBasedUnixGroupsMapping();
}
if (LOG.isDebugEnabled()){

View File

@ -21,9 +21,7 @@
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@ -41,8 +39,6 @@ public class UserProvider extends CredentialProvider {
public static final String SCHEME_NAME = "user";
private final UserGroupInformation user;
private final Credentials credentials;
private final Map<String, CredentialEntry> cache = new HashMap<String,
CredentialEntry>();
private UserProvider() throws IOException {
user = UserGroupInformation.getCurrentUser();
@ -86,7 +82,6 @@ public synchronized void deleteCredentialEntry(String name) throws IOException {
throw new IOException("Credential " + name +
" does not exist in " + this);
}
cache.remove(name);
}
@Override

View File

@ -221,7 +221,13 @@ Collection<String> getGroups() {
return groups;
}
public boolean isUserAllowed(UserGroupInformation ugi) {
/**
* Checks if a user represented by the provided {@link UserGroupInformation}
* is a member of the Access Control List.
* @param ugi UserGroupInformation to check if contained in the ACL
* @return true if ugi is a member of the list
*/
public final boolean isUserInList(UserGroupInformation ugi) {
if (allAllowed || users.contains(ugi.getShortUserName())) {
return true;
} else {
@ -234,6 +240,10 @@ public boolean isUserAllowed(UserGroupInformation ugi) {
return false;
}
public boolean isUserAllowed(UserGroupInformation ugi) {
return isUserInList(ugi);
}
/**
* Returns descriptive way of users and groups that are part of this ACL.
* Use {@link #getAclString()} to get the exact String that can be given to

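For context only (not part of this change), a minimal sketch of how the refactored check is exercised; the user and group names below are hypothetical:
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
public class AclCheckSketch {
  public static void main(String[] args) {
    // ACL strings are "user1,user2 group1,group2"; "*" means everyone.
    AccessControlList acl = new AccessControlList("alice,bob datascience");
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
    // isUserAllowed() now simply delegates to the new isUserInList().
    System.out.println("allowed: " + acl.isUserAllowed(ugi));
  }
}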
View File

@ -125,6 +125,8 @@ public static void setDefaultDelegationTokenAuthenticator(
}
}
private boolean useQueryStringforDelegationToken = false;
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
* <p/>
@ -170,6 +172,34 @@ public DelegationTokenAuthenticatedURL(
super(obtainDelegationTokenAuthenticator(authenticator), connConfigurator);
}
/**
* Sets whether the delegation token should be transmitted in the URL query string.
* By default it is transmitted using the
* {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
* <p/>
* This method is provided to enable WebHDFS backwards compatibility.
*
* @param useQueryString <code>TRUE</code> if the token is transmitted in the
* URL query string, <code>FALSE</code> if the delegation token is transmitted
* using the {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP
* header.
*/
@Deprecated
protected void setUseQueryStringForDelegationToken(boolean useQueryString) {
useQueryStringforDelegationToken = useQueryString;
}
/**
* Returns whether the delegation token is transmitted in the URL query string.
*
* @return <code>TRUE</code> if the token is transmitted in the URL query
* string, <code>FALSE</code> if the delegation token is transmitted using the
* {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
*/
public boolean useQueryStringForDelegationToken() {
return useQueryStringforDelegationToken;
}
/**
* Returns an authenticated {@link HttpURLConnection}, it uses a Delegation
* Token only if the given auth token is an instance of {@link Token} and
@ -235,23 +265,41 @@ private URL augmentURL(URL url, Map<String, String> params)
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
@SuppressWarnings("unchecked")
public HttpURLConnection openConnection(URL url, Token token, String doAs)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Map<String, String> extraParams = new HashMap<String, String>();
// delegation token
Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
if (!creds.getAllTokens().isEmpty()) {
InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
url.getPort());
Text service = SecurityUtil.buildTokenService(serviceAddr);
org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dt =
creds.getToken(service);
if (dt != null) {
extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
dt.encodeToUrlString());
org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dToken
= null;
// if we have a valid auth token, it takes precedence over a delegation token
// and we don't even look for one.
if (!token.isSet()) {
// delegation token
Credentials creds = UserGroupInformation.getCurrentUser().
getCredentials();
if (!creds.getAllTokens().isEmpty()) {
InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
url.getPort());
Text service = SecurityUtil.buildTokenService(serviceAddr);
dToken = creds.getToken(service);
if (dToken != null) {
if (useQueryStringForDelegationToken()) {
// delegation token will go in the query string, injecting it
extraParams.put(
KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
dToken.encodeToUrlString());
} else {
// delegation token will go as request header, setting it in the
// auth-token to ensure no authentication handshake is triggered
// (if we have a delegation token, we are authenticated)
// the delegation token header is injected in the connection request
// at the end of this method.
token.delegationToken = (org.apache.hadoop.security.token.Token
<AbstractDelegationTokenIdentifier>) dToken;
}
}
}
}
@ -261,7 +309,14 @@ public HttpURLConnection openConnection(URL url, Token token, String doAs)
}
url = augmentURL(url, extraParams);
return super.openConnection(url, token);
HttpURLConnection conn = super.openConnection(url, token);
if (!token.isSet() && !useQueryStringForDelegationToken() && dToken != null) {
// injecting the delegation token header in the connection request
conn.setRequestProperty(
DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
dToken.encodeToUrlString());
}
return conn;
}
/**

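A hedged usage sketch (not part of the patch; the endpoint URL is hypothetical) of the default behaviour, where a delegation token found in the current user's credentials is sent in the X-Hadoop-Delegation-Token header rather than the query string:
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
public class DelegationTokenHeaderSketch {
  public static void main(String[] args) throws Exception {
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
    DelegationTokenAuthenticatedURL.Token token =
        new DelegationTokenAuthenticatedURL.Token();
    // Hypothetical endpoint; with useQueryStringForDelegationToken() left at
    // its default (false), any delegation token in the UGI credentials is
    // attached as the X-Hadoop-Delegation-Token request header.
    URL url = new URL("http://host:14000/webhdfs/v1/?op=GETHOMEDIRECTORY");
    HttpURLConnection conn = aUrl.openConnection(url, token);
    System.out.println(conn.getResponseCode());
  }
}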
View File

@ -331,8 +331,7 @@ public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
String delegationParam = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
String delegationParam = getDelegationToken(request);
if (delegationParam != null) {
try {
Token<DelegationTokenIdentifier> dt =
@ -356,4 +355,15 @@ public AuthenticationToken authenticate(HttpServletRequest request,
return token;
}
private String getDelegationToken(HttpServletRequest request)
throws IOException {
String dToken = request.getHeader(
DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER);
if (dToken == null) {
dToken = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
}
return dToken;
}
}

View File

@ -56,6 +56,9 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
public static final String OP_PARAM = "op";
public static final String DELEGATION_TOKEN_HEADER =
"X-Hadoop-Delegation-Token";
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
@ -101,15 +104,23 @@ public void setConnectionConfigurator(ConnectionConfigurator configurator) {
authenticator.setConnectionConfigurator(configurator);
}
private boolean hasDelegationToken(URL url) {
String queryStr = url.getQuery();
return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
private boolean hasDelegationToken(URL url, AuthenticatedURL.Token token) {
boolean hasDt = false;
if (token instanceof DelegationTokenAuthenticatedURL.Token) {
hasDt = ((DelegationTokenAuthenticatedURL.Token) token).
getDelegationToken() != null;
}
if (!hasDt) {
String queryStr = url.getQuery();
hasDt = (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
}
return hasDt;
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
if (!hasDelegationToken(url)) {
if (!hasDelegationToken(url, token)) {
authenticator.authenticate(url, token);
}
}

View File

@ -0,0 +1,153 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.htrace.HTraceConfiguration;
import org.htrace.SpanReceiver;
import org.htrace.Trace;
/**
* This class provides functions for reading the names of SpanReceivers from
* the Hadoop configuration, adding those SpanReceivers to the Tracer,
* and closing those SpanReceivers when appropriate.
* This class does nothing if no SpanReceiver is configured.
*/
@InterfaceAudience.Private
public class SpanReceiverHost {
public static final String SPAN_RECEIVERS_CONF_KEY = "hadoop.trace.spanreceiver.classes";
private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
private Collection<SpanReceiver> receivers = new HashSet<SpanReceiver>();
private boolean closed = false;
private static enum SingletonHolder {
INSTANCE;
Object lock = new Object();
SpanReceiverHost host = null;
}
public static SpanReceiverHost getInstance(Configuration conf) {
if (SingletonHolder.INSTANCE.host != null) {
return SingletonHolder.INSTANCE.host;
}
synchronized (SingletonHolder.INSTANCE.lock) {
if (SingletonHolder.INSTANCE.host != null) {
return SingletonHolder.INSTANCE.host;
}
SpanReceiverHost host = new SpanReceiverHost();
host.loadSpanReceivers(conf);
SingletonHolder.INSTANCE.host = host;
ShutdownHookManager.get().addShutdownHook(new Runnable() {
public void run() {
SingletonHolder.INSTANCE.host.closeReceivers();
}
}, 0);
return SingletonHolder.INSTANCE.host;
}
}
/**
* Reads the names of classes specified in the
* "hadoop.trace.spanreceiver.classes" property and instantiates and registers
* them with the Tracer as SpanReceivers.
*
* The nullary constructor is called during construction, but if the classes
* specified implement the Configurable interface, setConf() will be
* called on them. This allows SpanReceivers to use values from the Hadoop
* configuration.
*/
public void loadSpanReceivers(Configuration conf) {
Class<?> implClass = null;
String[] receiverNames = conf.getTrimmedStrings(SPAN_RECEIVERS_CONF_KEY);
if (receiverNames == null || receiverNames.length == 0) {
return;
}
for (String className : receiverNames) {
className = className.trim();
try {
implClass = Class.forName(className);
receivers.add(loadInstance(implClass, conf));
LOG.info("SpanReceiver " + className + " was loaded successfully.");
} catch (ClassNotFoundException e) {
LOG.warn("Class " + className + " cannot be found.", e);
} catch (IOException e) {
LOG.warn("Load SpanReceiver " + className + " failed.", e);
}
}
for (SpanReceiver rcvr : receivers) {
Trace.addReceiver(rcvr);
}
}
private SpanReceiver loadInstance(Class<?> implClass, Configuration conf)
throws IOException {
SpanReceiver impl;
try {
Object o = ReflectionUtils.newInstance(implClass, conf);
impl = (SpanReceiver)o;
impl.configure(wrapHadoopConf(conf));
} catch (SecurityException e) {
throw new IOException(e);
} catch (IllegalArgumentException e) {
throw new IOException(e);
} catch (RuntimeException e) {
throw new IOException(e);
}
return impl;
}
private static HTraceConfiguration wrapHadoopConf(final Configuration conf) {
return new HTraceConfiguration() {
public static final String HTRACE_CONF_PREFIX = "hadoop.";
@Override
public String get(String key) {
return conf.get(HTRACE_CONF_PREFIX + key);
}
@Override
public String get(String key, String defaultValue) {
return conf.get(HTRACE_CONF_PREFIX + key, defaultValue);
}
};
}
/**
* Calls close() on all SpanReceivers created by this SpanReceiverHost.
*/
public synchronized void closeReceivers() {
if (closed) return;
closed = true;
for (SpanReceiver rcvr : receivers) {
try {
rcvr.close();
} catch (IOException e) {
LOG.warn("Unable to close SpanReceiver correctly: " + e.getMessage(), e);
}
}
}
}
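For illustration only, a minimal receiver that this host could load via hadoop.trace.spanreceiver.classes might look like the sketch below; the class name is hypothetical and it assumes the org.htrace.SpanReceiver contract used above (configure, receiveSpan, close):
package org.apache.hadoop.tracing;
import java.io.IOException;
import org.htrace.HTraceConfiguration;
import org.htrace.Span;
import org.htrace.SpanReceiver;
public class StdoutSpanReceiver implements SpanReceiver {
  @Override
  public void configure(HTraceConfiguration conf) {
    // Keys arrive without the "hadoop." prefix; see wrapHadoopConf() above.
  }
  @Override
  public void receiveSpan(Span span) {
    // Print a one-line summary of each finished span.
    System.out.println(span.getDescription() + " took "
        + span.getAccumulatedMillis() + " ms");
  }
  @Override
  public void close() throws IOException {
    // Nothing to release for stdout.
  }
}
Registering it would then be a matter of listing org.apache.hadoop.tracing.StdoutSpanReceiver in the hadoop.trace.spanreceiver.classes property.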

View File

@ -339,6 +339,7 @@ private void verifyChunkedSums(
byte[] data, int dataOff, int dataLen,
byte[] checksums, int checksumsOff, String fileName,
long basePos) throws ChecksumException {
if (type.size == 0) return;
if (NativeCrc32.isAvailable()) {
NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, type.id,
@ -421,6 +422,7 @@ public void calculateChunkedSums(ByteBuffer data, ByteBuffer checksums) {
public void calculateChunkedSums(
byte[] data, int dataOffset, int dataLength,
byte[] sums, int sumsOffset) {
if (type.size == 0) return;
if (NativeCrc32.isAvailable()) {
NativeCrc32.calculateChunkedSumsByteArray(bytesPerChecksum, type.id,

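An illustrative sketch (not from the patch) of why the new guard matters: a NULL-type checksum has size 0, so the chunked-sum methods now return immediately instead of falling through to the CRC paths; the buffer sizes below are arbitrary:
import org.apache.hadoop.util.DataChecksum;
public class NullChecksumSketch {
  public static void main(String[] args) {
    DataChecksum sum =
        DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
    byte[] data = new byte[1024];
    byte[] sums = new byte[0];  // NULL checksums occupy zero bytes
    // Returns immediately because type.size == 0.
    sum.calculateChunkedSums(data, 0, data.length, sums, 0);
  }
}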
View File

@ -42,7 +42,7 @@ public static boolean isAvailable() {
* modified.
*
* @param bytesPerSum the chunk size (eg 512 bytes)
* @param checksumType the DataChecksum type constant
* @param checksumType the DataChecksum type constant (NULL is not supported)
* @param sums the DirectByteBuffer pointing at the beginning of the
* stored checksums
* @param data the DirectByteBuffer pointing at the beginning of the

View File

@ -0,0 +1,25 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class PerformanceAdvisory {
public static final Logger LOG =
LoggerFactory.getLogger(PerformanceAdvisory.class);
}

View File

@ -27,6 +27,8 @@
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.*;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation;
import org.htrace.Span;
import org.htrace.Trace;
import com.google.protobuf.ByteString;
@ -165,6 +167,15 @@ public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
.setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));
// Add tracing info if we are currently tracing.
if (Trace.isTracing()) {
Span s = Trace.currentSpan();
result.setTraceInfo(RPCTraceInfoProto.newBuilder()
.setParentId(s.getSpanId())
.setTraceId(s.getTraceId()).build());
}
return result.build();
}
}

View File

@ -170,7 +170,8 @@ public static String getProtocVersion(){
public static void main(String[] args) {
LOG.debug("version: "+ getVersion());
System.out.println("Hadoop " + getVersion());
System.out.println("Subversion " + getUrl() + " -r " + getRevision());
System.out.println("Source code repository " + getUrl() + " -r " +
getRevision());
System.out.println("Compiled by " + getUser() + " on " + getDate());
System.out.println("Compiled with protoc " + getProtocVersion());
System.out.println("From source with checksum " + getSrcChecksum());

View File

@ -1,10 +1,11 @@
Package: libhadoop
Authors: Arun C Murthy <arunc@yahoo-inc.com>
MOTIVATION
The libhadoop package contains the native code for any of hadoop (http://hadoop.apache.org/core).
The libhadoop package contains the native code for Apache Hadoop (http://hadoop.apache.org/).
IMPROVEMENTS
Any suggestions for improvements or patched should be sent to core-dev@hadoop.apache.org. Please go through http://wiki.apache.org/hadoop/HowToContribute for more information on how to contribute.
Any suggestions for improvements or patches should be sent to common-dev@hadoop.apache.org.
Please see http://wiki.apache.org/hadoop/HowToContribute for more information on how to contribute.

View File

@ -53,6 +53,18 @@ enum RpcKindProto {
/**
* Used to pass through the information necessary to continue
* a trace after an RPC is made. All we need is the traceid
* (so we know the overarching trace this message is a part of), and
* the id of the current span when this message was sent, so we know
* what span caused the new span we will create when this message is received.
*/
message RPCTraceInfoProto {
optional int64 traceId = 1;
optional int64 parentId = 2;
}
message RpcRequestHeaderProto { // the header for the RpcRequest
enum OperationProto {
RPC_FINAL_PACKET = 0; // The final RPC Packet
@ -67,6 +79,7 @@ message RpcRequestHeaderProto { // the header for the RpcRequest
// clientId + callId uniquely identifies a request
// retry count, 1 means this is the first retry
optional sint32 retryCount = 5 [default = -1];
optional RPCTraceInfoProto traceInfo = 6; // tracing info
}

View File

@ -0,0 +1,169 @@
~~ Licensed under the Apache License, Version 2.0 (the "License");
~~ you may not use this file except in compliance with the License.
~~ You may obtain a copy of the License at
~~
~~ http://www.apache.org/licenses/LICENSE-2.0
~~
~~ Unless required by applicable law or agreed to in writing, software
~~ distributed under the License is distributed on an "AS IS" BASIS,
~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~~ See the License for the specific language governing permissions and
~~ limitations under the License. See accompanying LICENSE file.
---
Hadoop Distributed File System-${project.version} - Enabling Dapper-like Tracing
---
---
${maven.build.timestamp}
Enabling Dapper-like Tracing in HDFS
%{toc|section=1|fromDepth=0}
* {Dapper-like Tracing in HDFS}
** HTrace
{{{https://issues.apache.org/jira/browse/HDFS-5274}HDFS-5274}}
added support for tracing requests through HDFS,
using the open source tracing library, {{{https://github.com/cloudera/htrace}HTrace}}.
Setting up tracing is quite simple; however, it requires some minor changes to your client code.
** SpanReceivers
The tracing system works by collecting information in structs called 'Spans'.
It is up to you to choose how you want to receive this information
by implementing the SpanReceiver interface, which defines one method:
+----
public void receiveSpan(Span span);
+----
Configure which SpanReceivers you'd like to use
by putting a comma-separated list of the fully-qualified class names of
classes implementing SpanReceiver
in the <<<hdfs-site.xml>>> property <<<hadoop.trace.spanreceiver.classes>>>.
+----
<property>
<name>hadoop.trace.spanreceiver.classes</name>
<value>org.htrace.impl.LocalFileSpanReceiver</value>
</property>
<property>
<name>hadoop.local-file-span-receiver.path</name>
<value>/var/log/hadoop/htrace.out</value>
</property>
+----
** Setting up ZipkinSpanReceiver
Instead of implementing SpanReceiver by yourself,
you can use <<<ZipkinSpanReceiver>>>, which uses
{{{https://github.com/twitter/zipkin}Zipkin}}
for collecting and displaying tracing data.
In order to use <<<ZipkinSpanReceiver>>>,
you need to download and set up {{{https://github.com/twitter/zipkin}Zipkin}} first.
You also need to add the jar of <<<htrace-zipkin>>> to the classpath of Hadoop on each node.
Here is an example setup procedure.
+----
$ git clone https://github.com/cloudera/htrace
$ cd htrace/htrace-zipkin
$ mvn compile assembly:single
$ cp target/htrace-zipkin-*-jar-with-dependencies.jar $HADOOP_HOME/share/hadoop/hdfs/lib/
+----
The sample configuration for <<<ZipkinSpanReceiver>>> is shown below.
By adding these to the <<<hdfs-site.xml>>> of the NameNode and DataNodes,
<<<ZipkinSpanReceiver>>> is initialized on startup.
You also need this configuration on the client node in addition to the servers.
+----
<property>
<name>hadoop.trace.spanreceiver.classes</name>
<value>org.htrace.impl.ZipkinSpanReceiver</value>
</property>
<property>
<name>hadoop.zipkin.collector-hostname</name>
<value>192.168.1.2</value>
</property>
<property>
<name>hadoop.zipkin.collector-port</name>
<value>9410</value>
</property>
+----
** Turning on tracing by HTrace API
In order to turn on Dapper-like tracing,
you will need to wrap the traced logic in a <<tracing span>> as shown below.
When there are running tracing spans,
the tracing information is propagated to the servers along with RPC requests.
In addition, you need to initialize <<<SpanReceiver>>> once per process.
+----
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.htrace.Sampler;
import org.htrace.Trace;
import org.htrace.TraceScope;
...
SpanReceiverHost.getInstance(new HdfsConfiguration());
...
TraceScope ts = Trace.startSpan("Gets", Sampler.ALWAYS);
try {
... // traced logic
} finally {
if (ts != null) ts.close();
}
+----
** Sample code for tracing
The <<<TracingFsShell.java>>> shown below is a wrapper around FsShell
which starts a tracing span before invoking an HDFS shell command.
+----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.util.ToolRunner;
import org.htrace.Sampler;
import org.htrace.Trace;
import org.htrace.TraceScope;
public class TracingFsShell {
public static void main(String argv[]) throws Exception {
Configuration conf = new Configuration();
FsShell shell = new FsShell();
conf.setQuietMode(false);
shell.setConf(conf);
int res = 0;
SpanReceiverHost.getInstance(new HdfsConfiguration());
TraceScope ts = null;
try {
ts = Trace.startSpan("FsShell", Sampler.ALWAYS);
res = ToolRunner.run(shell, argv);
} finally {
shell.close();
if (ts != null) ts.close();
}
System.exit(res);
}
}
+----
You can compile and execute this code as shown below.
+----
$ javac -cp `hadoop classpath` TracingFsShell.java
$ HADOOP_CLASSPATH=. hdfs TracingFsShell -put sample.txt /tmp/
+----

View File

@ -19,6 +19,7 @@
import java.util.Date;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.junit.Assert;
import org.junit.Test;
@ -32,6 +33,7 @@ public void testCurrentKey() throws Exception {
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k2"))).thenReturn(null);
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
// asserting caching
@ -58,6 +60,7 @@ public void testKeyVersion() throws Exception {
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0")))
.thenReturn(mockKey);
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k2@0"))).thenReturn(null);
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
// asserting caching
@ -88,6 +91,7 @@ public void testMetadata() throws Exception {
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(mockMeta);
Mockito.when(mockProv.getMetadata(Mockito.eq("k2"))).thenReturn(null);
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
// asserting caching
@ -112,6 +116,7 @@ public void testRollNewVersion() throws Exception {
KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
@ -134,6 +139,7 @@ public void testDeleteKey() throws Exception {
.thenReturn(mockKey);
Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(
new KMSClientProvider.KMSMetadata("c", 0, "l", null, new Date(), 1));
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));

View File

@ -159,6 +159,10 @@ private static class MyKeyProvider extends KeyProvider {
private int size;
private byte[] material;
public MyKeyProvider(Configuration conf) {
super(conf);
}
@Override
public KeyVersion getKeyVersion(String versionName)
throws IOException {
@ -216,7 +220,7 @@ protected byte[] generateKey(int size, String algorithm)
@Test
public void testMaterialGeneration() throws Exception {
MyKeyProvider kp = new MyKeyProvider();
MyKeyProvider kp = new MyKeyProvider(new Configuration());
KeyProvider.Options options = new KeyProvider.Options(new Configuration());
options.setCipher(CIPHER);
options.setBitLength(128);
@ -225,10 +229,19 @@ public void testMaterialGeneration() throws Exception {
Assert.assertEquals(CIPHER, kp.algorithm);
Assert.assertNotNull(kp.material);
kp = new MyKeyProvider();
kp = new MyKeyProvider(new Configuration());
kp.rollNewVersion("hello");
Assert.assertEquals(128, kp.size);
Assert.assertEquals(CIPHER, kp.algorithm);
Assert.assertNotNull(kp.material);
}
@Test
public void testConfiguration() throws Exception {
Configuration conf = new Configuration(false);
conf.set("a", "A");
MyKeyProvider kp = new MyKeyProvider(conf);
Assert.assertEquals("A", kp.getConf().get("a"));
}
}

View File

@ -29,13 +29,18 @@
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
public class TestKeyProviderDelegationTokenExtension {
public static abstract class MockKeyProvider extends
KeyProvider implements DelegationTokenExtension {
public MockKeyProvider() {
super(new Configuration(false));
}
}
@Test
public void testCreateExtension() throws Exception {
Configuration conf = new Configuration();
@ -50,9 +55,11 @@ public void testCreateExtension() throws Exception {
Assert.assertNull(kpDTE1.addDelegationTokens("user", credentials));
MockKeyProvider mock = mock(MockKeyProvider.class);
Mockito.when(mock.getConf()).thenReturn(new Configuration());
when(mock.addDelegationTokens("renewer", credentials)).thenReturn(
new Token<?>[] { new Token(null, null, new Text("kind"), new Text(
"service")) });
new Token<?>[]{new Token(null, null, new Text("kind"), new Text(
"service"))}
);
KeyProviderDelegationTokenExtension kpDTE2 =
KeyProviderDelegationTokenExtension
.createKeyProviderDelegationTokenExtension(mock);

View File

@ -42,29 +42,14 @@ public class TestTextCommand {
System.getProperty("test.build.data", "build/test/data/") + "/testText";
private static final String AVRO_FILENAME =
new Path(TEST_ROOT_DIR, "weather.avro").toUri().getPath();
private static final String TEXT_FILENAME =
new Path(TEST_ROOT_DIR, "testtextfile.txt").toUri().getPath();
/**
* Tests whether binary Avro data files are displayed correctly.
*/
@Test (timeout = 30000)
public void testDisplayForAvroFiles() throws Exception {
// Create a small Avro data file on the local file system.
createAvroFile(generateWeatherAvroBinaryData());
// Prepare and call the Text command's protected getInputStream method
// using reflection.
Configuration conf = new Configuration();
URI localPath = new URI(AVRO_FILENAME);
PathData pathData = new PathData(localPath, conf);
Display.Text text = new Display.Text();
text.setConf(conf);
Method method = text.getClass().getDeclaredMethod(
"getInputStream", PathData.class);
method.setAccessible(true);
InputStream stream = (InputStream) method.invoke(text, pathData);
String output = inputStreamToString(stream);
// Check the output.
String expectedOutput =
"{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" +
System.getProperty("line.separator") +
@ -77,18 +62,72 @@ public void testDisplayForAvroFiles() throws Exception {
"{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" +
System.getProperty("line.separator");
String output = readUsingTextCommand(AVRO_FILENAME,
generateWeatherAvroBinaryData());
assertEquals(expectedOutput, output);
}
/**
* Tests that a zero-length file is displayed correctly.
*/
@Test (timeout = 30000)
public void testEmptyTextFil() throws Exception {
byte[] emptyContents = { };
String output = readUsingTextCommand(TEXT_FILENAME, emptyContents);
assertTrue("".equals(output));
}
/**
* Tests that a one-byte file is displayed correctly.
*/
@Test (timeout = 30000)
public void testOneByteTextFil() throws Exception {
byte[] oneByteContents = { 'x' };
String output = readUsingTextCommand(TEXT_FILENAME, oneByteContents);
assertTrue(new String(oneByteContents).equals(output));
}
/**
* Tests that a two-byte file is displayed correctly.
*/
@Test (timeout = 30000)
public void testTwoByteTextFil() throws Exception {
byte[] twoByteContents = { 'x', 'y' };
String output = readUsingTextCommand(TEXT_FILENAME, twoByteContents);
assertTrue(new String(twoByteContents).equals(output));
}
// Create a file on the local file system and read it using
// the Display.Text class.
private String readUsingTextCommand(String fileName, byte[] fileContents)
throws Exception {
createFile(fileName, fileContents);
// Prepare and call the Text command's protected getInputStream method
// using reflection.
Configuration conf = new Configuration();
URI localPath = new URI(fileName);
PathData pathData = new PathData(localPath, conf);
Display.Text text = new Display.Text() {
@Override
public InputStream getInputStream(PathData item) throws IOException {
return super.getInputStream(item);
}
};
text.setConf(conf);
InputStream stream = (InputStream) text.getInputStream(pathData);
return inputStreamToString(stream);
}
private String inputStreamToString(InputStream stream) throws IOException {
StringWriter writer = new StringWriter();
IOUtils.copy(stream, writer);
return writer.toString();
}
private void createAvroFile(byte[] contents) throws IOException {
private void createFile(String fileName, byte[] contents) throws IOException {
(new File(TEST_ROOT_DIR)).mkdir();
File file = new File(AVRO_FILENAME);
File file = new File(fileName);
file.createNewFile();
FileOutputStream stream = new FileOutputStream(file);
stream.write(contents);

View File

@ -284,11 +284,13 @@ private void testRenewToken() throws Exception {
@Test
public void testAuthenticate() throws Exception {
testValidDelegationToken();
testInvalidDelegationToken();
testValidDelegationTokenQueryString();
testValidDelegationTokenHeader();
testInvalidDelegationTokenQueryString();
testInvalidDelegationTokenHeader();
}
private void testValidDelegationToken() throws Exception {
private void testValidDelegationTokenQueryString() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
@ -307,7 +309,26 @@ private void testValidDelegationToken() throws Exception {
Assert.assertTrue(token.isExpired());
}
private void testInvalidDelegationToken() throws Exception {
private void testValidDelegationTokenHeader() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getHeader(Mockito.eq(
DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
dToken.encodeToUrlString());
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(UserGroupInformation.getCurrentUser().
getShortUserName(), token.getUserName());
Assert.assertEquals(0, token.getExpires());
Assert.assertEquals(handler.getType(),
token.getType());
Assert.assertTrue(token.isExpired());
}
private void testInvalidDelegationTokenQueryString() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
@ -323,4 +344,21 @@ private void testInvalidDelegationToken() throws Exception {
}
}
private void testInvalidDelegationTokenHeader() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getHeader(Mockito.eq(
DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
"invalid");
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
}
}

View File

@ -149,6 +149,15 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write("ping");
if (req.getHeader(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER)
!= null) {
resp.setHeader("UsingHeader", "true");
}
if (req.getQueryString() != null &&
req.getQueryString().contains(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=")) {
resp.setHeader("UsingQueryString", "true");
}
}
@Override
@ -314,7 +323,20 @@ public void testRawHttpCalls() throws Exception {
}
@Test
public void testDelegationTokenAuthenticatorCalls() throws Exception {
public void testDelegationTokenAuthenticatorCallsWithHeader()
throws Exception {
testDelegationTokenAuthenticatorCalls(false);
}
@Test
public void testDelegationTokenAuthenticatorCallsWithQueryString()
throws Exception {
testDelegationTokenAuthenticatorCalls(true);
}
private void testDelegationTokenAuthenticatorCalls(final boolean useQS)
throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
@ -324,14 +346,15 @@ public void testDelegationTokenAuthenticatorCalls() throws Exception {
try {
jetty.start();
URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
final URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
final DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
aUrl.setUseQueryStringForDelegationToken(useQS);
try {
aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);
@ -379,6 +402,27 @@ public void testDelegationTokenAuthenticatorCalls() throws Exception {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, "foo");
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ugi.addToken(token.getDelegationToken());
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
HttpURLConnection conn = aUrl.openConnection(nonAuthURL, new DelegationTokenAuthenticatedURL.Token());
Assert.assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
if (useQS) {
Assert.assertNull(conn.getHeaderField("UsingHeader"));
Assert.assertNotNull(conn.getHeaderField("UsingQueryString"));
} else {
Assert.assertNotNull(conn.getHeaderField("UsingHeader"));
Assert.assertNull(conn.getHeaderField("UsingQueryString"));
}
return null;
}
});
} finally {
jetty.stop();
}

View File

@ -34,7 +34,6 @@
<description>Apache Hadoop KMS</description>
<properties>
<tomcat.version>6.0.36</tomcat.version>
<kms.tomcat.dist.dir>
${project.build.directory}/${project.artifactId}-${project.version}/share/hadoop/kms/tomcat
</kms.tomcat.dist.dir>

View File

@ -26,10 +26,10 @@
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
@ -73,29 +73,14 @@ public KMS() throws Exception {
kmsAudit= KMSWebApp.getKMSAudit();
}
private static final String UNAUTHORIZED_MSG_WITH_KEY =
"User:%s not allowed to do '%s' on '%s'";
private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
"User:%s not allowed to do '%s'";
private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi,
KMSOp operation) throws AccessControlException {
assertAccess(aclType, ugi, operation, null);
KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, null);
}
private void assertAccess(KMSACLs.Type aclType,
UserGroupInformation ugi, KMSOp operation, String key)
throws AccessControlException {
if (!KMSWebApp.getACLs().hasAccess(aclType, ugi)) {
KMSWebApp.getUnauthorizedCallsMeter().mark();
kmsAudit.unauthorized(ugi, operation, key);
throw new AuthorizationException(String.format(
(key != null) ? UNAUTHORIZED_MSG_WITH_KEY
: UNAUTHORIZED_MSG_WITHOUT_KEY,
ugi.getShortUserName(), operation, key));
}
private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi,
KMSOp operation, String key) throws AccessControlException {
KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, key);
}
private static KeyProvider.KeyVersion removeKeyMaterial(

View File

@ -19,8 +19,11 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -39,14 +42,23 @@
public class KMSACLs implements Runnable {
private static final Logger LOG = LoggerFactory.getLogger(KMSACLs.class);
private static final String UNAUTHORIZED_MSG_WITH_KEY =
"User:%s not allowed to do '%s' on '%s'";
private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
"User:%s not allowed to do '%s'";
public enum Type {
CREATE, DELETE, ROLLOVER, GET, GET_KEYS, GET_METADATA,
SET_KEY_MATERIAL, GENERATE_EEK, DECRYPT_EEK;
public String getConfigKey() {
public String getAclConfigKey() {
return KMSConfiguration.CONFIG_PREFIX + "acl." + this.toString();
}
public String getBlacklistConfigKey() {
return KMSConfiguration.CONFIG_PREFIX + "blacklist." + this.toString();
}
}
public static final String ACL_DEFAULT = AccessControlList.WILDCARD_ACL_VALUE;
@ -54,6 +66,7 @@ public String getConfigKey() {
public static final int RELOADER_SLEEP_MILLIS = 1000;
private volatile Map<Type, AccessControlList> acls;
private volatile Map<Type, AccessControlList> blacklistedAcls;
private ScheduledExecutorService executorService;
private long lastReload;
@ -70,12 +83,20 @@ public KMSACLs() {
private void setACLs(Configuration conf) {
Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
Map<Type, AccessControlList> tempBlacklist = new HashMap<Type, AccessControlList>();
for (Type aclType : Type.values()) {
String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
String aclStr = conf.get(aclType.getAclConfigKey(), ACL_DEFAULT);
tempAcls.put(aclType, new AccessControlList(aclStr));
String blacklistStr = conf.get(aclType.getBlacklistConfigKey());
if (blacklistStr != null) {
// Only add if blacklist is present
tempBlacklist.put(aclType, new AccessControlList(blacklistStr));
LOG.info("'{}' Blacklist '{}'", aclType, blacklistStr);
}
LOG.info("'{}' ACL '{}'", aclType, aclStr);
}
acls = tempAcls;
blacklistedAcls = tempBlacklist;
}
@Override
@ -109,12 +130,38 @@ private Configuration loadACLs() {
lastReload = System.currentTimeMillis();
Configuration conf = KMSConfiguration.getACLsConf();
// triggering the resource loading.
conf.get(Type.CREATE.getConfigKey());
conf.get(Type.CREATE.getAclConfigKey());
return conf;
}
/**
* First checks if the user is in the ACL for the KMS operation; if so,
* returns true only if the user is not present in any configured blacklist
* for the operation.
* @param type KMS Operation
* @param ugi UserGroupInformation of user
* @return true if the user has access
*/
public boolean hasAccess(Type type, UserGroupInformation ugi) {
return acls.get(type).isUserAllowed(ugi);
boolean access = acls.get(type).isUserAllowed(ugi);
if (access) {
AccessControlList blacklist = blacklistedAcls.get(type);
access = (blacklist == null) || !blacklist.isUserInList(ugi);
}
return access;
}
public void assertAccess(KMSACLs.Type aclType,
UserGroupInformation ugi, KMSOp operation, String key)
throws AccessControlException {
if (!KMSWebApp.getACLs().hasAccess(aclType, ugi)) {
KMSWebApp.getUnauthorizedCallsMeter().mark();
KMSWebApp.getKMSAudit().unauthorized(ugi, operation, key);
throw new AuthorizationException(String.format(
(key != null) ? UNAUTHORIZED_MSG_WITH_KEY
: UNAUTHORIZED_MSG_WITHOUT_KEY,
ugi.getShortUserName(), operation, key));
}
}
}

View File

@ -274,8 +274,13 @@ $ keytool -genkey -alias tomcat -keyalg RSA
KMS ACLs configuration are defined in the KMS <<<etc/hadoop/kms-acls.xml>>>
configuration file. This file is hot-reloaded when it changes.
KMS supports a fine grained access control via a set ACL
configuration properties:
KMS supports both fine-grained access control and blacklists for KMS
operations via a set of ACL configuration properties.
A user accessing KMS is first checked for inclusion in the Access Control
List for the requested operation and then checked for exclusion in the
blacklist for the operation before access is granted.
+---+
<property>
@ -288,6 +293,16 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</description>
</property>
<property>
<name>hadoop.kms.blacklist.CREATE</name>
<value>hdfs,foo</value>
<description>
Blacklist for create-key operations.
If the user is in the Blacklist, the key material is not returned
as part of the response.
</description>
</property>
<property>
<name>hadoop.kms.acl.DELETE</name>
<value>*</value>
@ -296,6 +311,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</description>
</property>
<property>
<name>hadoop.kms.blacklist.DELETE</name>
<value>hdfs,foo</value>
<description>
Blacklist for delete-key operations.
</description>
</property>
<property>
<name>hadoop.kms.acl.ROLLOVER</name>
<value>*</value>
@ -306,6 +329,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</description>
</property>
<property>
<name>hadoop.kms.blacklist.ROLLOVER</name>
<value>hdfs,foo</value>
<description>
Blacklist for rollover-key operations.
</description>
</property>
<property>
<name>hadoop.kms.acl.GET</name>
<value>*</value>
@ -314,6 +345,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</description>
</property>
<property>
<name>hadoop.kms.blacklist.GET</name>
<value>hdfs,foo</value>
<description>
Blacklist for get-key-version and get-current-key operations.
</description>
</property>
<property>
<name>hadoop.kms.acl.GET_KEYS</name>
<value>*</value>
@ -322,6 +361,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</description>
</property>
<property>
<name>hadoop.kms.blacklist.GET_KEYS</name>
<value>hdfs,foo</value>
<description>
Blacklist for get-keys operation.
</description>
</property>
<property>
<name>hadoop.kms.acl.GET_METADATA</name>
<value>*</value>
@ -330,6 +377,14 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</description>
</property>
<property>
<name>hadoop.kms.blacklist.GET_METADATA</name>
<value>hdfs,foo</value>
<description>
Blacklist for get-key-metadata and get-keys-metadata operations.
</description>
</property>
<property>
<name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
<value>*</value>
@ -339,6 +394,15 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</description>
</property>
<property>
<name>hadoop.kms.blacklist.SET_KEY_MATERIAL</name>
<value>hdfs,foo</value>
<description>
Complementary Blacklist for CREATE and ROLLOVER operations to allow the client
to provide the key material when creating or rolling a key.
</description>
</property>
<property>
<name>hadoop.kms.acl.GENERATE_EEK</name>
<value>*</value>
@ -348,6 +412,15 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</description>
</property>
<property>
<name>hadoop.kms.blacklist.GENERATE_EEK</name>
<value>hdfs,foo</value>
<description>
Blacklist for generateEncryptedKey
CryptoExtension operations
</description>
</property>
<property>
<name>hadoop.kms.acl.DECRYPT_EEK</name>
<value>*</value>
@ -357,6 +430,17 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</description>
</property>
</configuration>
<property>
<name>hadoop.kms.blacklist.DECRYPT_EEK</name>
<value>hdfs,foo</value>
<description>
Blacklist for decrypt EncryptedKey
CryptoExtension operations
</description>
</property>
</configuration>
+---+
** KMS Delegation Token Configuration

View File

@ -268,6 +268,8 @@ public static void setUpMiniKdc() throws Exception {
List<String> principals = new ArrayList<String>();
principals.add("HTTP/localhost");
principals.add("client");
principals.add("hdfs");
principals.add("otheradmin");
principals.add("client/host");
principals.add("client1");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
@ -621,12 +623,12 @@ public void testACLs() throws Exception {
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getConfigKey(), type.toString());
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getConfigKey(),
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getConfigKey(),
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
writeConf(testDir, conf);
@ -884,7 +886,7 @@ public Void run() throws Exception {
// test ACL reloading
Thread.sleep(10); // to ensure the ACLs file modifiedTime is newer
conf.set(KMSACLs.Type.CREATE.getConfigKey(), "foo");
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "foo");
writeConf(testDir, conf);
Thread.sleep(1000);
@ -914,6 +916,92 @@ public Void run() throws Exception {
});
}
@Test
public void testKMSBlackList() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), " ");
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client,hdfs,otheradmin");
conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(), "client,hdfs,otheradmin");
conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(), "client,hdfs,otheradmin");
conf.set(KMSACLs.Type.DECRYPT_EEK.getBlacklistConfigKey(), "hdfs,otheradmin");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KMSClientProvider kp = new KMSClientProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck0",
new KeyProvider.Options(conf));
EncryptedKeyVersion eek =
kp.generateEncryptedKey("ck0");
kp.decryptEncryptedKey(eek);
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("hdfs", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KMSClientProvider kp = new KMSClientProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck1",
new KeyProvider.Options(conf));
EncryptedKeyVersion eek =
kp.generateEncryptedKey("ck1");
kp.decryptEncryptedKey(eek);
Assert.fail("admin user must not be allowed to decrypt !!");
} catch (Exception ex) {
}
return null;
}
});
doAs("otheradmin", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KMSClientProvider kp = new KMSClientProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck2",
new KeyProvider.Options(conf));
EncryptedKeyVersion eek =
kp.generateEncryptedKey("ck2");
kp.decryptEncryptedKey(eek);
Assert.fail("admin user must not be allowed to decrypt !!");
} catch (Exception ex) {
}
return null;
}
});
return null;
}
});
}
@Test
public void testServicePrincipalACLs() throws Exception {
Configuration conf = new Configuration();
@ -927,9 +1015,9 @@ public void testServicePrincipalACLs() throws Exception {
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getConfigKey(), " ");
conf.set(type.getAclConfigKey(), " ");
}
conf.set(KMSACLs.Type.CREATE.getConfigKey(), "client");
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client");
writeConf(testDir, conf);

View File

@ -37,7 +37,7 @@ public void testDefaults() {
public void testCustom() {
Configuration conf = new Configuration(false);
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getConfigKey(), type.toString() + " ");
conf.set(type.getAclConfigKey(), type.toString() + " ");
}
KMSACLs acls = new KMSACLs(conf);
for (KMSACLs.Type type : KMSACLs.Type.values()) {

View File

@ -53,9 +53,19 @@ public class Nfs3FileAttributes {
* For Hadoop, currently this field is always zero.
*/
public static class Specdata3 {
final static int specdata1 = 0;
final static int specdata2 = 0;
final int specdata1;
final int specdata2;
public Specdata3() {
specdata1 = 0;
specdata2 = 0;
}
public Specdata3(int specdata1, int specdata2) {
this.specdata1 = specdata1;
this.specdata2 = specdata2;
}
public int getSpecdata1() {
return specdata1;
}

View File

@ -19,13 +19,24 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* ACCESS3 Request
*/
public class ACCESS3Request extends RequestWithHandle {
public ACCESS3Request(XDR xdr) throws IOException {
super(xdr);
public static ACCESS3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new ACCESS3Request(handle);
}
public ACCESS3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}

View File

@ -19,6 +19,7 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
@ -28,10 +29,17 @@ public class COMMIT3Request extends RequestWithHandle {
private final long offset;
private final int count;
public COMMIT3Request(XDR xdr) throws IOException {
super(xdr);
offset = xdr.readHyper();
count = xdr.readInt();
public static COMMIT3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long offset = xdr.readHyper();
int count = xdr.readInt();
return new COMMIT3Request(handle, offset, count);
}
public COMMIT3Request(FileHandle handle, long offset, int count) {
super(handle);
this.offset = offset;
this.count = count;
}
public long getOffset() {
@ -41,4 +49,11 @@ public long getOffset() {
public int getCount() {
return this.count;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeLongAsHyper(offset);
xdr.writeInt(count);
}
}
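A hedged sketch (not part of the patch) of how the new static deserialize() pairs with serialize(); the handle value and class name are arbitrary, and it assumes XDR.asReadOnlyWrap() as used by the existing NFS tests:
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request;
import org.apache.hadoop.oncrpc.XDR;
public class Commit3RoundTripSketch {
  public static void main(String[] args) throws Exception {
    COMMIT3Request request = new COMMIT3Request(new FileHandle(1), 0L, 4096);
    XDR xdr = new XDR();
    request.serialize(xdr);
    // Flip the write buffer for reading, then rebuild the request.
    COMMIT3Request copy = COMMIT3Request.deserialize(xdr.asReadOnlyWrap());
    System.out.println(copy.getOffset() + " / " + copy.getCount());
  }
}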

View File

@ -29,8 +29,8 @@
public class CREATE3Request extends RequestWithHandle {
private final String name;
private final int mode;
private SetAttr3 objAttr = null;
private long verf;
private final SetAttr3 objAttr;
private long verf = 0;
public CREATE3Request(FileHandle handle, String name, int mode,
SetAttr3 objAttr, long verf) {
@ -41,12 +41,12 @@ public CREATE3Request(FileHandle handle, String name, int mode,
this.verf = verf;
}
public CREATE3Request(XDR xdr) throws IOException {
super(xdr);
name = xdr.readString();
mode = xdr.readInt();
objAttr = new SetAttr3();
public static CREATE3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
int mode = xdr.readInt();
SetAttr3 objAttr = new SetAttr3();
long verf = 0;
if ((mode == Nfs3Constant.CREATE_UNCHECKED)
|| (mode == Nfs3Constant.CREATE_GUARDED)) {
objAttr.deserialize(xdr);
@ -55,6 +55,7 @@ public CREATE3Request(XDR xdr) throws IOException {
} else {
throw new IOException("Wrong create mode:" + mode);
}
return new CREATE3Request(handle, name, mode, objAttr, verf);
}
public String getName() {
@ -81,4 +82,5 @@ public void serialize(XDR xdr) {
xdr.writeInt(mode);
objAttr.serialize(xdr);
}
}

View File

@ -19,13 +19,24 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* FSINFO3 Request
*/
public class FSINFO3Request extends RequestWithHandle {
public FSINFO3Request(XDR xdr) throws IOException {
super(xdr);
public static FSINFO3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new FSINFO3Request(handle);
}
public FSINFO3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}

View File

@ -19,13 +19,24 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* FSSTAT3 Request
*/
public class FSSTAT3Request extends RequestWithHandle {
public FSSTAT3Request(XDR xdr) throws IOException {
super(xdr);
public static FSSTAT3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new FSSTAT3Request(handle);
}
public FSSTAT3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}

View File

@ -19,13 +19,24 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* GETATTR3 Request
*/
public class GETATTR3Request extends RequestWithHandle {
public GETATTR3Request(XDR xdr) throws IOException {
super(xdr);
public static GETATTR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new GETATTR3Request(handle);
}
public GETATTR3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}

View File

@ -0,0 +1,61 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* LINK3 Request
*/
public class LINK3Request extends RequestWithHandle {
private final FileHandle fromDirHandle;
private final String fromName;
public LINK3Request(FileHandle handle, FileHandle fromDirHandle,
String fromName) {
super(handle);
this.fromDirHandle = fromDirHandle;
this.fromName = fromName;
}
public static LINK3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
FileHandle fromDirHandle = readHandle(xdr);
String fromName = xdr.readString();
return new LINK3Request(handle, fromDirHandle, fromName);
}
public FileHandle getFromDirHandle() {
return fromDirHandle;
}
public String getFromName() {
return fromName;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
fromDirHandle.serialize(xdr);
xdr.writeInt(fromName.length());
xdr.writeFixedOpaque(fromName.getBytes(), fromName.length());
}
}

View File

@ -35,9 +35,10 @@ public LOOKUP3Request(FileHandle handle, String name) {
this.name = name;
}
public LOOKUP3Request(XDR xdr) throws IOException {
super(xdr);
name = xdr.readString();
public static LOOKUP3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
return new LOOKUP3Request(handle, name);
}
public String getName() {
@ -51,7 +52,7 @@ public void setName(String name) {
@Override
@VisibleForTesting
public void serialize(XDR xdr) {
super.serialize(xdr);
handle.serialize(xdr);
xdr.writeInt(name.getBytes().length);
xdr.writeFixedOpaque(name.getBytes());
}

View File

@ -19,6 +19,7 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
@ -28,13 +29,20 @@ public class MKDIR3Request extends RequestWithHandle {
private final String name;
private final SetAttr3 objAttr;
public MKDIR3Request(XDR xdr) throws IOException {
super(xdr);
name = xdr.readString();
objAttr = new SetAttr3();
public static MKDIR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
SetAttr3 objAttr = new SetAttr3();
objAttr.deserialize(xdr);
return new MKDIR3Request(handle, name, objAttr);
}
public MKDIR3Request(FileHandle handle, String name, SetAttr3 objAttr) {
super(handle);
this.name = name;
this.objAttr = objAttr;
}
public String getName() {
return name;
}
@ -42,4 +50,12 @@ public String getName() {
public SetAttr3 getObjAttr() {
return objAttr;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.getBytes().length);
xdr.writeFixedOpaque(name.getBytes());
objAttr.serialize(xdr);
}
}

View File

@ -0,0 +1,89 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.NfsFileType;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes.Specdata3;
import org.apache.hadoop.oncrpc.XDR;
/**
* MKNOD3 Request
*/
public class MKNOD3Request extends RequestWithHandle {
private final String name;
private int type;
private SetAttr3 objAttr = null;
private Specdata3 spec = null;
public MKNOD3Request(FileHandle handle, String name, int type,
SetAttr3 objAttr, Specdata3 spec) {
super(handle);
this.name = name;
this.type = type;
this.objAttr = objAttr;
this.spec = spec;
}
public static MKNOD3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
int type = xdr.readInt();
SetAttr3 objAttr = new SetAttr3();
Specdata3 spec = null;
if (type == NfsFileType.NFSCHR.toValue()
|| type == NfsFileType.NFSBLK.toValue()) {
objAttr.deserialize(xdr);
spec = new Specdata3(xdr.readInt(), xdr.readInt());
} else if (type == NfsFileType.NFSSOCK.toValue()
|| type == NfsFileType.NFSFIFO.toValue()) {
objAttr.deserialize(xdr);
}
return new MKNOD3Request(handle, name, type, objAttr, spec);
}
public String getName() {
return name;
}
public int getType() {
return type;
}
public SetAttr3 getObjAttr() {
return objAttr;
}
public Specdata3 getSpec() {
return spec;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.length());
xdr.writeFixedOpaque(name.getBytes(), name.length());
objAttr.serialize(xdr);
if (spec != null) {
xdr.writeInt(spec.getSpecdata1());
xdr.writeInt(spec.getSpecdata2());
}
}
}
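
The new MKNOD3Request above is the main consumer of the Specdata3 change earlier in this diff: device special files now carry their real major/minor numbers through the request instead of hard-coded zeros. A short illustrative sketch (not part of the patch) of packaging such a request; the file id 1234 and the device numbers 8/1 are made-up example values.

import org.apache.hadoop.nfs.NfsFileType;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes.Specdata3;
import org.apache.hadoop.nfs.nfs3.request.MKNOD3Request;
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
import org.apache.hadoop.oncrpc.XDR;

public class MknodPackagingSketch {
  public static void main(String[] args) {
    FileHandle dirHandle = new FileHandle(1234L);      // parent directory handle (example id)
    Specdata3 dev = new Specdata3(8, 1);               // real major/minor pair, no longer fixed zeros
    MKNOD3Request req = new MKNOD3Request(dirHandle, "dev0",
        NfsFileType.NFSCHR.toValue(), new SetAttr3(), dev);

    XDR xdr = new XDR();
    req.serialize(xdr);                                // writes handle, name, attrs and the spec pair
    System.out.println("packaged MKNOD3 request for " + req.getName());
  }
}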

View File

@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* An NFS request that uses {@link FileHandle} to identify a file.
*/
public abstract class NFS3Request {
/**
* Deserialize a handle from an XDR object
*/
static FileHandle readHandle(XDR xdr) throws IOException {
FileHandle handle = new FileHandle();
if (!handle.deserialize(xdr)) {
throw new IOException("can't deserialize file handle");
}
return handle;
}
/**
* Subclass should implement. Usually handle is the first to be serialized
*/
public abstract void serialize(XDR xdr);
}
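
NFS3Request is the new common base: a package-level readHandle(XDR) helper for the static deserialize factories, plus the abstract serialize(XDR) that every request must now implement. As a hypothetical illustration of that contract (this class is not in the patch; it would have to live in the org.apache.hadoop.nfs.nfs3.request package, since readHandle and the RequestWithHandle constructor are package-private):

package org.apache.hadoop.nfs.nfs3.request;

import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;

/** Hypothetical request with one extra int field, following the pattern above. */
public class EXAMPLE3Request extends RequestWithHandle {
  private final int flags;

  public EXAMPLE3Request(FileHandle handle, int flags) {
    super(handle);
    this.flags = flags;
  }

  // Static factory mirrors COMMIT3Request, READ3Request, etc.: handle first, then the fields.
  public static EXAMPLE3Request deserialize(XDR xdr) throws IOException {
    FileHandle handle = readHandle(xdr);
    int flags = xdr.readInt();
    return new EXAMPLE3Request(handle, flags);
  }

  @Override
  public void serialize(XDR xdr) {
    handle.serialize(xdr);   // handle goes first, as the javadoc above suggests
    xdr.writeInt(flags);
  }
}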

View File

@ -19,13 +19,24 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* PATHCONF3 Request
*/
public class PATHCONF3Request extends RequestWithHandle {
public PATHCONF3Request(XDR xdr) throws IOException {
super(xdr);
public static PATHCONF3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new PATHCONF3Request(handle);
}
public PATHCONF3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}

View File

@ -31,10 +31,11 @@ public class READ3Request extends RequestWithHandle {
private final long offset;
private final int count;
public READ3Request(XDR xdr) throws IOException {
super(xdr);
offset = xdr.readHyper();
count = xdr.readInt();
public static READ3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long offset = xdr.readHyper();
int count = xdr.readInt();
return new READ3Request(handle, offset, count);
}
@VisibleForTesting

View File

@ -19,6 +19,7 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
@ -29,13 +30,22 @@ public class READDIR3Request extends RequestWithHandle {
private final long cookieVerf;
private final int count;
public READDIR3Request(XDR xdr) throws IOException {
super(xdr);
cookie = xdr.readHyper();
cookieVerf = xdr.readHyper();
count = xdr.readInt();
public static READDIR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long cookie = xdr.readHyper();
long cookieVerf = xdr.readHyper();
int count = xdr.readInt();
return new READDIR3Request(handle, cookie, cookieVerf, count);
}
public READDIR3Request(FileHandle handle, long cookie, long cookieVerf,
int count) {
super(handle);
this.cookie = cookie;
this.cookieVerf = cookieVerf;
this.count = count;
}
public long getCookie() {
return this.cookie;
}
@ -47,4 +57,12 @@ public long getCookieVerf() {
public long getCount() {
return this.count;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeLongAsHyper(cookie);
xdr.writeLongAsHyper(cookieVerf);
xdr.writeInt(count);
}
}

View File

@ -19,6 +19,7 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
@ -30,14 +31,25 @@ public class READDIRPLUS3Request extends RequestWithHandle {
private final int dirCount;
private final int maxCount;
public READDIRPLUS3Request(XDR xdr) throws IOException {
super(xdr);
cookie = xdr.readHyper();
cookieVerf = xdr.readHyper();
dirCount = xdr.readInt();
maxCount = xdr.readInt();
public static READDIRPLUS3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long cookie = xdr.readHyper();
long cookieVerf = xdr.readHyper();
int dirCount = xdr.readInt();
int maxCount = xdr.readInt();
return new READDIRPLUS3Request(handle, cookie, cookieVerf, dirCount,
maxCount);
}
public READDIRPLUS3Request(FileHandle handle, long cookie, long cookieVerf,
int dirCount, int maxCount) {
super(handle);
this.cookie = cookie;
this.cookieVerf = cookieVerf;
this.dirCount = dirCount;
this.maxCount = maxCount;
}
public long getCookie() {
return this.cookie;
}
@ -53,4 +65,13 @@ public int getDirCount() {
public int getMaxCount() {
return maxCount;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeLongAsHyper(cookie);
xdr.writeLongAsHyper(cookieVerf);
xdr.writeInt(dirCount);
xdr.writeInt(maxCount);
}
}

View File

@ -19,6 +19,7 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
@ -26,7 +27,17 @@
*/
public class READLINK3Request extends RequestWithHandle {
public READLINK3Request(XDR xdr) throws IOException {
super(xdr);
public static READLINK3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new READLINK3Request(handle);
}
public READLINK3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}

View File

@ -19,6 +19,7 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
@ -27,12 +28,25 @@
public class REMOVE3Request extends RequestWithHandle {
private final String name;
public REMOVE3Request(XDR xdr) throws IOException {
super(xdr);
name = xdr.readString();
public static REMOVE3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
return new REMOVE3Request(handle, name);
}
public REMOVE3Request(FileHandle handle, String name) {
super(handle);
this.name = name;
}
public String getName() {
return this.name;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.getBytes().length);
xdr.writeFixedOpaque(name.getBytes());
}
}

View File

@ -25,23 +25,26 @@
/**
* RENAME3 Request
*/
public class RENAME3Request {
public class RENAME3Request extends NFS3Request {
private final FileHandle fromDirHandle;
private final String fromName;
private final FileHandle toDirHandle;
private final String toName;
public RENAME3Request(XDR xdr) throws IOException {
fromDirHandle = new FileHandle();
if (!fromDirHandle.deserialize(xdr)) {
throw new IOException("can't deserialize file handle");
}
fromName = xdr.readString();
toDirHandle = new FileHandle();
if (!toDirHandle.deserialize(xdr)) {
throw new IOException("can't deserialize file handle");
}
toName = xdr.readString();
public static RENAME3Request deserialize(XDR xdr) throws IOException {
FileHandle fromDirHandle = readHandle(xdr);
String fromName = xdr.readString();
FileHandle toDirHandle = readHandle(xdr);
String toName = xdr.readString();
return new RENAME3Request(fromDirHandle, fromName, toDirHandle, toName);
}
public RENAME3Request(FileHandle fromDirHandle, String fromName,
FileHandle toDirHandle, String toName) {
this.fromDirHandle = fromDirHandle;
this.fromName = fromName;
this.toDirHandle = toDirHandle;
this.toName = toName;
}
public FileHandle getFromDirHandle() {
@ -59,4 +62,14 @@ public FileHandle getToDirHandle() {
public String getToName() {
return toName;
}
@Override
public void serialize(XDR xdr) {
fromDirHandle.serialize(xdr);
xdr.writeInt(fromName.getBytes().length);
xdr.writeFixedOpaque(fromName.getBytes());
toDirHandle.serialize(xdr);
xdr.writeInt(toName.getBytes().length);
xdr.writeFixedOpaque(toName.getBytes());
}
}

View File

@ -19,6 +19,7 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
@ -27,12 +28,25 @@
public class RMDIR3Request extends RequestWithHandle {
private final String name;
public RMDIR3Request(XDR xdr) throws IOException {
super(xdr);
name = xdr.readString();
public static RMDIR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
return new RMDIR3Request(handle, name);
}
public RMDIR3Request(FileHandle handle, String name) {
super(handle);
this.name = name;
}
public String getName() {
return this.name;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.getBytes().length);
xdr.writeFixedOpaque(name.getBytes());
}
}

View File

@ -17,33 +17,19 @@
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* An NFS request that uses {@link FileHandle} to identify a file.
*/
public class RequestWithHandle {
public abstract class RequestWithHandle extends NFS3Request {
protected final FileHandle handle;
RequestWithHandle(FileHandle handle) {
this.handle = handle;
}
RequestWithHandle(XDR xdr) throws IOException {
handle = new FileHandle();
if (!handle.deserialize(xdr)) {
throw new IOException("can't deserialize file handle");
}
}
public FileHandle getHandle() {
return this.handle;
}
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}

View File

@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
@ -38,16 +39,26 @@ public class SETATTR3Request extends RequestWithHandle {
private final boolean check;
private final NfsTime ctime;
public SETATTR3Request(XDR xdr) throws IOException {
super(xdr);
attr = new SetAttr3();
public static SETATTR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
SetAttr3 attr = new SetAttr3();
attr.deserialize(xdr);
check = xdr.readBoolean();
boolean check = xdr.readBoolean();
NfsTime ctime;
if (check) {
ctime = NfsTime.deserialize(xdr);
} else {
ctime = null;
}
return new SETATTR3Request(handle, attr, check, ctime);
}
public SETATTR3Request(FileHandle handle, SetAttr3 attr, boolean check,
NfsTime ctime) {
super(handle);
this.attr = attr;
this.check = check;
this.ctime = ctime;
}
public SetAttr3 getAttr() {
@ -61,4 +72,14 @@ public boolean isCheck() {
public NfsTime getCtime() {
return ctime;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
attr.serialize(xdr);
xdr.writeBoolean(check);
if (check) {
ctime.serialize(xdr);
}
}
}
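
Because serialize() and deserialize() are now exact mirrors, a request can be packaged and re-read without hand-written XDR code. A minimal round-trip sketch (not from the patch; the file id 1234 is an arbitrary example, and check is left false so no NfsTime is needed):

import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request;
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
import org.apache.hadoop.oncrpc.XDR;

public class SetattrRoundTripSketch {
  public static void main(String[] args) throws IOException {
    SETATTR3Request out =
        new SETATTR3Request(new FileHandle(1234L), new SetAttr3(), false, null);

    XDR xdr = new XDR();
    out.serialize(xdr);                                            // handle, attr, check flag
    SETATTR3Request in = SETATTR3Request.deserialize(xdr.asReadOnlyWrap());

    System.out.println("check flag after round trip: " + in.isCheck());   // false, as written
  }
}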

View File

@ -19,6 +19,7 @@
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
@ -29,14 +30,23 @@ public class SYMLINK3Request extends RequestWithHandle {
private final SetAttr3 symAttr;
private final String symData; // It contains the target
public SYMLINK3Request(XDR xdr) throws IOException {
super(xdr);
name = xdr.readString();
symAttr = new SetAttr3();
public static SYMLINK3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
SetAttr3 symAttr = new SetAttr3();
symAttr.deserialize(xdr);
symData = xdr.readString();
String symData = xdr.readString();
return new SYMLINK3Request(handle, name, symAttr, symData);
}
public SYMLINK3Request(FileHandle handle, String name, SetAttr3 symAttr,
String symData) {
super(handle);
this.name = name;
this.symAttr = symAttr;
this.symData = symData;
}
public String getName() {
return name;
}
@ -48,4 +58,14 @@ public SetAttr3 getSymAttr() {
public String getSymData() {
return symData;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.getBytes().length);
xdr.writeFixedOpaque(name.getBytes());
symAttr.serialize(xdr);
xdr.writeInt(symData.getBytes().length);
xdr.writeFixedOpaque(symData.getBytes());
}
}

View File

@ -52,6 +52,15 @@ public SetAttr3() {
size = 0;
updateFields = EnumSet.noneOf(SetAttrField.class);
}
public SetAttr3(int mode, int uid, int gid, long size, NfsTime atime,
NfsTime mtime, EnumSet<SetAttrField> updateFields) {
this.mode = mode;
this.uid = uid;
this.gid = gid;
this.size = size;
this.updateFields = updateFields;
}
public int getMode() {
return mode;

View File

@ -33,12 +33,13 @@ public class WRITE3Request extends RequestWithHandle {
private final WriteStableHow stableHow;
private final ByteBuffer data;
public WRITE3Request(XDR xdr) throws IOException {
super(xdr);
offset = xdr.readHyper();
count = xdr.readInt();
stableHow = WriteStableHow.fromValue(xdr.readInt());
data = ByteBuffer.wrap(xdr.readFixedOpaque(xdr.readInt()));
public static WRITE3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long offset = xdr.readHyper();
int count = xdr.readInt();
WriteStableHow stableHow = WriteStableHow.fromValue(xdr.readInt());
ByteBuffer data = ByteBuffer.wrap(xdr.readFixedOpaque(xdr.readInt()));
return new WRITE3Request(handle, offset, count, stableHow, data);
}
public WRITE3Request(FileHandle handle, final long offset, final int count,

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
public class LINK3Response extends NFS3Response {
private final WccData fromDirWcc;
private final WccData linkDirWcc;
public LINK3Response(int status) {
this(status, new WccData(null, null), new WccData(null, null));
}
public LINK3Response(int status, WccData fromDirWcc,
WccData linkDirWcc) {
super(status);
this.fromDirWcc = fromDirWcc;
this.linkDirWcc = linkDirWcc;
}
public WccData getFromDirWcc() {
return fromDirWcc;
}
public WccData getLinkDirWcc() {
return linkDirWcc;
}
@Override
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
super.writeHeaderAndResponse(out, xid, verifier);
fromDirWcc.serialize(out);
linkDirWcc.serialize(out);
return out;
}
}

View File

@ -0,0 +1,68 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
public class MKNOD3Response extends NFS3Response {
private final FileHandle objFileHandle;
private final Nfs3FileAttributes objPostOpAttr;
private final WccData dirWcc;
public MKNOD3Response(int status) {
this(status, null, null, new WccData(null, null));
}
public MKNOD3Response(int status, FileHandle handle,
Nfs3FileAttributes attrs, WccData dirWcc) {
super(status);
this.objFileHandle = handle;
this.objPostOpAttr = attrs;
this.dirWcc = dirWcc;
}
public FileHandle getObjFileHandle() {
return objFileHandle;
}
public Nfs3FileAttributes getObjPostOpAttr() {
return objPostOpAttr;
}
public WccData getDirWcc() {
return dirWcc;
}
@Override
public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
super.writeHeaderAndResponse(out, xid, verifier);
if (this.getStatus() == Nfs3Status.NFS3_OK) {
out.writeBoolean(true);
objFileHandle.serialize(out);
out.writeBoolean(true);
objPostOpAttr.serialize(out);
}
dirWcc.serialize(out);
return out;
}
}

View File

@ -114,6 +114,9 @@
run rm -rf hadoop-${project.version}
run mkdir hadoop-${project.version}
run cd hadoop-${project.version}
run cp $ROOT/LICENSE.txt .
run cp $ROOT/NOTICE.txt .
run cp $ROOT/README.txt .
run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* .
run cp -r $ROOT/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${project.version}/* .
run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .

View File

@ -34,7 +34,6 @@
<description>Apache Hadoop HttpFS</description>
<properties>
<tomcat.version>6.0.36</tomcat.version>
<httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
<httpfs.source.repository>REPO NOT AVAIL</httpfs.source.repository>
<httpfs.source.revision>REVISION NOT AVAIL</httpfs.source.revision>

View File

@ -421,7 +421,7 @@ public void receivedNewWrite(DFSClient dfsClient, WRITE3Request request,
if (existantWriteCtx != null) {
if (!existantWriteCtx.getReplied()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Repeated write request which hasn't be served: xid="
LOG.debug("Repeated write request which hasn't been served: xid="
+ xid + ", drop it.");
}
} else {
@ -579,7 +579,7 @@ private void processOverWrite(DFSClient dfsClient, WRITE3Request request,
* writing, and there is no other threads writing (i.e., asyncStatus is
* false), start the writing and set asyncStatus to true.
*
* @return True if the new write is sequencial and we can start writing
* @return True if the new write is sequential and we can start writing
* (including the case that there is already a thread writing).
*/
private synchronized boolean checkAndStartWrite(
@ -898,7 +898,7 @@ private synchronized WriteCtx offerNextToWrite() {
long offset = nextOffset.get();
if (range.getMin() > offset) {
if (LOG.isDebugEnabled()) {
LOG.debug("The next sequencial write has not arrived yet");
LOG.debug("The next sequential write has not arrived yet");
}
processCommits(nextOffset.get()); // handle race
this.asyncStatus = false;

View File

@ -268,7 +268,7 @@ GETATTR3Response getattr(XDR xdr, SecurityHandler securityHandler,
GETATTR3Request request = null;
try {
request = new GETATTR3Request(xdr);
request = GETATTR3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid GETATTR request");
response.setStatus(Nfs3Status.NFS3ERR_INVAL);
@ -360,7 +360,7 @@ SETATTR3Response setattr(XDR xdr, SecurityHandler securityHandler,
SETATTR3Request request = null;
try {
request = new SETATTR3Request(xdr);
request = SETATTR3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid SETATTR request");
response.setStatus(Nfs3Status.NFS3ERR_INVAL);
@ -445,7 +445,7 @@ LOOKUP3Response lookup(XDR xdr, SecurityHandler securityHandler,
LOOKUP3Request request = null;
try {
request = new LOOKUP3Request(xdr);
request = LOOKUP3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid LOOKUP request");
return new LOOKUP3Response(Nfs3Status.NFS3ERR_INVAL);
@ -513,7 +513,7 @@ ACCESS3Response access(XDR xdr, SecurityHandler securityHandler,
ACCESS3Request request = null;
try {
request = new ACCESS3Request(xdr);
request = ACCESS3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid ACCESS request");
return new ACCESS3Response(Nfs3Status.NFS3ERR_INVAL);
@ -581,7 +581,7 @@ READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler,
READLINK3Request request = null;
try {
request = new READLINK3Request(xdr);
request = READLINK3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid READLINK request");
return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
@ -655,7 +655,7 @@ READ3Response read(XDR xdr, SecurityHandler securityHandler,
READ3Request request = null;
try {
request = new READ3Request(xdr);
request = READ3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid READ request");
return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
@ -788,7 +788,7 @@ WRITE3Response write(XDR xdr, Channel channel, int xid,
WRITE3Request request = null;
try {
request = new WRITE3Request(xdr);
request = WRITE3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid WRITE request");
return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
@ -870,7 +870,7 @@ CREATE3Response create(XDR xdr, SecurityHandler securityHandler,
CREATE3Request request = null;
try {
request = new CREATE3Request(xdr);
request = CREATE3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid CREATE request");
return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1003,7 +1003,7 @@ MKDIR3Response mkdir(XDR xdr, SecurityHandler securityHandler,
MKDIR3Request request = null;
try {
request = new MKDIR3Request(xdr);
request = MKDIR3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid MKDIR request");
return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1099,7 +1099,7 @@ REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler,
REMOVE3Request request = null;
try {
request = new REMOVE3Request(xdr);
request = REMOVE3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid REMOVE request");
return new REMOVE3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1179,7 +1179,7 @@ RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler,
RMDIR3Request request = null;
try {
request = new RMDIR3Request(xdr);
request = RMDIR3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid RMDIR request");
return new RMDIR3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1264,7 +1264,7 @@ RENAME3Response rename(XDR xdr, SecurityHandler securityHandler,
RENAME3Request request = null;
try {
request = new RENAME3Request(xdr);
request = RENAME3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid RENAME request");
return new RENAME3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1360,7 +1360,7 @@ SYMLINK3Response symlink(XDR xdr, SecurityHandler securityHandler,
SYMLINK3Request request = null;
try {
request = new SYMLINK3Request(xdr);
request = SYMLINK3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid SYMLINK request");
response.setStatus(Nfs3Status.NFS3ERR_INVAL);
@ -1423,7 +1423,7 @@ private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
throw io;
}
// This happens when startAfter was just deleted
LOG.info("Cookie cound't be found: " + new String(startAfter)
LOG.info("Cookie couldn't be found: " + new String(startAfter)
+ ", do listing from beginning");
dlisting = dfsClient
.listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
@ -1453,7 +1453,7 @@ public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler,
READDIR3Request request = null;
try {
request = new READDIR3Request(xdr);
request = READDIR3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid READDIR request");
return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1611,7 +1611,7 @@ READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler,
READDIRPLUS3Request request = null;
try {
request = new READDIRPLUS3Request(xdr);
request = READDIRPLUS3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid READDIRPLUS request");
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1788,7 +1788,7 @@ FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler,
FSSTAT3Request request = null;
try {
request = new FSSTAT3Request(xdr);
request = FSSTAT3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid FSSTAT request");
return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1862,7 +1862,7 @@ FSINFO3Response fsinfo(XDR xdr, SecurityHandler securityHandler,
FSINFO3Request request = null;
try {
request = new FSINFO3Request(xdr);
request = FSINFO3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid FSINFO request");
return new FSINFO3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1926,7 +1926,7 @@ PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler,
PATHCONF3Request request = null;
try {
request = new PATHCONF3Request(xdr);
request = PATHCONF3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid PATHCONF request");
return new PATHCONF3Response(Nfs3Status.NFS3ERR_INVAL);
@ -1977,7 +1977,7 @@ COMMIT3Response commit(XDR xdr, Channel channel, int xid,
COMMIT3Request request = null;
try {
request = new COMMIT3Request(xdr);
request = COMMIT3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid COMMIT request");
response.setStatus(Nfs3Status.NFS3ERR_INVAL);
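
Every RpcProgramNfs3 hunk above is the same mechanical substitution: the removed new X3Request(xdr) constructor call becomes the static X3Request.deserialize(xdr) factory, with the existing NFS3ERR_INVAL error path untouched. Condensed into a sketch (the helper method itself is hypothetical; GETATTR stands in for all the ops):

import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request;
import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response;
import org.apache.hadoop.oncrpc.XDR;

public class HandlerDeserializeSketch {
  static GETATTR3Request parseOrFlagInvalid(XDR xdr, GETATTR3Response response) {
    try {
      return GETATTR3Request.deserialize(xdr);        // was: new GETATTR3Request(xdr)
    } catch (IOException e) {
      response.setStatus(Nfs3Status.NFS3ERR_INVAL);   // unchanged error handling
      return null;                                    // caller then returns the INVAL response
    }
  }
}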

View File

@ -17,12 +17,71 @@
*/
package org.apache.hadoop.hdfs.nfs.nfs3;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.request.ACCESS3Request;
import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request;
import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
import org.apache.hadoop.nfs.nfs3.request.FSINFO3Request;
import org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request;
import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request;
import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request;
import org.apache.hadoop.nfs.nfs3.request.MKDIR3Request;
import org.apache.hadoop.nfs.nfs3.request.PATHCONF3Request;
import org.apache.hadoop.nfs.nfs3.request.READ3Request;
import org.apache.hadoop.nfs.nfs3.request.READDIR3Request;
import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request;
import org.apache.hadoop.nfs.nfs3.request.READLINK3Request;
import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request;
import org.apache.hadoop.nfs.nfs3.request.RENAME3Request;
import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request;
import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request;
import org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request;
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response;
import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response;
import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response;
import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response;
import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response;
import org.apache.hadoop.nfs.nfs3.response.MKDIR3Response;
import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response;
import org.apache.hadoop.nfs.nfs3.response.READ3Response;
import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
import org.apache.hadoop.nfs.nfs3.response.READLINK3Response;
import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response;
import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.SecurityHandler;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.jboss.netty.channel.Channel;
import org.junit.AfterClass;
import org.junit.Assert;
@ -31,46 +90,6 @@
import org.junit.Test;
import org.mockito.Mockito;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request;
import org.apache.hadoop.nfs.nfs3.request.READ3Request;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response;
import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response;
import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response;
import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response;
import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response;
import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response;
import org.apache.hadoop.nfs.nfs3.response.READ3Response;
import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
import org.apache.hadoop.nfs.nfs3.response.READLINK3Response;
import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response;
import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.SecurityHandler;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
/**
* Tests for {@link RpcProgramNfs3}
@ -143,8 +162,9 @@ public void testGetattr() throws Exception {
long dirId = status.getFileId();
FileHandle handle = new FileHandle(dirId);
XDR xdr_req = new XDR();
handle.serialize(xdr_req);
GETATTR3Request req = new GETATTR3Request(handle);
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
@ -165,13 +185,12 @@ public void testSetattr() throws Exception {
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("bar");
SetAttr3 symAttr = new SetAttr3();
symAttr.serialize(xdr_req);
xdr_req.writeBoolean(false);
SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null,
EnumSet.of(SetAttrField.UID));
SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
// Attempt by an unprivileged user should fail.
SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
@ -214,7 +233,8 @@ public void testAccess() throws Exception {
long dirId = status.getFileId();
FileHandle handle = new FileHandle(dirId);
XDR xdr_req = new XDR();
handle.serialize(xdr_req);
ACCESS3Request req = new ACCESS3Request(handle);
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(),
@ -237,12 +257,10 @@ public void testReadlink() throws Exception {
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("fubar");
SetAttr3 symAttr = new SetAttr3();
symAttr.serialize(xdr_req);
xdr_req.writeString("bar");
SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
"bar");
req.serialize(xdr_req);
SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@ -251,7 +269,8 @@ public void testReadlink() throws Exception {
// Now perform readlink operations.
FileHandle handle2 = response.getObjFileHandle();
XDR xdr_req2 = new XDR();
handle2.serialize(xdr_req2);
READLINK3Request req2 = new READLINK3Request(handle2);
req2.serialize(xdr_req2);
// Attempt by an unpriviledged user should fail.
READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
@ -327,12 +346,10 @@ public void testCreate() throws Exception {
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("fubar");
xdr_req.writeInt(Nfs3Constant.CREATE_UNCHECKED);
SetAttr3 symAttr = new SetAttr3();
symAttr.serialize(xdr_req);
CREATE3Request req = new CREATE3Request(handle, "fubar",
Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
@ -348,26 +365,27 @@ public void testCreate() throws Exception {
}
@Test(timeout = 60000)
public void testMkdir() throws Exception {
public void testMkdir() throws Exception {//FixME
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("fubar");
SetAttr3 symAttr = new SetAttr3();
symAttr.serialize(xdr_req);
xdr_req.writeString("bar");
// Attempt to remove by an unpriviledged user should fail.
SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3());
req.serialize(xdr_req);
// Attempt to mkdir by an unprivileged user should fail.
MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
// Attempt to remove by a priviledged user should pass.
SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
XDR xdr_req2 = new XDR();
MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3());
req2.serialize(xdr_req2);
// Attempt to mkdir by a privileged user should pass.
MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
response2.getStatus());
@ -379,20 +397,18 @@ public void testSymlink() throws Exception {
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("fubar");
SetAttr3 symAttr = new SetAttr3();
symAttr.serialize(xdr_req);
xdr_req.writeString("bar");
SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
"bar");
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
// Attempt by an unprivileged user should fail.
SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
// Attempt by a priviledged user should pass.
// Attempt by a privileged user should pass.
SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@ -405,8 +421,8 @@ public void testRemove() throws Exception {
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("bar");
REMOVE3Request req = new REMOVE3Request(handle, "bar");
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
REMOVE3Response response1 = nfsd.remove(xdr_req.asReadOnlyWrap(),
@ -428,17 +444,17 @@ public void testRmdir() throws Exception {
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("foo");
RMDIR3Request req = new RMDIR3Request(handle, "foo");
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
// Attempt by an unprivileged user should fail.
RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
// Attempt by a priviledged user should pass.
// Attempt by a privileged user should pass.
RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@ -451,19 +467,17 @@ public void testRename() throws Exception {
long dirId = status.getFileId();
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("bar");
handle.serialize(xdr_req);
xdr_req.writeString("fubar");
// Attempt by an unpriviledged user should fail.
RENAME3Request req = new RENAME3Request(handle, "bar", handle, "fubar");
req.serialize(xdr_req);
// Attempt by an unprivileged user should fail.
RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
// Attempt by a priviledged user should pass.
// Attempt by a privileged user should pass.
RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@ -476,10 +490,8 @@ public void testReaddir() throws Exception {
long dirId = status.getFileId();
FileHandle handle = new FileHandle(dirId);
XDR xdr_req = new XDR();
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(0);
xdr_req.writeLongAsHyper(0);
xdr_req.writeInt(100);
READDIR3Request req = new READDIR3Request(handle, 0, 0, 100);
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
@ -501,20 +513,17 @@ public void testReaddirplus() throws Exception {
long dirId = status.getFileId();
FileHandle handle = new FileHandle(dirId);
XDR xdr_req = new XDR();
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(0);
xdr_req.writeLongAsHyper(0);
xdr_req.writeInt(3);
xdr_req.writeInt(2);
// Attempt by an unpriviledged user should fail.
READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
req.serialize(xdr_req);
// Attempt by an unprivileged user should fail.
READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
// Attempt by a priviledged user should pass.
// Attempt by a privileged user should pass.
READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@ -527,8 +536,9 @@ public void testFsstat() throws Exception {
long dirId = status.getFileId();
FileHandle handle = new FileHandle(dirId);
XDR xdr_req = new XDR();
handle.serialize(xdr_req);
FSSTAT3Request req = new FSSTAT3Request(handle);
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
@ -549,8 +559,9 @@ public void testFsinfo() throws Exception {
long dirId = status.getFileId();
FileHandle handle = new FileHandle(dirId);
XDR xdr_req = new XDR();
handle.serialize(xdr_req);
FSINFO3Request req = new FSINFO3Request(handle);
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
@ -571,8 +582,9 @@ public void testPathconf() throws Exception {
long dirId = status.getFileId();
FileHandle handle = new FileHandle(dirId);
XDR xdr_req = new XDR();
handle.serialize(xdr_req);
PATHCONF3Request req = new PATHCONF3Request(handle);
req.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
@ -593,9 +605,8 @@ public void testCommit() throws Exception {
long dirId = status.getFileId();
FileHandle handle = new FileHandle(dirId);
XDR xdr_req = new XDR();
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(0);
xdr_req.writeInt(5);
COMMIT3Request req = new COMMIT3Request(handle, 0, 5);
req.serialize(xdr_req);
Channel ch = Mockito.mock(Channel.class);
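
The test changes above all follow one idiom: build the typed request object and let its serialize() fill the XDR, instead of hand-writing the handle, hypers and ints field by field. A condensed sketch of that idiom (the file id is a made-up example; nfsd and the security handlers are the fixtures from the test class):

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request;
import org.apache.hadoop.oncrpc.XDR;

public class TestIdiomSketch {
  public static XDR packageCommit(long fileId) {
    XDR xdr = new XDR();
    COMMIT3Request req = new COMMIT3Request(new FileHandle(fileId), 0, 5);
    req.serialize(xdr);        // replaces handle.serialize(xdr); writeLongAsHyper(0); writeInt(5);
    return xdr;                // the test then passes xdr.asReadOnlyWrap() to nfsd.commit(...)
  }
}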

View File

@ -278,99 +278,6 @@ Trunk (Unreleased)
HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
(Vinayakumar B via wheat9)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
encryption zone. (clamb)
HDFS-6386. HDFS Encryption Zones (clamb)
HDFS-6388. HDFS integration with KeyProvider. (clamb)
HDFS-6473. Protocol and API for Encryption Zones (clamb)
HDFS-6392. Wire crypto streams for encrypted files in
DFSClient. (clamb and yliu)
HDFS-6476. Print out the KeyProvider after finding KP successfully on
startup. (Juan Yu via wang)
HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
DFSClient. (Charles Lamb and wang)
HDFS-6389. Rename restrictions for encryption zones. (clamb)
HDFS-6605. Client server negotiation of cipher suite. (wang)
HDFS-6625. Remove the Delete Encryption Zone function (clamb)
HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
HDFS-6635. Refactor encryption zone functionality into new
EncryptionZoneManager class. (wang)
HDFS-6474. Namenode needs to get the actual keys and iv from the
KeyProvider. (wang)
HDFS-6619. Clean up encryption-related tests. (wang)
HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
HDFS-6490. Fix the keyid format for generated keys in
FSNamesystem.createEncryptionZone (clamb)
HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
(wang)
HDFS-6718. Remove EncryptionZoneManager lock. (wang)
HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
EZManager#createEncryptionZone. (clamb)
HDFS-6724. Decrypt EDEK before creating
CryptoInputStream/CryptoOutputStream. (wang)
HDFS-6509. Create a special /.reserved/raw directory for raw access to
encrypted data. (clamb via wang)
HDFS-6771. Require specification of an encryption key when creating
an encryption zone. (wang)
HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
HDFS-6692. Add more HDFS encryption tests. (wang)
HDFS-6780. Batch the encryption zones listing API. (wang)
HDFS-6394. HDFS encryption documentation. (wang)
HDFS-6834. Improve the configuration guidance in DFSClient when there
are no Codec classes found in configs. (umamahesh)
HDFS-6546. Add non-superuser capability to get the encryption zone
for a specific path. (clamb)
HDFS-6733. Creating encryption zone results in NPE when
KeyProvider is null. (clamb)
HDFS-6785. Should not be able to create encryption zone using path
to a non-directory file. (clamb)
HDFS-6807. Fix TestReservedRawPaths. (clamb)
HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
as boolean. (umamahesh)
HDFS-6817. Fix findbugs and other warnings. (yliu)
HDFS-6839. Fix TestCLI to expect new output. (clamb)
HDFS-6905. fs-encryption merge triggered release audit failures. (clamb via tucu)
HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail
intermittently with various symptoms - debugging patch. (Yongjun Zhang via
Arpit Agarwal)
@ -537,10 +444,30 @@ Release 2.6.0 - UNRELEASED
HDFS-6899. Allow changing MiniDFSCluster volumes per DN and capacity
per volume. (Arpit Agarwal)
HDFS-4486. Add log category for long-running DFSClient notices (Zhe Zhang
via Colin Patrick McCabe)
HDFS-6879. Adding tracing to Hadoop RPC (Masatake Iwasaki via Colin Patrick
McCabe)
HDFS-6774. Make FsDataset and DataStore support removing volumes. (Lei Xu
via atm)
HDFS-6634. inotify in HDFS. (James Thomas via wang)
HDFS-4257. The ReplaceDatanodeOnFailure policies could have a forgiving
option (szetszwo via cmccabe)
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
HDFS-6773. MiniDFSCluster should skip edit log fsync by default (Stephen
Chu via Colin Patrick McCabe)
HDFS-6865. Byte array native checksumming on client side
(James Thomas via todd)
BUG FIXES
HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for
@ -668,6 +595,117 @@ Release 2.6.0 - UNRELEASED
HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion.
(Juan Yu and jing9 via jing9)
HDFS-6892. Add XDR packaging method for each NFS request (brandonli)
HDFS-6938. Cleanup javac warnings in FSNamesystem (Charles Lamb via wheat9)
HDFS-6902. FileWriter should be closed in finally block in
BlockReceiver#receiveBlock() (Tsuyoshi OZAWA via Colin Patrick McCabe)
HDFS-6800. Support Datanode layout changes with rolling upgrade.
(James Thomas via Arpit Agarwal)
HDFS-6972. TestRefreshUserMappings.testRefreshSuperUserGroupsConfiguration
doesn't decode url correctly. (Yongjun Zhang via wang)
HDFS-6942. Fix typos in log messages. (Ray Chiang via wheat9)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an
encryption zone. (clamb)
HDFS-6386. HDFS Encryption Zones (clamb)
HDFS-6388. HDFS integration with KeyProvider. (clamb)
HDFS-6473. Protocol and API for Encryption Zones (clamb)
HDFS-6392. Wire crypto streams for encrypted files in
DFSClient. (clamb and yliu)
HDFS-6476. Print out the KeyProvider after finding KP successfully on
startup. (Juan Yu via wang)
HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
DFSClient. (Charles Lamb and wang)
HDFS-6389. Rename restrictions for encryption zones. (clamb)
HDFS-6605. Client server negotiation of cipher suite. (wang)
HDFS-6625. Remove the Delete Encryption Zone function (clamb)
HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
HDFS-6635. Refactor encryption zone functionality into new
EncryptionZoneManager class. (wang)
HDFS-6474. Namenode needs to get the actual keys and iv from the
KeyProvider. (wang)
HDFS-6619. Clean up encryption-related tests. (wang)
HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
HDFS-6490. Fix the keyid format for generated keys in
FSNamesystem.createEncryptionZone (clamb)
HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
(wang)
HDFS-6718. Remove EncryptionZoneManager lock. (wang)
HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
EZManager#createEncryptionZone. (clamb)
HDFS-6724. Decrypt EDEK before creating
CryptoInputStream/CryptoOutputStream. (wang)
HDFS-6509. Create a special /.reserved/raw directory for raw access to
encrypted data. (clamb via wang)
HDFS-6771. Require specification of an encryption key when creating
an encryption zone. (wang)
HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
HDFS-6692. Add more HDFS encryption tests. (wang)
HDFS-6780. Batch the encryption zones listing API. (wang)
HDFS-6394. HDFS encryption documentation. (wang)
HDFS-6834. Improve the configuration guidance in DFSClient when there
are no Codec classes found in configs. (umamahesh)
HDFS-6546. Add non-superuser capability to get the encryption zone
for a specific path. (clamb)
HDFS-6733. Creating encryption zone results in NPE when
KeyProvider is null. (clamb)
HDFS-6785. Should not be able to create encryption zone using path
to a non-directory file. (clamb)
HDFS-6807. Fix TestReservedRawPaths. (clamb)
HDFS-6814. dfs.namenode.list.encryption.zones.num.responses was mistakenly
configured as a boolean. (umamahesh)
HDFS-6817. Fix findbugs and other warnings. (yliu)
HDFS-6839. Fix TestCLI to expect new output. (clamb)
HDFS-6954. With crypto, systems without native libraries are too verbose. (clamb via wang)
HDFS-2975. Rename with the overwrite flag set to true can cause the NameNode
to get stuck in safemode after a NameNode crash and restart. (Yi Liu via umamahesh)
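The encryption zone subtasks listed above are exercised from the command line
through the key and crypto admin tools introduced by this work. A minimal
sketch, assuming a KeyProvider is configured and the target directory is empty
(the key name and path here are illustrative, not taken from this changelog):

  # Create a key in the configured KeyProvider
  hadoop key create mykey

  # Make an empty directory an encryption zone backed by that key
  hdfs dfs -mkdir /secure
  hdfs crypto -createZone -keyName mykey -path /secure

  # List the encryption zones known to the NameNode
  hdfs crypto -listZones

Files written under /secure are then encrypted and decrypted transparently by
the DFSClient, using per-file keys wrapped with the zone key.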
Release 2.5.1 - UNRELEASED
INCOMPATIBLE CHANGES
@ -680,6 +718,9 @@ Release 2.5.1 - UNRELEASED
BUG FIXES
HADOOP-10957. The globber will sometimes erroneously return a permission
denied exception when there is a non-terminal wildcard (cmccabe)
Release 2.5.0 - 2014-08-11
INCOMPATIBLE CHANGES

View File

@ -1,271 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
APACHE HADOOP SUBCOMPONENTS:
The Apache Hadoop project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.
For the org.apache.hadoop.util.bloom.* classes:
/**
*
* Copyright (c) 2005, European Commission project OneLab under contract
* 034819 (http://www.one-lab.org)
* All rights reserved.
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the distribution.
* - Neither the name of the University Catholique de Louvain - UCL
* nor the names of its contributors may be used to endorse or
* promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
For src/main/native/util/tree.h:
/*-
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

View File

@ -1,2 +0,0 @@
This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).

Some files were not shown because too many files have changed in this diff.