From a590b498acf1a424ffbb3a9d8849c0abb409366d Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Mon, 7 Nov 2011 20:05:16 +0000
Subject: [PATCH] HDFS-2528. Webhdfs: set delegation kind to WEBHDFS and add a
 HDFS token when http requests are redirected to datanode.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1198903 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../delegation/DelegationTokenRenewer.java    |  2 +-
 .../hadoop/hdfs/server/common/JspHelper.java  |  6 +-
 .../web/resources/DatanodeWebHdfsMethods.java | 72 ++++++++++++-------
 .../web/resources/NamenodeWebHdfsMethods.java |  2 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    | 44 +++++-------
 .../hdfs/web/resources/UserProvider.java      |  2 +-
 .../hadoop/hdfs/web/TestWebHdfsUrl.java       |  2 +-
 8 files changed, 72 insertions(+), 61 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3a2e9bf9a4..2fb78b8bb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -120,6 +120,9 @@ Release 0.23.1 - UNRELEASED
     isDirectory and isSymlink with enum {FILE, DIRECTORY, SYMLINK} in
     HdfsFileStatus JSON object.  (szetszwo)
 
+    HDFS-2528. Webhdfs: set delegation kind to WEBHDFS and add a HDFS token
+    when http requests are redirected to datanode.  (szetszwo)
+
 Release 0.23.0 - 2011-11-01
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenRenewer.java
index 3419be2969..349d71baeb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenRenewer.java
@@ -134,7 +134,7 @@ public String toString() {
   private DelayQueue<RenewAction<T>> queue = new DelayQueue<RenewAction<T>>();
 
-  public DelegationTokenRenewer(final Class<?> clazz) {
+  public DelegationTokenRenewer(final Class<T> clazz) {
     super(clazz.getSimpleName() + "-"
         + DelegationTokenRenewer.class.getSimpleName());
     setDaemon(true);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index 69879d81ed..a9df853294 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -58,13 +58,12 @@
 import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.http.HtmlQuoting;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.VersionInfo;
 
@@ -546,9 +545,8 @@ public static UserGroupInformation getUGI(ServletContext context,
       token.decodeFromUrlString(tokenString);
       String serviceAddress = getNNServiceAddress(context, request);
       if (serviceAddress != null) {
-        LOG.info("Setting service in token: "
-            + new Text(serviceAddress));
         token.setService(new Text(serviceAddress));
+        token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
       }
       ByteArrayInputStream buf = new ByteArrayInputStream(token
           .getIdentifier());
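The JspHelper change above is the server-side half of the fix: when getUGI() can work out the namenode's service address, it now also forces the token kind back to HDFS_DELEGATION_KIND, so a token that arrived over HTTP with the WEBHDFS kind can be handed to an ordinary RPC client. A minimal sketch of that rewrite, using an empty token and an assumed namenode address in place of the values the servlet derives:

    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;

    public class TokenRewriteSketch {
      public static void main(String[] args) throws Exception {
        // Stand-in for the delegation parameter a client would send over HTTP.
        final String tokenString =
            new Token<DelegationTokenIdentifier>().encodeToUrlString();

        // What getUGI() now does when the NN service address is known:
        final Token<DelegationTokenIdentifier> token =
            new Token<DelegationTokenIdentifier>();
        token.decodeFromUrlString(tokenString);        // parse the URL-safe form
        token.setService(new Text("127.0.0.1:8020"));  // assumed NN RPC address
        token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);

        System.out.println(token.getKind() + " @ " + token.getService());
      }
    }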
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
index ba10ee79fc..0ecf5fadba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -58,7 +59,9 @@
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
+import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.LengthParam;
 import org.apache.hadoop.hdfs.web.resources.OffsetParam;
 import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
@@ -69,7 +72,9 @@
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 
 import com.sun.jersey.spi.container.ResourceFilters;
@@ -84,6 +89,29 @@ public class DatanodeWebHdfsMethods {
   private @Context ServletContext context;
   private @Context HttpServletResponse response;
 
+  private void init(final UserGroupInformation ugi, final DelegationParam delegation,
+      final UriFsPathParam path, final HttpOpParam<?> op,
+      final Param<?, ?>... parameters) throws IOException {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
+          + ", ugi=" + ugi + Param.toSortedString(", ", parameters));
+    }
+
+    //clear content type
+    response.setContentType(null);
+
+    if (UserGroupInformation.isSecurityEnabled()) {
+      //add a token for RPC.
+      final DataNode datanode = (DataNode)context.getAttribute("datanode");
+      final InetSocketAddress nnRpcAddr = NameNode.getAddress(datanode.getConf());
+      final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
+      token.decodeFromUrlString(delegation.getValue());
+      SecurityUtil.setTokenService(token, nnRpcAddr);
+      token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+      ugi.addToken(token);
+    }
+  }
+
   /** Handle HTTP PUT request for the root. */
   @PUT
   @Path("/")
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
@@ -92,6 +120,8 @@ public class DatanodeWebHdfsMethods {
   public Response putRoot(
       final InputStream in,
       @Context final UserGroupInformation ugi,
+      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+          final DelegationParam delegation,
       @QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT)
           final PutOpParam op,
       @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
           final PermissionParam permission,
       @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT)
           final OverwriteParam overwrite,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize,
       @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
           final ReplicationParam replication,
@@ -105,7 +135,7 @@ public Response putRoot(
       @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT)
           final BlockSizeParam blockSize
       ) throws IOException, InterruptedException {
-    return put(in, ugi, ROOT, op, permission, overwrite, bufferSize,
+    return put(in, ugi, delegation, ROOT, op, permission, overwrite, bufferSize,
         replication, blockSize);
   }
 
@@ -117,6 +147,8 @@ public Response putRoot(
   public Response put(
       final InputStream in,
       @Context final UserGroupInformation ugi,
+      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+          final DelegationParam delegation,
       @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
       @QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT)
           final PutOpParam op,
       @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
           final PermissionParam permission,
       @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT)
           final OverwriteParam overwrite,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize,
       @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
           final ReplicationParam replication,
@@ -132,14 +164,8 @@ public Response put(
       @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT)
           final BlockSizeParam blockSize
       ) throws IOException, InterruptedException {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace(op + ": " + path + ", ugi=" + ugi
-          + Param.toSortedString(", ", permission, overwrite, bufferSize,
-              replication, blockSize));
-    }
-
-    //clear content type
-    response.setContentType(null);
+    init(ugi, delegation, path, op, permission, overwrite, bufferSize,
+        replication, blockSize);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
@@ -193,12 +219,14 @@ public Response run() throws IOException, URISyntaxException {
   public Response postRoot(
       final InputStream in,
       @Context final UserGroupInformation ugi,
+      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+          final DelegationParam delegation,
       @QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT)
           final PostOpParam op,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
       ) throws IOException, InterruptedException {
-    return post(in, ugi, ROOT, op, bufferSize);
+    return post(in, ugi, delegation, ROOT, op, bufferSize);
   }
 
   /** Handle HTTP POST request. */
   @POST
@@ -209,6 +237,8 @@ public Response postRoot(
   public Response post(
       final InputStream in,
       @Context final UserGroupInformation ugi,
+      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+          final DelegationParam delegation,
       @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
       @QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT)
           final PostOpParam op,
@@ -216,13 +246,7 @@ public Response post(
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
       ) throws IOException, InterruptedException {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace(op + ": " + path + ", ugi=" + ugi
-          + Param.toSortedString(", ", bufferSize));
-    }
-
-    //clear content type
-    response.setContentType(null);
+    init(ugi, delegation, path, op, bufferSize);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
@@ -265,6 +289,8 @@ public Response run() throws IOException {
   @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response getRoot(
       @Context final UserGroupInformation ugi,
+      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+          final DelegationParam delegation,
       @QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT)
           final GetOpParam op,
       @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT)
           final OffsetParam offset,
@@ -274,7 +300,7 @@ public Response getRoot(
       @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
           final LengthParam length,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
       ) throws IOException, InterruptedException {
-    return get(ugi, ROOT, op, offset, length, bufferSize);
+    return get(ugi, delegation, ROOT, op, offset, length, bufferSize);
   }
 
   /** Handle HTTP GET request. */
@@ -283,6 +309,8 @@ public Response getRoot(
   @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
   public Response get(
       @Context final UserGroupInformation ugi,
+      @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+          final DelegationParam delegation,
       @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
       @QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT)
           final GetOpParam op,
       @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT)
           final OffsetParam offset,
@@ -294,13 +322,7 @@ public Response get(
       @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
           final LengthParam length,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
       ) throws IOException, InterruptedException {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace(op + ": " + path + ", ugi=" + ugi
-          + Param.toSortedString(", ", offset, length, bufferSize));
-    }
-
-    //clear content type
-    response.setContentType(null);
+    init(ugi, delegation, path, op, offset, length, bufferSize);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
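Every datanode handler now accepts a delegation query parameter and funnels through init(), which, on a secure cluster, decodes that parameter, points the token's service at the namenode's RPC address, flips the kind to HDFS, and attaches the token to the caller's UGI so that the DFSClient created inside ugi.doAs() can authenticate its RPCs. A sketch of that flow, with assumed values standing in for the servlet context and query parameter:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class AddRpcTokenSketch {
      public static void main(String[] args) throws Exception {
        final UserGroupInformation ugi =
            UserGroupInformation.createRemoteUser("webuser");  // assumed caller

        // Stand-in for DelegationParam.getValue(); empty but well-formed.
        final String delegation =
            new Token<DelegationTokenIdentifier>().encodeToUrlString();

        final Token<DelegationTokenIdentifier> token =
            new Token<DelegationTokenIdentifier>();
        token.decodeFromUrlString(delegation);
        // Assumed NN RPC endpoint; init() derives it via NameNode.getAddress(conf).
        SecurityUtil.setTokenService(token, new InetSocketAddress("127.0.0.1", 8020));
        token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
        ugi.addToken(token);

        // A DFSClient built inside ugi.doAs(...) would now select this token.
        System.out.println(ugi.getTokens());
      }
    }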
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 963538f506..19569bd230 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -154,7 +154,7 @@ private Token<? extends TokenIdentifier> generateDelegationToken(
         namenode, ugi, renewer != null?
             renewer: ugi.getShortUserName());
     final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
     t.setKind(WebHdfsFileSystem.TOKEN_KIND);
-    SecurityUtil.setTokenService(t, namenode.getNameNodeAddress());
+    SecurityUtil.setTokenService(t, namenode.getHttpAddress());
     return t;
   }
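The one-line namenode change is easy to miss: a token issued for webhdfs keeps the WEBHDFS kind and is now stamped with the namenode's HTTP address rather than its RPC address, since that is the endpoint a webhdfs client will contact to renew or cancel it. A sketch of the distinction, with both addresses assumed:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.token.Token;

    public class TokenServiceSketch {
      public static void main(String[] args) {
        // Assumed namenode endpoints.
        final InetSocketAddress rpcAddr = new InetSocketAddress("127.0.0.1", 8020);
        final InetSocketAddress httpAddr = new InetSocketAddress("127.0.0.1", 50070);

        final Token<DelegationTokenIdentifier> t =
            new Token<DelegationTokenIdentifier>();
        t.setKind(new Text("WEBHDFS delegation"));  // WebHdfsFileSystem.TOKEN_KIND

        SecurityUtil.setTokenService(t, rpcAddr);   // before this patch
        System.out.println("rpc  service: " + t.getService());
        SecurityUtil.setTokenService(t, httpAddr);  // after this patch
        System.out.println("http service: " + t.getService());
      }
    }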
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 1699d86db5..21a7a97ff4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -114,17 +114,24 @@ public class WebHdfsFileSystem extends FileSystem
   private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
   /** Delegation token kind */
   public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
+  /** Token selector */
+  public static final AbstractDelegationTokenSelector<DelegationTokenIdentifier> DT_SELECTOR
+      = new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(TOKEN_KIND) {};
 
-  private static final DelegationTokenRenewer<WebHdfsFileSystem> dtRenewer
-      = new DelegationTokenRenewer<WebHdfsFileSystem>(WebHdfsFileSystem.class);
-  static {
-    dtRenewer.start();
+  private static DelegationTokenRenewer<WebHdfsFileSystem> DT_RENEWER = null;
+
+  private static synchronized void addRenewAction(final WebHdfsFileSystem webhdfs) {
+    if (DT_RENEWER == null) {
+      DT_RENEWER = new DelegationTokenRenewer<WebHdfsFileSystem>(WebHdfsFileSystem.class);
+      DT_RENEWER.start();
+    }
+
+    DT_RENEWER.addRenewAction(webhdfs);
   }
 
   private final UserGroupInformation ugi;
   private InetSocketAddress nnAddr;
   private Token<?> delegationToken;
-  private Token<?> renewToken;
   private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
   private Path workingDir;
@@ -153,8 +160,7 @@ public synchronized void initialize(URI uri, Configuration conf
   protected void initDelegationToken() throws IOException {
     // look for webhdfs token, then try hdfs
     final Text serviceName = SecurityUtil.buildTokenService(nnAddr);
-    Token<?> token = webhdfspTokenSelector.selectToken(
-        serviceName, ugi.getTokens());
+    Token<?> token = DT_SELECTOR.selectToken(serviceName, ugi.getTokens());
     if (token == null) {
       token = DelegationTokenSelector.selectHdfsDelegationToken(
           nnAddr, ugi, getConf());
@@ -171,7 +177,7 @@ protected void initDelegationToken() throws IOException {
     if (token != null) {
       setDelegationToken(token);
       if (createdToken) {
-        dtRenewer.addRenewAction(this);
+        addRenewAction(this);
         LOG.debug("Created new DT for " + token.getService());
       } else {
         LOG.debug("Found existing DT for " + token.getService());
@@ -652,23 +658,14 @@ public List<Token<?>> getDelegationTokens(final String renewer
   @Override
   public Token<?> getRenewToken() {
-    return renewToken;
+    return delegationToken;
   }
 
   @Override
   public <T extends TokenIdentifier> void setDelegationToken(
       final Token<T> token) {
     synchronized(this) {
-      renewToken = token;
-      // emulate the 203 usage of the tokens
-      // by setting the kind and service as if they were hdfs tokens
-      delegationToken = new Token<T>(token);
-      // NOTE: the remote nn must be configured to use hdfs
-      delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
-      // no need to change service because we aren't exactly sure what it
-      // should be.  we can guess, but it might be wrong if the local conf
-      // value is incorrect.  the service is a client side field, so the remote
-      // end does not care about the value
+      delegationToken = token;
     }
   }
 
@@ -728,15 +725,6 @@ public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
     return JsonUtil.toMD5MD5CRC32FileChecksum(m);
   }
 
-  private static final DtSelector webhdfspTokenSelector = new DtSelector();
-
-  private static class DtSelector
-      extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
-    private DtSelector() {
-      super(TOKEN_KIND);
-    }
-  }
-
   /** Delegation token renewer. */
   public static class DtRenewer extends TokenRenewer {
     @Override
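Two client-side changes ride together in WebHdfsFileSystem: the anonymous DT_SELECTOR subclass replaces the private DtSelector class, and the renewer thread that used to start in a static initializer becomes a lazily created singleton, so merely loading the class no longer spawns a daemon. The lazy-start pattern in isolation, with a hypothetical Renewer type standing in for DelegationTokenRenewer:

    public class LazyRenewerSketch {
      /** Stand-in for DelegationTokenRenewer<WebHdfsFileSystem>. */
      static class Renewer extends Thread {
        Renewer() {
          setDaemon(true);  // as the real renewer does
        }
        void addRenewAction(Object fs) {
          System.out.println("scheduling renewal for " + fs);
        }
      }

      private static Renewer RENEWER = null;

      /** First caller pays the thread-start cost; later callers reuse it. */
      private static synchronized void addRenewAction(Object fs) {
        if (RENEWER == null) {
          RENEWER = new Renewer();
          RENEWER.start();
        }
        RENEWER.addRenewAction(fs);
      }

      public static void main(String[] args) {
        addRenewAction("webhdfs://nn.example.com/");  // assumed fs identifier
        addRenewAction("webhdfs://nn.example.com/");  // reuses the same thread
      }
    }

Dropping renewToken in favor of a single delegationToken field also removes the old 0.20.203-compatibility copy that forcibly re-kinded the token to HDFS on the client; with the server now re-kinding on redirect, the client can keep the WEBHDFS token as-is.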
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java
index 74070243c0..67bb5f37fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java
@@ -53,7 +53,7 @@ public UserGroupInformation getValue(final HttpContext context) {
       return JspHelper.getUGI(servletcontext, request, conf,
           AuthenticationMethod.KERBEROS, false);
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new SecurityException("Failed to obtain user group information.", e);
    }
  }
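Replacing RuntimeException with SecurityException keeps the failure unchecked, as the JAX-RS injectable-provider interface requires, while naming the actual problem and preserving the cause. A minimal illustration of the chaining:

    import java.io.IOException;

    public class ExceptionChainSketch {
      public static void main(String[] args) {
        try {
          try {
            throw new IOException("no Kerberos credentials");  // stand-in failure
          } catch (IOException e) {
            throw new SecurityException(
                "Failed to obtain user group information.", e);
          }
        } catch (SecurityException se) {
          System.out.println(se.getMessage() + " <- " + se.getCause());
        }
      }
    }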
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index 7cae2d6454..ec90146d60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -75,7 +75,7 @@ public void testDelegationTokenInUrl() throws IOException {
         + "&token=" + tokenString, renewTokenUrl.getQuery());
     Token<DelegationTokenIdentifier> delegationToken = new Token<DelegationTokenIdentifier>(
         token);
-    delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+    delegationToken.setKind(WebHdfsFileSystem.TOKEN_KIND);
     Assert.assertEquals(
         generateUrlQueryPrefix(PutOpParam.Op.CANCELDELEGATIONTOKEN,
             ugi.getUserName())
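The test update mirrors the behavior change: since the client no longer re-kinds its token to HDFS, the token used to build the cancel URL is expected to carry the WEBHDFS kind. A compact check of that invariant, assuming only the public kind text shown in the patch:

    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.token.Token;

    public class TokenKindSketch {
      public static void main(String[] args) {
        final Token<DelegationTokenIdentifier> t =
            new Token<DelegationTokenIdentifier>();
        t.setKind(WebHdfsFileSystem.TOKEN_KIND);
        // TOKEN_KIND is the Text "WEBHDFS delegation".
        System.out.println(new Text("WEBHDFS delegation").equals(t.getKind()));
      }
    }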