From 7ba5913797c49d5001ad95558eadd119c3361060 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Thu, 17 Jul 2014 23:11:27 +0000
Subject: [PATCH] HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on
 secure cluster fails with Client cannot authenticate via:[TOKEN, KERBEROS]
 error. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1611508 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../java/org/apache/hadoop/hdfs/HAUtil.java   | 35 +++++++++++--------
 .../apache/hadoop/hdfs/NameNodeProxies.java   |  6 ++--
 .../hadoop/hdfs/protocol/HdfsConstants.java   |  2 +-
 .../web/resources/DatanodeWebHdfsMethods.java |  3 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    |  2 +-
 .../ha/TestDelegationTokensWithHA.java        | 10 ++++--
 7 files changed, 39 insertions(+), 22 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 405ecdbe29..00d1e93b68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -326,6 +326,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6693. TestDFSAdminWithHA fails on windows ( vinayakumarb )
 
+    HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails
+    with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 250d41c5cb..90acedea12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -26,7 +26,6 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -38,14 +37,13 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -259,12 +257,11 @@ public static boolean useLogicalUri(Configuration conf, URI nameNodeUri)
   /**
    * Parse the file system URI out of the provided token.
    */
-  public static URI getServiceUriFromToken(final String scheme,
-      Token<?> token) {
+  public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
     String tokStr = token.getService().toString();
-
-    if (tokStr.startsWith(HA_DT_SERVICE_PREFIX)) {
-      tokStr = tokStr.replaceFirst(HA_DT_SERVICE_PREFIX, "");
+    final String prefix = buildTokenServicePrefixForLogicalUri(scheme);
+    if (tokStr.startsWith(prefix)) {
+      tokStr = tokStr.replaceFirst(prefix, "");
     }
     return URI.create(scheme + "://" + tokStr);
   }
@@ -273,10 +270,13 @@ public static URI getServiceUriFromToken(final String scheme,
    * Get the service name used in the delegation token for the given logical
    * HA service.
    * @param uri the logical URI of the cluster
+   * @param scheme the scheme of the corresponding FileSystem
    * @return the service name
    */
-  public static Text buildTokenServiceForLogicalUri(URI uri) {
-    return new Text(HA_DT_SERVICE_PREFIX + uri.getHost());
+  public static Text buildTokenServiceForLogicalUri(final URI uri,
+      final String scheme) {
+    return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+        + uri.getHost());
   }
 
   /**
@@ -286,7 +286,11 @@ public static Text buildTokenServiceForLogicalUri(URI uri) {
   public static boolean isTokenForLogicalUri(Token<?> token) {
     return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
   }
-  
+
+  public static String buildTokenServicePrefixForLogicalUri(String scheme) {
+    return HA_DT_SERVICE_PREFIX + scheme + ":";
+  }
+
   /**
    * Locate a delegation token associated with the given HA cluster URI, and if
    * one is found, clone it to also represent the underlying namenode address.
@@ -298,7 +302,9 @@ public static boolean isTokenForLogicalUri(Token<?> token) {
   public static void cloneDelegationTokenForLogicalUri(
       UserGroupInformation ugi, URI haUri,
       Collection<InetSocketAddress> nnAddrs) {
-    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri);
+    // this cloning logic is only used by hdfs
+    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME);
     Token<DelegationTokenIdentifier> haToken =
         tokenSelector.selectToken(haService, ugi.getTokens());
     if (haToken != null) {
@@ -309,8 +315,9 @@ public static void cloneDelegationTokenForLogicalUri(
         Token<DelegationTokenIdentifier> specificToken =
             new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
         SecurityUtil.setTokenService(specificToken, singleNNAddr);
-        Text alias =
-            new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService());
+        Text alias = new Text(
+            buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
+                + "//" + specificToken.getService());
         ugi.addToken(alias, specificToken);
         LOG.debug("Mapped HA service delegation token for logical URI " +
             haUri + " to namenode " + singleNNAddr);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index ab8f3dc7bd..17653345ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -163,7 +163,8 @@ public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
 
     Text dtService;
     if (failoverProxyProvider.useLogicalURI()) {
-      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+          HdfsConstants.HDFS_URI_SCHEME);
     } else {
       dtService = SecurityUtil.buildTokenService(
           NameNode.getAddress(nameNodeUri));
@@ -224,7 +225,8 @@ public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
         new Class[] { xface }, dummyHandler);
     Text dtService;
     if (failoverProxyProvider.useLogicalURI()) {
-      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
+      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
+          HdfsConstants.HDFS_URI_SCHEME);
     } else {
       dtService = SecurityUtil.buildTokenService(
           NameNode.getAddress(nameNodeUri));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 7cc8c31880..77fe543784 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -124,7 +124,7 @@ public static enum DatanodeReportType {
    * of a delgation token, indicating that the URI is a logical (HA)
    * URI.
    */
-  public static final String HA_DT_SERVICE_PREFIX = "ha-hdfs:";
+  public static final String HA_DT_SERVICE_PREFIX = "ha-";
 
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
index 83de6ebe41..51731c8d01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
@@ -128,7 +128,8 @@ private void init(final UserGroupInformation ugi,
           "://" + nnId);
       boolean isLogical = HAUtil.isLogicalUri(conf, nnUri);
       if (isLogical) {
-        token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri));
+        token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri,
+            HdfsConstants.HDFS_URI_SCHEME));
       } else {
         token.setService(SecurityUtil.buildTokenService(nnUri));
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 94c666a3a1..6eb09f6134 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -158,7 +158,7 @@ public synchronized void initialize(URI uri, Configuration conf
     // getCanonicalUri() in order to handle the case where no port is
     // specified in the URI
     this.tokenServiceName = isLogicalUri ?
-        HAUtil.buildTokenServiceForLogicalUri(uri)
+        HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
         : SecurityUtil.buildTokenService(getCanonicalUri());
 
     if (!isHA) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index 46059520f8..b2cc9197aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -50,6 +50,7 @@
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -299,7 +300,8 @@ public void testHAUtilClonesDelegationTokens() throws Exception {
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
 
     URI haUri = new URI("hdfs://my-ha-uri/");
-    token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri));
+    token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME));
     ugi.addToken(token);
 
     Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
@@ -355,7 +357,8 @@ public void testHAUtilClonesDelegationTokens() throws Exception {
   @Test
   public void testDFSGetCanonicalServiceName() throws Exception {
     URI hAUri = HATestUtil.getLogicalUri(cluster);
-    String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
+    String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
+        HdfsConstants.HDFS_URI_SCHEME).toString();
     assertEquals(haService, dfs.getCanonicalServiceName());
     final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
     final Token<DelegationTokenIdentifier> token =
@@ -371,7 +374,8 @@ public void testHdfsGetCanonicalServiceName() throws Exception {
     Configuration conf = dfs.getConf();
     URI haUri = HATestUtil.getLogicalUri(cluster);
     AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
-    String haService = HAUtil.buildTokenServiceForLogicalUri(haUri).toString();
+    String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME).toString();
     assertEquals(haService, afs.getCanonicalServiceName());
     Token<?> token = afs.getDelegationTokens(
         UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
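
Editor's note (illustration): the substance of this change is visible in the HdfsConstants and HAUtil hunks above. The delegation token service name for a logical (HA) URI previously always began with the fixed prefix "ha-hdfs:", so hdfs and webhdfs delegation tokens for the same logical cluster shared one service name; with the patch the prefix embeds the FileSystem scheme ("ha-" + scheme + ":", via the new buildTokenServicePrefixForLogicalUri), which is what lets a secure webhdfs client in HA mode name and resolve its own token consistently instead of failing with "Client cannot authenticate via:[TOKEN, KERBEROS]". The sketch below is a minimal standalone illustration, not Hadoop code: the method names mirror the patched HAUtil helpers, but plain Strings stand in for Hadoop's Text/Token types and the class itself is hypothetical.

import java.net.URI;

/** Simplified stand-in for the HDFS-6667 token service naming; not Hadoop code. */
public class TokenServiceNamingSketch {
  // Mirrors HdfsConstants.HA_DT_SERVICE_PREFIX after this patch.
  static final String HA_DT_SERVICE_PREFIX = "ha-";

  // Mirrors HAUtil.buildTokenServicePrefixForLogicalUri(scheme): the prefix
  // now carries the scheme, e.g. "ha-hdfs:" or "ha-webhdfs:".
  static String buildTokenServicePrefixForLogicalUri(String scheme) {
    return HA_DT_SERVICE_PREFIX + scheme + ":";
  }

  // Mirrors HAUtil.buildTokenServiceForLogicalUri(uri, scheme), with a plain
  // String in place of org.apache.hadoop.io.Text.
  static String buildTokenServiceForLogicalUri(URI uri, String scheme) {
    return buildTokenServicePrefixForLogicalUri(scheme) + uri.getHost();
  }

  // Mirrors HAUtil.getServiceUriFromToken(scheme, token), with the token's
  // service field passed directly as a String: strip the scheme-specific
  // prefix and rebuild the logical file system URI.
  static URI getServiceUriFromToken(String scheme, String tokenService) {
    String tokStr = tokenService;
    String prefix = buildTokenServicePrefixForLogicalUri(scheme);
    if (tokStr.startsWith(prefix)) {
      tokStr = tokStr.replaceFirst(prefix, "");
    }
    return URI.create(scheme + "://" + tokStr);
  }

  public static void main(String[] args) {
    URI logicalUri = URI.create("webhdfs://my-ha-cluster");
    String service = buildTokenServiceForLogicalUri(logicalUri, "webhdfs");
    // Prints "ha-webhdfs:my-ha-cluster"; the old fixed prefix would have
    // produced "ha-hdfs:my-ha-cluster" even for a webhdfs token.
    System.out.println(service);
    // Round trip back to the logical URI: prints "webhdfs://my-ha-cluster".
    System.out.println(getServiceUriFromToken("webhdfs", service));
  }
}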