diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 22c54344f8..6ef6329e30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -110,6 +110,9 @@ Trunk (unreleased changes)
     HDFS-2814 NamenodeMXBean does not account for svn revision in the version
     information. (Hitesh Shah via jitendra)
 
+    HDFS-2784. Update hftp and hdfs for host-based token support.
+    (Kihwal Lee via jitendra)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and
     the namenode state. (Tomasz Nykiel via hairong)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index e52ef995f0..be52b48f11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -631,7 +631,7 @@ public long renew(Token<?> token, Configuration conf) throws IOException {
                DelegationTokenIdentifier.stringifyToken(delToken));
       ClientProtocol nn = 
         DFSUtil.createNamenode
-           (NameNode.getAddress(token.getService().toString()),
+           (SecurityUtil.getTokenServiceAddr(delToken),
             conf, UserGroupInformation.getCurrentUser());
       try {
         return nn.renewDelegationToken(delToken);
@@ -649,7 +649,7 @@ public void cancel(Token<?> token, Configuration conf) throws IOException {
       LOG.info("Cancelling " + 
                DelegationTokenIdentifier.stringifyToken(delToken));
       ClientProtocol nn = DFSUtil.createNamenode(
-          NameNode.getAddress(token.getService().toString()), conf,
+          SecurityUtil.getTokenServiceAddr(delToken), conf,
          UserGroupInformation.getCurrentUser());
       try {
         nn.cancelDelegationToken(delToken);
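The DFSClient change above is the core of the host-based scheme: instead of parsing the
token's service string through NameNode.getAddress(), the renewer asks SecurityUtil to
resolve it. A minimal plain-JDK sketch of that round trip (illustrative model only;
SecurityUtil.buildTokenService/getTokenServiceAddr are the real Hadoop helpers, everything
else here is assumed):

```java
import java.net.InetSocketAddress;

// Models the token-service round trip without the Hadoop dependency.
public class TokenServiceSketch {
  // analogous to SecurityUtil.buildTokenService(InetSocketAddress)
  static String buildTokenService(InetSocketAddress addr) {
    return addr.getHostString() + ":" + addr.getPort();
  }

  // analogous to SecurityUtil.getTokenServiceAddr(Token<?>)
  static InetSocketAddress getTokenServiceAddr(String service) {
    int colon = service.lastIndexOf(':');
    return InetSocketAddress.createUnresolved(
        service.substring(0, colon),
        Integer.parseInt(service.substring(colon + 1)));
  }

  public static void main(String[] args) {
    InetSocketAddress nn = InetSocketAddress.createUnresolved("nn.example.com", 8020);
    String service = buildTokenService(nn);           // "nn.example.com:8020"
    System.out.println(getTokenServiceAddr(service)); // back to an address
  }
}
```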
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 9dd6dfd37e..119bca9b55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -108,45 +108,10 @@ public void initialize(URI uri, Configuration conf) throws IOException {
 
     InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
     this.dfs = new DFSClient(namenode, conf, statistics);
-    this.uri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + uri.getAuthority());
+    this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
     this.workingDir = getHomeDirectory();
   }
 
-  /** Permit paths which explicitly specify the default port. */
-  @Override
-  protected void checkPath(Path path) {
-    URI thisUri = this.getUri();
-    URI thatUri = path.toUri();
-    String thatAuthority = thatUri.getAuthority();
-    if (thatUri.getScheme() != null
-        && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
-        && thatUri.getPort() == NameNode.DEFAULT_PORT
-        && (thisUri.getPort() == -1 ||
-            thisUri.getPort() == NameNode.DEFAULT_PORT)
-        && thatAuthority.substring(0,thatAuthority.indexOf(":"))
-        .equalsIgnoreCase(thisUri.getAuthority()))
-      return;
-    super.checkPath(path);
-  }
-
-  /** Normalize paths that explicitly specify the default port. */
-  @Override
-  public Path makeQualified(Path path) {
-    URI thisUri = this.getUri();
-    URI thatUri = path.toUri();
-    String thatAuthority = thatUri.getAuthority();
-    if (thatUri.getScheme() != null
-        && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
-        && thatUri.getPort() == NameNode.DEFAULT_PORT
-        && thisUri.getPort() == -1
-        && thatAuthority.substring(0,thatAuthority.indexOf(":"))
-        .equalsIgnoreCase(thisUri.getAuthority())) {
-      path = new Path(thisUri.getScheme(), thisUri.getAuthority(),
-                      thatUri.getPath());
-    }
-    return super.makeQualified(path);
-  }
-
   @Override
   public Path getWorkingDirectory() {
     return workingDir;
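With the default-port special-casing gone from checkPath()/makeQualified(), the
filesystem uri simply preserves whatever scheme and authority the caller supplied. A
quick plain-JDK check of what that one-line initialize() change keeps around
(hypothetical values):

```java
import java.net.URI;

public class UriAuthoritySketch {
  public static void main(String[] args) {
    URI given = URI.create("hdfs://nn.example.com:9000/user/alice");
    // mirrors: this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
    URI fsUri = URI.create(given.getScheme() + "://" + given.getAuthority());
    System.out.println(fsUri);           // hdfs://nn.example.com:9000
    System.out.println(fsUri.getPort()); // 9000, or -1 when no port was given
  }
}
```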
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
index 7c56d78058..8fe8cba60d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
@@ -59,6 +59,7 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ServletUtil;
 import org.xml.sax.Attributes;
@@ -89,17 +90,20 @@ public class HftpFileSystem extends FileSystem
 
   public static final Text TOKEN_KIND = new Text("HFTP delegation");
 
-  private String nnHttpUrl;
-  private Text hdfsServiceName;
+  protected UserGroupInformation ugi;
   private URI hftpURI;
+
   protected InetSocketAddress nnAddr;
-  protected UserGroupInformation ugi;
+  protected InetSocketAddress nnSecureAddr;
 
   public static final String HFTP_TIMEZONE = "UTC";
   public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
+
   private Token<?> delegationToken;
   private Token<?> renewToken;
-
+  private static final HftpDelegationTokenSelector hftpTokenSelector =
+      new HftpDelegationTokenSelector();
+
   public static final SimpleDateFormat getDateFormat() {
     final SimpleDateFormat df = new SimpleDateFormat(HFTP_DATE_FORMAT);
     df.setTimeZone(TimeZone.getTimeZone(HFTP_TIMEZONE));
@@ -115,11 +119,8 @@ protected SimpleDateFormat initialValue() {
 
   @Override
   protected int getDefaultPort() {
-    return getDefaultSecurePort();
-
-    //TODO: un-comment the following once HDFS-7510 is committed.
-//    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
-//        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
+    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
+        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
   }
 
   protected int getDefaultSecurePort() {
@@ -127,16 +128,22 @@ protected int getDefaultSecurePort() {
         DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
   }
 
-  @Override
-  public String getCanonicalServiceName() {
-    return SecurityUtil.buildDTServiceName(hftpURI, getDefaultPort());
-  }
-
-  private String buildUri(String schema, String host, int port) {
-    StringBuilder sb = new StringBuilder(schema);
-    return sb.append(host).append(":").append(port).toString();
+  protected InetSocketAddress getNamenodeAddr(URI uri) {
+    // use authority so user supplied uri can override port
+    return NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
   }
 
+  protected InetSocketAddress getNamenodeSecureAddr(URI uri) {
+    // must only use the host and the configured https port
+    return NetUtils.createSocketAddrForHost(uri.getHost(), getDefaultSecurePort());
+  }
+
+  @Override
+  public String getCanonicalServiceName() {
+    // unlike other filesystems, hftp's service is the secure port, not the
+    // actual port in the uri
+    return SecurityUtil.buildTokenService(nnSecureAddr).toString();
+  }
 
   @Override
   public void initialize(final URI name, final Configuration conf)
@@ -144,95 +151,51 @@ public void initialize(final URI name, final Configuration conf)
     super.initialize(name, conf);
     setConf(conf);
     this.ugi = UserGroupInformation.getCurrentUser();
-    nnAddr = NetUtils.createSocketAddr(name.toString());
-
-    // in case we open connection to hftp of a different cluster
-    // we need to know this cluster https port
-    // if it is not set we assume it is the same cluster or same port
-    int urlPort = conf.getInt("dfs.hftp.https.port", -1);
-    if(urlPort == -1)
-      urlPort = conf.getInt(DFSConfigKeys.DFS_HTTPS_PORT_KEY,
-          DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
-
-    String normalizedNN = NetUtils.normalizeHostName(name.getHost());
-    nnHttpUrl = buildUri("https://", normalizedNN ,urlPort);
-    LOG.debug("using url to get DT:" + nnHttpUrl);
+    this.nnAddr = getNamenodeAddr(name);
+    this.nnSecureAddr = getNamenodeSecureAddr(name);
     try {
-      hftpURI = new URI(buildUri("hftp://", normalizedNN, urlPort));
-    } catch (URISyntaxException ue) {
-      throw new IOException("bad uri for hdfs", ue);
-    }
-
-    // if one uses RPC port different from the Default one,
-    // one should specify what is the setvice name for this delegation token
-    // otherwise it is hostname:RPC_PORT
-    String key = DelegationTokenSelector.SERVICE_NAME_KEY
-        + SecurityUtil.buildDTServiceName(name,
-            DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Trying to find DT for " + name + " using key=" + key +
-          "; conf=" + conf.get(key, ""));
-    }
-    String nnServiceName = conf.get(key);
-    int nnPort = NameNode.DEFAULT_PORT;
-    if (nnServiceName != null) { // get the real port
-      nnPort = NetUtils.createSocketAddr(nnServiceName,
-          NameNode.DEFAULT_PORT).getPort();
-    }
-    try {
-      URI hdfsURI = new URI("hdfs://" + normalizedNN + ":" + nnPort);
-      hdfsServiceName = new Text(SecurityUtil.buildDTServiceName(hdfsURI,
-          nnPort));
-    } catch (URISyntaxException ue) {
-      throw new IOException("bad uri for hdfs", ue);
+      this.hftpURI = new URI(name.getScheme(), name.getAuthority(),
+          null, null, null);
+    } catch (URISyntaxException e) {
+      throw new IllegalArgumentException(e);
     }
 
     if (UserGroupInformation.isSecurityEnabled()) {
-      //try finding a token for this namenode (esp applicable for tasks
-      //using hftp). If there exists one, just set the delegationField
-      String hftpServiceName = getCanonicalServiceName();
-      for (Token<? extends TokenIdentifier> t : ugi.getTokens()) {
-        Text kind = t.getKind();
-        if (DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind)) {
-          if (t.getService().equals(hdfsServiceName)) {
-            setDelegationToken(t);
-            break;
-          }
-        } else if (TOKEN_KIND.equals(kind)) {
-          if (hftpServiceName
-              .equals(normalizeService(t.getService().toString()))) {
-            setDelegationToken(t);
-            break;
-          }
-        }
-      }
-
-      //since we don't already have a token, go get one over https
-      if (delegationToken == null) {
-        setDelegationToken(getDelegationToken(null));
+      initDelegationToken();
+    }
+  }
+
+  protected void initDelegationToken() throws IOException {
+    // look for hftp token, then try hdfs
+    Token<?> token = selectHftpDelegationToken();
+    if (token == null) {
+      token = selectHdfsDelegationToken();
+    }
+
+    // if we don't already have a token, go get one over https
+    boolean createdToken = false;
+    if (token == null) {
+      token = getDelegationToken(null);
+      createdToken = (token != null);
+    }
+
+    // we already had a token or getDelegationToken() didn't fail.
+    if (token != null) {
+      setDelegationToken(token);
+      if (createdToken) {
         dtRenewer.addRenewAction(this);
+        LOG.debug("Created new DT for " + token.getService());
+      } else {
+        LOG.debug("Found existing DT for " + token.getService());
       }
     }
   }
 
-  private String normalizeService(String service) {
-    int colonIndex = service.indexOf(':');
-    if (colonIndex == -1) {
-      throw new IllegalArgumentException("Invalid service for hftp token: " +
-          service);
-    }
-    String hostname =
-        NetUtils.normalizeHostName(service.substring(0, colonIndex));
-    String port = service.substring(colonIndex + 1);
-    return hostname + ":" + port;
+  protected Token<DelegationTokenIdentifier> selectHftpDelegationToken() {
+    Text serviceName = SecurityUtil.buildTokenService(nnSecureAddr);
+    return hftpTokenSelector.selectToken(serviceName, ugi.getTokens());
   }
 
-  //TODO: un-comment the following once HDFS-7510 is committed.
-//  protected Token<DelegationTokenIdentifier> selectHftpDelegationToken() {
-//    Text serviceName = SecurityUtil.buildTokenService(nnSecureAddr);
-//    return hftpTokenSelector.selectToken(serviceName, ugi.getTokens());
-//  }
-
   protected Token<DelegationTokenIdentifier> selectHdfsDelegationToken() {
     return DelegationTokenSelector.selectHdfsDelegationToken(
         nnAddr, ugi, getConf());
@@ -245,13 +208,17 @@ public Token<?> getRenewToken() {
   }
 
   @Override
-  public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
+  public synchronized <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
     renewToken = token;
     // emulate the 203 usage of the tokens
     // by setting the kind and service as if they were hdfs tokens
     delegationToken = new Token<T>(token);
+    // NOTE: the remote nn must be configured to use hdfs
     delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
-    delegationToken.setService(hdfsServiceName);
+    // no need to change service because we aren't exactly sure what it
+    // should be.  we can guess, but it might be wrong if the local conf
+    // value is incorrect.  the service is a client side field, so the remote
+    // end does not care about the value
   }
 
   @Override
@@ -262,6 +229,7 @@ public synchronized Token<?> getDelegationToken(final String renewer
     ugi.reloginFromKeytab();
     return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
       public Token<?> run() throws IOException {
+        final String nnHttpUrl = DFSUtil.createUri("https", nnSecureAddr).toString();
         Credentials c;
         try {
           c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer);
@@ -291,12 +259,7 @@ public Token<?> run() throws IOException {
 
   @Override
   public URI getUri() {
-    try {
-      return new URI("hftp", null, nnAddr.getHostName(), nnAddr.getPort(),
-                     null, null, null);
-    } catch (URISyntaxException e) {
-      return null;
-    }
+    return hftpURI;
   }
 
   /**
@@ -722,11 +685,12 @@ public boolean isManaged(Token<?> token) throws IOException {
     public long renew(Token<?> token,
                       Configuration conf) throws IOException {
       // update the kerberos credentials, if they are coming from a keytab
-      UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
+      UserGroupInformation.getLoginUser().reloginFromKeytab();
       // use https to renew the token
+      InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
       return 
         DelegationTokenFetcher.renewDelegationToken
-        ("https://" + token.getService().toString(),
+        (DFSUtil.createUri("https", serviceAddr).toString(),
          (Token<DelegationTokenIdentifier>) token);
     }
 
@@ -737,10 +701,18 @@ public void cancel(Token<?> token,
                        Configuration conf) throws IOException {
       // update the kerberos credentials, if they are coming from a keytab
       UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
       // use https to cancel the token
+      InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
       DelegationTokenFetcher.cancelDelegationToken
-        ("https://" + token.getService().toString(),
+        (DFSUtil.createUri("https", serviceAddr).toString(),
          (Token<DelegationTokenIdentifier>) token);
+    }
+  }
+
+  private static class HftpDelegationTokenSelector
+  extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
+
+    public HftpDelegationTokenSelector() {
+      super(TOKEN_KIND);
     }
-  }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
index fa1a89f4b9..97e3b2414a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
@@ -21,6 +21,7 @@
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
@@ -120,6 +121,16 @@ private static void setupSsl(Configuration conf) throws IOException {
     }
   }
 
+  @Override
+  protected int getDefaultPort() {
+    return getDefaultSecurePort();
+  }
+
+  @Override
+  protected InetSocketAddress getNamenodeSecureAddr(URI uri) {
+    return getNamenodeAddr(uri);
+  }
+
   @Override
   protected HttpURLConnection openConnection(String path, String query)
       throws IOException {
@@ -161,16 +172,6 @@ protected HttpURLConnection openConnection(String path, String query)
     return (HttpURLConnection) conn;
   }
 
-  @Override
-  public URI getUri() {
-    try {
-      return new URI("hsftp", null, nnAddr.getHostName(), nnAddr.getPort(),
-                     null, null, null);
-    } catch (URISyntaxException e) {
-      return null;
-    }
-  }
-
   /**
    * Dummy hostname verifier that is used to bypass hostname checking
    */
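To summarize the hftp/hsftp split above: the connect address (nnAddr) honors a port in
the uri, while the token service (nnSecureAddr) is pinned to the host plus the configured
https port; hsftp then overrides both hooks so the two collapse into one. A standalone
model of those rules, assuming the stock 50070/50470 defaults (this is not the Hadoop
code itself):

```java
import java.net.InetSocketAddress;
import java.net.URI;

public class HftpAddrModel {
  static final int HTTP_DEFAULT = 50070;   // assumed dfs.http.port default
  static final int HTTPS_DEFAULT = 50470;  // assumed dfs.https.port default

  // like getNamenodeAddr(): a port in the uri may override the default
  static InetSocketAddress namenodeAddr(URI uri) {
    int port = uri.getPort() != -1 ? uri.getPort() : HTTP_DEFAULT;
    return InetSocketAddress.createUnresolved(uri.getHost(), port);
  }

  // like getNamenodeSecureAddr(): the uri port is deliberately ignored
  static InetSocketAddress namenodeSecureAddr(URI uri) {
    return InetSocketAddress.createUnresolved(uri.getHost(), HTTPS_DEFAULT);
  }

  public static void main(String[] args) {
    URI uri = URI.create("hftp://nn.example.com:123");
    System.out.println(namenodeAddr(uri));       // nn.example.com:123
    System.out.println(namenodeSecureAddr(uri)); // nn.example.com:50470
  }
}
```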
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
index 8f8ef8e067..b5f24d1855 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
@@ -296,8 +297,7 @@ public static Credentials createCredentials(final NameNode namenode,
     }
 
     final InetSocketAddress addr = namenode.getNameNodeAddress();
-    final String s = addr.getAddress().getHostAddress() + ":" + addr.getPort();
-    token.setService(new Text(s));
+    SecurityUtil.setTokenService(token, addr);
     final Credentials c = new Credentials();
     c.addToken(new Text(ugi.getShortUserName()), token);
     return c;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index b20e694acf..bfc7e355cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -62,6 +62,7 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
@@ -492,7 +493,7 @@ public static UserGroupInformation getDefaultWebUser(Configuration conf
     return UserGroupInformation.createRemoteUser(strings[0]);
   }
 
-  private static String getNNServiceAddress(ServletContext context,
+  private static InetSocketAddress getNNServiceAddress(ServletContext context,
       HttpServletRequest request) {
     String namenodeAddressInUrl = request.getParameter(NAMENODE_ADDRESS);
     InetSocketAddress namenodeAddress = null;
@@ -503,8 +504,7 @@ private static String getNNServiceAddress(ServletContext context,
           context);
     }
     if (namenodeAddress != null) {
-      return (namenodeAddress.getAddress().getHostAddress() + ":"
-          + namenodeAddress.getPort());
+      return namenodeAddress;
     }
     return null;
   }
@@ -547,9 +547,9 @@ public static UserGroupInformation getUGI(ServletContext context,
         Token<DelegationTokenIdentifier> token = 
           new Token<DelegationTokenIdentifier>();
         token.decodeFromUrlString(tokenString);
-        String serviceAddress = getNNServiceAddress(context, request);
+        InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
         if (serviceAddress != null) {
-          token.setService(new Text(serviceAddress));
+          SecurityUtil.setTokenService(token, serviceAddress);
           token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
         }
         ByteArrayInputStream buf = new ByteArrayInputStream(token
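Both call sites above used to hand-build an "ip:port" string; routing them through
SecurityUtil.setTokenService() means one place decides whether services are IP-based or
host-based (the hadoop.security.token.service.use_ip switch). A toy illustration of the
difference (assumed sketch; only setTokenService and the config key are the real
API/knob):

```java
import java.net.InetSocketAddress;

public class ServiceStringSketch {
  // what a centralized helper can do that inline string-building cannot:
  // honor one global choice between IP-based and host-based services
  static String service(InetSocketAddress addr, boolean useIp) {
    String host = useIp ? addr.getAddress().getHostAddress()
                        : addr.getHostName();
    return host + ":" + addr.getPort();
  }

  public static void main(String[] args) {
    InetSocketAddress addr = new InetSocketAddress("localhost", 8020);
    System.out.println(service(addr, true));  // 127.0.0.1:8020
    System.out.println(service(addr, false)); // localhost:8020
  }
}
```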
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index 2a685f73bd..35537a5782 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -25,6 +25,7 @@
 import java.io.InputStreamReader;
 import java.io.PrintStream;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URLConnection;
 import java.security.PrivilegedExceptionAction;
@@ -49,6 +50,7 @@
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -204,7 +206,8 @@ public Object run() throws Exception {
   static public Credentials getDTfromRemote(String nnAddr,
                                             String renewer) throws IOException {
     DataInputStream dis = null;
-
+    InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnAddr);
+
     try {
       StringBuffer url = new StringBuffer();
       if (renewer != null) {
@@ -229,9 +232,7 @@ static public Credentials getDTfromRemote(String nnAddr,
       ts.readFields(dis);
       for(Token<?> token: ts.getAllTokens()) {
         token.setKind(HftpFileSystem.TOKEN_KIND);
-        token.setService(new Text(SecurityUtil.buildDTServiceName
-                                   (remoteURL.toURI(),
-                                    DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT)));
+        SecurityUtil.setTokenService(token, serviceAddr);
       }
       return ts;
     } catch (Exception e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 55cc4ade8b..8b7f64ffad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -882,6 +882,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       if(dn == null)
         throw new IOException("Cannot start DataNode in "
                               + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
+      //NOTE: the following is true if and only if:
+      //      hadoop.security.token.service.use_ip=true
       //since the HDFS does things based on IP:port, we need to add the mapping
       //for IP:port to rackId
       String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
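In getDTfromRemote() the service is now derived from the address the caller actually
passed in, rather than rebuilt from the final URL with the hard-coded default https port,
so a non-default port survives into the token. A rough plain-JDK stand-in for that
parsing step (NetUtils.createSocketAddr is the real helper; this sketch only handles the
simple host:port case):

```java
import java.net.InetSocketAddress;
import java.net.URI;

public class FetcherServiceSketch {
  // simplified stand-in for NetUtils.createSocketAddr(String)
  static InetSocketAddress createSocketAddr(String target) {
    URI u = URI.create("dummy://" + target);
    return InetSocketAddress.createUnresolved(u.getHost(), u.getPort());
  }

  public static void main(String[] args) {
    // previously the token was stamped with the default https port even
    // when the fetch actually went to port 456
    System.out.println(createSocketAddr("nn.example.com:456")); // nn.example.com:456
  }
}
```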
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
index ac639bd939..3b617c71d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
@@ -20,6 +20,7 @@
 
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.net.URI;
 import java.net.URL;
 import java.net.HttpURLConnection;
 import java.util.Random;
@@ -232,4 +233,164 @@ public void testSeek() throws IOException {
     in.seek(7);
     assertEquals('7', in.read());
   }
+
+  public void resetFileSystem() throws IOException {
+    // filesystem caching has a quirk/bug that it caches based on the user's
+    // given uri.  the result is if a filesystem is instantiated with no port,
+    // it gets the default port.  then if the default port is changed,
+    // and another filesystem is instantiated with no port, the prior fs
+    // is returned, not a new one using the changed port.  so let's flush
+    // the cache between tests...
+    FileSystem.closeAll();
+  }
+
+  @Test
+  public void testHftpDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hftp://localhost");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hftp://localhost");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(123, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hftp://localhost:123");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hftp://localhost:789");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(123, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  ///
+
+  @Test
+  public void testHsftpDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hsftp://localhost");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hsftp://localhost");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(456, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hsftp://localhost:123");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:123",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hsftp://localhost:789");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(456, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:789",
+        fs.getCanonicalServiceName()
+    );
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml
new file mode 100644
index 0000000000..98910049ab
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/ssl-client.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements.  See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership.  The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+<property>
+  <name>ssl.client.do.not.authenticate.server</name>
+  <value>true</value>
+</property>
+
+</configuration>
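The eight tests pin down one matrix: the connect port tracks the uri (falling back to
dfs.http.port / dfs.https.port), hftp's canonical service always uses the configured
https port, and hsftp's service follows the uri port when one is given. A compact
restatement of those rules (model only, not the filesystem code):

```java
public class PortMatrixSketch {
  // connect port: uri port wins, else the scheme's configured default
  static int connectPort(Integer uriPort, int configuredDefault) {
    return uriPort != null ? uriPort : configuredDefault;
  }

  // hftp token service: always the configured https port
  static int hftpServicePort(Integer uriPort, int httpsPort) {
    return httpsPort;
  }

  // hsftp token service: secure addr == connect addr, so uri port wins
  static int hsftpServicePort(Integer uriPort, int httpsPort) {
    return connectPort(uriPort, httpsPort);
  }

  public static void main(String[] args) {
    System.out.println(hftpServicePort(789, 456));  // 456
    System.out.println(hsftpServicePort(789, 456)); // 789
  }
}
```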