From 09998741ac3b425d95a6ba47865414b629ef1252 Mon Sep 17 00:00:00 2001
From: Brandon Li
Date: Tue, 8 Oct 2013 04:26:01 +0000
Subject: [PATCH] HDFS-5316. Namenode ignores the default https port.
 Contributed by Haohui Mai

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1530150 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../server/namenode/NameNodeHttpServer.java   | 20 ++++++++++++-------
 .../hadoop/hdfs/TestNameNodeHttpServer.java   |  8 ++++----
 3 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 809017c0dd..7174e3af8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -351,6 +351,9 @@ Release 2.2.1 - UNRELEASED
     HDFS-5317. Go back to DFS Home link does not work on datanode webUI
     (Haohui Mai via brandonli)
 
+    HDFS-5316. Namenode ignores the default https port (Haohui Mai via
+    brandonli)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 620e4ffb33..b5afd5f3dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -52,6 +52,7 @@ public class NameNodeHttpServer {
   private final NameNode nn;
 
   private InetSocketAddress httpAddress;
+  private InetSocketAddress httpsAddress;
   private InetSocketAddress bindAddress;
 
   public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
@@ -99,14 +100,15 @@ public void start() throws IOException {
     boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
     if (certSSL) {
       boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
-      InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(infoHost + ":" + conf.get(
-          DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, "0"));
+      httpsAddress = NetUtils.createSocketAddr(conf.get(
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
+
       Configuration sslConf = new Configuration(false);
-      if (certSSL) {
-        sslConf.addResource(conf.get(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-            "ssl-server.xml"));
-      }
-      httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
+      sslConf.addResource(conf.get(
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+          DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
+      httpServer.addSslListener(httpsAddress, sslConf, needClientAuth);
       // assume same ssl port for all datanodes
       InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
           DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 50475));
@@ -163,6 +165,10 @@ public InetSocketAddress getHttpAddress() {
     return httpAddress;
   }
 
+  public InetSocketAddress getHttpsAddress() {
+    return httpsAddress;
+  }
+
   /**
    * Sets fsimage for use by servlets.
    *

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java
index 72cd92b3c1..572f1d4ab5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java
@@ -31,9 +31,9 @@ public void testSslConfiguration() throws IOException {
     System.setProperty("jetty.ssl.password", "foo");
     System.setProperty("jetty.ssl.keypassword", "bar");
 
-    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(0).build();
-    dfsCluster.waitActive();
-    dfsCluster.shutdown();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+        .build();
+
+    cluster.shutdown();
   }
 }
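Note (not part of the patch): the bug is in how the https bind address was derived. The old code consulted only the port key and defaulted it to "0", so an unconfigured namenode bound its ssl listener to an ephemeral port; the fix consults the full address key with its library default instead. The sketch below, a standalone illustration rather than namenode code, contrasts the two lookups using the same DFSConfigKeys constants the patch touches; infoHost is a hypothetical stand-in for the http bind host, and the exact default port carried by DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT is whatever the 2.2-era constant defines.

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.net.NetUtils;

    public class HttpsDefaultPortSketch {
      public static void main(String[] args) {
        // An empty configuration: neither the https port key nor the
        // https address key is set, which is the common deployment case.
        Configuration conf = new Configuration(false);
        String infoHost = "0.0.0.0"; // hypothetical stand-in for the http host

        // Old lookup: only the port key is read, falling back to "0",
        // so the https listener ended up on an ephemeral port.
        InetSocketAddress before = NetUtils.createSocketAddr(infoHost + ":"
            + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, "0"));

        // New lookup: the full address key is read, falling back to the
        // library default, which carries the standard https port.
        InetSocketAddress after = NetUtils.createSocketAddr(conf.get(
            DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
            DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));

        System.out.println("before the patch: " + before); // port 0 (ephemeral)
        System.out.println("after the patch:  " + after);  // default https port
      }
    }

Run against an empty configuration, the first lookup should print port 0 while the second prints the default https port, which is exactly the regression HDFS-5316 describes.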