From 6c8b6f3646b31a3e028704bc7fd78bf319f89f0a Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Tue, 20 Oct 2015 13:44:53 -0700
Subject: [PATCH] HDFS-3059. ssl-server.xml causes NullPointer. Contributed by
 Xiao Chen.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java |  2 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java  | 19 ++
 .../server/namenode/SecondaryNameNode.java    | 87 +++++++++++--------
 .../hadoop/hdfs/TestHDFSServerPorts.java      |  1 +
 5 files changed, 73 insertions(+), 38 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b583b963b6..6464861ef0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2090,6 +2090,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9270. TestShortCircuitLocalRead should not leave socket after unit
     test (Masatake Iwasaki via Colin P. McCabe)
 
+    HDFS-3059. ssl-server.xml causes NullPointer. (Xiao Chen via wang)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cb05fa9f83..275e638660 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -213,6 +213,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-server.xml";
   public static final String DFS_SERVER_HTTPS_KEYPASSWORD_KEY = "ssl.server.keystore.keypassword";
   public static final String DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY = "ssl.server.keystore.password";
+  public static final String DFS_SERVER_HTTPS_KEYSTORE_LOCATION_KEY = "ssl.server.keystore.location";
+  public static final String DFS_SERVER_HTTPS_TRUSTSTORE_LOCATION_KEY = "ssl.server.truststore.location";
   public static final String DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY = "ssl.server.truststore.password";
   public static final String DFS_NAMENODE_NAME_DIR_RESTORE_KEY = "dfs.namenode.name.dir.restore";
   public static final boolean DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 2309843c3f..1c25d018b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1292,6 +1292,9 @@ static String getPassword(Configuration conf, String alias) {
       }
     }
     catch (IOException ioe) {
+      LOG.warn("Setting password to null since IOException is caught"
+          + " when getting password", ioe);
+
       password = null;
     }
     return password;
@@ -1352,6 +1355,22 @@ public static Configuration loadSslConfiguration(Configuration conf) {
         DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
         DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
 
+    final String[] reqSslProps = {
+        DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_LOCATION_KEY,
+        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_LOCATION_KEY,
+        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY,
+        DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY
+    };
+
+    // Check if the required properties are included
+    for (String sslProp : reqSslProps) {
+      if (sslConf.get(sslProp) == null) {
+        LOG.warn("SSL config " + sslProp + " is missing. If " +
+            DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY +
+            " is specified, make sure it is a relative path");
+      }
+    }
+
     boolean requireClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
         DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
     sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index e3e0a7d859..29e332b1ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -252,47 +252,9 @@ private void initialize(final Configuration conf,
 
     // Initialize other scheduling parameters from the configuration
     checkpointConf = new CheckpointConf(conf);
-
-    final InetSocketAddress httpAddr = infoSocAddr;
-
-    final String httpsAddrString = conf.getTrimmed(
-        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
-    InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
-
-    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
-        httpAddr, httpsAddr, "secondary",
-        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
-        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
 
     nameNodeStatusBeanName = MBeans.register("SecondaryNameNode",
         "SecondaryNameNodeInfo", this);
 
-    infoServer = builder.build();
-
-    infoServer.setAttribute("secondary.name.node", this);
-    infoServer.setAttribute("name.system.image", checkpointImage);
-    infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
-    infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
-        ImageServlet.class, true);
-    infoServer.start();
-
-    LOG.info("Web server init done");
-
-    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
-    int connIdx = 0;
-    if (policy.isHttpEnabled()) {
-      InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
-      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-          NetUtils.getHostPortString(httpAddress));
-    }
-
-    if (policy.isHttpsEnabled()) {
-      InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
-      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
-          NetUtils.getHostPortString(httpsAddress));
-    }
-
     legacyOivImageDir = conf.get(
         DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);
@@ -501,6 +463,49 @@ private URL getInfoServer() throws IOException {
     return address.toURL();
   }
 
+  /**
+   * Start the web server.
+   */
+  @VisibleForTesting
+  public void startInfoServer() throws IOException {
+    final InetSocketAddress httpAddr = getHttpAddress(conf);
+    final String httpsAddrString = conf.getTrimmed(
+        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
+    InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
+
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+        httpAddr, httpsAddr, "secondary", DFSConfigKeys.
+            DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
+        DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
+
+    infoServer = builder.build();
+    infoServer.setAttribute("secondary.name.node", this);
+    infoServer.setAttribute("name.system.image", checkpointImage);
+    infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+    infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
+        ImageServlet.class, true);
+    infoServer.start();
+
+    LOG.info("Web server init done");
+
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+    int connIdx = 0;
+    if (policy.isHttpEnabled()) {
+      InetSocketAddress httpAddress =
+          infoServer.getConnectorAddress(connIdx++);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpAddress));
+    }
+
+    if (policy.isHttpsEnabled()) {
+      InetSocketAddress httpsAddress =
+          infoServer.getConnectorAddress(connIdx);
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpsAddress));
+    }
+  }
+
   /**
    * Create a new checkpoint
    * @return if the image is fetched from primary or not
@@ -680,6 +685,12 @@ public static void main(String[] argv) throws Exception {
     }
 
     if (secondary != null) {
+      // The web server is only needed when starting SNN as a daemon,
+      // and not needed if called from a shell command. Starting the web
+      // server from the shell may fail when getting credentials, if the
+      // environment is not set up for it, which is usually the case.
+      secondary.startInfoServer();
+
       secondary.startCheckpointThread();
       secondary.join();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index ce8a4e75d1..59e85553e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -216,6 +216,7 @@ private boolean canStartSecondaryNode(Configuration conf) throws IOException {
     org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode sn = null;
     try {
       sn = new org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode(conf);
+      sn.startInfoServer();
     } catch(IOException e) {
       if (e instanceof java.net.BindException)
         return false;
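
For reference, the check added to DFSUtil#loadSslConfiguration probes four
entries of the resource named by DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY
(default "ssl-server.xml"), which Configuration loads from the classpath;
hence the warning's hint that a custom resource name must be a relative
path. A minimal sketch of such an ssl-server.xml is shown below. The
property names are the ones this patch checks; every path and password
value is an illustrative placeholder, not a value taken from this change:

  <configuration>
    <property>
      <name>ssl.server.keystore.location</name>
      <!-- placeholder path; point at your server keystore -->
      <value>/etc/hadoop/ssl/server-keystore.jks</value>
    </property>
    <property>
      <name>ssl.server.keystore.password</name>
      <!-- placeholder -->
      <value>store-password</value>
    </property>
    <property>
      <name>ssl.server.keystore.keypassword</name>
      <!-- placeholder -->
      <value>key-password</value>
    </property>
    <property>
      <name>ssl.server.truststore.location</name>
      <!-- placeholder path; point at your server truststore -->
      <value>/etc/hadoop/ssl/server-truststore.jks</value>
    </property>
  </configuration>

If any of these entries is absent, for example because the keystore
resource was configured as an absolute path and so never found on the
classpath, the new loop logs a WARN for each missing key, pointing at the
likely cause of the NullPointerException this JIRA describes.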