From 99cf2ecee9c19231dea3620c053b2d8d71812fd6 Mon Sep 17 00:00:00 2001
From: cnauroth
Date: Tue, 29 Dec 2015 10:56:59 -0800
Subject: [PATCH] HDFS-7553. fix the TestDFSUpgradeWithHA due to BindException.
 Contributed by Xiao Chen.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../hadoop/hdfs/server/namenode/NameNode.java | 16 +++++++++
 .../server/namenode/NameNodeHttpServer.java   |  9 +++++
 .../apache/hadoop/hdfs/MiniDFSCluster.java    | 35 ++++++++++---------
 .../hdfs/server/namenode/TestStartup.java     |  2 ++
 5 files changed, 49 insertions(+), 16 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d5f51ff2a7..9da92b2e4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2547,6 +2547,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9458. TestBackupNode always binds to port 50070, which can cause
     bind failures. (Xiao Chen via cnauroth)
 
+    HDFS-7553. fix the TestDFSUpgradeWithHA due to BindException.
+    (Xiao Chen via cnauroth)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index acb4069c5c..97c8b26362 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1021,6 +1021,22 @@ public InetSocketAddress getHttpsAddress() {
     return httpServer.getHttpsAddress();
   }
 
+  /**
+   * @return NameNodeHttpServer, used by unit tests to ensure a full shutdown,
+   * so that no bind exception is thrown during restart.
+   */
+  @VisibleForTesting
+  public void joinHttpServer() {
+    if (httpServer != null) {
+      try {
+        httpServer.join();
+      } catch (InterruptedException e) {
+        LOG.info("Caught InterruptedException joining NameNodeHttpServer", e);
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
   /**
    * Verify that configured directories exist, then
    * Interactively confirm that formatting is desired
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 6bd9868971..8f112bd6a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -194,6 +194,15 @@ private Map getAuthFilterParams(Configuration conf)
     return params;
   }
 
+  /**
+   * Joins the httpserver.
+   */
+  public void join() throws InterruptedException {
+    if (httpServer != null) {
+      httpServer.join();
+    }
+  }
+
   void stop() throws Exception {
     if (httpServer != null) {
       httpServer.stop();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 3e25177b00..63561fe690 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1908,12 +1908,7 @@ public void shutdown(boolean deleteDfsDir, boolean closeFileSystem) {
     shutdownDataNodes();
     for (NameNodeInfo nnInfo : namenodes.values()) {
       if (nnInfo == null) continue;
-      NameNode nameNode = nnInfo.nameNode;
-      if (nameNode != null) {
-        nameNode.stop();
-        nameNode.join();
-        nameNode = null;
-      }
+      stopAndJoinNameNode(nnInfo.nameNode);
     }
     ShutdownHookManager.get().clearShutdownHooks();
     if (base_dir != null) {
@@ -1953,17 +1948,25 @@ public synchronized void shutdownNameNodes() {
    */
   public synchronized void shutdownNameNode(int nnIndex) {
     NameNodeInfo info = getNN(nnIndex);
-    NameNode nn = info.nameNode;
-    if (nn != null) {
-      LOG.info("Shutting down the namenode");
-      nn.stop();
-      nn.join();
-      info.nnId = null;
-      info.nameNode = null;
-      info.nameserviceId = null;
-    }
+    stopAndJoinNameNode(info.nameNode);
+    info.nnId = null;
+    info.nameNode = null;
+    info.nameserviceId = null;
   }
-  
+
+  /**
+   * Fully stop the NameNode by stop and join.
+   */
+  private void stopAndJoinNameNode(NameNode nn) {
+    if (nn == null) {
+      return;
+    }
+    LOG.info("Shutting down the namenode");
+    nn.stop();
+    nn.join();
+    nn.joinHttpServer();
+  }
+
   /**
    * Restart all namenodes.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 20dda37268..860a2c9ae0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -444,6 +444,7 @@ public void testCompression() throws IOException {
     nnRpc.saveNamespace(0, 0);
     namenode.stop();
     namenode.join();
+    namenode.joinHttpServer();
 
     // compress image using default codec
     LOG.info("Read an uncomressed image and store it compressed using default codec.");
@@ -474,6 +475,7 @@ private void checkNameSpace(Configuration conf) throws IOException {
     nnRpc.saveNamespace(0, 0);
     namenode.stop();
     namenode.join();
+    namenode.joinHttpServer();
   }
 
   @Test