diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5814821b0e..78c1d370f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -598,6 +598,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6227. ShortCircuitCache#unref should purge ShortCircuitReplicas whose
     streams have been closed by java interrupts. (Colin Patrick McCabe via jing9)
 
+    HDFS-6442. Fix TestEditLogAutoroll and TestStandbyCheckpoints failure
+    caused by port conflicts. (Zesheng Wu via Arpit Agarwal)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
index bea5ee4e56..3a0ae5c5f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.net.BindException;
+import java.util.Random;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,6 +48,9 @@ public class TestEditLogAutoroll {
   private NameNode nn0;
   private FileSystem fs;
   private FSEditLog editLog;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestEditLogAutoroll.class);
 
   @Before
   public void setUp() throws Exception {
@@ -54,24 +62,35 @@ public void setUp() throws Exception {
     conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
     conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);
 
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
 
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
-    cluster.waitActive();
+        cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(topology)
+          .numDataNodes(0)
+          .build();
+        cluster.waitActive();
 
-    nn0 = cluster.getNameNode(0);
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
+        nn0 = cluster.getNameNode(0);
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
 
-    cluster.transitionToActive(0);
+        cluster.transitionToActive(0);
 
-    fs = cluster.getFileSystem(0);
-    editLog = nn0.getNamesystem().getEditLog();
+        fs = cluster.getFileSystem(0);
+        editLog = nn0.getNamesystem().getEditLog();
+        ++retryCount;
+        break;
+      } catch (BindException e) {
+        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+            + retryCount + " times");
+      }
+    }
   }
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 2b5e89af8b..fd579026e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -25,12 +25,14 @@
 import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.net.BindException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.net.URI;
 import java.net.URL;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -73,6 +75,7 @@ public class TestStandbyCheckpoints {
   protected MiniDFSCluster cluster;
   protected NameNode nn0, nn1;
   protected FileSystem fs;
+  private final Random random = new Random();
   protected File tmpOivImgDir;
 
   private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);
@@ -87,22 +90,33 @@ public void setupCluster() throws Exception {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
 
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
-
-    cluster = new MiniDFSCluster.Builder(conf)
-      .nnTopology(topology)
-      .numDataNodes(0)
-      .build();
-    cluster.waitActive();
-
-    nn0 = cluster.getNameNode(0);
-    nn1 = cluster.getNameNode(1);
-    fs = HATestUtil.configureFailoverFs(cluster, conf);
+    int retryCount = 0;
+    while (true) {
+      try {
+        int basePort = 10060 + random.nextInt(100) * 2;
+        MiniDFSNNTopology topology = new MiniDFSNNTopology()
+          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
 
-    cluster.transitionToActive(0);
+        cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(topology)
+          .numDataNodes(0)
+          .build();
+        cluster.waitActive();
+
+        nn0 = cluster.getNameNode(0);
+        nn1 = cluster.getNameNode(1);
+        fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+        cluster.transitionToActive(0);
+        ++retryCount;
+        break;
+      } catch (BindException e) {
+        LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+            + retryCount + " times");
+      }
+    }
   }
 
   protected Configuration setupCommonConfig() {
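
For reference, the retry pattern that both setUp() and setupCluster() adopt above can be boiled down to a small standalone sketch. This is an illustrative example only: the BindRetryExample class and its maxRetries cap are hypothetical and not part of the patch (the patch itself retries without a limit), but the control flow is the same idea, pick a randomized even base port in the 10060-10258 range, try to bind, and draw a fresh port whenever a BindException signals a conflict.

import java.io.IOException;
import java.net.BindException;
import java.net.ServerSocket;
import java.util.Random;

// Hypothetical, self-contained sketch of the retry-on-BindException idea used
// by the patch; it is not part of HDFS-6442 itself.
public class BindRetryExample {
  private static final Random RANDOM = new Random();

  // Bind a ServerSocket to a randomized port, retrying on port conflicts.
  public static ServerSocket bindWithRetry(int maxRetries) throws IOException {
    int retryCount = 0;
    while (true) {
      // Same port-picking scheme as the patch: an even port in [10060, 10258],
      // leaving basePort + 1 free for a second listener (the second NameNode).
      int basePort = 10060 + RANDOM.nextInt(100) * 2;
      try {
        return new ServerSocket(basePort);  // throws BindException if the port is taken
      } catch (BindException e) {
        // Port conflict: give up after maxRetries attempts (the patch loops
        // indefinitely), otherwise report and retry with a new random port.
        if (++retryCount > maxRetries) {
          throw e;
        }
        System.out.println("Port " + basePort + " in use, retry " + retryCount);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    try (ServerSocket socket = bindWithRetry(10)) {
      System.out.println("Bound to port " + socket.getLocalPort());
    }
  }
}

In the tests themselves the randomized ports are handed to MiniDFSNNTopology.NNConf.setHttpPort() rather than a raw socket, but the loop is the same. The even-only spacing of basePort is what makes the nn1/nn2 pair safe: two runs that draw different even base ports can never overlap, because one run's basePort + 1 is odd and therefore never equals another run's even basePort.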