diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 6b2b2aed79..7a9e504c43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -181,6 +181,9 @@ Trunk (Unreleased) HDFS-4346. Add SequentialNumber as a base class for INodeId and GenerationStamp. (szetszwo) + HDFS-4633. TestDFSClientExcludedNodes fails sporadically if excluded nodes + cache expires too quickly (Chris Nauroth via Sanjay) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java index fa88e39457..3574323d54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java @@ -32,6 +32,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.util.ThreadUtil; +import org.junit.After; +import org.junit.Before; import org.junit.Test; @@ -41,10 +43,25 @@ */ public class TestDFSClientExcludedNodes { - @Test(timeout=10000) + private MiniDFSCluster cluster; + private Configuration conf; + + @Before + public void setUp() { + cluster = null; + conf = new HdfsConfiguration(); + } + + @After + public void tearDown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test(timeout=60000) public void testExcludedNodes() throws IOException { - Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); FileSystem fs = cluster.getFileSystem(); Path filePath = new Path("/testExcludedNodes"); @@ -67,17 +84,16 @@ public void testExcludedNodes() throws IOException { } } - @Test(timeout=10000) 
+ @Test(timeout=60000) public void testExcludedNodesForgiveness() throws IOException { - Configuration conf = new HdfsConfiguration(); - // Forgive nodes in under 1s for this test case. + // Forgive nodes in under 2.5s for this test case. conf.setLong( DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL, - 1000); + 2500); // We'll be using a 512 bytes block size just for tests // so making sure the checksum bytes too match it. conf.setInt("io.bytes.per.checksum", 512); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); List props = cluster.dataNodes; FileSystem fs = cluster.getFileSystem(); Path filePath = new Path("/testForgivingExcludedNodes"); @@ -112,11 +128,11 @@ public void testExcludedNodesForgiveness() throws IOException { Assert.assertEquals(true, cluster.restartDataNode(two, true)); cluster.waitActive(); - // Sleep for 2s, to let the excluded nodes be expired + // Sleep for 5s, to let the excluded nodes be expired // from the excludes list (i.e. forgiven after the configured wait period). - // [Sleeping just in case the restart of the DNs completed < 2s cause + // [Sleeping just in case the restart of the DNs completed < 5s cause // otherwise, we'll end up quickly excluding those again.] - ThreadUtil.sleepAtLeastIgnoreInterrupts(2000); + ThreadUtil.sleepAtLeastIgnoreInterrupts(5000); // Terminate the last good DN, to assert that there's no // single-DN-available scenario, caused by not forgiving the other