diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 71a9f6f572..04f8127883 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -151,7 +151,7 @@ public void testDisableLazyPersistFileScrubber()
   /**
    * If NN restarted then lazyPersist files should not deleted
    */
-  @Test
+  @Test(timeout = 20000)
   public void testFileShouldNotDiscardedIfNNRestarted()
       throws IOException, InterruptedException, TimeoutException {
     getClusterBuilder().setRamDiskReplicaCapacity(2).build();
@@ -165,13 +165,12 @@ public void testFileShouldNotDiscardedIfNNRestarted()
     cluster.restartNameNodes();
 
     // wait for the redundancy monitor to mark the file as corrupt.
-    Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
-
-    Long corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
-        .getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
-
-    // Check block detected as corrupted
-    assertThat(corruptBlkCount, is(1L));
+    Long corruptBlkCount;
+    do {
+      Thread.sleep(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
+      corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
+          .getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
+    } while (corruptBlkCount != 1L);
 
     // Ensure path1 exist.
     Assert.assertTrue(fs.exists(path1));
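
Side note on the new wait loop: Hadoop's test utilities already provide this poll-until-condition idiom, so an equivalent wait could be expressed with `GenericTestUtils.waitFor` from `org.apache.hadoop.test` (already on the HDFS test classpath). The sketch below is an alternative, not part of the patch; `cluster` and `Iterators` come from the surrounding test class, the one-second poll interval mirrors `DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT`, and the 20-second bound mirrors the `@Test(timeout = 20000)` annotation. Unlike the bare do-while, it fails with a descriptive `TimeoutException` if the block is never reported corrupt.

```java
// Sketch only: poll via the existing Hadoop test helper instead of a
// hand-rolled do-while loop. Requires
// import org.apache.hadoop.test.GenericTestUtils;
GenericTestUtils.waitFor(
    () -> Iterators.size(cluster.getNameNode().getNamesystem()
        .getBlockManager().getCorruptReplicaBlockIterator()) == 1,
    1000,    // re-check once per second
    20000);  // throw TimeoutException after 20 seconds
```

One trade-off worth noting: `waitFor` keeps polling on a fixed interval and reports a timeout on its own, whereas the do-while version relies entirely on the JUnit-level timeout to kill a test that never converges.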