From c3c626d32b0158f7776af00ab67df01b84512bac Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Tue, 19 Nov 2013 04:07:28 +0000
Subject: [PATCH] HDFS-5073. TestListCorruptFileBlocks fails intermittently.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1543303 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt              | 2 ++
 .../hdfs/server/namenode/TestListCorruptFileBlocks.java  | 9 ++++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 825d3a8c9e..a5d3312618 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -504,6 +504,8 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5393. Serve bootstrap and jQuery locally. (Haohui Mai via jing9)
 
+    HDFS-5073. TestListCorruptFileBlocks fails intermittently. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 7c8a712a79..0995e006ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -442,6 +442,7 @@ public void testlistCorruptFileBlocksDFS() throws Exception {
 
   /**
    * Test if NN.listCorruptFiles() returns the right number of results.
+   * The corrupt blocks are detected by the BlockPoolSliceScanner.
    * Also, test that DFS.listCorruptFileBlocks can make multiple successive
    * calls.
    */
@@ -450,7 +451,6 @@ public void testMaxCorruptFiles() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new HdfsConfiguration();
-      conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 15); // datanode scans directories
       conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
@@ -491,6 +491,13 @@ public void testMaxCorruptFiles() throws Exception {
         }
       }
 
+      // Occasionally the BlockPoolSliceScanner can run before we have removed
+      // the blocks. Restart the Datanode to trigger the scanner into running
+      // once more.
+      LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
+      cluster.restartDataNodes();
+      cluster.waitActive();
+
       badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
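
For context, the essence of the fix is the restart-and-rescan step added near the end of testMaxCorruptFiles(). A minimal sketch of that step in isolation follows; it assumes a running MiniDFSCluster named cluster, a NameNode handle named namenode, and an already-declared badFiles variable, exactly as in the surrounding test, and uses only calls that appear in the patch above.

    // Sketch only (not part of the patch): restarting the datanodes forces the
    // BlockPoolSliceScanner to scan the block pool again, so block files the
    // test removed after the scanner's last pass are detected as corrupt and
    // reported to the NameNode.
    cluster.restartDataNodes();   // restart every datanode in the MiniDFSCluster
    cluster.waitActive();         // block until the cluster is serving again

    // The NameNode can now list the corrupt blocks under the test directory.
    badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);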