HDFS-5073. TestListCorruptFileBlocks fails intermittently.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1543303 13f79535-47bb-0310-9956-ffa450edef68
parent 70234e2213
commit c3c626d32b
CHANGES.txt
@@ -504,6 +504,8 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5393. Serve bootstrap and jQuery locally. (Haohui Mai via jing9)
 
+    HDFS-5073. TestListCorruptFileBlocks fails intermittently. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
TestListCorruptFileBlocks.java
@@ -442,6 +442,7 @@ public void testlistCorruptFileBlocksDFS() throws Exception {
 
   /**
    * Test if NN.listCorruptFiles() returns the right number of results.
+   * The corrupt blocks are detected by the BlockPoolSliceScanner.
    * Also, test that DFS.listCorruptFileBlocks can make multiple successive
    * calls.
    */
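For readers of the javadoc above: the "multiple successive calls" are what a client drives through FileSystem#listCorruptFileBlocks, which hands back a RemoteIterator over the paths with corrupt blocks. Below is a minimal sketch of that client-side usage, assuming a FileSystem obtained from the test's MiniDFSCluster; the helper itself is illustrative and not part of this patch.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    // Counts the files reported as having corrupt blocks under 'dir'.
    // Iterating the RemoteIterator is what drives the successive calls
    // to the NameNode that the javadoc above refers to.
    static int countCorruptFiles(FileSystem fs, Path dir) throws IOException {
      RemoteIterator<Path> corrupt = fs.listCorruptFileBlocks(dir);
      int count = 0;
      while (corrupt.hasNext()) {
        corrupt.next();
        count++;
      }
      return count;
    }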
@@ -450,7 +451,6 @@ public void testMaxCorruptFiles() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new HdfsConfiguration();
       conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 15); // datanode scans directories
       conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
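The two conf.setInt calls in this hunk shorten the DataNode directory-scan interval (in seconds) and the block-report interval (in milliseconds) so corruption becomes visible to the NameNode within the test's lifetime. A rough sketch of the cluster lifecycle such a test relies on follows; the finally/shutdown block is assumed boilerplate and is not taken from this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    Configuration conf = new HdfsConfiguration();
    // Rescan the DataNode's data directories every 15 seconds (key is in seconds)
    // so missing or corrupted block files are noticed quickly.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 15);
    // Send block reports every 3 seconds (key is in milliseconds) so the
    // NameNode hears about the corruption soon after the DataNode detects it.
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      // ... write files, corrupt their block files, then query listCorruptFileBlocks ...
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }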
@@ -491,6 +491,13 @@ public void testMaxCorruptFiles() throws Exception {
         }
       }
 
+      // Occasionally the BlockPoolSliceScanner can run before we have removed
+      // the blocks. Restart the Datanode to trigger the scanner into running
+      // once more.
+      LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
+      cluster.restartDataNodes();
+      cluster.waitActive();
+
       badFiles =
         namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
 
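The lines added in this hunk are the actual fix: if the BlockPoolSliceScanner happened to run before the test removed the block files, the NameNode's view can lag, so the DataNode is restarted to force another scan. Purely to illustrate the same timing concern, here is a bounded retry around the query the hunk ends with; the expected count and the 60-second budget are assumptions rather than values from the patch, and the code is assumed to sit inside the same test class (so it can reach FSNamesystem#listCorruptFileBlocks) with the test method declaring throws Exception.

    // Assumed to live inside the test method, where 'cluster' is the MiniDFSCluster.
    NameNode namenode = cluster.getNameNode();
    int expectedCorruptFiles = 3;                    // hypothetical target
    long deadline = System.currentTimeMillis() + 60 * 1000;
    int found = namenode.getNamesystem()
        .listCorruptFileBlocks("/srcdat2", null).size();
    while (found < expectedCorruptFiles && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000);                            // let the next scan and block reports land
      found = namenode.getNamesystem()
          .listCorruptFileBlocks("/srcdat2", null).size();
    }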