HDFS-3902. TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken. Contributed by Andy Isaacson

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1384081 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-09-12 18:36:01 +00:00
parent 80a8d57ffc
commit 2ba149f85c
5 changed files with 28 additions and 8 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -783,6 +783,9 @@ Release 2.0.2-alpha - 2012-09-07
     HDFS-3833. TestDFSShell fails on windows due to concurrent file
     read/write. (Brandon Li via suresh)
 
+    HDFS-3902. TestDatanodeBlockScanner#testBlockCorruptionPolicy is broken.
+    (Andy Isaacson via eli)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -374,7 +374,8 @@ private synchronized void adjustThrottler() {
     throttler.setBandwidth(Math.min(bw, MAX_SCAN_RATE));
   }
 
-  private void verifyBlock(ExtendedBlock block) {
+  @VisibleForTesting
+  void verifyBlock(ExtendedBlock block) {
     BlockSender blockSender = null;
 
     /* In case of failure, attempt to read second time to reduce

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java

@@ -172,7 +172,8 @@ private synchronized int getBlockPoolSetSize() {
     return blockPoolScannerMap.size();
   }
 
-  private synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
+  @VisibleForTesting
+  synchronized BlockPoolSliceScanner getBPScanner(String bpid) {
     return blockPoolScannerMap.get(bpid);
   }
 
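Both scanner changes follow the same pattern: the member drops from private to package-private and gains Guava's @VisibleForTesting annotation, so a test helper in the same package (here, DataNodeTestUtils) can call it directly without reflection, while the method stays out of the public API. A minimal sketch of the pattern, with hypothetical names (ExampleScanner, scanOnce):

import com.google.common.annotations.VisibleForTesting;

class ExampleScanner {
  // Package-private instead of private: reachable from tests and test
  // utilities in the same package, but still hidden from external callers.
  // The annotation only documents intent; it is not enforced at runtime.
  @VisibleForTesting
  void scanOnce() {
    // ... perform one scan pass ...
  }
}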

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -34,14 +34,19 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
 import org.junit.Test;
 
 /**
@@ -59,6 +64,10 @@ public class TestDatanodeBlockScanner {
   private static Pattern pattern_blockVerify =
       Pattern.compile(".*?(SCAN_PERIOD)\\s*:\\s*(\\d+.*?)");
 
+  static {
+    ((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.WARN);
+  }
+
   /**
    * This connects to datanode and fetches block verification data.
    * It repeats this until the given block has a verification time > newTime.
@@ -206,12 +215,12 @@ public void testBlockCorruptionPolicy() throws Exception {
     assertTrue(MiniDFSCluster.corruptReplica(1, block));
     assertTrue(MiniDFSCluster.corruptReplica(2, block));
 
-    // Read the file to trigger reportBadBlocks by client
-    try {
-      IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(),
-                        conf, true);
-    } catch (IOException e) {
-      // Ignore exception
-    }
+    // Trigger each of the DNs to scan this block immediately.
+    // The block pool scanner doesn't run frequently enough on its own
+    // to notice these, and due to HDFS-1371, the client won't report
+    // bad blocks to the NN when all replicas are bad.
+    for (DataNode dn : cluster.getDataNodes()) {
+      DataNodeTestUtils.runBlockScannerForBlock(dn, block);
+    }
 
     // We now have the blocks to be marked as corrupt and we get back all

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java

@@ -114,6 +114,12 @@ public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
         dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
   }
 
+  public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
+    DataBlockScanner scanner = dn.getBlockScanner();
+    BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
+    bpScanner.verifyBlock(b);
+  }
+
   public static void shutdownBlockScanner(DataNode dn) {
     if (dn.blockScanner != null) {
       dn.blockScanner.shutdown();
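Taken together, the helper gives tests a synchronous path into the scanner: getBlockScanner() returns the datanode-wide DataBlockScanner, getBPScanner() selects the per-block-pool BlockPoolSliceScanner, and verifyBlock() reads and checksums the replica in the calling thread, reporting a corrupt replica to the NameNode through the scanner's normal failure handling. A minimal usage sketch, mirroring the test change above and assuming a running MiniDFSCluster named cluster and an ExtendedBlock block whose replicas have just been corrupted:

// Force every DN to verify the block now rather than waiting for the
// periodic block pool scan; each call runs synchronously, so corrupt
// replicas are reported to the NN before the loop returns.
for (DataNode dn : cluster.getDataNodes()) {
  DataNodeTestUtils.runBlockScannerForBlock(dn, block);
}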