From 399ad009158cbc6aca179396d390fe770801420f Mon Sep 17 00:00:00 2001
From: Lei Xu
Date: Mon, 26 Oct 2015 16:08:06 -0700
Subject: [PATCH] HDFS-9292. Make TestFileConcorruption independent to
 underlying FsDataset Implementation. (lei)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hadoop/hdfs/TestFileCorruption.java     | 65 ++++++++-----------
 2 files changed, 30 insertions(+), 38 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e26abcc05f..f6904c3bee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1581,6 +1581,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8945. Update the description about replica placement in HDFS
     Architecture documentation. (Masatake Iwasaki via wang)
 
+    HDFS-9292. Make TestFileConcorruption independent to underlying FsDataset
+    Implementation. (lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 8e0ffe7de9..c1a7ebbd99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -24,20 +24,16 @@
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
-import java.io.File;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
+import java.util.Map;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.filefilter.DirectoryFileFilter;
-import org.apache.commons.io.filefilter.PrefixFileFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -45,6 +41,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -74,17 +71,17 @@ public void testFileCorruption() throws Exception {
       FileSystem fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
       // Now deliberately remove the blocks
-      File storageDir = cluster.getInstanceStorageDir(2, 0);
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("data directory does not exist", data_dir.exists());
-      Collection<File> blocks = FileUtils.listFiles(data_dir,
-          new PrefixFileFilter(Block.BLOCK_FILE_PREFIX),
-          DirectoryFileFilter.DIRECTORY);
-      assertTrue("Blocks do not exist in data-dir", blocks.size() > 0);
-      for (File block : blocks) {
-        System.out.println("Deliberately removing file " + block.getName());
-        assertTrue("Cannot remove file.", block.delete());
+      DataNode dn = cluster.getDataNodes().get(2);
+      Map<DatanodeStorage, BlockListAsLongs> blockReports =
+          dn.getFSDataset().getBlockReports(bpid);
+      assertTrue("Blocks do not exist on data-dir", !blockReports.isEmpty());
+      for (BlockListAsLongs report : blockReports.values()) {
+        for (BlockReportReplica brr : report) {
+          LOG.info("Deliberately removing block {}", brr.getBlockName());
+          cluster.getFsDatasetTestUtils(2).getMaterializedReplica(
+              new ExtendedBlock(bpid, brr)).deleteData();
+        }
       }
       assertTrue("Corrupted replicas not handled properly.",
           util.checkFiles(fs, "/srcdat"));
@@ -110,7 +107,7 @@ public void testLocalFileCorruption() throws Exception {
       // Now attempt to read the file
       DataInputStream dis = fs.open(file, 512);
       try {
-        System.out.println("A ChecksumException is expected to be logged.");
+        LOG.info("A ChecksumException is expected to be logged.");
         dis.readByte();
       } catch (ChecksumException ignore) {
         //expect this exception but let any NPE get thrown
@@ -137,15 +134,7 @@ public void testArrayOutOfBoundsException() throws Exception {
 
       // get the block
       final String bpid = cluster.getNamesystem().getBlockPoolId();
-      File storageDir = cluster.getInstanceStorageDir(0, 0);
-      File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("Data directory does not exist", dataDir.exists());
-      ExtendedBlock blk = getBlock(bpid, dataDir);
-      if (blk == null) {
-        storageDir = cluster.getInstanceStorageDir(0, 1);
-        dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-        blk = getBlock(bpid, dataDir);
-      }
+      ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
       assertFalse("Data directory does not contain any blocks or there was an "
           + "IO error", blk==null);
 
@@ -173,20 +162,20 @@ public void testArrayOutOfBoundsException() throws Exception {
       //clean up
       fs.delete(FILE_PATH, false);
     } finally {
-      if (cluster != null) { cluster.shutdown(); }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
-
   }
 
-  public static ExtendedBlock getBlock(String bpid, File dataDir) {
-    List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
-    if (metadataFiles == null || metadataFiles.isEmpty()) {
-      return null;
+  private static ExtendedBlock getFirstBlock(DataNode dn, String bpid) {
+    Map<DatanodeStorage, BlockListAsLongs> blockReports =
+        dn.getFSDataset().getBlockReports(bpid);
+    for (BlockListAsLongs blockLongs : blockReports.values()) {
+      for (BlockReportReplica block : blockLongs) {
+        return new ExtendedBlock(bpid, block);
+      }
     }
-    File metadataFile = metadataFiles.get(0);
-    File blockFile = Block.metaToBlockFile(metadataFile);
-    return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
-        blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
+    return null;
   }
-
 }