diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2a8da43dc7..16fe394270 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -306,6 +306,9 @@ Trunk (Unreleased)
     HDFS-7803. Wrong command mentioned in HDFSHighAvailabilityWithQJM
     documentation (Arshad Mohammad via aw)
 
+    HDFS-4681. TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks
+    fails using IBM java (Ayappan via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 5b391c567f..7e7ff39e89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -251,6 +251,12 @@ private class MyFile {
   public void createFiles(FileSystem fs, String topdir) throws IOException {
     createFiles(fs, topdir, (short)3);
   }
+
+  public static byte[] readFileAsBytes(FileSystem fs, Path fileName) throws IOException {
+    ByteArrayOutputStream os = new ByteArrayOutputStream();
+    IOUtils.copyBytes(fs.open(fileName), os, 1024, true);
+    return os.toByteArray();
+  }
 
   /** create nFiles with random names and directory hierarchies
    *  with random (but reproducible) data in them.
@@ -723,6 +729,12 @@ public static String readFile(File f) throws IOException {
     return b.toString();
   }
 
+  public static byte[] readFileAsBytes(File f) throws IOException {
+    ByteArrayOutputStream os = new ByteArrayOutputStream();
+    IOUtils.copyBytes(new FileInputStream(f), os, 1024, true);
+    return os.toByteArray();
+  }
+
   /* Write the given string to the given file */
   public static void writeFile(FileSystem fs, Path p, String s)
       throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 5297ba2bc7..2c1d07e716 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1869,6 +1869,16 @@ public String readBlockOnDataNode(int i, ExtendedBlock block)
     return null;
   }
 
+  public byte[] readBlockOnDataNodeAsBytes(int i, ExtendedBlock block)
+      throws IOException {
+    assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode "+i;
+    File blockFile = getBlockFile(i, block);
+    if (blockFile != null && blockFile.exists()) {
+      return DFSTestUtil.readFileAsBytes(blockFile);
+    }
+    return null;
+  }
+
   /**
    * Corrupt a block on a particular datanode.
    *
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 1bc7cdce4f..54983a1378 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -20,6 +20,7 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
 
 import java.util.ArrayList;
 
@@ -202,7 +203,7 @@ public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
       final FileSystem fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, filePath, fileLen, REPLICATION_FACTOR, 1L);
-      final String fileContent = DFSTestUtil.readFile(fs, filePath);
+      final byte[] fileContent = DFSTestUtil.readFileAsBytes(fs, filePath);
 
       ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
       DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
@@ -224,9 +225,9 @@ public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
       // Ensure all replicas are valid (the corrupt replica may not
       // have been cleaned up yet).
      for (int i = 0; i < racks.length; i++) {
-        String blockContent = cluster.readBlockOnDataNode(i, b);
+        byte[] blockContent = cluster.readBlockOnDataNodeAsBytes(i, b);
         if (blockContent != null && i != dnToCorrupt) {
-          assertEquals("Corrupt replica", fileContent, blockContent);
+          assertArrayEquals("Corrupt replica", fileContent, blockContent);
         }
       }
     } finally {
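Reviewer note (not part of the patch): the switch from String/assertEquals to byte[]/assertArrayEquals matters for two reasons. First, DFSTestUtil.readFile decodes raw block bytes through the JVM's default charset, which varies between JVMs (IBM Java in particular), so the decode can be lossy and the comparison can fail even when the replica is intact. Second, once the content is held as a byte[], JUnit's assertEquals(Object, Object) would only check reference equality, so the assertion must move to assertArrayEquals, which compares contents element by element. A minimal standalone sketch of both points; the class name and byte values below are made up for illustration:

import static org.junit.Assert.assertArrayEquals;

import java.nio.charset.Charset;
import java.util.Arrays;

// Illustrative sketch only -- not part of the patch above.
public class BinaryCompareSketch {
  public static void main(String[] args) {
    // Block replicas hold arbitrary binary data, not valid text.
    byte[] original = { (byte) 0xC3, (byte) 0x28, (byte) 0xFF };

    // Decoding with the platform default charset may replace malformed
    // sequences, so the round trip need not reproduce the input bytes.
    String decoded = new String(original, Charset.defaultCharset());
    byte[] roundTrip = decoded.getBytes(Charset.defaultCharset());
    System.out.println("round trip preserved: "
        + Arrays.equals(original, roundTrip));

    // Comparing raw bytes sidesteps the charset entirely. Note that
    // assertEquals(Object, Object) would only do reference equality on
    // byte[]; assertArrayEquals compares length and each element.
    byte[] copy = original.clone();
    assertArrayEquals(copy, original);
  }
}

Under a UTF-8 default charset the round trip above prints false, because the malformed sequence is replaced with U+FFFD during decoding; under ISO-8859-1 it prints true. That platform dependence is exactly what the patch removes from the test.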