From 80e59e7876fed1c9d56e696331e0c54e7cd3499b Mon Sep 17 00:00:00 2001
From: Sean Mackrory
Date: Fri, 7 Dec 2018 17:18:20 -0700
Subject: [PATCH] HDFS-14101. Fixing underflow error in test. Contributed by
 Zsolt Venczel.

---
 .../namenode/TestListCorruptFileBlocks.java   | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index db12146e24..6bfc64d8f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -80,10 +80,13 @@ public void testListCorruptFilesCorruptedBlock() throws Exception {
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testCorruptFilesCorruptedBlock").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -104,14 +107,13 @@ public void testListCorruptFilesCorruptedBlock() throws Exception {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
       new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {
@@ -160,10 +162,13 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception {
           HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
       FileSystem fs = cluster.getFileSystem();
 
+      // Files are corrupted with 2 bytes before the end of the file,
+      // so that's the minimum length.
+      final int corruptionLength = 2;
       // create two files with one block each
       DFSTestUtil util = new DFSTestUtil.Builder().
           setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).
-          setMaxLevels(1).setMaxSize(512).build();
+          setMaxLevels(1).setMinSize(corruptionLength).setMaxSize(512).build();
       util.createFiles(fs, "/srcdat10");
 
       // fetch bad file list from namenode. There should be none.
@@ -183,14 +188,13 @@ public void testListCorruptFileBlocksInSafeMode() throws Exception {
       File metaFile = metaFiles.get(0);
       RandomAccessFile file = new RandomAccessFile(metaFile, "rw");
       FileChannel channel = file.getChannel();
-      long position = channel.size() - 2;
-      int length = 2;
-      byte[] buffer = new byte[length];
+      long position = channel.size() - corruptionLength;
+      byte[] buffer = new byte[corruptionLength];
      new Random(13L).nextBytes(buffer);
       channel.write(ByteBuffer.wrap(buffer), position);
       file.close();
       LOG.info("Deliberately corrupting file " + metaFile.getName() +
-          " at offset " + position + " length " + length);
+          " at offset " + position + " length " + corruptionLength);
 
       // read all files to trigger detection of corrupted replica
       try {
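
Note (not part of the patch): a minimal, standalone sketch of the underflow this
change guards against, under the assumption that DFSTestUtil can generate a file
shorter than 2 bytes. The class name CorruptionUnderflowDemo is illustrative only.
With a 1-byte file, channel.size() - 2 evaluates to -1, and
FileChannel.write(ByteBuffer, long) rejects negative positions with an
IllegalArgumentException; setMinSize(corruptionLength) in the patch ensures every
generated file is at least as long as the corruption window.

import java.io.File;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Random;

public class CorruptionUnderflowDemo {
  public static void main(String[] args) throws Exception {
    final int corruptionLength = 2;
    // A 1-byte stand-in for a block meta file, shorter than the
    // corruption window the test writes at the end of the file.
    File metaFile = File.createTempFile("blk_meta", ".tmp");
    try (RandomAccessFile file = new RandomAccessFile(metaFile, "rw")) {
      file.write(0x42);
      FileChannel channel = file.getChannel();
      // channel.size() == 1, so position == -1: the underflow.
      long position = channel.size() - corruptionLength;
      byte[] buffer = new byte[corruptionLength];
      new Random(13L).nextBytes(buffer);
      try {
        channel.write(ByteBuffer.wrap(buffer), position);
      } catch (IllegalArgumentException e) {
        // FileChannel rejects negative write positions, which is the
        // failure mode setMinSize(corruptionLength) now prevents.
        System.out.println("underflow: " + e.getMessage());
      }
    } finally {
      metaFile.delete();
    }
  }
}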