From eb7fe1d588de903be2ff6e20384c25c184881532 Mon Sep 17 00:00:00 2001
From: Inigo Goiri
Date: Sat, 28 Apr 2018 09:05:30 -0700
Subject: [PATCH] HDFS-13509. Bug fix for breakHardlinks() of
 ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows.
 Contributed by Xiao Liang.

---
 .../hdfs/server/datanode/LocalReplica.java    | 18 ++---
 .../apache/hadoop/hdfs/TestFileAppend.java    | 71 ++++++++++++-------
 2 files changed, 55 insertions(+), 34 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
index 2c5af11d0d..68126a5eb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
@@ -186,16 +186,18 @@ private void breakHardlinks(File file, Block b) throws IOException {
     final FileIoProvider fileIoProvider = getFileIoProvider();
     final File tmpFile = DatanodeUtil.createFileWithExistsCheck(
         getVolume(), b, DatanodeUtil.getUnlinkTmpFile(file), fileIoProvider);
-    try (FileInputStream in = fileIoProvider.getFileInputStream(
-        getVolume(), file)) {
-      try (FileOutputStream out = fileIoProvider.getFileOutputStream(
-          getVolume(), tmpFile)) {
-        IOUtils.copyBytes(in, out, 16 * 1024);
+    try {
+      try (FileInputStream in = fileIoProvider.getFileInputStream(
+          getVolume(), file)) {
+        try (FileOutputStream out = fileIoProvider.getFileOutputStream(
+            getVolume(), tmpFile)) {
+          IOUtils.copyBytes(in, out, 16 * 1024);
+        }
       }
       if (file.length() != tmpFile.length()) {
-        throw new IOException("Copy of file " + file + " size " + file.length()+
-            " into file " + tmpFile +
-            " resulted in a size of " + tmpFile.length());
+        throw new IOException("Copy of file " + file + " size " + file.length()
+            + " into file " + tmpFile + " resulted in a size of "
+            + tmpFile.length());
       }
       fileIoProvider.replaceFile(getVolume(), tmpFile, file);
     } catch (IOException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 20cec6a2c8..aa8afb0b19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -120,7 +121,9 @@ private void checkFile(DistributedFileSystem fileSys, Path name, int repl)
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
                                                    cluster.getNameNodePort());
@@ -186,7 +189,9 @@ public void testBreakHardlinksIfNeeded() throws IOException {
   public void testSimpleFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();
 
     try {
@@ -239,7 +244,9 @@ public void testSimpleFlush() throws IOException {
   public void testComplexFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();
 
     try {
@@ -286,7 +293,9 @@ public void testComplexFlush() throws IOException {
   @Test(expected = FileNotFoundException.class)
   public void testFileNotFound() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     try {
       Path file1 = new Path("/nonexistingfile.dat");
@@ -301,7 +310,9 @@ public void testFileNotFound() throws IOException {
   @Test
   public void testAppendTwice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final FileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -340,7 +351,9 @@ public void testAppendTwice() throws Exception {
   @Test
   public void testAppend2Twice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final DistributedFileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -386,8 +399,9 @@ public void testMultipleAppends() throws Exception {
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY,
         false);
 
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        builderBaseDir).numDataNodes(4).build();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       final Path p = new Path("/testMultipleAppend/foo");
@@ -438,8 +452,9 @@ public void testAppendAfterSoftLimit()
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();
 
@@ -478,8 +493,9 @@ public void testAppend2AfterSoftLimit() throws Exception {
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();
 
@@ -525,8 +541,9 @@ public void testFailedAppendBlockRejection() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     try {
       fs = cluster.getFileSystem();
@@ -578,8 +595,9 @@ public void testMultiAppend2() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     final String hello = "hello\n";
     try {
@@ -650,8 +668,9 @@ public void testAppendCorruptedBlock() throws Exception {
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     try {
       DistributedFileSystem fs = cluster.getFileSystem();
       Path fileName = new Path("/appendCorruptBlock");
@@ -676,7 +695,9 @@ public void testConcurrentAppendRead()
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);
 
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
@@ -693,9 +714,9 @@ public void testConcurrentAppendRead()
       // Call FsDatasetImpl#append to append the block file,
       // which converts it to a rbw replica.
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
-      long newGS = block.getGenerationStamp()+1;
-      ReplicaHandler
-          replicaHandler = dataSet.append(block, newGS, initialFileLength);
+      long newGS = block.getGenerationStamp() + 1;
+      ReplicaHandler replicaHandler =
+          dataSet.append(block, newGS, initialFileLength);
 
       // write data to block file
       ReplicaBeingWritten rbw =
@@ -711,9 +732,8 @@ public void testConcurrentAppendRead()
 
       // update checksum file
      final int smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
-      FsDatasetUtil.computeChecksum(
-          rbw.getMetaFile(), rbw.getMetaFile(), rbw.getBlockFile(),
-          smallBufferSize, conf);
+      FsDatasetUtil.computeChecksum(rbw.getMetaFile(), rbw.getMetaFile(),
+          rbw.getBlockFile(), smallBufferSize, conf);
 
       // read the block
       // the DataNode BlockSender should read from the rbw replica's in-memory
@@ -725,5 +745,4 @@ public void testConcurrentAppendRead()
       cluster.shutdown();
     }
   }
-
 }
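
Reviewer note on the LocalReplica hunk: the fix moves the copy into nested
try-with-resources blocks inside an outer try, so both streams are closed
before the length check and FileIoProvider#replaceFile() run. Previously
replaceFile() executed while the input stream on the original block file was
still open, and on Windows a file with an open handle cannot be replaced. A
minimal sketch of the resulting copy-then-replace ordering, with plain
java.io/java.nio standing in for FileIoProvider (the class and method below
are illustrative, not Hadoop API):

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

import org.apache.hadoop.io.IOUtils;

public final class CopyThenReplaceSketch {
  /**
   * Copy src to tmp, verify the copy, then swap tmp over src. The swap only
   * happens after every stream is closed, which Windows requires.
   */
  static void copyThenReplace(File src, File tmp) throws IOException {
    try (FileInputStream in = new FileInputStream(src)) {
      try (FileOutputStream out = new FileOutputStream(tmp)) {
        IOUtils.copyBytes(in, out, 16 * 1024);
      }
    }
    // Both streams are closed at this point; an open handle on src or tmp
    // would make the replace below fail on Windows.
    if (src.length() != tmp.length()) {
      throw new IOException("Copy of file " + src + " size " + src.length()
          + " into file " + tmp + " resulted in a size of " + tmp.length());
    }
    // Stand-in for FileIoProvider#replaceFile.
    Files.move(tmp.toPath(), src.toPath(),
        StandardCopyOption.REPLACE_EXISTING);
  }
}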
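
Reviewer note on the TestFileAppend hunks: every change applies the same
pattern, passing a per-run randomized base directory to the MiniDFSCluster
builder so repeated or concurrent runs do not reuse the default storage
directories, a common source of the Windows failures named in the subject
line. A self-contained sketch of the pattern in a hypothetical test class
(only the Builder(conf, basedir) overload and getRandomizedTempPath() are
taken from the patch; everything else is illustrative):

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

public class TestRandomizedBaseDir {
  @Test
  public void testClusterUsesRandomizedBaseDir() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // getRandomizedTempPath() yields a unique path per run, so each cluster
    // gets its own NameNode/DataNode storage dirs and never collides with a
    // previous run whose files Windows may still hold locked.
    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
        .build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/example"));
    } finally {
      cluster.shutdown();
    }
  }
}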