From 6dcf2e48158040e03144eed7dcc4b0873970d1e4 Mon Sep 17 00:00:00 2001
From: Todd Lipcon <todd@apache.org>
Date: Wed, 8 Aug 2012 20:15:40 +0000
Subject: [PATCH] HDFS-3755. Creating an already-open-for-write file with
 overwrite=true fails. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1370937 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 +
 .../hdfs/server/namenode/FSNamesystem.java     | 15 +++--
 .../apache/hadoop/hdfs/TestFileCreation.java   | 57 ++++++++++++++++++-
 3 files changed, 68 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2439fccaaf..c1f0531f21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -189,6 +189,9 @@ Branch-2 ( Unreleased changes )
     HDFS-3446. HostsFileReader silently ignores bad includes/excludes
     (Matthew Jacobs via todd)
 
+    HDFS-3755. Creating an already-open-for-write file with overwrite=true fails
+    (todd)
+
   NEW FEATURES
 
     HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1ab2318b60..20a655979a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1755,8 +1755,6 @@ private LocatedBlock startFileInternal(String src,
     try {
       INodeFile myFile = dir.getFileINode(src);
 
-      recoverLeaseInternal(myFile, src, holder, clientMachine, false);
-
       try {
         blockManager.verifyReplication(src, replication, clientMachine);
       } catch(IOException e) {
@@ -1772,10 +1770,15 @@ private LocatedBlock startFileInternal(String src,
         // File exists - must be one of append or overwrite
         if (overwrite) {
           delete(src, true);
-        } else if (!append) {
-          throw new FileAlreadyExistsException("failed to create file " + src
-              + " on client " + clientMachine
-              + " because the file exists");
+        } else {
+          // Opening an existing file for write - may need to recover lease.
+          recoverLeaseInternal(myFile, src, holder, clientMachine, false);
+
+          if (!append) {
+            throw new FileAlreadyExistsException("failed to create file " + src
+                + " on client " + clientMachine
+                + " because the file exists");
+          }
         }
       }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index cb79f67c90..dbf22b4392 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -42,8 +42,8 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.UnknownHostException;
+import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
 
 import org.apache.commons.logging.LogFactory;
@@ -75,6 +75,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
@@ -337,6 +338,60 @@ public void testDeleteOnExit() throws IOException {
     }
   }
 
+  /**
+   * Test that a file which is open for write is overwritten by another
+   * client. Regression test for HDFS-3755.
+   */
+  @Test
+  public void testOverwriteOpenForWrite() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    SimulatedFSDataset.setFactory(conf);
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    FileSystem fs = cluster.getFileSystem();
+
+    UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
+        "testuser", new String[]{"testgroup"});
+    FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(cluster.getConfiguration(0));
+      }
+    });
+
+    try {
+      Path p = new Path("/testfile");
+      FSDataOutputStream stm1 = fs.create(p);
+      stm1.write(1);
+      stm1.hflush();
+
+      // Create file again without overwrite
+      try {
+        fs2.create(p, false);
+        fail("Did not throw!");
+      } catch (IOException abce) {
+        GenericTestUtils.assertExceptionContains("already being created by",
+            abce);
+      }
+
+      FSDataOutputStream stm2 = fs2.create(p, true);
+      stm2.write(2);
+      stm2.close();
+
+      try {
+        stm1.close();
+        fail("Should have exception closing stm1 since it was deleted");
+      } catch (IOException ioe) {
+        GenericTestUtils.assertExceptionContains("File is not open for writing", ioe);
+      }
+
+    } finally {
+      IOUtils.closeStream(fs);
+      IOUtils.closeStream(fs2);
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Test that file data does not become corrupted even in the face of errors.
   */
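
For readers tracing the behavior change from the client side, here is a minimal sketch (not part of the patch, and not a substitute for the test above) of the sequence the new testOverwriteOpenForWrite exercises. The class name Hdfs3755Sketch, the path /tmp/hdfs3755, and the use of FileSystem.newInstance to obtain a second, independent DFSClient are illustrative assumptions; the test itself uses a MiniDFSCluster and a second UGI instead.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class Hdfs3755Sketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path p = new Path("/tmp/hdfs3755");

    // Writer 1 opens the file and flushes but does not close it, so the
    // NameNode still holds a lease on the path for this client.
    FileSystem fs1 = FileSystem.get(conf);
    FSDataOutputStream out1 = fs1.create(p);
    out1.write(1);
    out1.hflush();

    // Writer 2 uses FileSystem.newInstance() to bypass the FileSystem cache
    // and act as a distinct DFSClient, i.e. a distinct lease holder.
    FileSystem fs2 = FileSystem.newInstance(conf);

    // Without overwrite the create still fails: the file exists and is leased.
    try {
      fs2.create(p, false);
    } catch (IOException expected) {
      System.out.println("create(p, false) failed as expected: "
          + expected.getMessage());
    }

    // With this patch, create with overwrite=true succeeds even though the
    // file is open for write: the NameNode deletes the file instead of
    // failing the lease check first.
    FSDataOutputStream out2 = fs2.create(p, true);
    out2.write(2);
    out2.close();

    // Writer 1's stream is now stale; closing it fails because its file
    // was deleted out from under it.
    try {
      out1.close();
    } catch (IOException expected) {
      System.out.println("out1.close() failed as expected: "
          + expected.getMessage());
    }

    fs2.close();
    fs1.close();
  }
}

The ordering inside startFileInternal is the whole fix: recoverLeaseInternal is now called only on the non-overwrite path, so an overwriting create never trips over the existing writer's lease, and the "already being created by" failure is reserved for create without overwrite and for append.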