HDFS-3755. Creating an already-open-for-write file with overwrite=true fails. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1370937 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Todd Lipcon 2012-08-08 20:15:40 +00:00
parent b587af4631
commit 6dcf2e4815
3 changed files with 68 additions and 7 deletions

View File

@ -189,6 +189,9 @@ Branch-2 ( Unreleased changes )
    HDFS-3446. HostsFileReader silently ignores bad includes/excludes
    (Matthew Jacobs via todd)

    HDFS-3755. Creating an already-open-for-write file with overwrite=true fails
    (todd)

  NEW FEATURES

    HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)

View File

@ -1755,8 +1755,6 @@ private LocatedBlock startFileInternal(String src,
try { try {
INodeFile myFile = dir.getFileINode(src); INodeFile myFile = dir.getFileINode(src);
recoverLeaseInternal(myFile, src, holder, clientMachine, false);
try { try {
blockManager.verifyReplication(src, replication, clientMachine); blockManager.verifyReplication(src, replication, clientMachine);
} catch(IOException e) { } catch(IOException e) {
@ -1772,10 +1770,15 @@ private LocatedBlock startFileInternal(String src,
// File exists - must be one of append or overwrite // File exists - must be one of append or overwrite
if (overwrite) { if (overwrite) {
delete(src, true); delete(src, true);
} else if (!append) { } else {
throw new FileAlreadyExistsException("failed to create file " + src // Opening an existing file for write - may need to recover lease.
+ " on client " + clientMachine recoverLeaseInternal(myFile, src, holder, clientMachine, false);
+ " because the file exists");
if (!append) {
throw new FileAlreadyExistsException("failed to create file " + src
+ " on client " + clientMachine
+ " because the file exists");
}
} }
} }

View File

@ -42,8 +42,8 @@
import java.io.IOException; import java.io.IOException;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.URI; import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException; import java.net.UnknownHostException;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet; import java.util.EnumSet;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
@ -75,6 +75,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.apache.log4j.Level; import org.apache.log4j.Level;
@ -337,6 +338,60 @@ public void testDeleteOnExit() throws IOException {
} }
} }
  /**
   * Test that a file which is open for write is overwritten by another
   * client. Regression test for HDFS-3755.
   *
   * Scenario: client 1 creates and holds /testfile open (lease held on the
   * NameNode). Client 2, running as a different user, must (a) fail to
   * create the same path with overwrite=false, but (b) succeed with
   * overwrite=true, which deletes the in-progress file out from under
   * client 1.
   */
  @Test
  public void testOverwriteOpenForWrite() throws Exception {
    Configuration conf = new HdfsConfiguration();
    SimulatedFSDataset.setFactory(conf);
    // Disable permission checks so the non-superuser second client can
    // operate on the same path without ACL interference.
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    // A distinct UGI so the NameNode sees a different lease holder than
    // the first writer (overwrite across clients, not lease re-entry).
    UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
        "testuser", new String[]{"testgroup"});
    FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(cluster.getConfiguration(0));
      }
    });
    try {
      Path p = new Path("/testfile");
      // Client 1: create the file and leave the stream open with data
      // hflush()ed, so the file is under construction with a live lease.
      FSDataOutputStream stm1 = fs.create(p);
      stm1.write(1);
      stm1.hflush();
      // Create file again without overwrite: must be rejected because the
      // file is still being written by client 1.
      try {
        fs2.create(p, false);
        fail("Did not throw!");
      } catch (IOException abce) {
        GenericTestUtils.assertExceptionContains("already being created by",
            abce);
      }
      // With overwrite=true the create must succeed even though the file
      // is open for write by another client — the HDFS-3755 fix.
      FSDataOutputStream stm2 = fs2.create(p, true);
      stm2.write(2);
      stm2.close();
      // Client 1's stream is now orphaned: its file was deleted by the
      // overwrite, so closing it must fail rather than silently succeed.
      try {
        stm1.close();
        fail("Should have exception closing stm1 since it was deleted");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("File is not open for writing", ioe);
      }
    } finally {
      IOUtils.closeStream(fs);
      IOUtils.closeStream(fs2);
      cluster.shutdown();
    }
  }
/** /**
* Test that file data does not become corrupted even in the face of errors. * Test that file data does not become corrupted even in the face of errors.
*/ */