HDFS-4159. Rename should fail when the destination directory is snapshottable and has snapshots. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1406771 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-11-07 19:38:29 +00:00
parent 8a577a16f9
commit b94cf83a11
3 changed files with 79 additions and 4 deletions

View File

@@ -59,3 +59,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4150. Update the inode in the block map when a snapshotted file or a
   snapshot file is deleted. (Jing Zhao via szetszwo)
+
+  HDFS-4159. Rename should fail when the destination directory is snapshottable
+  and has snapshots. (Jing Zhao via szetszwo)
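
For context, a minimal client-side sketch of the behavior this change enforces (not part of the commit). It assumes the MiniDFSCluster/DistributedFileSystem setup, the SnapshotTestHelper utility, and the imports used by the new tests further below; the paths, the snapshot name "s1", and the method name are illustrative only.

// Sketch only: after HDFS-4159, an overwriting rename onto a snapshottable
// directory that already has snapshots is expected to fail, because the
// destination would have to be deleted to complete the overwrite.
private void renameOntoSnapshottedDirectoryShouldFail(
    DistributedFileSystem hdfs) throws Exception {
  Path src = new Path("/dir/sub2");
  Path dst = new Path("/dir/sub3");
  hdfs.mkdirs(src);
  hdfs.mkdirs(dst);
  // Make dst snapshottable and take a snapshot of it, as the new tests do.
  SnapshotTestHelper.createSnapshot(hdfs, dst, "s1");
  try {
    hdfs.rename(src, dst, Rename.OVERWRITE);
    throw new AssertionError("rename onto a directory with snapshots should fail");
  } catch (IOException e) {
    // Expected: the message names dst as snapshottable and already having snapshots.
  }
}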

View File

@@ -705,6 +705,16 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
             + error);
         throw new IOException(error);
       }
+      INode snapshotNode = hasSnapshot(dstInode);
+      if (snapshotNode != null) {
+        error = "The directory " + dstInode.getFullPathName()
+            + " cannot be deleted for renaming since "
+            + snapshotNode.getFullPathName()
+            + " is snapshottable and already has snapshots";
+        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+            + error);
+        throw new IOException(error);
+      }
     }
     if (dstInodes[dstInodes.length - 2] == null) {
       error = "rename destination parent " + dst + " not found.";
@@ -1145,10 +1155,13 @@ private static INode hasSnapshot(INode target) {
           && ((INodeDirectorySnapshottable) targetDir).getNumSnapshots() > 0) {
         return target;
       }
-      for (INode child : targetDir.getChildren()) {
-        INode snapshotDir = hasSnapshot(child);
-        if (snapshotDir != null) {
-          return snapshotDir;
+      List<INode> children = targetDir.getChildren();
+      if (children != null) {
+        for (INode child : children) {
+          INode snapshotDir = hasSnapshot(child);
+          if (snapshotDir != null) {
+            return snapshotDir;
+          }
         }
       }
     }
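
An aside on the new null check in this hunk: the guard implies that, on this branch, getChildren() can return null for a directory with no children (an inference from the patch itself, not stated elsewhere in the diff). Below is a small standalone illustration of the same defensive recursive search, using hypothetical simplified types rather than the HDFS INode classes.

import java.util.List;

// Illustration only, with made-up types: return the first node that is
// snapshottable and has snapshots, treating a null child list as "no
// children" -- the same defensive shape as the patched loop above.
class Node {
  boolean snapshottable;   // stand-in for "is an INodeDirectorySnapshottable"
  int numSnapshots;        // stand-in for getNumSnapshots()
  List<Node> children;     // may be null when the directory is empty

  static Node findSnapshottableWithSnapshots(Node target) {
    if (target.snapshottable && target.numSnapshots > 0) {
      return target;
    }
    List<Node> children = target.children;
    if (children != null) {              // guard against a null child list
      for (Node child : children) {
        Node found = findSnapshottableWithSnapshots(child);
        if (found != null) {
          return found;
        }
      }
    }
    return null;
  }
}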

View File

@@ -28,6 +28,7 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -253,6 +254,64 @@ public void testDeleteDirectoryWithSnapshot2() throws Exception {
     hdfs.delete(dir, true);
   }
+
+  /**
+   * Renaming a directory to another directory with snapshots must fail.
+   */
+  @Test
+  public void testRenameToDirectoryWithSnapshot() throws Exception {
+    // create the directory sub1
+    hdfs.mkdirs(sub1);
+    Path sub2 = new Path(dir, "sub2");
+    Path file2 = new Path(sub2, "file");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+
+    // The normal rename should succeed since the destination has no snapshots
+    hdfs.rename(sub2, sub1, Rename.OVERWRITE);
+
+    // Create sub3 and take a snapshot of it
+    Path sub3 = new Path(dir, "sub3");
+    hdfs.mkdirs(sub3);
+    SnapshotTestHelper.createSnapshot(hdfs, sub3, "s1");
+
+    exception.expect(RemoteException.class);
+    String error = "The directory " + sub3.toString()
+        + " cannot be deleted for renaming since " + sub3.toString()
+        + " is snapshottable and already has snapshots";
+    exception.expectMessage(error);
+    hdfs.rename(sub1, sub3, Rename.OVERWRITE);
+  }
+
+  /**
+   * Renaming a directory to another directory with snapshots must fail,
+   * even when the files under the destination have already been deleted.
+   */
+  @Test
+  public void testRenameToDirectoryWithSnapshot2() throws Exception {
+    Path file0 = new Path(sub1, "file0");
+    Path file1 = new Path(sub1, "file1");
+    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+    Path sub2 = new Path(dir, "sub2");
+    Path file2 = new Path(sub2, "file");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+
+    // Create a snapshot of sub1
+    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
+    // Then delete file0 and file1, so that the rename would succeed if sub1
+    // had no snapshots
+    hdfs.delete(file0, true);
+    hdfs.delete(file1, true);
+
+    exception.expect(RemoteException.class);
+    String error = "The directory " + sub1.toString()
+        + " cannot be deleted for renaming since " + sub1.toString()
+        + " is snapshottable and already has snapshots";
+    exception.expectMessage(error);
+    hdfs.rename(sub2, sub1, Rename.OVERWRITE);
+  }
   /**
    * Base class to present changes applied to current file/dir. A modification
    * can be file creation, deletion, or other modifications such as appending on