HDFS-4806. In INodeDirectoryWithSnapshot, use isInLatestSnapshot() to determine if an added/removed child should be recorded in the snapshot diff. Contributed by Jing Zhao
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1480146 13f79535-47bb-0310-9956-ffa450edef68
parent 41312abe84
commit 08ec505d6b
@@ -347,3 +347,7 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4802. Disallowing snapshot on / twice should throw SnapshotException
   but not IllegalStateException. (Jing Zhao via szetszwo)
+
+  HDFS-4806. In INodeDirectoryWithSnapshot, use isInLatestSnapshot() to
+  determine if an added/removed child should be recorded in the snapshot diff.
+  (Jing Zhao via szetszwo)
@@ -550,7 +550,7 @@ public boolean addChild(INode inode, boolean setModTime, Snapshot latest,
       final INodeMap inodeMap) throws QuotaExceededException {
     ChildrenDiff diff = null;
     Integer undoInfo = null;
-    if (latest != null) {
+    if (isInLatestSnapshot(latest)) {
       diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
       undoInfo = diff.create(inode);
     }
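The crux of the one-line change above is that a non-null latest only means some snapshot exists on the path, not that this particular inode is captured by it. The following is a minimal, self-contained sketch of that distinction; ToyNode and its fields are illustrative assumptions only, not the real org.apache.hadoop.hdfs.server.namenode.INode or Snapshot classes.

public class LatestSnapshotCheckSketch {

  static class ToyNode {
    final ToyNode parent;
    final int addedAfterSnapshotId; // -1 = existed before any snapshot on this path

    ToyNode(ToyNode parent, int addedAfterSnapshotId) {
      this.parent = parent;
      this.addedAfterSnapshotId = addedAfterSnapshotId;
    }

    // Conceptual stand-in for INode#isInLatestSnapshot(Snapshot): true only if this
    // node (and every ancestor) already existed when the latest snapshot was taken.
    boolean isInLatestSnapshot(Integer latestSnapshotId) {
      if (latestSnapshotId == null) {
        return false;                       // no snapshot at all
      }
      if (addedAfterSnapshotId >= latestSnapshotId) {
        return false;                       // added after the latest snapshot was taken
      }
      return parent == null || parent.isInLatestSnapshot(latestSnapshotId);
    }
  }

  public static void main(String[] args) {
    ToyNode dir2 = new ToyNode(null, -1);   // /dir2, snapshot s2 taken at id 0
    ToyNode foo = new ToyNode(dir2, 0);     // /dir2/foo moved in *after* s2

    Integer latest = 0;                     // s2 exists, so latest is non-null
    System.out.println(latest != null);                  // true  -> old check records a diff
    System.out.println(foo.isInLatestSnapshot(latest));  // false -> s2 never captured foo,
                                                         //          so no diff entry is needed
  }
}

In the toy, foo is moved under dir2 after snapshot s2 was taken, which mirrors the regression test added below.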
@@ -566,7 +566,18 @@ public boolean removeChild(INode child, Snapshot latest,
       final INodeMap inodeMap) throws QuotaExceededException {
     ChildrenDiff diff = null;
     UndoInfo<INode> undoInfo = null;
-    if (latest != null) {
+    // For a directory that is not a renamed node, if isInLatestSnapshot returns
+    // false, the directory is not in the latest snapshot, thus we do not need
+    // to record the removed child in any snapshot.
+    // For a directory that was moved/renamed, note that if the directory is in
+    // any of the previous snapshots, we will have created a reference node for
+    // the directory during the rename, and isInLatestSnapshot will return true
+    // in that scenario (if all previous snapshots have been deleted,
+    // isInLatestSnapshot still returns false). Thus if isInLatestSnapshot
+    // returns false, the directory node cannot be in any snapshot (neither in
+    // the current tree nor in the previous src tree), so we do not need to
+    // record the removed child in any snapshot.
+    if (isInLatestSnapshot(latest)) {
       diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
       undoInfo = diff.delete(child);
     }
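The new regression test below shows what would go wrong without this check: a child recorded in the deleted list of a directory that is in no snapshot gets destroyed when that directory is deleted, even if the child has since been renamed elsewhere. A minimal sketch of that failure mode follows; ToyDiff is an assumption standing in for the real ChildrenDiff, not the actual Hadoop class.

import java.util.ArrayList;
import java.util.List;

public class DeletedListSketch {

  /** Tiny stand-in for a directory diff's "deleted" list. */
  static class ToyDiff {
    final List<String> deletedChildren = new ArrayList<>();
  }

  // Mirrors the shape of the fixed removeChild above: only record the removal
  // when the parent directory really is in the latest snapshot.
  static void removeChild(ToyDiff diff, boolean parentInLatestSnapshot, String child) {
    if (parentInLatestSnapshot) {
      diff.deletedChildren.add(child);      // the snapshot must still be able to see it
    }
    // otherwise there is nothing to record; the child simply leaves the directory
  }

  public static void main(String[] args) {
    ToyDiff fooDiff = new ToyDiff();

    // /dir2/foo is in no snapshot (s1 was deleted before the rename), and its child
    // "bar" has already been renamed to /dir3/bar. The old "latest != null" style
    // check would still have recorded bar here; the corrected check does not:
    removeChild(fooDiff, /* parentInLatestSnapshot = */ false, "bar");

    // Deleting /dir2/foo destroys everything in its deleted list. Because the list
    // stays empty, the renamed /dir3/bar survives, which is what the test asserts.
    System.out.println(fooDiff.deletedChildren);          // []
  }
}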
@@ -52,6 +52,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
@@ -1674,4 +1675,51 @@ public void testRenameDirAndDeleteSnapshot_4() throws Exception {
 
     restartClusterAndCheckImage();
   }
+
+  /**
+   * This test demonstrates that
+   * {@link INodeDirectoryWithSnapshot#removeChild(INode, Snapshot, INodeMap)}
+   * and
+   * {@link INodeDirectoryWithSnapshot#addChild(INode, boolean, Snapshot, INodeMap)}
+   * should use {@link INode#isInLatestSnapshot(Snapshot)} to check if the
+   * added/removed child should be recorded in snapshots.
+   */
+  @Test
+  public void testRenameAndDeleteSnapshot_5() throws Exception {
+    final Path dir1 = new Path("/dir1");
+    final Path dir2 = new Path("/dir2");
+    final Path dir3 = new Path("/dir3");
+    hdfs.mkdirs(dir1);
+    hdfs.mkdirs(dir2);
+    hdfs.mkdirs(dir3);
+
+    final Path foo = new Path(dir1, "foo");
+    hdfs.mkdirs(foo);
+    SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
+    final Path bar = new Path(foo, "bar");
+    // create file bar, and foo will become an INodeDirectoryWithSnapshot
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+    // delete snapshot s1. now foo is not in any snapshot
+    hdfs.deleteSnapshot(dir1, "s1");
+
+    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
+    // rename /dir1/foo to /dir2/foo
+    final Path foo2 = new Path(dir2, foo.getName());
+    hdfs.rename(foo, foo2);
+    // rename /dir2/foo/bar to /dir3/bar
+    final Path bar2 = new Path(dir2, "foo/bar");
+    final Path bar3 = new Path(dir3, "bar");
+    hdfs.rename(bar2, bar3);
+
+    // delete /dir2/foo. Since it is not in any snapshot, we will call its
+    // destroy function. If we did not use isInLatestSnapshot in the removeChild
+    // and addChild methods of INodeDirectoryWithSnapshot, the file bar would be
+    // stored in the deleted list of foo and would be destroyed.
+    hdfs.delete(foo2, true);
+
+    // check that /dir3/bar still exists
+    assertTrue(hdfs.exists(bar3));
+    INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
+    assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
+  }
 }
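To run just the new test locally, Maven's test filter can be used, e.g. mvn test -Dtest=TestRenameWithSnapshots from the HDFS module of a branch-HDFS-2802 checkout (the exact module path depends on the source layout of the checkout).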