diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt index c622f12c92..8728c3f792 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt @@ -241,3 +241,6 @@ Branch-2802 Snapshot (Unreleased) create a file/directory with ".snapshot" as the name. If ".snapshot" is used in a previous version of HDFS, it must be renamed before upgrade; otherwise, upgrade will fail. (szetszwo) + + HDFS-4700. Fix the undo section of rename with snapshots. (Jing Zhao via + szetszwo) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 67ef05cf49..afd79f7709 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath; +import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; @@ -582,12 +583,17 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) // check srcChild for reference final INodeReference.WithCount withCount; - if (srcChildIsReference || isSrcInSnapshot) { + int srcRefDstSnapshot = srcChildIsReference ? 
srcChild.asReference() + .getDstSnapshotId() : Snapshot.INVALID_ID; + if (isSrcInSnapshot) { final INodeReference.WithName withName = srcIIP.getINode(-2).asDirectory() .replaceChild4ReferenceWithName(srcChild); - withCount = (INodeReference.WithCount)withName.getReferredINode(); + withCount = (INodeReference.WithCount) withName.getReferredINode(); srcChild = withName; srcIIP.setLastINode(srcChild); + } else if (srcChildIsReference) { + // srcChild is reference but srcChild is not in latest snapshot + withCount = (WithCount) srcChild.asReference().getReferredINode(); } else { withCount = null; } @@ -602,7 +608,6 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) return false; } - // add src to the destination if (dstParent.getParent() == null) { // src and dst file/dir are in the same directory, and the dstParent has // been replaced when we removed the src. Refresh the dstIIP and @@ -611,6 +616,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) dstParent = dstIIP.getINode(-2); } + // add src to the destination + srcChild = srcIIP.getLastINode(); final byte[] dstChildName = dstIIP.getLastLocalName(); final INode toDst; @@ -645,17 +652,29 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) } } finally { if (!added) { + final INodeDirectory srcParent = srcIIP.getINode(-2).asDirectory(); + final INode oldSrcChild = srcChild; // put it back if (withCount == null) { srcChild.setLocalName(srcChildName); } else if (!srcChildIsReference) { // src must be in snapshot - final INodeDirectoryWithSnapshot srcParent = - (INodeDirectoryWithSnapshot) srcIIP.getINode(-2).asDirectory(); final INode originalChild = withCount.getReferredINode(); - srcParent.replaceRemovedChild(srcChild, originalChild); srcChild = originalChild; + } else { + final INodeReference originalRef = new INodeReference.DstReference( + srcParent, withCount, srcRefDstSnapshot); + withCount.setParentReference(originalRef); + srcChild = originalRef; + } + 
+ if (isSrcInSnapshot) { + ((INodeDirectoryWithSnapshot) srcParent).undoRename4ScrParent( + oldSrcChild.asReference(), srcChild, srcIIP.getLatestSnapshot()); + } else { + // srcParent is not an INodeDirectoryWithSnapshot, we only need to add + // the srcChild back + addLastINodeNoQuotaCheck(srcIIP, srcChild); } - addLastINodeNoQuotaCheck(srcIIP, srcChild); } } NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " @@ -797,12 +816,17 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, // check srcChild for reference final INodeReference.WithCount withCount; - if (srcChildIsReference || isSrcInSnapshot) { + int srcRefDstSnapshot = srcChildIsReference ? srcChild.asReference() + .getDstSnapshotId() : Snapshot.INVALID_ID; + if (isSrcInSnapshot) { final INodeReference.WithName withName = srcIIP.getINode(-2).asDirectory() .replaceChild4ReferenceWithName(srcChild); - withCount = (INodeReference.WithCount)withName.getReferredINode(); + withCount = (INodeReference.WithCount) withName.getReferredINode(); srcChild = withName; srcIIP.setLastINode(srcChild); + } else if (srcChildIsReference) { + // srcChild is reference but srcChild is not in latest snapshot + withCount = (WithCount) srcChild.asReference().getReferredINode(); } else { withCount = null; } @@ -888,21 +912,38 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, } finally { if (undoRemoveSrc) { // Rename failed - restore src - srcChild = srcIIP.getLastINode(); + final INodeDirectory srcParent = srcIIP.getINode(-2).asDirectory(); + final INode oldSrcChild = srcChild; + // put it back if (withCount == null) { srcChild.setLocalName(srcChildName); } else if (!srcChildIsReference) { // src must be in snapshot - final INodeDirectoryWithSnapshot srcParent - = (INodeDirectoryWithSnapshot)srcIIP.getINode(-2).asDirectory(); final INode originalChild = withCount.getReferredINode(); - srcParent.replaceRemovedChild(srcChild, originalChild); srcChild = originalChild; + } else { 
+ final INodeReference originalRef = new INodeReference.DstReference( + srcParent, withCount, srcRefDstSnapshot); + withCount.setParentReference(originalRef); + srcChild = originalRef; + } + + if (srcParent instanceof INodeDirectoryWithSnapshot) { + ((INodeDirectoryWithSnapshot) srcParent).undoRename4ScrParent( + oldSrcChild.asReference(), srcChild, srcIIP.getLatestSnapshot()); + } else { + // srcParent is not an INodeDirectoryWithSnapshot, we only need to add + // the srcChild back + addLastINodeNoQuotaCheck(srcIIP, srcChild); } - addLastINodeNoQuotaCheck(srcIIP, srcChild); } if (undoRemoveDst) { // Rename failed - restore dst - addLastINodeNoQuotaCheck(dstIIP, removedDst); + if (dstParent instanceof INodeDirectoryWithSnapshot) { + ((INodeDirectoryWithSnapshot) dstParent).undoRename4DstParent( + removedDst, dstIIP.getLatestSnapshot()); + } else { + addLastINodeNoQuotaCheck(dstIIP, removedDst); + } } } NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " @@ -1270,7 +1311,7 @@ long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks, Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot, collectedBlocks); parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE), - -counts.get(Quota.DISKSPACE)); + -counts.get(Quota.DISKSPACE), true); removed = counts.get(Quota.NAMESPACE); } if (NameNode.stateChangeLog.isDebugEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 7d468f79a2..f32abd03e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -376,11 +376,11 @@ public abstract Content.CountsMap computeContentSummary( * Check and add namespace/diskspace consumed to itself and the ancestors. 
* @throws QuotaExceededException if quote is violated. */ - public void addSpaceConsumed(long nsDelta, long dsDelta) + public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify) throws QuotaExceededException { final INodeDirectory parentDir = getParent(); if (parentDir != null) { - parentDir.addSpaceConsumed(nsDelta, dsDelta); + parentDir.addSpaceConsumed(nsDelta, dsDelta, verify); } } @@ -403,7 +403,7 @@ public final boolean isQuotaSet() { /** * Count subtree {@link Quota#NAMESPACE} and {@link Quota#DISKSPACE} usages. */ - final Quota.Counts computeQuotaUsage() { + public final Quota.Counts computeQuotaUsage() { return computeQuotaUsage(new Quota.Counts(), true); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 58e5e664c6..d957804526 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -216,6 +216,7 @@ public void replaceChild(final INode oldChild, final INode newChild) { Preconditions.checkState(i >= 0); Preconditions.checkState(oldChild == children.get(i)); + // TODO: the first case may never be hit if (oldChild.isReference() && !newChild.isReference()) { final INode withCount = oldChild.asReference().getReferredINode(); withCount.asReference().setReferredINode(newChild); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java index 2d3bb6d397..21918f4eb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java @@ -140,21 +140,23 @@ long numItemsInTree() { } @Override - public final void addSpaceConsumed(final long nsDelta, final long dsDelta) - throws QuotaExceededException { + public final void addSpaceConsumed(final long nsDelta, final long dsDelta, + boolean verify) throws QuotaExceededException { if (isQuotaSet()) { // The following steps are important: // check quotas in this inode and all ancestors before changing counts // so that no change is made if there is any quota violation. - // (1) verify quota in this inode - verifyQuota(nsDelta, dsDelta); + // (1) verify quota in this inode + if (verify) { + verifyQuota(nsDelta, dsDelta); + } // (2) verify quota and then add count in ancestors - super.addSpaceConsumed(nsDelta, dsDelta); + super.addSpaceConsumed(nsDelta, dsDelta, verify); // (3) add count in this inode addSpaceConsumed2Cache(nsDelta, dsDelta); } else { - super.addSpaceConsumed(nsDelta, dsDelta); + super.addSpaceConsumed(nsDelta, dsDelta, verify); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index 7d9c675ffc..975cb55745 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -254,9 +254,9 @@ public final INode getSnapshotINode(Snapshot snapshot) { } @Override - public final void addSpaceConsumed(long nsDelta, long dsDelta - ) throws QuotaExceededException { - referred.addSpaceConsumed(nsDelta, dsDelta); + public final void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify) + throws QuotaExceededException { + referred.addSpaceConsumed(nsDelta, dsDelta, verify); } @Override diff 
--git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java index eab0fe2384..2d5622151e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java @@ -108,7 +108,7 @@ final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot, /** Add an {@link AbstractINodeDiff} for the given snapshot. */ final D addDiff(Snapshot latest, N currentINode) throws QuotaExceededException { - currentINode.addSpaceConsumed(1, 0); + currentINode.addSpaceConsumed(1, 0, true); return addLast(factory.createDiff(latest, currentINode)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java index 671790ee47..ce0141afc1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java @@ -325,7 +325,7 @@ Snapshot removeSnapshot(String snapshotName, INodeDirectory parent = getParent(); if (parent != null) { parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE), - -counts.get(Quota.DISKSPACE)); + -counts.get(Quota.DISKSPACE), true); } } catch(QuotaExceededException e) { LOG.error("BUG: removeSnapshot increases namespace usage.", e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java index c9fd409be9..8b8c524b21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java @@ -81,16 +81,16 @@ private final boolean replace(final ListType type, return true; } - private final boolean removeCreatedChild(final int c, final INode child) { - final List created = getList(ListType.CREATED); - if (created.get(c) == child) { - final INode removed = created.remove(c); - Preconditions.checkState(removed == child); + private final boolean removeChild(ListType type, final INode child) { + final List list = getList(type); + final int i = searchIndex(type, child.getLocalNameBytes()); + if (i >= 0 && list.get(i) == child) { + list.remove(i); return true; } return false; } - + /** clear the created list */ private Quota.Counts destroyCreatedList( final INodeDirectoryWithSnapshot currentINode, @@ -452,6 +452,18 @@ private boolean replaceChild(final ListType type, final INode oldChild, } return false; } + + /** Remove the given child in the created/deleted list, if there is any. */ + private boolean removeChild(final ListType type, final INode child) { + final List diffList = asList(); + for(int i = diffList.size() - 1; i >= 0; i--) { + final ChildrenDiff diff = diffList.get(i).diff; + if (diff.removeChild(type, child)) { + return true; + } + } + return false; + } } /** @@ -627,15 +639,11 @@ public boolean removeChildAndAllSnapshotCopies(INode child) { } // remove same child from the created list, if there is any. 
- final byte[] name = child.getLocalNameBytes(); final List diffList = diffs.asList(); for(int i = diffList.size() - 1; i >= 0; i--) { final ChildrenDiff diff = diffList.get(i).diff; - final int c = diff.searchIndex(ListType.CREATED, name); - if (c >= 0) { - if (diff.removeCreatedChild(c, child)) { - return true; - } + if (diff.removeChild(ListType.CREATED, child)) { + return true; } } return true; @@ -646,12 +654,65 @@ public void replaceChild(final INode oldChild, final INode newChild) { super.replaceChild(oldChild, newChild); diffs.replaceChild(ListType.CREATED, oldChild, newChild); } - - /** The child just has been removed, replace it with a reference. */ - public void replaceRemovedChild(INode oldChild, INode newChild) { - // the old child must be in the deleted list - Preconditions.checkState( - diffs.replaceChild(ListType.DELETED, oldChild, newChild)); + + /** + * This method is usually called by the undo section of rename. + * + * Before calling this function, in the rename operation, we replace the + * original src node (of the rename operation) with a reference node (WithName + * instance) in both the children list and a created list, delete the + * reference node from the children list, and add it to the corresponding + * deleted list. + * + * To undo the above operations, we have the following steps in particular: + * + *
+   * 1) remove the WithName node from the deleted list (if it exists) 
+   * 2) replace the WithName node in the created list with srcChild 
+   * 3) add srcChild back as a child of srcParent. Note that since we already
+   * added the node into the created list of a snapshot diff in step 2, we do
+   * not need to add srcChild to the created list of the latest snapshot.
+   * 
+ * + * We do not need to update quota usage because the old child is in the + * deleted list before. + * + * @param oldChild + * The reference node to be removed/replaced + * @param newChild + * The node to be added back + * @param latestSnapshot + * The latest snapshot. Note this may not be the last snapshot in the + * {@link #diffs}, since the src tree of the current rename operation + * may be the dst tree of a previous rename. + * @throws QuotaExceededException should not throw this exception + */ + public void undoRename4ScrParent(final INodeReference oldChild, + final INode newChild, Snapshot latestSnapshot) + throws QuotaExceededException { + diffs.removeChild(ListType.DELETED, oldChild); + diffs.replaceChild(ListType.CREATED, oldChild, newChild); + addChild(newChild, true, null); + } + + /** + * Undo the rename operation for the dst tree, i.e., if the rename operation + * (with OVERWRITE option) removes a file/dir from the dst tree, add it back + * and delete possible record in the deleted list. + */ + public void undoRename4DstParent(final INode deletedChild, + Snapshot latestSnapshot) throws QuotaExceededException { + boolean removeDeletedChild = diffs.removeChild(ListType.DELETED, + deletedChild); + final boolean added = addChild(deletedChild, true, removeDeletedChild ? 
null + : latestSnapshot); + // update quota usage if adding is successfully and the old child has not + // been stored in deleted list before + if (added && !removeDeletedChild) { + final Quota.Counts counts = deletedChild.computeQuotaUsage(); + addSpaceConsumed(counts.get(Quota.NAMESPACE), + counts.get(Quota.DISKSPACE), false); + } } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java index 6f9aed721f..1969ea052b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java @@ -21,6 +21,10 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; import java.io.File; import java.io.IOException; @@ -48,7 +52,10 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff; +import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; +import org.apache.hadoop.hdfs.util.Diff.ListType; +import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Assert; @@ -1159,4 +1166,224 @@ public void testRenameDirAndDeleteSnapshot_2() throws Exception { 
hdfs.deleteSnapshot(sdir1, "s1"); restartClusterAndCheckImage(); } + + /** + * Test the undo section of rename. Before the rename, we create the renamed + * file/dir before taking the snapshot. + */ + @Test + public void testRenameUndo() throws Exception { + final Path sdir1 = new Path("/dir1"); + final Path sdir2 = new Path("/dir2"); + hdfs.mkdirs(sdir1); + hdfs.mkdirs(sdir2); + final Path foo = new Path(sdir1, "foo"); + final Path bar = new Path(foo, "bar"); + DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED); + final Path dir2file = new Path(sdir2, "file"); + DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED); + + SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1"); + + INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory(); + INodeDirectory mockDir2 = spy(dir2); + doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(), + (Snapshot) anyObject()); + INodeDirectory root = fsdir.getINode4Write("/").asDirectory(); + root.replaceChild(dir2, mockDir2); + + final Path newfoo = new Path(sdir2, "foo"); + boolean result = hdfs.rename(foo, newfoo); + assertFalse(result); + + // check the current internal details + INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir + .getINode4Write(sdir1.toString()); + ReadOnlyList dir1Children = dir1Node.getChildrenList(null); + assertEquals(1, dir1Children.size()); + assertEquals(foo.getName(), dir1Children.get(0).getLocalName()); + List dir1Diffs = dir1Node.getDiffs().asList(); + assertEquals(1, dir1Diffs.size()); + assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName()); + + // after the undo of rename, both the created and deleted list of sdir1 + // should be empty + ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff(); + assertEquals(0, childrenDiff.getList(ListType.DELETED).size()); + assertEquals(0, childrenDiff.getList(ListType.CREATED).size()); + + INode fooNode = fsdir.getINode4Write(foo.toString()); + assertTrue(fooNode 
instanceof INodeDirectoryWithSnapshot); + List fooDiffs = ((INodeDirectoryWithSnapshot) fooNode) + .getDiffs().asList(); + assertEquals(1, fooDiffs.size()); + assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName()); + + final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo"); + INode fooNode_s1 = fsdir.getINode(foo_s1.toString()); + assertTrue(fooNode_s1 == fooNode); + + // check sdir2 + assertFalse(hdfs.exists(newfoo)); + INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()) + .asDirectory(); + assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot); + ReadOnlyList dir2Children = dir2Node.getChildrenList(null); + assertEquals(1, dir2Children.size()); + assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName()); + } + + /** + * Test the undo section of rename. Before the rename, we create the renamed + * file/dir after taking the snapshot. + */ + @Test + public void testRenameUndo_2() throws Exception { + final Path sdir1 = new Path("/dir1"); + final Path sdir2 = new Path("/dir2"); + hdfs.mkdirs(sdir1); + hdfs.mkdirs(sdir2); + final Path dir2file = new Path(sdir2, "file"); + DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED); + + SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1"); + + // create foo after taking snapshot + final Path foo = new Path(sdir1, "foo"); + final Path bar = new Path(foo, "bar"); + DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED); + + INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory(); + INodeDirectory mockDir2 = spy(dir2); + doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(), + (Snapshot) anyObject()); + INodeDirectory root = fsdir.getINode4Write("/").asDirectory(); + root.replaceChild(dir2, mockDir2); + + final Path newfoo = new Path(sdir2, "foo"); + boolean result = hdfs.rename(foo, newfoo); + assertFalse(result); + + // check the current internal details + INodeDirectorySnapshottable dir1Node = 
(INodeDirectorySnapshottable) fsdir + .getINode4Write(sdir1.toString()); + ReadOnlyList dir1Children = dir1Node.getChildrenList(null); + assertEquals(1, dir1Children.size()); + assertEquals(foo.getName(), dir1Children.get(0).getLocalName()); + List dir1Diffs = dir1Node.getDiffs().asList(); + assertEquals(1, dir1Diffs.size()); + assertEquals("s1", dir1Diffs.get(0).snapshot.getRoot().getLocalName()); + + // after the undo of rename, the created list of sdir1 should contain + // 1 element + ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff(); + assertEquals(0, childrenDiff.getList(ListType.DELETED).size()); + assertEquals(1, childrenDiff.getList(ListType.CREATED).size()); + + INode fooNode = fsdir.getINode4Write(foo.toString()); + assertTrue(fooNode instanceof INodeDirectory); + assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode); + + final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo"); + assertFalse(hdfs.exists(foo_s1)); + + // check sdir2 + assertFalse(hdfs.exists(newfoo)); + INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString()) + .asDirectory(); + assertFalse(dir2Node instanceof INodeDirectoryWithSnapshot); + ReadOnlyList dir2Children = dir2Node.getChildrenList(null); + assertEquals(1, dir2Children.size()); + assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName()); + } + + /** + * Test the undo section of the second-time rename. 
+ */ + @Test + public void testRenameUndo_3() throws Exception { + final Path sdir1 = new Path("/dir1"); + final Path sdir2 = new Path("/dir2"); + final Path sdir3 = new Path("/dir3"); + hdfs.mkdirs(sdir1); + hdfs.mkdirs(sdir2); + hdfs.mkdirs(sdir3); + final Path foo = new Path(sdir1, "foo"); + final Path bar = new Path(foo, "bar"); + DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED); + + SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1"); + SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2"); + + INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory(); + INodeDirectory mockDir3 = spy(dir3); + doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(), + (Snapshot) anyObject()); + INodeDirectory root = fsdir.getINode4Write("/").asDirectory(); + root.replaceChild(dir3, mockDir3); + + final Path foo_dir2 = new Path(sdir2, "foo"); + final Path foo_dir3 = new Path(sdir3, "foo"); + hdfs.rename(foo, foo_dir2); + boolean result = hdfs.rename(foo_dir2, foo_dir3); + assertFalse(result); + + // check the current internal details + INodeDirectorySnapshottable dir2Node = (INodeDirectorySnapshottable) fsdir + .getINode4Write(sdir2.toString()); + ReadOnlyList dir2Children = dir2Node.getChildrenList(null); + assertEquals(1, dir2Children.size()); + List dir2Diffs = dir2Node.getDiffs().asList(); + assertEquals(1, dir2Diffs.size()); + assertEquals("s2", Snapshot.getSnapshotName(dir2Diffs.get(0).snapshot)); + ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff(); + assertEquals(0, childrenDiff.getList(ListType.DELETED).size()); + assertEquals(1, childrenDiff.getList(ListType.CREATED).size()); + final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo"); + assertFalse(hdfs.exists(foo_s2)); + + INode fooNode = fsdir.getINode4Write(foo_dir2.toString()); + assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode); + assertTrue(fooNode instanceof INodeReference.DstReference); + List fooDiffs = 
((INodeDirectoryWithSnapshot) fooNode + .asDirectory()).getDiffs().asList(); + assertEquals(1, fooDiffs.size()); + assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName()); + + // create snapshot on sdir2 and rename again + hdfs.createSnapshot(sdir2, "s3"); + result = hdfs.rename(foo_dir2, foo_dir3); + assertFalse(result); + + // check internal details again + dir2Node = (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2 + .toString()); + fooNode = fsdir.getINode4Write(foo_dir2.toString()); + dir2Children = dir2Node.getChildrenList(null); + assertEquals(1, dir2Children.size()); + dir2Diffs = dir2Node.getDiffs().asList(); + assertEquals(2, dir2Diffs.size()); + assertEquals("s2", Snapshot.getSnapshotName(dir2Diffs.get(0).snapshot)); + assertEquals("s3", Snapshot.getSnapshotName(dir2Diffs.get(1).snapshot)); + + childrenDiff = dir2Diffs.get(0).getChildrenDiff(); + assertEquals(0, childrenDiff.getList(ListType.DELETED).size()); + assertEquals(1, childrenDiff.getList(ListType.CREATED).size()); + assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode); + + childrenDiff = dir2Diffs.get(1).getChildrenDiff(); + assertEquals(0, childrenDiff.getList(ListType.DELETED).size()); + assertEquals(0, childrenDiff.getList(ListType.CREATED).size()); + + final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo"); + assertFalse(hdfs.exists(foo_s2)); + assertTrue(hdfs.exists(foo_s3)); + + assertTrue(fooNode instanceof INodeReference.DstReference); + fooDiffs = ((INodeDirectoryWithSnapshot) fooNode.asDirectory()).getDiffs() + .asList(); + assertEquals(2, fooDiffs.size()); + assertEquals("s1", fooDiffs.get(0).snapshot.getRoot().getLocalName()); + assertEquals("s3", fooDiffs.get(1).snapshot.getRoot().getLocalName()); + } }