diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index 6c6929fef0..a120261899 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -225,3 +225,6 @@ Branch-2802 Snapshot (Unreleased)
   created directory to an INodeDirectoryWithSnapshot. (Jing Zhao via szetszwo)
 
   HDFS-4611. Update FSImage for INodeReference. (szetszwo)
+
+  HDFS-4647. Rename should call setLocalName after an inode is removed from
+  snapshots. (Arpit Agarwal via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 49993a3250..4f348cfe7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -559,8 +559,24 @@ public class FSDirectory implements Closeable {
     verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes());
 
     boolean added = false;
-    final INode srcChild = srcIIP.getLastINode();
+    INode srcChild = srcIIP.getLastINode();
     final byte[] srcChildName = srcChild.getLocalNameBytes();
+    final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot(
+        srcIIP.getLatestSnapshot());
+    final boolean srcChildIsReference = srcChild.isReference();
+
+    // check srcChild for reference
+    final INodeReference.WithCount withCount;
+    if (srcChildIsReference || isSrcInSnapshot) {
+      final INodeReference.WithName withName = srcIIP.getINode(-2).asDirectory()
+          .replaceChild4ReferenceWithName(srcChild);
+      withCount = (INodeReference.WithCount)withName.getReferredINode();
+      srcChild = withName;
+      srcIIP.setLastINode(srcChild);
+    } else {
+      withCount = null;
+    }
+
     try {
       // remove src
       final long removedSrc = removeLastINode(srcIIP);
@@ -570,11 +586,23 @@
             + " because the source can not be removed");
         return false;
       }
-      //TODO: setLocalName breaks created/deleted lists
-      srcChild.setLocalName(dstIIP.getLastLocalName());
+
+      srcChild = srcIIP.getLastINode();
+      final byte[] dstChildName = dstIIP.getLastLocalName();
+      final INode toDst;
+      if (withCount == null) {
+        srcChild.setLocalName(dstChildName);
+        toDst = srcChild;
+      } else {
+        withCount.getReferredINode().setLocalName(dstChildName);
+        final INodeReference ref = new INodeReference(dstIIP.getINode(-2), withCount);
+        withCount.setParentReference(ref);
+        withCount.incrementReferenceCount();
+        toDst = ref;
+      }
 
       // add src to the destination
-      added = addLastINodeNoQuotaCheck(dstIIP, srcChild);
+      added = addLastINodeNoQuotaCheck(dstIIP, toDst);
       if (added) {
         if (NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
@@ -587,17 +615,20 @@
           // update moved leases with new filename
           getFSNamesystem().unprotectedChangeLease(src, dst);
 
-          if (srcIIP.getLatestSnapshot() != null) {
-            createReferences4Rename(srcChild, srcChildName,
-                (INodeDirectoryWithSnapshot)srcParent.asDirectory(),
-                dstParent.asDirectory());
-          }
           return true;
         }
       } finally {
         if (!added) {
           // put it back
-          srcChild.setLocalName(srcChildName);
+          if (withCount == null) {
+            srcChild.setLocalName(srcChildName);
+          } else if (!srcChildIsReference) { // src must be in snapshot
+            final INodeDirectoryWithSnapshot srcParent =
+                (INodeDirectoryWithSnapshot) srcIIP.getINode(-2).asDirectory();
+            final INode originalChild = withCount.getReferredINode();
+            srcParent.replaceRemovedChild(srcChild, originalChild);
+            srcChild = originalChild;
+          }
           addLastINodeNoQuotaCheck(srcIIP, srcChild);
         }
       }
@@ -730,6 +761,24 @@ public class FSDirectory implements Closeable {
     // Ensure dst has quota to accommodate rename
     verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes());
 
+    INode srcChild = srcIIP.getLastINode();
+    final byte[] srcChildName = srcChild.getLocalNameBytes();
+    final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot(
+        srcIIP.getLatestSnapshot());
+    final boolean srcChildIsReference = srcChild.isReference();
+
+    // check srcChild for reference
+    final INodeReference.WithCount withCount;
+    if (srcChildIsReference || isSrcInSnapshot) {
+      final INodeReference.WithName withName = srcIIP.getINode(-2).asDirectory()
+          .replaceChild4ReferenceWithName(srcChild);
+      withCount = (INodeReference.WithCount)withName.getReferredINode();
+      srcChild = withName;
+      srcIIP.setLastINode(srcChild);
+    } else {
+      withCount = null;
+    }
+
     boolean undoRemoveSrc = true;
     final long removedSrc = removeLastINode(srcIIP);
     if (removedSrc == -1) {
@@ -739,9 +788,7 @@
           + error);
       throw new IOException(error);
     }
-    final INode srcChild = srcIIP.getLastINode();
-    final byte[] srcChildName = srcChild.getLocalNameBytes();
-
+
     boolean undoRemoveDst = false;
     INode removedDst = null;
     try {
@@ -751,11 +798,24 @@
           undoRemoveDst = true;
         }
       }
-      //TODO: setLocalName breaks created/deleted lists
-      srcChild.setLocalName(dstIIP.getLastLocalName());
+
+      srcChild = srcIIP.getLastINode();
+
+      final byte[] dstChildName = dstIIP.getLastLocalName();
+      final INode toDst;
+      if (withCount == null) {
+        srcChild.setLocalName(dstChildName);
+        toDst = srcChild;
+      } else {
+        withCount.getReferredINode().setLocalName(dstChildName);
+        final INodeReference ref = new INodeReference(dstIIP.getINode(-2), withCount);
+        withCount.setParentReference(ref);
+        withCount.incrementReferenceCount();
+        toDst = ref;
+      }
 
       // add src as dst to complete rename
-      if (addLastINodeNoQuotaCheck(dstIIP, srcChild)) {
+      if (addLastINodeNoQuotaCheck(dstIIP, toDst)) {
         undoRemoveSrc = false;
         if (NameNode.stateChangeLog.isDebugEnabled()) {
           NameNode.stateChangeLog.debug(
@@ -780,12 +840,6 @@
         getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
       }
 
-      if (srcIIP.getLatestSnapshot() != null) {
-        createReferences4Rename(srcChild, srcChildName,
-            (INodeDirectoryWithSnapshot)srcParent.asDirectory(),
-            dstParent.asDirectory());
-      }
-
       if (snapshottableDirs.size() > 0) {
         // There are snapshottable directories (without snapshots) to be
        // deleted. Need to update the SnapshotManager.
@@ -796,7 +850,16 @@
     } finally {
       if (undoRemoveSrc) {
         // Rename failed - restore src
-        srcChild.setLocalName(srcChildName);
+        srcChild = srcIIP.getLastINode();
+        if (withCount == null) {
+          srcChild.setLocalName(srcChildName);
+        } else if (!srcChildIsReference) { // src must be in snapshot
+          final INodeDirectoryWithSnapshot srcParent
+              = (INodeDirectoryWithSnapshot)srcIIP.getINode(-2).asDirectory();
+          final INode originalChild = withCount.getReferredINode();
+          srcParent.replaceRemovedChild(srcChild, originalChild);
+          srcChild = originalChild;
+        }
         addLastINodeNoQuotaCheck(srcIIP, srcChild);
       }
       if (undoRemoveDst) {
@@ -808,19 +871,7 @@
           + "failed to rename " + src + " to " + dst);
       throw new IOException("rename from " + src + " to " + dst + " failed.");
     }
-
-  /** The renamed inode is also in a snapshot, create references */
-  private static void createReferences4Rename(final INode srcChild,
-      final byte[] srcChildName, final INodeDirectoryWithSnapshot srcParent,
-      final INodeDirectory dstParent) {
-    final INodeReference.WithCount ref;
-    if (srcChild.isReference()) {
-      ref = (INodeReference.WithCount)srcChild.asReference().getReferredINode();
-    } else {
-      ref = dstParent.asDirectory().replaceChild4Reference(srcChild);
-    }
-    srcParent.replaceRemovedChild4Reference(srcChild, ref, srcChildName);
-  }
+
   /**
    * Set file replication
    *
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 6c8e36e82a..e3b5b90cef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -215,7 +215,7 @@ public class INodeDirectory extends INodeWithAdditionalFields {
     Preconditions.checkState(i >= 0);
     Preconditions.checkState(oldChild == children.get(i));
 
-    if (oldChild.isReference()) {
+    if (oldChild.isReference() && !newChild.isReference()) {
      final INode withCount = oldChild.asReference().getReferredINode();
      withCount.asReference().setReferredINode(newChild);
     } else {
@@ -234,6 +234,23 @@ public class INodeDirectory extends INodeWithAdditionalFields {
     return withCount;
   }
 
+  INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild) {
+    if (oldChild instanceof INodeReference.WithName) {
+      return (INodeReference.WithName)oldChild;
+    }
+
+    final INodeReference.WithCount withCount;
+    if (oldChild.isReference()) {
+      withCount = (INodeReference.WithCount) oldChild.asReference().getReferredINode();
+    } else {
+      withCount = new INodeReference.WithCount(null, oldChild);
+    }
+    final INodeReference.WithName ref = new INodeReference.WithName(
+        this, withCount, oldChild.getLocalNameBytes());
+    replaceChild(oldChild, ref);
+    return ref;
+  }
+
   private void replaceChildFile(final INodeFile oldChild, final INodeFile newChild) {
     replaceChild(oldChild, newChild);
     oldChild.clear();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
index 57795e293f..85f2fcefab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
@@ -610,12 +610,15 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
     final INodeReference.WithName ref = new INodeReference.WithName(this,
         newChild, childName);
     newChild.incrementReferenceCount();
+    replaceRemovedChild(oldChild, ref);
+    return ref;
+  }
 
-    diffs.replaceChild(ListType.CREATED, oldChild, ref);
+  /** The child just has been removed, replace it with a reference. */
+  public void replaceRemovedChild(INode oldChild, INode newChild) {
     // the old child must be in the deleted list
     Preconditions.checkState(
-        diffs.replaceChild(ListType.DELETED, oldChild, ref));
-    return ref;
+        diffs.replaceChild(ListType.DELETED, oldChild, newChild));
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 3a0cbc5832..495b41f20c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -17,19 +17,28 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
 /** Testing rename with snapshots. */
@@ -37,9 +46,9 @@ public class TestRenameWithSnapshots {
   {
     SnapshotTestHelper.disableLogs();
   }
-
+  private static final Log LOG = LogFactory.getLog(TestRenameWithSnapshots.class);
+
   private static final long SEED = 0;
-
   private static final short REPL = 3;
   private static final long BLOCKSIZE = 1024;
 
@@ -49,9 +58,19 @@
   private static FSDirectory fsdir;
   private static DistributedFileSystem hdfs;
 
-  @BeforeClass
-  public static void setUp() throws Exception {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
+  static private final Path dir = new Path("/testRenameWithSnapshots");
+  static private final Path sub1 = new Path(dir, "sub1");
+  static private final Path file1 = new Path(sub1, "file1");
+  static private final Path file2 = new Path(sub1, "file2");
+  static private final Path file3 = new Path(sub1, "file3");
+  static private final String snap1 = "snap1";
+  static private final String snap2 = "snap2";
+
+
+  @Before
+  public void setUp() throws Exception {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).format(true)
+        .build();
     cluster.waitActive();
 
     fsn = cluster.getNamesystem();
@@ -60,8 +79,8 @@
     hdfs = cluster.getFileSystem();
   }
 
-  @AfterClass
-  public static void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
@@ -104,4 +123,98 @@
     hdfs.delete(bar, false);
     Assert.assertEquals(1, withCount.getReferenceCount());
   }
+
+  private static boolean existsInDiffReport(List<DiffReportEntry> entries,
+      DiffType type, String relativePath) {
+    for (DiffReportEntry entry : entries) {
+      System.out.println("DiffEntry is:" + entry.getType() + "\""
+          + new String(entry.getRelativePath()) + "\"");
+      if ((entry.getType() == type)
+          && ((new String(entry.getRelativePath())).compareTo(relativePath) == 0)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Rename a file under a snapshottable directory, file does not exist
+   * in a snapshot.
+   */
+  @Test (timeout=60000)
+  public void testRenameFileNotInSnapshot() throws Exception {
+    hdfs.mkdirs(sub1);
+    hdfs.allowSnapshot(sub1.toString());
+    hdfs.createSnapshot(sub1, snap1);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
+    hdfs.rename(file1, file2);
+
+    // Query the diff report and make sure it looks as expected.
+    SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
+    List<DiffReportEntry> entries = diffReport.getDiffList();
+    assertTrue(entries.size() == 2);
+    assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
+    assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName()));
+  }
+
+  /**
+   * Rename a file under a snapshottable directory, file exists
+   * in a snapshot.
+   */
+  @Test (timeout=60000)
+  public void testRenameFileInSnapshot() throws Exception {
+    hdfs.mkdirs(sub1);
+    hdfs.allowSnapshot(sub1.toString());
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
+    hdfs.createSnapshot(sub1, snap1);
+    hdfs.rename(file1, file2);
+
+    // Query the diff report and make sure it looks as expected.
+    SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
+    System.out.println("DiffList is " + diffReport.toString());
+    List<DiffReportEntry> entries = diffReport.getDiffList();
+    assertTrue(entries.size() == 3);
+    assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
+    assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, file1.getName()));
+  }
+
+  @Test (timeout=60000)
+  public void testRenameTwiceInSnapshot() throws Exception {
+    hdfs.mkdirs(sub1);
+    hdfs.allowSnapshot(sub1.toString());
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
+    hdfs.createSnapshot(sub1, snap1);
+    hdfs.rename(file1, file2);
+
+    hdfs.createSnapshot(sub1, snap2);
+    hdfs.rename(file2, file3);
+
+    SnapshotDiffReport diffReport;
+
+    // Query the diff report and make sure it looks as expected.
+    diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, snap2);
+    LOG.info("DiffList is " + diffReport.toString());
+    List<DiffReportEntry> entries = diffReport.getDiffList();
+    assertTrue(entries.size() == 3);
+    assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
+    assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, file1.getName()));
+
+    diffReport = hdfs.getSnapshotDiffReport(sub1, snap2, "");
+    LOG.info("DiffList is " + diffReport.toString());
+    entries = diffReport.getDiffList();
+    assertTrue(entries.size() == 3);
+    assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
+    assertTrue(existsInDiffReport(entries, DiffType.CREATE, file3.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, file2.getName()));
+
+    diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
+    LOG.info("DiffList is " + diffReport.toString());
+    entries = diffReport.getDiffList();
+    assertTrue(entries.size() == 3);
+    assertTrue(existsInDiffReport(entries, DiffType.MODIFY, ""));
+    assertTrue(existsInDiffReport(entries, DiffType.CREATE, file3.getName()));
+    assertTrue(existsInDiffReport(entries, DiffType.DELETE, file1.getName()));
+  }
 }