HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion. Contributed by Juan Yu and Jing Zhao.

Jing Zhao 2014-08-27 10:26:22 -07:00
parent e2d0ff364a
commit 6b441d227a
3 changed files with 85 additions and 5 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -642,6 +642,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-4852. libhdfs documentation is out of date. (cnauroth)
+    HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion.
+    (Juan Yu and jing9 via jing9)
 Release 2.5.1 - UNRELEASED
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java

@@ -722,6 +722,8 @@ public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
         counts.add(lastDiff.diff.destroyCreatedList(currentINode,
             collectedBlocks, removedINodes));
       }
+      counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+          collectedBlocks, removedINodes, priorDeleted, countDiffChange));
     } else {
       // update prior
       prior = getDiffs().updatePrior(snapshot, prior);
@@ -739,7 +741,9 @@ public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
       counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior,
           currentINode, collectedBlocks, removedINodes, countDiffChange));
+      counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+          collectedBlocks, removedINodes, priorDeleted, countDiffChange));
       // check priorDiff again since it may be created during the diff deletion
       if (prior != Snapshot.NO_SNAPSHOT_ID) {
         DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
@@ -778,9 +782,7 @@ public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
           }
         }
       }
-    counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
-        collectedBlocks, removedINodes, priorDeleted, countDiffChange));
     if (currentINode.isQuotaSet()) {
       currentINode.getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
          -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
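A standalone sketch of the scenario the new tests below exercise, assuming a MiniDFSCluster test setup with the hadoop-hdfs test classes (DFSTestUtil, NameNodeAdapter) on the classpath; the class name, file sizes, and replication value here are illustrative, not part of the commit. The sequence is: snapshot the root, delete the snapshotted directory, delete the newer snapshot so its directory diff must be combined into the older one, then save and reload the namespace.

// Illustrative sketch, not part of this commit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

public class SnapshotDeletionRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();

      // /dir1/file1 exists in snapshot s1; /dir1/file2 only in snapshot s2
      Path root = new Path("/");
      Path dir = new Path("/dir1");
      DFSTestUtil.createFile(hdfs, new Path(dir, "file1"), 1024L, (short) 1, 0L);
      hdfs.allowSnapshot(root);
      hdfs.createSnapshot(root, "s1");
      DFSTestUtil.createFile(hdfs, new Path(dir, "file2"), 1024L, (short) 1, 0L);
      hdfs.createSnapshot(root, "s2");

      // Delete the directory, then the newer snapshot: the s2 directory diff
      // has to be combined into s1.
      hdfs.delete(dir, true);
      hdfs.deleteSnapshot(root, "s2");

      // Save the namespace and restart the NameNode; with an incorrectly
      // combined diff this round trip is where problems would surface.
      NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
      NameNodeAdapter.saveNamespace(cluster.getNameNode());
      cluster.restartNameNodes();
    } finally {
      cluster.shutdown();
    }
  }
}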

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java

@@ -19,6 +19,7 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -558,7 +559,81 @@ public void testDeleteEarliestSnapshot2() throws Exception {
           + toDeleteFileInSnapshot.toString(), e);
     }
   }
+  /**
+   * Delete a snapshot that is taken before a directory deletion,
+   * directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot1() throws Exception {
+    final Path root = new Path("/");
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+    hdfs.createSnapshot(root, "s2");
+    // delete file
+    hdfs.delete(file1, true);
+    hdfs.delete(file2, true);
+    // delete directory
+    assertTrue(hdfs.delete(dir, false));
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+    // restart NN
+    cluster.restartNameNodes();
+  }
+  /**
+   * Delete a snapshot that is taken before a directory deletion (recursively),
+   * directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot2() throws Exception {
+    final Path root = new Path("/");
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+    INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
+    long file2NodeId = file2Node.getId();
+    hdfs.createSnapshot(root, "s2");
+    // delete directory recursively
+    assertTrue(hdfs.delete(dir, true));
+    assertNotNull(fsdir.getInode(file2NodeId));
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+    assertTrue(fsdir.getInode(file2NodeId) == null);
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+    // restart NN
+    cluster.restartNameNodes();
+  }
   /**
    * Test deleting snapshots in a more complicated scenario: need to combine
    * snapshot diffs, but no need to handle diffs distributed in a dir tree