HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion. Contributed by Juan Yu and Jing Zhao.
parent e2d0ff364a
commit 6b441d227a
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -642,6 +642,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-4852. libhdfs documentation is out of date. (cnauroth)
 
+    HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion.
+    (Juan Yu and jing9 via jing9)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
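A minimal standalone sketch of the failure scenario, for context. The class name and cluster setup here are my own and are not part of this patch; it condenses the regression tests added to TestSnapshotDeletion below, and assumes the hadoop-hdfs test artifacts that provide MiniDFSCluster, DFSTestUtil, and NameNodeAdapter are on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

public class Hdfs6908Repro {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
        .numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      Path root = new Path("/");
      Path dir = new Path("/dir1");

      // file1 exists before snapshot s1; file2 is created between s1 and s2.
      DFSTestUtil.createFile(hdfs, new Path(dir, "file1"), 1024L, (short) 1, 0L);
      hdfs.allowSnapshot(root);
      hdfs.createSnapshot(root, "s1");
      DFSTestUtil.createFile(hdfs, new Path(dir, "file2"), 1024L, (short) 1, 0L);
      hdfs.createSnapshot(root, "s2");

      // Delete the directory, then the later snapshot: s2's directory diff
      // must be combined into s1's diff.
      hdfs.delete(dir, true);
      hdfs.deleteSnapshot(root, "s2");

      // Persist the namespace and reload it. With an incorrectly combined
      // diff, this save-and-restart round trip is where the problem surfaced.
      NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
      NameNodeAdapter.saveNamespace(cluster.getNameNode());
      cluster.restartNameNodes();
      System.out.println("NameNode restarted cleanly");
    } finally {
      cluster.shutdown();
    }
  }
}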
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -722,6 +722,8 @@ public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
         counts.add(lastDiff.diff.destroyCreatedList(currentINode,
             collectedBlocks, removedINodes));
       }
+      counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+          collectedBlocks, removedINodes, priorDeleted, countDiffChange));
     } else {
       // update prior
       prior = getDiffs().updatePrior(snapshot, prior);
@@ -739,6 +741,8 @@ public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
 
       counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior,
           currentINode, collectedBlocks, removedINodes, countDiffChange));
+      counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+          collectedBlocks, removedINodes, priorDeleted, countDiffChange));
 
       // check priorDiff again since it may be created during the diff deletion
       if (prior != Snapshot.NO_SNAPSHOT_ID) {
@@ -778,8 +782,6 @@ public Quota.Counts cleanDirectory(final INodeDirectory currentINode,
         }
       }
     }
-    counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
-        collectedBlocks, removedINodes, priorDeleted, countDiffChange));
 
     if (currentINode.isQuotaSet()) {
       currentINode.getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
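Net effect of the three hunks above: the single cleanSubtreeRecursively call at the tail of cleanDirectory is removed, and the call is instead made inside each branch, right after the directory's own diff records are destroyed or combined, so children are cleaned against that branch's prior snapshot. To make the diff combination itself concrete, here is a deliberately simplified toy model; the names and structure are hypothetical, not the HDFS classes (the real bookkeeping lives in DirectoryWithSnapshotFeature's diff lists). Each snapshot's directory diff records the children created and deleted after that snapshot, and deleting a snapshot folds its lists into the prior snapshot's diff, cancelling out any child whose create and delete now fall within the single combined interval:

import java.util.LinkedHashSet;
import java.util.Set;

// Toy model only: illustrates combining a deleted snapshot's directory diff
// into the prior snapshot's diff. Hypothetical simplification, not HDFS code.
public class ToyDirectoryDiff {
  // Children created/deleted after the snapshot this diff belongs to.
  final Set<String> created = new LinkedHashSet<>();
  final Set<String> deleted = new LinkedHashSet<>();

  // Fold the removed snapshot's diff (the later interval) into this prior
  // diff; return children that cancel out and can therefore be reclaimed.
  Set<String> combine(ToyDirectoryDiff posterior) {
    Set<String> reclaimable = new LinkedHashSet<>();
    for (String c : posterior.created) {
      if (!deleted.remove(c)) {   // a delete plus re-create cancels out;
        created.add(c);           // otherwise carry the create forward
      }
    }
    for (String d : posterior.deleted) {
      if (created.remove(d)) {
        reclaimable.add(d);       // created after the prior snapshot, gone
      } else {                    // after the removed one: nothing left
        deleted.add(d);           // references it
      }
    }
    return reclaimable;
  }

  public static void main(String[] args) {
    // Replay the testDeleteSnapshot2 scenario below: file2 is created
    // between s1 and s2 and deleted (with dir1) after s2.
    ToyDirectoryDiff s1 = new ToyDirectoryDiff();
    s1.created.add("file2");      // change after s1, before s2

    ToyDirectoryDiff s2 = new ToyDirectoryDiff();
    s2.deleted.add("file1");      // changes after s2
    s2.deleted.add("file2");

    Set<String> reclaimable = s1.combine(s2); // delete snapshot s2
    System.out.println("reclaimable = " + reclaimable); // [file2]
    System.out.println("s1.deleted  = " + s1.deleted);  // [file1]
  }
}

testDeleteSnapshot2 below asserts exactly this cancellation through inode ids: file2's inode is still reachable right after the recursive directory delete (snapshot s2 references it), and is gone once s2 is deleted.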
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -19,6 +19,7 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -559,6 +560,80 @@ public void testDeleteEarliestSnapshot2() throws Exception {
     }
   }
 
+  /**
+   * Delete a snapshot that is taken before a directory deletion,
+   * directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot1() throws Exception {
+    final Path root = new Path("/");
+
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.createSnapshot(root, "s2");
+
+    // delete file
+    hdfs.delete(file1, true);
+    hdfs.delete(file2, true);
+
+    // delete directory
+    assertTrue(hdfs.delete(dir, false));
+
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+
+    // restart NN
+    cluster.restartNameNodes();
+  }
+
+  /**
+   * Delete a snapshot that is taken before a directory deletion (recursively),
+   * directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot2() throws Exception {
+    final Path root = new Path("/");
+
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+    INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
+    long file2NodeId = file2Node.getId();
+
+    hdfs.createSnapshot(root, "s2");
+
+    // delete directory recursively
+    assertTrue(hdfs.delete(dir, true));
+    assertNotNull(fsdir.getInode(file2NodeId));
+
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+    assertTrue(fsdir.getInode(file2NodeId) == null);
+
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+
+    // restart NN
+    cluster.restartNameNodes();
+  }
+
   /**
    * Test deleting snapshots in a more complicated scenario: need to combine
    * snapshot diffs, but no need to handle diffs distributed in a dir tree