diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 851c64c490..d319196012 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -543,6 +543,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5944. LeaseManager:findLeaseWithPrefixPath can't handle path like /a/b/
     and cause SecondaryNameNode failed do checkpoint (Yunjiong Zhao via brandonli)
 
+    HDFS-5982. Need to update snapshot manager when applying editlog for deleting
+    a snapshottable directory. (jing9)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 415631e0f9..a6d75ad6b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1317,20 +1317,12 @@ boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
       if (!deleteAllowed(inodesInPath, src) ) {
         filesRemoved = -1;
       } else {
-        // Before removing the node, first check if the targetNode is for a
-        // snapshottable dir with snapshots, or its descendants have
-        // snapshottable dir with snapshots
-        final INode targetNode = inodesInPath.getLastINode();
         List<INodeDirectorySnapshottable> snapshottableDirs =
             new ArrayList<INodeDirectorySnapshottable>();
-        checkSnapshot(targetNode, snapshottableDirs);
+        checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
         filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
             removedINodes, now);
-        if (snapshottableDirs.size() > 0) {
-          // There are some snapshottable directories without snapshots to be
-          // deleted. Need to update the SnapshotManager.
-          namesystem.removeSnapshottableDirs(snapshottableDirs);
-        }
+        namesystem.removeSnapshottableDirs(snapshottableDirs);
       }
     } finally {
       writeUnlock();
@@ -1392,18 +1384,25 @@ boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException {
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
    * @throws SnapshotAccessControlException if path is in RO snapshot
-   */ 
+   */
   void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
-      QuotaExceededException, SnapshotAccessControlException {
+      QuotaExceededException, SnapshotAccessControlException, IOException {
     assert hasWriteLock();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<INode>();
 
     final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
         normalizePath(src), false);
-    final long filesRemoved = deleteAllowed(inodesInPath, src) ?
-        unprotectedDelete(inodesInPath, collectedBlocks,
-            removedINodes, mtime) : -1;
+    long filesRemoved = -1;
+    if (deleteAllowed(inodesInPath, src)) {
+      List<INodeDirectorySnapshottable> snapshottableDirs =
+          new ArrayList<INodeDirectorySnapshottable>();
+      checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
+      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
+          removedINodes, mtime);
+      namesystem.removeSnapshottableDirs(snapshottableDirs);
+    }
+
     if (filesRemoved >= 0) {
       getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
           removedINodes);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index bd7a4c3856..a570573ee2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -128,7 +129,42 @@ public void testDeleteDirectoryWithSnapshot() throws Exception {
     exception.expectMessage(error);
     hdfs.delete(sub, true);
   }
-  
+
+  /**
+   * Test applying editlog of operation which deletes a snapshottable directory
+   * without snapshots. The snapshottable dir list in snapshot manager should be
+   * updated.
+   */
+  @Test (timeout=300000)
+  public void testApplyEditLogForDeletion() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar1 = new Path(foo, "bar1");
+    final Path bar2 = new Path(foo, "bar2");
+    hdfs.mkdirs(bar1);
+    hdfs.mkdirs(bar2);
+
+    // allow snapshots on bar1 and bar2
+    hdfs.allowSnapshot(bar1);
+    hdfs.allowSnapshot(bar2);
+    assertEquals(2, cluster.getNamesystem().getSnapshotManager()
+        .getNumSnapshottableDirs());
+    assertEquals(2, cluster.getNamesystem().getSnapshotManager()
+        .getSnapshottableDirs().length);
+
+    // delete /foo
+    hdfs.delete(foo, true);
+    cluster.restartNameNode(0);
+    // the snapshottable dir list in snapshot manager should be empty
+    assertEquals(0, cluster.getNamesystem().getSnapshotManager()
+        .getNumSnapshottableDirs());
+    assertEquals(0, cluster.getNamesystem().getSnapshotManager()
+        .getSnapshottableDirs().length);
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(0);
+  }
+
   /**
    * Deleting directory with snapshottable descendant with snapshots must fail.
    */