From 92e0416ced279a910616985bf11fa3f8b1b1de9b Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze <szetszwo@apache.org>
Date: Tue, 23 Apr 2013 00:00:47 +0000
Subject: [PATCH] HDFS-4727. Update inodeMap after deleting
 files/directories/snapshots. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1470756 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES.HDFS-2802.txt         |  3 +
 .../hdfs/server/namenode/FSDirectory.java     | 62 +++++++++----------
 .../hdfs/server/namenode/FSEditLogLoader.java |  7 ++-
 .../hdfs/server/namenode/FSImageFormat.java   |  6 +-
 .../hdfs/server/namenode/FSNamesystem.java    | 22 +++++--
 .../hadoop/hdfs/server/namenode/INode.java    | 23 ++++---
 .../hdfs/server/namenode/INodeDirectory.java  | 22 +++----
 .../hdfs/server/namenode/INodeFile.java       |  9 ++-
 .../hdfs/server/namenode/INodeReference.java  | 12 ++--
 .../hdfs/server/namenode/INodeSymlink.java    | 12 ++--
 .../namenode/snapshot/AbstractINodeDiff.java  |  6 +-
 .../snapshot/AbstractINodeDiffList.java       |  6 +-
 .../namenode/snapshot/FileWithSnapshot.java   | 19 +++---
 .../snapshot/INodeDirectorySnapshottable.java |  6 +-
 .../snapshot/INodeDirectoryWithSnapshot.java  | 53 +++++++++-------
 ...NodeFileUnderConstructionWithSnapshot.java | 10 ++-
 .../snapshot/INodeFileWithSnapshot.java       | 10 ++-
 .../namenode/snapshot/SnapshotManager.java    |  6 +-
 18 files changed, 182 insertions(+), 112 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index c84831d75b..9683241ada 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -263,3 +263,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4726. Fix test failures after merging the INodeId-INode mapping
   from trunk. (Jing Zhao via szetszwo)
+
+  HDFS-4727. Update inodeMap after deleting files/directories/snapshots.
+  (Jing Zhao via szetszwo)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 17c4a95163..1f214d7858 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -928,9 +928,12 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
       if (removedDst != null) {
         undoRemoveDst = false;
         BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+        List<INode> removedINodes = new ArrayList<INode>();
         filesDeleted = removedDst.cleanSubtree(null,
-            dstIIP.getLatestSnapshot(), collectedBlocks).get(Quota.NAMESPACE);
-        getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
+            dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes).get(
+            Quota.NAMESPACE);
+        getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
+            removedINodes);
       }
 
       if (snapshottableDirs.size() > 0) {
@@ -1210,10 +1213,11 @@ void unprotectedConcat(String target, String [] srcs, long timestamp)
    *
    * @param src Path of a directory to delete
    * @param collectedBlocks Blocks under the deleted directory
+   * @param removedINodes INodes that should be removed from {@link #inodeMap}
    * @return true on successful deletion; else false
    */
-  boolean delete(String src, BlocksMapUpdateInfo collectedBlocks)
-      throws IOException {
+  boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
+      List<INode> removedINodes) throws IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
     }
@@ -1234,7 +1238,8 @@ boolean delete(String src, BlocksMapUpdateInfo collectedBlocks)
       List<INodeDirectorySnapshottable> snapshottableDirs =
           new ArrayList<INodeDirectorySnapshottable>();
       checkSnapshot(targetNode, snapshottableDirs);
-      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks, now);
+      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
+          removedINodes, now);
       if (snapshottableDirs.size() > 0) {
         // There are some snapshottable directories without snapshots to be
         // deleted. Need to update the SnapshotManager.
@@ -1249,8 +1254,8 @@ boolean delete(String src, BlocksMapUpdateInfo collectedBlocks)
     }
     fsImage.getEditLog().logDelete(src, now);
     incrDeletedFileCount(filesRemoved);
-    // Blocks will be deleted later by the caller of this method
-    getFSNamesystem().removePathAndBlocks(src, null);
+    // Blocks/INodes will be handled later by the caller of this method
+    getFSNamesystem().removePathAndBlocks(src, null, null);
     return true;
   }
 
@@ -1306,13 +1311,16 @@ void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
       QuotaExceededException, SnapshotAccessControlException {
     assert hasWriteLock();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+    List<INode> removedINodes = new ArrayList<INode>();
 
     final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
         normalizePath(src), false);
-    final long filesRemoved = deleteAllowed(inodesInPath, src)?
-        unprotectedDelete(inodesInPath, collectedBlocks, mtime): -1;
+    final long filesRemoved = deleteAllowed(inodesInPath, src) ?
+        unprotectedDelete(inodesInPath, collectedBlocks,
+            removedINodes, mtime) : -1;
     if (filesRemoved >= 0) {
-      getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
+      getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
+          removedINodes);
     }
   }
 
@@ -1321,11 +1329,12 @@ void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
    * Update the count at each ancestor directory with quota
    * @param iip the inodes resolved from the path
    * @param collectedBlocks blocks collected from the deleted path
+   * @param removedINodes inodes that should be removed from {@link #inodeMap}
    * @param mtime the time the inode is removed
    * @return the number of inodes deleted; 0 if no inodes are deleted.
    */
   long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
-      long mtime) throws QuotaExceededException {
+      List<INode> removedINodes, long mtime) throws QuotaExceededException {
     assert hasWriteLock();
 
     // check if target node exists
@@ -1354,11 +1363,10 @@ long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
 
     // collect block
     if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
-      targetNode.destroyAndCollectBlocks(collectedBlocks);
-      remvoedAllFromInodesFromMap(targetNode);
+      targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
    } else {
       Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot,
-          collectedBlocks);
+          collectedBlocks, removedINodes);
       parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
           -counts.get(Quota.DISKSPACE), true);
       removed = counts.get(Quota.NAMESPACE);
@@ -2184,7 +2192,6 @@ private long removeLastINode(final INodesInPath iip)
     if (!parent.removeChild(last, latestSnapshot)) {
       return -1;
     }
-    inodeMap.remove(last);
     if (parent != last.getParent()) {
       // parent is changed
       inodeMap.put(last.getParent());
@@ -2237,21 +2244,12 @@ final void addToInodeMapUnprotected(INode inode) {
   }
 
   /* This method is always called with writeLock held */
-  private final void removeFromInodeMap(INode inode) {
-    inodeMap.remove(inode);
-  }
-
-  /** Remove all the inodes under given inode from the map */
-  private void remvoedAllFromInodesFromMap(INode inode) {
-    removeFromInodeMap(inode);
-    if (!inode.isDirectory()) {
-      return;
+  final void removeFromInodeMap(List<INode> inodes) {
+    if (inodes != null) {
+      for (INode inode : inodes) {
+        inodeMap.remove(inode);
+      }
     }
-    INodeDirectory dir = (INodeDirectory) inode;
-    for (INode child : dir.getChildrenList(null)) {
-      remvoedAllFromInodesFromMap(child);
-    }
-    dir.clearChildren();
   }
 
   /**
@@ -2584,7 +2582,8 @@ INode recordModification(Snapshot latest) throws QuotaExceededException {
     }
 
     @Override
-    public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks) {
+    public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
+        List<INode> removedINodes) {
       // Nothing to do
     }
 
@@ -2605,7 +2604,8 @@ public CountsMap computeContentSummary(CountsMap countsMap) {
 
     @Override
     public Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
-        BlocksMapUpdateInfo collectedBlocks) throws QuotaExceededException {
+        BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes)
+        throws QuotaExceededException {
       return null;
     }
   };

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 2f31b3eb14..04273e2ba9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -22,8 +22,10 @@
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumMap;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -520,11 +522,14 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
     case OP_DELETE_SNAPSHOT: {
       DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
       BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+      List<INode> removedINodes = new ArrayList<INode>();
       fsNamesys.getSnapshotManager().deleteSnapshot(
           deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName,
-          collectedBlocks);
+          collectedBlocks, removedINodes);
       fsNamesys.removeBlocks(collectedBlocks);
       collectedBlocks.clear();
+      fsNamesys.dir.removeFromInodeMap(removedINodes);
+      removedINodes.clear();
       break;
     }
     case OP_RENAME_SNAPSHOT: {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 62dbf8390c..2081f3a890 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -561,7 +561,11 @@ public FSDirectory getFSDirectoryInLoading() {
   public INode loadINodeWithLocalName(boolean isSnapshotINode,
       DataInput in) throws IOException {
     final byte[] localName = FSImageSerialization.readLocalName(in);
-    return loadINode(localName, isSnapshotINode, in);
+    INode inode = loadINode(localName, isSnapshotINode, in);
+    if (LayoutVersion.supports(Feature.ADD_INODE_ID, getLayoutVersion())) {
+      namesystem.dir.addToInodeMapUnprotected(inode);
+    }
+    return inode;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 33ad7039aa..6a530d665e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2878,6 +2878,7 @@ private boolean deleteInternal(String src, boolean recursive,
       throws AccessControlException, SafeModeException, UnresolvedLinkException,
       IOException {
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+    List<INode> removedINodes = new ArrayList<INode>();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
@@ -2895,7 +2896,7 @@ private boolean deleteInternal(String src, boolean recursive,
         checkPermission(pc, src, false, null, FsAction.WRITE, null,
             FsAction.ALL);
       }
       // Unlink the target directory from directory tree
-      if (!dir.delete(src, collectedBlocks)) {
+      if (!dir.delete(src, collectedBlocks, removedINodes)) {
         return false;
       }
     } finally {
@@ -2904,6 +2905,8 @@ private boolean deleteInternal(String src, boolean recursive,
     getEditLog().logSync();
     removeBlocks(collectedBlocks); // Incremental deletion of blocks
     collectedBlocks.clear();
+    dir.removeFromInodeMap(removedINodes);
+    removedINodes.clear();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* Namesystem.delete: " + src
           +" is removed");
@@ -2940,13 +2943,21 @@ void removeBlocks(BlocksMapUpdateInfo blocks) {
   }
 
   /**
-   * Remove leases and blocks related to a given path
+   * Remove leases, inodes and blocks related to a given path
    * @param src The given path
    * @param blocks Containing the list of blocks to be deleted from blocksMap
+   * @param removedINodes Containing the list of inodes to be removed from
+   *                      inodesMap
    */
-  void removePathAndBlocks(String src, BlocksMapUpdateInfo blocks) {
+  void removePathAndBlocks(String src, BlocksMapUpdateInfo blocks,
+      List<INode> removedINodes) {
     assert hasWriteLock();
     leaseManager.removeLeaseWithPrefixPath(src);
+    // remove inodes from inodesMap
+    if (removedINodes != null) {
+      dir.removeFromInodeMap(removedINodes);
+      removedINodes.clear();
+    }
     if (blocks == null) {
       return;
     }
@@ -6007,13 +6018,16 @@ void deleteSnapshot(String snapshotRoot, String snapshotName)
       checkOwner(pc, snapshotRoot);
 
       BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
+      List<INode> removedINodes = new ArrayList<INode>();
       dir.writeLock();
       try {
         snapshotManager.deleteSnapshot(snapshotRoot, snapshotName,
-            collectedBlocks);
+            collectedBlocks, removedINodes);
+        dir.removeFromInodeMap(removedINodes);
      } finally {
        dir.writeUnlock();
      }
+      removedINodes.clear();
      this.removeBlocks(collectedBlocks);
      collectedBlocks.clear();
      getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index fe163703d3..28da7bf117 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -329,24 +329,31 @@ public INodeSymlink asSymlink() {
    * @param collectedBlocks
    *          blocks collected from the descents for further block
    *          deletion/update will be added to the given map.
+   * @param removedINodes
+   *          INodes collected from the descents for further cleaning up of
+   *          inodeMap
    * @return quota usage delta when deleting a snapshot
    */
   public abstract Quota.Counts cleanSubtree(final Snapshot snapshot,
-      Snapshot prior, BlocksMapUpdateInfo collectedBlocks)
-      throws QuotaExceededException;
+      Snapshot prior, BlocksMapUpdateInfo collectedBlocks,
+      List<INode> removedINodes) throws QuotaExceededException;
 
   /**
    * Destroy self and clear everything! If the INode is a file, this method
-   * collects its blocks for further block deletion. If the INode is a
-   * directory, the method goes down the subtree and collects blocks from the
-   * descents, and clears its parent/children references as well. The method
+   * collects its blocks for further block deletion.  If the INode is a
+   * directory, the method goes down the subtree and collects blocks from the
+   * descents, and clears its parent/children references as well.  The method
    * also clears the diff list if the INode contains snapshot diff list.
    *
-   * @param collectedBlocks blocks collected from the descents for further block
-   *          deletion/update will be added to this map.
+   * @param collectedBlocks
+   *          blocks collected from the descents for further block
+   *          deletion/update will be added to this map.
+   * @param removedINodes
+   *          INodes collected from the descents for further cleaning up of
+   *          inodeMap
    */
   public abstract void destroyAndCollectBlocks(
-      BlocksMapUpdateInfo collectedBlocks);
+      BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes);
 
   /** Compute {@link ContentSummary}. */
   public final ContentSummary computeContentSummary() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 8809c40d6e..4448daf6c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -488,8 +488,8 @@ public void clear() {
    * recursively down the subtree.
    */
   public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
-      Snapshot prior, final BlocksMapUpdateInfo collectedBlocks)
-      throws QuotaExceededException {
+      Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes) throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     // in case of deletion snapshot, since this call happens after we modify
     // the diff list, the snapshot to be deleted has been combined or renamed
@@ -499,36 +499,36 @@ public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
     Snapshot s = snapshot != null && prior != null ? prior : snapshot;
     for (INode child : getChildrenList(s)) {
       Quota.Counts childCounts = child.cleanSubtree(snapshot, prior,
-          collectedBlocks);
+          collectedBlocks, removedINodes);
       counts.add(childCounts);
     }
     return counts;
   }
 
   @Override
-  public void destroyAndCollectBlocks(
-      final BlocksMapUpdateInfo collectedBlocks) {
+  public void destroyAndCollectBlocks(final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes) {
     for (INode child : getChildrenList(null)) {
-      child.destroyAndCollectBlocks(collectedBlocks);
+      child.destroyAndCollectBlocks(collectedBlocks, removedINodes);
     }
-    // TODO: Need to update the cleanSubtree/destroy methods to clean inode map
     clear();
+    removedINodes.add(this);
   }
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks)
-      throws QuotaExceededException {
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes) throws QuotaExceededException {
     if (prior == null && snapshot == null) {
       // destroy the whole subtree and collect blocks that should be deleted
       Quota.Counts counts = Quota.Counts.newInstance();
       this.computeQuotaUsage(counts, true);
-      destroyAndCollectBlocks(collectedBlocks);
+      destroyAndCollectBlocks(collectedBlocks, removedINodes);
       return counts;
     } else {
       // process recursively down the subtree
       Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior,
-          collectedBlocks);
+          collectedBlocks, removedINodes);
       if (isQuotaSet()) {
         ((INodeDirectoryWithQuota) this).addSpaceConsumed2Cache(
             -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index cbe55830f4..7e0f093808 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -20,6 +20,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -291,19 +292,20 @@ public void setBlocks(BlockInfo[] blocks) {
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks)
+      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
       throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     if (snapshot == null && prior == null) {
       // this only happens when deleting the current file
       computeQuotaUsage(counts, false);
-      destroyAndCollectBlocks(collectedBlocks);
+      destroyAndCollectBlocks(collectedBlocks, removedINodes);
     }
     return counts;
   }
 
   @Override
-  public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks) {
+  public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes) {
     if (blocks != null && collectedBlocks != null) {
       for (BlockInfo blk : blocks) {
         collectedBlocks.addDeleteBlock(blk);
@@ -312,6 +314,7 @@ public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks) {
     }
     setBlocks(null);
     clear();
+    removedINodes.add(this);
 
     if (this instanceof FileWithSnapshot) {
       ((FileWithSnapshot) this).getDiffs().clear();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 975cb55745..744336a6e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.PrintWriter;
+import java.util.List;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -222,14 +223,17 @@ final INode recordModification(Snapshot latest) throws QuotaExceededException {
 
   @Override
   public final Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
-      BlocksMapUpdateInfo collectedBlocks) throws QuotaExceededException {
-    return referred.cleanSubtree(snapshot, prior, collectedBlocks);
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      throws QuotaExceededException {
+    return referred.cleanSubtree(snapshot, prior, collectedBlocks,
+        removedINodes);
   }
 
   @Override
-  public final void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks) {
+  public final void destroyAndCollectBlocks(
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
     if (removeReference(this) <= 0) {
-      referred.destroyAndCollectBlocks(collectedBlocks);
+      referred.destroyAndCollectBlocks(collectedBlocks, removedINodes);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index 0106ba8955..b58691e619 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.PrintWriter;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -73,14 +74,17 @@ public byte[] getSymlink() {
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks) {
+      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+    if (snapshot == null && prior == null) {
+      destroyAndCollectBlocks(collectedBlocks, removedINodes);
+    }
     return Quota.Counts.newInstance(1, 0);
   }
 
   @Override
-  public void destroyAndCollectBlocks(
-      final BlocksMapUpdateInfo collectedBlocks) {
-    // do nothing
+  public void destroyAndCollectBlocks(final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes) {
+    removedINodes.add(this);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java
index 335b58b6e2..a5a1562538 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java
@@ -19,6 +19,7 @@
 
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -128,7 +129,8 @@ N getSnapshotINode() {
 
   /** Combine the posterior diff and collect blocks for deletion. */
   abstract Quota.Counts combinePosteriorAndCollectBlocks(final N currentINode,
-      final D posterior, final BlocksMapUpdateInfo collectedBlocks);
+      final D posterior, final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes);
 
   /**
    * Delete and clear self.
@@ -137,7 +139,7 @@ abstract Quota.Counts combinePosteriorAndCollectBlocks(final N currentINode,
    * @return quota usage delta
    */
   abstract Quota.Counts destroyDiffAndCollectBlocks(final N currentINode,
-      final BlocksMapUpdateInfo collectedBlocks);
+      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes);
 
   @Override
   public String toString() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 2d5622151e..9ca445d925 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -68,7 +68,7 @@ public void clear() {
    */
   final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
       Snapshot prior, final N currentINode,
-      final BlocksMapUpdateInfo collectedBlocks) {
+      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
     int snapshotIndex = Collections.binarySearch(diffs, snapshot);
 
     Quota.Counts counts = Quota.Counts.newInstance();
@@ -81,7 +81,7 @@ final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
         removed = diffs.remove(0);
         counts.add(Quota.NAMESPACE, 1);
         counts.add(removed.destroyDiffAndCollectBlocks(currentINode,
-            collectedBlocks));
+            collectedBlocks, removedINodes));
       }
     } else if (snapshotIndex > 0) {
       final AbstractINodeDiff<N, D> previous = diffs.get(snapshotIndex - 1);
@@ -97,7 +97,7 @@ final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
           removed.snapshotINode.clear();
         }
         counts.add(previous.combinePosteriorAndCollectBlocks(
-            currentINode, removed, collectedBlocks));
+            currentINode, removed, collectedBlocks, removedINodes));
         previous.setPosterior(removed.getPosterior());
         removed.setPosterior(null);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
index 9a624f1726..c89f1ab835 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
@@ -19,11 +19,13 @@
 
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -61,7 +63,7 @@ public long getFileSize() {
 
     private static Quota.Counts updateQuotaAndCollectBlocks(
         INodeFile currentINode, FileDiff removed,
-        BlocksMapUpdateInfo collectedBlocks) {
+        BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
       FileWithSnapshot sFile = (FileWithSnapshot) currentINode;
       long oldDiskspace = currentINode.diskspaceConsumed();
       if (removed.snapshotINode != null) {
@@ -72,7 +74,7 @@ private static Quota.Counts updateQuotaAndCollectBlocks(
         }
       }
 
-      Util.collectBlocksAndClear(sFile, collectedBlocks);
+      Util.collectBlocksAndClear(sFile, collectedBlocks, removedINodes);
 
       long dsDelta = oldDiskspace - currentINode.diskspaceConsumed();
       return Quota.Counts.newInstance(0, dsDelta);
@@ -80,9 +82,10 @@ private static Quota.Counts updateQuotaAndCollectBlocks(
 
     @Override
     Quota.Counts combinePosteriorAndCollectBlocks(INodeFile currentINode,
-        FileDiff posterior, BlocksMapUpdateInfo collectedBlocks) {
+        FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
+        final List<INode> removedINodes) {
       return updateQuotaAndCollectBlocks(currentINode, posterior,
-          collectedBlocks);
+          collectedBlocks, removedINodes);
     }
 
     @Override
@@ -107,9 +110,9 @@ void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
 
     @Override
     Quota.Counts destroyDiffAndCollectBlocks(INodeFile currentINode,
-        BlocksMapUpdateInfo collectedBlocks) {
+        BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
       return updateQuotaAndCollectBlocks(currentINode, this,
-          collectedBlocks);
+          collectedBlocks, removedINodes);
     }
   }
 
@@ -171,11 +174,11 @@ public static short getBlockReplication(final FileWithSnapshot file) {
     * any inode, collect them and update the block list.
     */
    static void collectBlocksAndClear(final FileWithSnapshot file,
-       final BlocksMapUpdateInfo info) {
+       final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
      // check if everything is deleted.
      if (file.isCurrentFileDeleted()
          && file.getDiffs().asList().isEmpty()) {
-       file.asINodeFile().destroyAndCollectBlocks(info);
+       file.asINodeFile().destroyAndCollectBlocks(info, removedINodes);
        return;
      }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
index 184a5bd80d..76ea8174bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
@@ -311,7 +312,8 @@ Snapshot addSnapshot(int id, String name)
    * exists.
    */
   Snapshot removeSnapshot(String snapshotName,
-      BlocksMapUpdateInfo collectedBlocks) throws SnapshotException {
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      throws SnapshotException {
     final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
     if (i < 0) {
       throw new SnapshotException("Cannot delete snapshot " + snapshotName
@@ -321,7 +322,8 @@ Snapshot removeSnapshot(String snapshotName,
       final Snapshot snapshot = snapshotsByNames.remove(i);
       Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot);
       try {
-        Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks);
+        Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks,
+            removedINodes);
         INodeDirectory parent = getParent();
         if (parent != null) {
           parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
index 2827e44901..c0b83387a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
@@ -94,12 +94,13 @@ private final boolean removeChild(ListType type, final INode child) {
     /** clear the created list */
     private Quota.Counts destroyCreatedList(
         final INodeDirectoryWithSnapshot currentINode,
-        final BlocksMapUpdateInfo collectedBlocks) {
+        final BlocksMapUpdateInfo collectedBlocks,
+        final List<INode> removedINodes) {
       Quota.Counts counts = Quota.Counts.newInstance();
       final List<INode> createdList = getList(ListType.CREATED);
       for (INode c : createdList) {
         c.computeQuotaUsage(counts, true);
-        c.destroyAndCollectBlocks(collectedBlocks);
+        c.destroyAndCollectBlocks(collectedBlocks, removedINodes);
         // c should be contained in the children list, remove it
         currentINode.removeChild(c);
       }
@@ -110,13 +111,13 @@ private Quota.Counts destroyCreatedList(
     /** clear the deleted list */
     private Quota.Counts destroyDeletedList(
         final BlocksMapUpdateInfo collectedBlocks,
-        final List<INodeReference> refNodes) {
+        final List<INode> removedINodes, final List<INodeReference> refNodes) {
       Quota.Counts counts = Quota.Counts.newInstance();
       final List<INode> deletedList = getList(ListType.DELETED);
       for (INode d : deletedList) {
         if (INodeReference.tryRemoveReference(d) <= 0) {
           d.computeQuotaUsage(counts, false);
-          d.destroyAndCollectBlocks(collectedBlocks);
+          d.destroyAndCollectBlocks(collectedBlocks, removedINodes);
         } else {
           refNodes.add(d.asReference());
         }
@@ -262,7 +263,8 @@ boolean isSnapshotRoot() {
     @Override
     Quota.Counts combinePosteriorAndCollectBlocks(
         final INodeDirectory currentDir, final DirectoryDiff posterior,
-        final BlocksMapUpdateInfo collectedBlocks) {
+        final BlocksMapUpdateInfo collectedBlocks,
+        final List<INode> removedINodes) {
       final Quota.Counts counts = Quota.Counts.newInstance();
       diff.combinePosterior(posterior.diff, new Diff.Processor<INode>() {
         /** Collect blocks for deleted files. */
@@ -271,7 +273,7 @@ public void process(INode inode) {
           if (inode != null) {
             if (INodeReference.tryRemoveReference(inode) <= 0) {
               inode.computeQuotaUsage(counts, false);
-              inode.destroyAndCollectBlocks(collectedBlocks);
+              inode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
             } else {
               // if the node is a reference node, we should continue the
               // snapshot deletion process
@@ -284,7 +286,7 @@ public void process(INode inode) {
                 // and it can be identified by the cleanSubtree since we call
                 // recordModification before the rename.
                 counts.add(inode.cleanSubtree(posterior.snapshot, null,
-                    collectedBlocks));
+                    collectedBlocks, removedINodes));
               } catch (QuotaExceededException e) {
                 String error = "should not have QuotaExceededException while deleting snapshot";
                 LOG.error(error, e);
@@ -384,11 +386,12 @@ void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
 
     @Override
     Quota.Counts destroyDiffAndCollectBlocks(INodeDirectory currentINode,
-        BlocksMapUpdateInfo collectedBlocks) {
+        BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
       // this diff has been deleted
       Quota.Counts counts = Quota.Counts.newInstance();
       List<INodeReference> refNodes = new ArrayList<INodeReference>();
-      counts.add(diff.destroyDeletedList(collectedBlocks, refNodes));
+      counts.add(diff.destroyDeletedList(collectedBlocks, removedINodes,
+          refNodes));
       for (INodeReference ref : refNodes) {
         // if the node is a reference node, we should continue the
         // snapshot deletion process
@@ -401,7 +404,8 @@ Quota.Counts destroyDiffAndCollectBlocks(INodeDirectory currentINode,
           // snapshot to be deleted. If the ref node presents the dst node of a
          // rename operation, we can identify the corresponding prior snapshot
          // when we come into the subtree of the ref node.
-          counts.add(ref.cleanSubtree(this.snapshot, null, collectedBlocks));
+          counts.add(ref.cleanSubtree(this.snapshot, null, collectedBlocks,
+              removedINodes));
         } catch (QuotaExceededException e) {
           String error =
               "should not have QuotaExceededException while deleting snapshot "
@@ -755,7 +759,7 @@ public int getSnapshotDirectory(
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks)
+      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
       throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     if (snapshot == null) { // delete the current directory
@@ -763,13 +767,14 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
       // delete everything in created list
       DirectoryDiff lastDiff = diffs.getLast();
       if (lastDiff != null) {
-        counts.add(lastDiff.diff.destroyCreatedList(this, collectedBlocks));
+        counts.add(lastDiff.diff.destroyCreatedList(this, collectedBlocks,
+            removedINodes));
       }
     } else { // update prior
       prior = getDiffs().updatePrior(snapshot, prior);
       counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, this,
-          collectedBlocks));
+          collectedBlocks, removedINodes));
       if (prior != null) {
         DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
         if (priorDiff != null) {
@@ -780,7 +785,8 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
           // cleanSubtreeRecursively call.
           for (INode cNode : priorDiff.getChildrenDiff().getList(
               ListType.CREATED)) {
-            counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks));
+            counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks,
+                removedINodes));
           }
           // When a directory is moved from the deleted list of the posterior
           // diff to the deleted list of this diff, we need to destroy its
@@ -792,12 +798,13 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
           for (INode dNode : priorDiff.getChildrenDiff().getList(
               ListType.DELETED)) {
             counts.add(cleanDeletedINode(dNode, snapshot, prior,
-                collectedBlocks));
+                collectedBlocks, removedINodes));
           }
         }
       }
     }
-    counts.add(cleanSubtreeRecursively(snapshot, prior, collectedBlocks));
+    counts.add(cleanSubtreeRecursively(snapshot, prior, collectedBlocks,
+        removedINodes));
 
     if (isQuotaSet()) {
       this.addSpaceConsumed2Cache(-counts.get(Quota.NAMESPACE),
@@ -816,7 +823,8 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
    * @return Quota usage update.
    */
   private Quota.Counts cleanDeletedINode(INode inode, Snapshot post,
-      Snapshot prior, final BlocksMapUpdateInfo collectedBlocks) {
+      Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes) {
     Quota.Counts counts = Quota.Counts.newInstance();
     Deque<INode> queue = new ArrayDeque<INode>();
     queue.addLast(inode);
@@ -825,7 +833,7 @@ private Quota.Counts cleanDeletedINode(INode inode, Snapshot post,
       if (topNode instanceof FileWithSnapshot) {
        FileWithSnapshot fs = (FileWithSnapshot) topNode;
        counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior,
-            topNode.asFile(), collectedBlocks));
+            topNode.asFile(), collectedBlocks, removedINodes));
      } else if (topNode.isDirectory()) {
        INodeDirectory dir = topNode.asDirectory();
        if (dir instanceof INodeDirectoryWithSnapshot) {
@@ -835,7 +843,7 @@ private Quota.Counts cleanDeletedINode(INode inode, Snapshot post,
           DirectoryDiff priorDiff = sdir.getDiffs().getDiff(prior);
           if (priorDiff != null) {
             counts.add(priorDiff.diff.destroyCreatedList(sdir,
-                collectedBlocks));
+                collectedBlocks, removedINodes));
           }
         }
         for (INode child : dir.getChildrenList(prior)) {
@@ -848,13 +856,14 @@ private Quota.Counts cleanDeletedINode(INode inode, Snapshot post,
 
   @Override
   public void destroyAndCollectBlocks(
-      final BlocksMapUpdateInfo collectedBlocks) {
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes) {
     // destroy its diff list
     for (DirectoryDiff diff : diffs) {
-      diff.destroyDiffAndCollectBlocks(this, collectedBlocks);
+      diff.destroyDiffAndCollectBlocks(this, collectedBlocks, removedINodes);
     }
     diffs.clear();
-    super.destroyAndCollectBlocks(collectedBlocks);
+    super.destroyAndCollectBlocks(collectedBlocks, removedINodes);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
index 5c8ffa3e50..4aaa618b1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import java.util.List;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
@@ -113,16 +116,17 @@ public FileDiffList getDiffs() {
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks)
+      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
       throws QuotaExceededException {
     if (snapshot == null) { // delete the current file
       recordModification(prior);
       isCurrentFileDeleted = true;
-      Util.collectBlocksAndClear(this, collectedBlocks);
+      Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
       return Quota.Counts.newInstance();
     } else { // delete a snapshot
       prior = getDiffs().updatePrior(snapshot, prior);
-      return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks);
+      return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
+          removedINodes);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
index 133f1c6c0d..caf730cbe5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import java.util.List;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
@@ -84,16 +87,17 @@ public FileDiffList getDiffs() {
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks)
+      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
       throws QuotaExceededException {
     if (snapshot == null) { // delete the current file
       recordModification(prior);
       isCurrentFileDeleted = true;
-      Util.collectBlocksAndClear(this, collectedBlocks);
+      Util.collectBlocksAndClear(this, collectedBlocks, removedINodes);
       return Quota.Counts.newInstance();
     } else { // delete a snapshot
       prior = getDiffs().updatePrior(snapshot, prior);
-      return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks);
+      return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
+          removedINodes);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 5532321875..106cc10056 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
@@ -162,7 +163,8 @@ public String createSnapshot(final String path, String snapshotName
    * @throws IOException
    */
   public void deleteSnapshot(final String path, final String snapshotName,
-      BlocksMapUpdateInfo collectedBlocks) throws IOException {
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      throws IOException {
     // parse the path, and check if the path is a snapshot path
     INodesInPath inodesInPath = fsdir.getINodesInPath4Write(path.toString());
     // transfer the inode for path to an INodeDirectorySnapshottable.
@@ -171,7 +173,7 @@ public void deleteSnapshot(final String path, final String snapshotName,
     INodeDirectorySnapshottable dir = INodeDirectorySnapshottable.valueOf(
         inodesInPath.getLastINode(), path.toString());
 
-    dir.removeSnapshot(snapshotName, collectedBlocks);
+    dir.removeSnapshot(snapshotName, collectedBlocks, removedINodes);
     numSnapshots.getAndDecrement();
   }