From 8d95784bf1a7fcf473fd5a9fed0140521a92a968 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Thu, 7 Mar 2013 22:34:51 +0000
Subject: [PATCH] HDFS-4557. Fix FSDirectory#delete when INode#cleanSubtree
 returns 0. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1454138 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES.HDFS-2802.txt              |  3 +++
 .../hdfs/server/namenode/FSDirectory.java          | 16 ++++++++--------
 .../hdfs/server/namenode/FSNamesystem.java         | 22 ++++++++--------------
 .../hdfs/server/namenode/INodeFile.java            |  7 ++++++-
 .../snapshot/AbstractINodeDiffList.java            |  2 +-
 ...NodeFileUnderConstructionWithSnapshot.java      |  2 +-
 .../snapshot/INodeFileWithSnapshot.java            | 12 +-----------
 7 files changed, 28 insertions(+), 36 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index 4ab43108fe..7988633a33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -185,3 +185,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4545. With snapshots, FSDirectory.unprotectedSetReplication(..) always
   changes file replication but it may or may not changes block replication.
   (szetszwo)
+
+  HDFS-4557. Fix FSDirectory#delete when INode#cleanSubtree returns 0.
+  (Jing Zhao via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 71e49dce4e..7c21117af7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -757,7 +757,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
       getFSNamesystem().unprotectedChangeLease(src, dst);
 
       // Collect the blocks and remove the lease for previous dst
-      int filesDeleted = 0;
+      int filesDeleted = -1;
       if (removedDst != null) {
         INode rmdst = removedDst;
         removedDst = null;
@@ -772,7 +772,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
           // deleted. Need to update the SnapshotManager.
           namesystem.removeSnapshottableDirs(snapshottableDirs);
         }
-        return filesDeleted >0;
+        return filesDeleted >= 0;
       }
     } finally {
       if (removedSrc != null) {
@@ -1017,7 +1017,7 @@ boolean delete(String src, BlocksMapUpdateInfo collectedBlocks)
       final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
           normalizePath(src), false);
       if (!deleteAllowed(inodesInPath, src) ) {
-        filesRemoved = 0;
+        filesRemoved = -1;
       } else {
         // Before removing the node, first check if the targetNode is for a
         // snapshottable dir with snapshots, or its descendants have
@@ -1041,10 +1041,10 @@ boolean delete(String src, BlocksMapUpdateInfo collectedBlocks)
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logDelete(src, now);
-    if (filesRemoved <= 0) {
+    if (filesRemoved < 0) {
       return false;
     }
+    fsImage.getEditLog().logDelete(src, now);
     incrDeletedFileCount(filesRemoved);
     // Blocks will be deleted later by the caller of this method
     getFSNamesystem().removePathAndBlocks(src, null);
@@ -1107,8 +1107,8 @@ void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
     final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
         normalizePath(src), false);
     final int filesRemoved = deleteAllowed(inodesInPath, src)?
-        unprotectedDelete(inodesInPath, collectedBlocks, mtime): 0;
-    if (filesRemoved > 0) {
+        unprotectedDelete(inodesInPath, collectedBlocks, mtime): -1;
+    if (filesRemoved >= 0) {
       getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
     }
   }
@@ -1128,7 +1128,7 @@ int unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
     // check if target node exists
     INode targetNode = iip.getLastINode();
    if (targetNode == null) {
-      return 0;
+      return -1;
     }
 
     // record modification
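
Note on the FSDirectory changes above: before this patch a return value of 0 was overloaded to mean both "delete not allowed" and "no inode removed", and delete() even wrote the edit-log record before performing that check. With snapshots, INode#cleanSubtree can legitimately return 0 because the target is retained in an earlier snapshot instead of being destroyed, so the patch reserves -1 for failure and treats any count >= 0 as success, logging the delete only after the check. A minimal self-contained sketch of the convention (hypothetical names, not the HDFS code):

    // Why "0 inodes removed" must count as success once snapshots exist.
    class DeleteResultDemo {
      /** @return -1 if the delete is disallowed, otherwise the number of
       *          inodes destroyed now, which may legitimately be 0. */
      static int delete(boolean allowed, boolean retainedInSnapshot) {
        if (!allowed) {
          return -1; // failure: nothing is logged or changed
        }
        // A snapshotted file is only recorded in a snapshot diff: the
        // delete succeeds, yet zero inodes are destroyed right now.
        return retainedInSnapshot ? 0 : 1;
      }

      public static void main(String[] args) {
        int r = delete(true, true); // allowed, but file lives on in a snapshot
        // The old test (r <= 0) reports failure here; the new test is r < 0.
        System.out.println("succeeded = " + (r >= 0) + ", removedNow = " + r);
      }
    }
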
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 39b213363f..0d0af0523d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5664,8 +5664,7 @@ public void allowSnapshot(String path) throws SafeModeException, IOException {
     //TODO: need to update metrics in corresponding SnapshotManager method
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "allowSnapshot", path, null, null);
+      logAuditEvent(true, "allowSnapshot", path, null, null);
     }
   }
 
@@ -5692,8 +5691,7 @@ public void disallowSnapshot(String path)
     //TODO: need to update metrics in corresponding SnapshotManager method
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "disallowSnapshot", path, null, null);
+      logAuditEvent(true, "disallowSnapshot", path, null, null);
     }
   }
 
@@ -5729,8 +5727,8 @@ public void createSnapshot(String snapshotRoot, String snapshotName)
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
          + Path.SEPARATOR + snapshotName);
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "createSnapshot", snapshotRoot, rootPath.toString(), null);
+      logAuditEvent(true, "createSnapshot", snapshotRoot, rootPath.toString(),
+          null);
     }
   }
 
@@ -5768,8 +5766,7 @@ public void renameSnapshot(String path, String snapshotOldName,
           + "/" + snapshotOldName);
       Path newSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
           + "/" + snapshotNewName);
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "renameSnapshot", oldSnapshotRoot.toString(),
+      logAuditEvent(true, "renameSnapshot", oldSnapshotRoot.toString(),
           newSnapshotRoot.toString(), null);
     }
   }
@@ -5795,8 +5792,7 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
       readUnlock();
     }
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "listSnapshottableDirectory", null, null, null);
+      logAuditEvent(true, "listSnapshottableDirectory", null, null, null);
     }
     return status;
   }
@@ -5828,8 +5824,7 @@ public SnapshotDiffReport getSnapshotDiffReport(String path,
     }
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "computeSnapshotDiff", null, null, null);
+      logAuditEvent(true, "computeSnapshotDiff", null, null, null);
     }
     return diffs != null ? diffs.generateReport() : new SnapshotDiffReport(
         path, fromSnapshot, toSnapshot,
@@ -5874,8 +5869,7 @@ public void deleteSnapshot(String snapshotRoot, String snapshotName)
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
           + Path.SEPARATOR + snapshotName);
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "deleteSnapshot", rootPath.toString(), null, null);
+      logAuditEvent(true, "deleteSnapshot", rootPath.toString(), null, null);
     }
   }
 
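
The FSNamesystem hunks above are a cleanup riding along with the fix: every snapshot operation drops the six-argument logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(), ...) form for an overload that takes a leading success flag and resolves the caller and remote address itself. A rough sketch of that overload pattern, with an illustrative signature and body rather than the actual FSNamesystem helper:

    // Success-flag audit overload; illustrative only.
    class AuditDemo {
      static void logAuditEvent(boolean succeeded, String cmd, String src,
          String dst, Object status) {
        // Resolves user and remote address internally instead of having
        // every call site pass getCurrentUser()/getRemoteIp() explicitly.
        System.out.printf("allowed=%b cmd=%s src=%s dst=%s%n",
            succeeded, cmd, src, dst);
      }

      public static void main(String[] args) {
        logAuditEvent(true, "createSnapshot", "/data",
            "/data/.snapshot/s0", null);
      }
    }
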
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index dbe6207683..d4b639d2c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -303,6 +303,7 @@ public int cleanSubtree(final Snapshot snapshot, Snapshot prior,
 
   @Override
   public int destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks) {
+    int total = 1;
     if (blocks != null && collectedBlocks != null) {
       for (BlockInfo blk : blocks) {
         collectedBlocks.addDeleteBlock(blk);
@@ -311,7 +312,11 @@ public int destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks) {
     }
     setBlocks(null);
     clearReferences();
-    return 1;
+
+    if (this instanceof FileWithSnapshot) {
+      total += ((FileWithSnapshot) this).getDiffs().clear();
+    }
+    return total;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 9f9d4bfe7d..1524f8a932 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -50,7 +50,7 @@ public final List<D> asList() {
   }
 
   /** Get the size of the list and then clear it. */
-  int clear() {
+  public int clear() {
     final int n = diffs.size();
     diffs.clear();
     return n;
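
Making clear() public lets INodeFile#destroyAndCollectBlocks above fold the number of discarded snapshot diffs into its return count, replacing the destroyAndCollectBlocks override that the INodeFileWithSnapshot diff below removes. The "size, then clear" counting idiom as a self-contained sketch with hypothetical types:

    import java.util.ArrayList;
    import java.util.List;

    // Destroy an object and report how many bookkeeping entries
    // (snapshot diffs, in the real code) are discarded along with it.
    class DiffListDemo {
      private final List<String> diffs = new ArrayList<>();

      /** Get the size of the list and then clear it. */
      public int clear() {
        final int n = diffs.size();
        diffs.clear();
        return n;
      }

      int destroyAndCount() {
        int total = 1;     // the inode itself
        total += clear();  // plus every diff destroyed with it
        return total;
      }

      public static void main(String[] args) {
        DiffListDemo d = new DiffListDemo();
        d.diffs.add("s0");
        d.diffs.add("s1");
        System.out.println(d.destroyAndCount()); // prints 3
      }
    }
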
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
index a69726883e..5a9b959928 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
@@ -121,7 +121,7 @@ public int cleanSubtree(final Snapshot snapshot, Snapshot prior,
     } else { // delete a snapshot
       return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks);
     }
-    return 1;
+    return prior == null ? 1 : 0;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
index f2bfb6491e..9112094da8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
@@ -22,8 +22,6 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 
-import com.google.common.base.Preconditions;
-
 /**
  * Represent an {@link INodeFile} that is snapshotted.
  * Note that snapshot files are represented by {@link INodeFileSnapshot}.
@@ -94,15 +92,7 @@ public int cleanSubtree(final Snapshot snapshot, Snapshot prior,
     } else { // delete a snapshot
       return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks);
     }
-    return 1;
-  }
-
-  @Override
-  public int destroyAndCollectBlocks(
-      final BlocksMapUpdateInfo collectedBlocks) {
-    Preconditions.checkState(this.isCurrentFileDeleted);
-    final int n = diffs.clear();
-    return n + super.destroyAndCollectBlocks(collectedBlocks);
+    return prior == null ? 1 : 0;
   }
 
   @Override
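
Both cleanSubtree implementations now distinguish the two outcomes of deleting the current file: with no prior snapshot the inode is really destroyed (count 1), while with a prior snapshot the file is only recorded in that snapshot's diff and nothing is destroyed yet (count 0). Under the fixed FSDirectory#delete, that 0 is a successful no-op rather than an error. The contract reduced to its return value, with hypothetical names:

    class CleanSubtreeDemo {
      static int cleanCurrentFile(Object priorSnapshot) {
        // 0 = kept alive in the prior snapshot's diff; success, not an error.
        return priorSnapshot == null ? 1 : 0;
      }

      public static void main(String[] args) {
        System.out.println(cleanCurrentFile(null)); // 1: inode destroyed now
        System.out.println(cleanCurrentFile("s0")); // 0: retained in snapshot
      }
    }
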