diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 23b0381b3d..b2d52977b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1056,6 +1056,9 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4850. Fix OfflineImageViewer to work on fsimages with empty files or
     snapshots. (jing9)
 
+    HDFS-4877. Snapshot: fix the scenario where a directory is renamed under
+    its prior descendant. (jing9)
+
 Release 2.0.5-alpha - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 573a9e8718..7911fe4acd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -678,7 +678,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
           Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
       newSrcCounts.subtract(oldSrcCounts);
       srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
-          newSrcCounts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID);
+          newSrcCounts.get(Quota.DISKSPACE), false);
     }
 
     return true;
@@ -944,8 +944,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
         BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
         List<INode> removedINodes = new ArrayList<INode>();
         filesDeleted = removedDst.cleanSubtree(null,
-            dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes).get(
-            Quota.NAMESPACE);
+            dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true)
+            .get(Quota.NAMESPACE);
         getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
             removedINodes);
       }
@@ -963,7 +963,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
           Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
       newSrcCounts.subtract(oldSrcCounts);
       srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
-          newSrcCounts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID);
+          newSrcCounts.get(Quota.DISKSPACE), false);
     }
 
     return filesDeleted >= 0;
@@ -1408,9 +1408,9 @@ long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
       targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
     } else {
       Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot,
-          collectedBlocks, removedINodes);
+          collectedBlocks, removedINodes, true);
       parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-          -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+          -counts.get(Quota.DISKSPACE), true);
       removed = counts.get(Quota.NAMESPACE);
     }
     if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -1797,7 +1797,8 @@ private static void unprotectedUpdateCount(INodesInPath inodesInPath,
     final INode[] inodes = inodesInPath.getINodes();
     for(int i=0; i < numOfINodes; i++) {
       if (inodes[i].isQuotaSet()) { // a directory with quota
-        INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
+        INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i]
+            .asDirectory();
         node.addSpaceConsumed2Cache(nsDelta, dsDelta);
       }
     }
@@ -2033,7 +2034,8 @@ private static void verifyQuota(INode[] inodes, int pos, long nsDelta,
       }
       if (inodes[i].isQuotaSet()) { // a directory with quota
         try {
-          ((INodeDirectoryWithQuota)inodes[i]).verifyQuota(nsDelta, dsDelta);
+          ((INodeDirectoryWithQuota) inodes[i].asDirectory()).verifyQuota(
+              nsDelta, dsDelta);
         } catch (QuotaExceededException e) {
           e.setPathName(getFullPathName(inodes, i));
           throw e;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 2cc2a9931e..6c6531c3f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -342,12 +342,13 @@ public INodeSymlink asSymlink() {
    *          deletion/update will be added to the given map.
    * @param removedINodes
    *          INodes collected from the descents for further cleaning up of
-   *          inodeMap
+   *          inodeMap
    * @return quota usage delta when deleting a snapshot
    */
   public abstract Quota.Counts cleanSubtree(final Snapshot snapshot,
       Snapshot prior, BlocksMapUpdateInfo collectedBlocks,
-      List<INode> removedINodes) throws QuotaExceededException;
+      List<INode> removedINodes, boolean countDiffChange)
+      throws QuotaExceededException;
 
   /**
    * Destroy self and clear everything! If the INode is a file, this method
@@ -388,17 +389,10 @@ public final ContentSummary computeContentSummary() {
    * Check and add namespace/diskspace consumed to itself and the ancestors.
    * @throws QuotaExceededException if quote is violated.
    */
-  public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify,
-      int snapshotId) throws QuotaExceededException {
+  public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify)
+      throws QuotaExceededException {
     if (parent != null) {
-      parent.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
-    }
-  }
-
-  public void addSpaceConsumedToRenameSrc(long nsDelta, long dsDelta,
-      boolean verify, int snapshotId) throws QuotaExceededException {
-    if (parent != null) {
-      parent.addSpaceConsumedToRenameSrc(nsDelta, dsDelta, verify, snapshotId);
+      parent.addSpaceConsumed(nsDelta, dsDelta, verify);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 78aa46b30f..7e72fa3228 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -23,6 +23,7 @@
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -499,7 +500,8 @@ public void clear() {
   /** Call cleanSubtree(..) recursively down the subtree. */
   public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
       Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes) throws QuotaExceededException {
+      final List<INode> removedINodes, final Map<INode, INode> excludedNodes,
+      final boolean countDiffChange) throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     // in case of deletion snapshot, since this call happens after we modify
     // the diff list, the snapshot to be deleted has been combined or renamed
@@ -508,9 +510,14 @@ public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot,
     // INodeDirectoryWithSnapshot#cleanSubtree)
     Snapshot s = snapshot != null && prior != null ? prior : snapshot;
     for (INode child : getChildrenList(s)) {
-      Quota.Counts childCounts = child.cleanSubtree(snapshot, prior,
-          collectedBlocks, removedINodes);
-      counts.add(childCounts);
+      if (snapshot != null && excludedNodes != null
+          && excludedNodes.containsKey(child)) {
+        continue;
+      } else {
+        Quota.Counts childCounts = child.cleanSubtree(snapshot, prior,
+            collectedBlocks, removedINodes, countDiffChange);
+        counts.add(childCounts);
+      }
     }
     return counts;
   }
@@ -527,8 +534,9 @@ public void destroyAndCollectBlocks(final BlocksMapUpdateInfo collectedBlocks,
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes) throws QuotaExceededException {
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
+      throws QuotaExceededException {
     if (prior == null && snapshot == null) {
       // destroy the whole subtree and collect blocks that should be deleted
       Quota.Counts counts = Quota.Counts.newInstance();
@@ -538,7 +546,7 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
     } else {
       // process recursively down the subtree
       Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior,
-          collectedBlocks, removedINodes);
+          collectedBlocks, removedINodes, null, countDiffChange);
       if (isQuotaSet()) {
         ((INodeDirectoryWithQuota) this).addSpaceConsumed2Cache(
             -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
index b623fe5c06..f2b607ccaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
@@ -132,7 +132,7 @@ long numItemsInTree() {
 
   @Override
   public final void addSpaceConsumed(final long nsDelta, final long dsDelta,
-      boolean verify, int snapshotId) throws QuotaExceededException {
+      boolean verify) throws QuotaExceededException {
     if (isQuotaSet()) {
       // The following steps are important:
       // check quotas in this inode and all ancestors before changing counts
@@ -143,11 +143,11 @@ public final void addSpaceConsumed(final long nsDelta, final long dsDelta,
         verifyQuota(nsDelta, dsDelta);
       }
       // (2) verify quota and then add count in ancestors
-      super.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
+      super.addSpaceConsumed(nsDelta, dsDelta, verify);
       // (3) add count in this inode
       addSpaceConsumed2Cache(nsDelta, dsDelta);
     } else {
-      super.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
+      super.addSpaceConsumed(nsDelta, dsDelta, verify);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 37bc16c84d..57f7fefecd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -271,7 +271,8 @@ public void setBlocks(BlockInfo[] blocks) {
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     if (snapshot == null && prior == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
index a5c4d6f51b..ca9e10170f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
@@ -113,8 +113,8 @@ public Content.Counts computeContentSummary(Content.Counts counts) {
 
       @Override
       public Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
-          BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes)
-          throws QuotaExceededException {
+          BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
+          boolean countDiffChange) throws QuotaExceededException {
         return null;
       }
     };
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 3e9c71e297..22d0a487e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -254,10 +254,10 @@ final INode recordModification(Snapshot latest, final INodeMap inodeMap)
 
   @Override // used by WithCount
   public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
-      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
-      throws QuotaExceededException {
+      BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes,
+      final boolean countDiffChange) throws QuotaExceededException {
     return referred.cleanSubtree(snapshot, prior, collectedBlocks,
-        removedINodes);
+        removedINodes, countDiffChange);
   }
 
   @Override // used by WithCount
@@ -396,29 +396,6 @@ WithName getPriorWithName(WithName post) {
         return withNameList.get(-i - 2);
       }
     }
-
-    @Override
-    public final void addSpaceConsumed(long nsDelta, long dsDelta,
-        boolean verify, int snapshotId) throws QuotaExceededException {
-      INodeReference parentRef = getParentReference();
-      if (parentRef != null) {
-        parentRef.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
-      }
-      addSpaceConsumedToRenameSrc(nsDelta, dsDelta, verify, snapshotId);
-    }
-
-    @Override
-    public final void addSpaceConsumedToRenameSrc(long nsDelta, long dsDelta,
-        boolean verify, int snapshotId) throws QuotaExceededException {
-      if (snapshotId != Snapshot.INVALID_ID) {
-        for (INodeReference.WithName withName : withNameList) {
-          if (withName.getLastSnapshotId() >= snapshotId) {
-            withName.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId);
-            break;
-          }
-        }
-      }
-    }
   }
 
   /** A reference with a fixed name. */
@@ -476,15 +453,20 @@ public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
       Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId);
       final INode referred = this.getReferredINode().asReference()
           .getReferredINode();
-      // we cannot use cache for the referred node since its cached quota may
-      // have already been updated by changes in the current tree
-      return referred.computeQuotaUsage(counts, false, this.lastSnapshotId);
+      // We will continue the quota usage computation using the same snapshot id
+      // as time line (if the given snapshot id is valid). Also, we cannot use
+      // cache for the referred node since its cached quota may have already
+      // been updated by changes in the current tree.
+      int id = lastSnapshotId > Snapshot.INVALID_ID ?
+          lastSnapshotId : this.lastSnapshotId;
+      return referred.computeQuotaUsage(counts, false, id);
     }
 
     @Override
     public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
         final BlocksMapUpdateInfo collectedBlocks,
-        final List<INode> removedINodes) throws QuotaExceededException {
+        final List<INode> removedINodes, final boolean countDiffChange)
+        throws QuotaExceededException {
       // since WithName node resides in deleted list acting as a snapshot copy,
       // the parameter snapshot must be non-null
       Preconditions.checkArgument(snapshot != null);
@@ -500,11 +482,19 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
       }
 
       Quota.Counts counts = getReferredINode().cleanSubtree(snapshot, prior,
-          collectedBlocks, removedINodes);
+          collectedBlocks, removedINodes, false);
       INodeReference ref = getReferredINode().getParentReference();
       if (ref != null) {
         ref.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-            -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+            -counts.get(Quota.DISKSPACE), true);
+      }
+
+      if (snapshot.getId() < lastSnapshotId) {
+        // for a WithName node, when we compute its quota usage, we only count
+        // in all the nodes existing at the time of the corresponding rename op.
+        // Thus if we are deleting a snapshot before/at the snapshot associated
+        // with lastSnapshotId, we do not need to update the quota upwards.
+        counts = Quota.Counts.newInstance();
       }
       return counts;
     }
@@ -529,16 +519,17 @@ public void destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks,
         // 1. create snapshot s1 on /test
         // 2. rename /test/foo/bar to /test/foo2/bar
         // 3. create snapshot s2 on /test
-        // 4. delete snapshot s2
+        // 4. rename foo2 again
+        // 5. delete snapshot s2
         return;
       }
       try {
         Quota.Counts counts = referred.cleanSubtree(snapshot, prior,
-            collectedBlocks, removedINodes);
+            collectedBlocks, removedINodes, false);
         INodeReference ref = getReferredINode().getParentReference();
         if (ref != null) {
           ref.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-              -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+              -counts.get(Quota.DISKSPACE), true);
         }
       } catch (QuotaExceededException e) {
         LOG.error("should not exceed quota while snapshot deletion", e);
@@ -588,8 +579,8 @@ public DstReference(INodeDirectory parent, WithCount referred,
 
     @Override
     public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
-        BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes)
-        throws QuotaExceededException {
+        BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
+        final boolean countDiffChange) throws QuotaExceededException {
       if (snapshot == null && prior == null) {
         Quota.Counts counts = Quota.Counts.newInstance();
         this.computeQuotaUsage(counts, true);
@@ -609,7 +600,7 @@ public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
         return Quota.Counts.newInstance();
       }
       return getReferredINode().cleanSubtree(snapshot, prior,
-          collectedBlocks, removedINodes);
+          collectedBlocks, removedINodes, countDiffChange);
     }
   }
 
@@ -648,8 +639,11 @@ public void destroyAndCollectBlocks(
         sfile.deleteCurrentFile();
         if (snapshot != null) {
           try {
+            // when calling cleanSubtree of the referred node, since we
+            // compute quota usage updates before calling this destroy
+            // function, we use true for countDiffChange
             referred.cleanSubtree(snapshot, prior, collectedBlocks,
-                removedINodes);
+                removedINodes, true);
           } catch (QuotaExceededException e) {
             LOG.error("should not exceed quota while snapshot deletion", e);
           }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index 653ec8d3d9..ba41a2ecdb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -76,7 +76,8 @@ public byte[] getSymlink() {
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange) {
     if (snapshot == null && prior == null) {
       destroyAndCollectBlocks(collectedBlocks, removedINodes);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index 88879cedc6..23ca4ea9e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -68,7 +68,8 @@ public void clear() {
    */
  public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
       Snapshot prior, final N currentINode,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, boolean countDiffChange)
       throws QuotaExceededException {
     int snapshotIndex = Collections.binarySearch(diffs, snapshot.getId());
 
@@ -80,14 +81,14 @@ public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
         diffs.get(snapshotIndex).setSnapshot(prior);
       } else {
         removed = diffs.remove(0);
-        counts.add(Quota.NAMESPACE, 1);
-        // We add 1 to the namespace quota usage since we delete a diff.
-        // The quota change will be propagated to
-        // 1) ancestors in the current tree, and
-        // 2) src tree of any renamed ancestor.
-        // Because for 2) we do not calculate the number of diff for quota
-        // usage, we need to compensate this diff change for 2)
-        currentINode.addSpaceConsumedToRenameSrc(1, 0, false, snapshot.getId());
+        if (countDiffChange) {
+          counts.add(Quota.NAMESPACE, 1);
+        } else {
+          // the currentINode must be a descendant of a WithName node, which set
+          // countDiffChange to false. In that case we should count in the diff
+          // change when updating the quota usage in the current tree
+          currentINode.addSpaceConsumed(-1, 0, false);
+        }
         counts.add(removed.destroyDiffAndCollectBlocks(currentINode,
             collectedBlocks, removedINodes));
       }
@@ -98,8 +99,11 @@ public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
     } else {
       // combine the to-be-removed diff with its previous diff
       removed = diffs.remove(snapshotIndex);
-      counts.add(Quota.NAMESPACE, 1);
-      currentINode.addSpaceConsumedToRenameSrc(1, 0, false, snapshot.getId());
+      if (countDiffChange) {
+        counts.add(Quota.NAMESPACE, 1);
+      } else {
+        currentINode.addSpaceConsumed(-1, 0, false);
+      }
       if (previous.snapshotINode == null) {
         previous.snapshotINode = removed.snapshotINode;
       } else if (removed.snapshotINode != null) {
@@ -117,7 +121,7 @@ public final Quota.Counts deleteSnapshotDiff(final Snapshot snapshot,
 
   /** Add an {@link AbstractINodeDiff} for the given snapshot. */
   final D addDiff(Snapshot latest, N currentINode)
       throws QuotaExceededException {
-    currentINode.addSpaceConsumed(1, 0, true, Snapshot.INVALID_ID);
+    currentINode.addSpaceConsumed(1, 0, true);
     return addLast(createDiff(latest, currentINode));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
index 86bcf90047..f5845645d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
@@ -319,21 +319,23 @@ Snapshot removeSnapshot(String snapshotName,
           + " from path " + this.getFullPathName()
           + ": the snapshot does not exist.");
     } else {
-      final Snapshot snapshot = snapshotsByNames.remove(i);
+      final Snapshot snapshot = snapshotsByNames.get(i);
       Snapshot prior = Snapshot.findLatestSnapshot(this, snapshot);
       try {
         Quota.Counts counts = cleanSubtree(snapshot, prior, collectedBlocks,
-            removedINodes);
+            removedINodes, true);
         INodeDirectory parent = getParent();
         if (parent != null) {
           // there will not be any WithName node corresponding to the deleted
           // snapshot, thus only update the quota usage in the current tree
           parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-              -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID);
+              -counts.get(Quota.DISKSPACE), true);
         }
       } catch(QuotaExceededException e) {
         LOG.error("BUG: removeSnapshot increases namespace usage.", e);
       }
+      // remove from snapshotsByNames after successfully cleaning the subtree
+      snapshotsByNames.remove(i);
       return snapshot;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
index a400dc388e..994f9bd481 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
@@ -23,8 +23,10 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Deque;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
@@ -658,7 +660,7 @@ public void undoRename4DstParent(final INode deletedChild,
     if (added && !removeDeletedChild) {
       final Quota.Counts counts = deletedChild.computeQuotaUsage();
       addSpaceConsumed(counts.get(Quota.NAMESPACE),
-          counts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID);
+          counts.get(Quota.DISKSPACE), false);
     }
   }
 
@@ -692,9 +694,12 @@ public void getSnapshotDirectory(List<INodeDirectorySnapshottable> snapshotDir) {
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
+    Map<INode, INode> priorCreated = null;
+    Map<INode, INode> priorDeleted = null;
     if (snapshot == null) { // delete the current directory
       recordModification(prior, null);
       // delete everything in created list
@@ -706,8 +711,28 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
     } else {
       // update prior
       prior = getDiffs().updatePrior(snapshot, prior);
+      // if there is a snapshot diff associated with prior, we need to record
+      // its original created and deleted list before deleting post
+      if (prior != null) {
+        DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
+        if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
+          List<INode> cList = priorDiff.diff.getList(ListType.CREATED);
+          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
+          priorCreated = new HashMap<INode, INode>(cList.size());
+          for (INode cNode : cList) {
+            priorCreated.put(cNode, cNode);
+          }
+          priorDeleted = new HashMap<INode, INode>(dList.size());
+          for (INode dNode : dList) {
+            priorDeleted.put(dNode, dNode);
+          }
+        }
+      }
+
       counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, this,
-          collectedBlocks, removedINodes));
+          collectedBlocks, removedINodes, countDiffChange));
+
+      // check priorDiff again since it may be created during the diff deletion
       if (prior != null) {
         DirectoryDiff priorDiff = this.getDiffs().getDiff(prior);
         if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
@@ -716,11 +741,17 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
           // use null as prior in the cleanSubtree call. Files/directories that
           // were created before "prior" will be covered by the later
           // cleanSubtreeRecursively call.
-          for (INode cNode : priorDiff.getChildrenDiff().getList(
-              ListType.CREATED)) {
-            counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks,
-                removedINodes));
+          if (priorCreated != null) {
+            // we only check the node originally in prior's created list
+            for (INode cNode : priorDiff.getChildrenDiff().getList(
+                ListType.CREATED)) {
+              if (priorCreated.containsKey(cNode)) {
+                counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks,
+                    removedINodes, countDiffChange));
+              }
+            }
           }
+
           // When a directory is moved from the deleted list of the posterior
           // diff to the deleted list of this diff, we need to destroy its
           // descendants that were 1) created after taking this diff and 2)
@@ -728,16 +759,19 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
           // For files moved from posterior's deleted list, we also need to
           // delete its snapshot copy associated with the posterior snapshot.
+
           for (INode dNode : priorDiff.getChildrenDiff().getList(
               ListType.DELETED)) {
-            counts.add(cleanDeletedINode(dNode, snapshot, prior,
-                collectedBlocks, removedINodes));
+            if (priorDeleted == null || !priorDeleted.containsKey(dNode)) {
+              counts.add(cleanDeletedINode(dNode, snapshot, prior,
+                  collectedBlocks, removedINodes, countDiffChange));
+            }
           }
         }
       }
     }
 
     counts.add(cleanSubtreeRecursively(snapshot, prior, collectedBlocks,
-        removedINodes));
+        removedINodes, priorDeleted, countDiffChange));
 
     if (isQuotaSet()) {
       this.addSpaceConsumed2Cache(-counts.get(Quota.NAMESPACE),
@@ -755,9 +789,11 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
    * @param collectedBlocks Used to collect blocks for later deletion.
    * @return Quota usage update.
    */
-  private static Quota.Counts cleanDeletedINode(INode inode, final Snapshot post,
-      final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks,
-      final List<INode> removedINodes) throws QuotaExceededException {
+  private static Quota.Counts cleanDeletedINode(INode inode,
+      final Snapshot post, final Snapshot prior,
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
+      throws QuotaExceededException {
     Quota.Counts counts = Quota.Counts.newInstance();
     Deque<INode> queue = new ArrayDeque<INode>();
     queue.addLast(inode);
@@ -766,7 +802,8 @@ private static Quota.Counts cleanDeletedINode(INode inode, final Snapshot post,
       if (topNode instanceof INodeReference.WithName) {
         INodeReference.WithName wn = (INodeReference.WithName) topNode;
         if (wn.getLastSnapshotId() >= post.getId()) {
-          wn.cleanSubtree(post, prior, collectedBlocks, removedINodes);
+          wn.cleanSubtree(post, prior, collectedBlocks, removedINodes,
+              countDiffChange);
         }
         // For DstReference node, since the node is not in the created list of
         // prior, we should treat it as regular file/dir
@@ -774,20 +811,28 @@ private static Quota.Counts cleanDeletedINode(INode inode, final Snapshot post,
       } else if (topNode.isFile()
           && topNode.asFile() instanceof FileWithSnapshot) {
         FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile();
         counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior,
-            topNode.asFile(), collectedBlocks, removedINodes));
+            topNode.asFile(), collectedBlocks, removedINodes, countDiffChange));
       } else if (topNode.isDirectory()) {
         INodeDirectory dir = topNode.asDirectory();
+        ChildrenDiff priorChildrenDiff = null;
         if (dir instanceof INodeDirectoryWithSnapshot) {
           // delete files/dirs created after prior. Note that these
           // files/dirs, along with inode, were deleted right after post.
           INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir;
           DirectoryDiff priorDiff = sdir.getDiffs().getDiff(prior);
           if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
-            counts.add(priorDiff.diff.destroyCreatedList(sdir,
+            priorChildrenDiff = priorDiff.getChildrenDiff();
+            counts.add(priorChildrenDiff.destroyCreatedList(sdir,
                 collectedBlocks, removedINodes));
           }
         }
+
         for (INode child : dir.getChildrenList(prior)) {
+          if (priorChildrenDiff != null
+              && priorChildrenDiff.search(ListType.DELETED,
+                  child.getLocalNameBytes()) != null) {
+            continue;
+          }
           queue.addLast(child);
         }
       }
@@ -864,29 +909,39 @@ public static void destroyDstSubtree(INode inode, final Snapshot snapshot,
       if (inode instanceof INodeReference.WithName && snapshot != null) {
         // this inode has been renamed before the deletion of the DstReference
         // subtree
-        inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes);
+        inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes,
+            true);
       } else {
         // for DstReference node, continue this process to its subtree
         destroyDstSubtree(inode.asReference().getReferredINode(), snapshot,
             prior, collectedBlocks, removedINodes);
       }
     } else if (inode.isFile() && snapshot != null) {
-      inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes);
+      inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true);
    } else if (inode.isDirectory()) {
+      Map<INode, INode> excludedNodes = null;
       if (inode instanceof INodeDirectoryWithSnapshot) {
         INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) inode;
         DirectoryDiffList diffList = sdir.getDiffs();
         if (snapshot != null) {
           diffList.deleteSnapshotDiff(snapshot, prior, sdir, collectedBlocks,
-              removedINodes);
+              removedINodes, true);
         }
         DirectoryDiff priorDiff = diffList.getDiff(prior);
         if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) {
           priorDiff.diff.destroyCreatedList(sdir, collectedBlocks,
               removedINodes);
+          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
+          excludedNodes = new HashMap<INode, INode>(dList.size());
+          for (INode dNode : dList) {
+            excludedNodes.put(dNode, dNode);
+          }
         }
       }
       for (INode child : inode.asDirectory().getChildrenList(prior)) {
+        if (excludedNodes != null && excludedNodes.containsKey(child)) {
+          continue;
+        }
         destroyDstSubtree(child, snapshot, prior, collectedBlocks,
             removedINodes);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
index 80e3ca8d0d..28f33123b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
@@ -104,7 +104,8 @@ public FileDiffList getDiffs() {
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     if (snapshot == null) { // delete the current file
       recordModification(prior, null);
@@ -114,7 +115,7 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
     } else { // delete a snapshot
       prior = getDiffs().updatePrior(snapshot, prior);
       return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
-          removedINodes);
+          removedINodes, countDiffChange);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
index 06e833b096..7f37666f88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
@@ -91,7 +91,8 @@ public FileDiffList getDiffs() {
 
   @Override
   public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
-      final BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
       throws QuotaExceededException {
     if (snapshot == null) { // delete the current file
       recordModification(prior, null);
@@ -101,7 +102,7 @@ public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
     } else { // delete a snapshot
       prior = getDiffs().updatePrior(snapshot, prior);
       return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks,
-          removedINodes);
+          removedINodes, countDiffChange);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 9c2279fdf1..90bfa26ed7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -454,7 +454,7 @@ public void testRenameDirAndDeleteSnapshot_1() throws Exception {
 
     // delete snapshot s5. The diff of s5 should be combined to s4
     hdfs.deleteSnapshot(sdir1, "s5");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     assertFalse(hdfs.exists(bar2_s5));
     final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
         "foo/bar2");
@@ -492,7 +492,7 @@ public void testRenameDirAndDeleteSnapshot_1() throws Exception {
     assertFalse(hdfs.exists(bar3_s2));
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // delete snapshot s2.
     hdfs.deleteSnapshot(sdir2, "s2");
@@ -500,14 +500,15 @@ public void testRenameDirAndDeleteSnapshot_1() throws Exception {
     assertFalse(hdfs.exists(bar2_s2));
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     hdfs.deleteSnapshot(sdir1, "s3");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     hdfs.deleteSnapshot(sdir1, "s1");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
-  private void restartClusterAndCheckImage() throws IOException {
+  private void restartClusterAndCheckImage(boolean compareQuota)
+      throws IOException {
     File fsnBefore = new File(testDir, "dumptree_before");
     File fsnMiddle = new File(testDir, "dumptree_middle");
     File fsnAfter = new File(testDir, "dumptree_after");
@@ -538,8 +539,10 @@ private void restartClusterAndCheckImage() throws IOException {
     // dump the namespace loaded from fsimage
     SnapshotTestHelper.dumpTree2File(fsdir, fsnAfter);
 
-    SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle, true);
-    SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
+    SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle,
+        compareQuota);
+    SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter,
+        compareQuota);
   }
 
   /**
@@ -579,7 +582,7 @@ public void testRenameFileAndDeleteSnapshot() throws Exception {
 
     // delete snapshot s5.
     hdfs.deleteSnapshot(sdir1, "s5");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     assertFalse(hdfs.exists(foo_s5));
     status = hdfs.getFileStatus(foo_s4);
     assertEquals(REPL_1, status.getReplication());
@@ -604,18 +607,18 @@ public void testRenameFileAndDeleteSnapshot() throws Exception {
         .getLocalName());
 
     // restart cluster
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // delete snapshot s2.
     hdfs.deleteSnapshot(sdir2, "s2");
     assertFalse(hdfs.exists(foo_s2));
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     hdfs.deleteSnapshot(sdir1, "s3");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     hdfs.deleteSnapshot(sdir1, "s1");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -650,7 +653,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
     hdfs.rename(bar2_dir1, bar2_dir2);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // modification on /dir2/foo and /dir2/bar
     final Path bar1_dir2 = new Path(foo_dir2, "bar1");
@@ -686,7 +689,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
     hdfs.rename(bar2_dir2, bar2_dir3);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // modification on /dir3/foo and /dir3/bar
     final Path bar1_dir3 = new Path(foo_dir3, "bar1");
@@ -718,7 +721,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
     hdfs.rename(bar2_dir3, bar2_dir2);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // modification on /dir2/foo
     hdfs.setReplication(bar1_dir2, REPL);
@@ -773,14 +776,14 @@ public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
         .getLocalName());
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // delete foo
     hdfs.delete(foo_dir1, true);
     hdfs.delete(bar2_dir1, true);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // check
     assertTrue(hdfs.exists(bar1_s1));
@@ -844,7 +847,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
     hdfs.setReplication(bar_dir2, REPL_1);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // create snapshots
     SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s11");
@@ -863,7 +866,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
     hdfs.setReplication(bar_dir3, REPL_2);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // create snapshots
     SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s111");
@@ -917,7 +920,7 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
     hdfs.setReplication(bar_dir2, REPL);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // create snapshots
     SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1111");
@@ -999,14 +1002,14 @@ public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
     assertEquals("s1", barDiffs.get(0).snapshot.getRoot().getLocalName());
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // delete foo
     hdfs.delete(foo_dir1, true);
     hdfs.delete(bar_dir1, true);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // check
     final Path bar1_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111",
@@ -1127,7 +1130,7 @@ public void testRenameDirAndDeleteSnapshot_2() throws Exception {
     hdfs.rename(foo, newfoo);
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     final Path bar2 = new Path(newfoo, "bar2");
     DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
@@ -1145,7 +1148,7 @@ public void testRenameDirAndDeleteSnapshot_2() throws Exception {
     // delete snapshot s4. The diff of s4 should be combined to s3
     hdfs.deleteSnapshot(sdir1, "s4");
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
     assertFalse(hdfs.exists(bar_s3));
@@ -1175,18 +1178,18 @@ public void testRenameDirAndDeleteSnapshot_2() throws Exception {
     assertEquals("s2", diffs.get(0).snapshot.getRoot().getLocalName());
 
     // restart the cluster and check fsimage
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // delete snapshot s2.
     hdfs.deleteSnapshot(sdir2, "s2");
     assertFalse(hdfs.exists(bar_s2));
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
 
     // make sure the whole referred subtree has been destroyed
     assertEquals(4, fsdir.getRoot().getNamespace());
     assertEquals(0, fsdir.getRoot().getDiskspace());
     hdfs.deleteSnapshot(sdir1, "s1");
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
     assertEquals(3, fsdir.getRoot().getNamespace());
     assertEquals(0, fsdir.getRoot().getDiskspace());
   }
@@ -1232,7 +1235,7 @@ public void testRenameAndAppend() throws Exception {
     INode fooNode = fooRef.asFile();
     assertTrue(fooNode instanceof INodeFileWithSnapshot);
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -1724,7 +1727,7 @@ public void testRenameUndo_7() throws Exception {
      cluster = new MiniDFSCluster.Builder(conf).format(false)
          .numDataNodes(REPL).build();
      cluster.waitActive();
-      restartClusterAndCheckImage();
+      restartClusterAndCheckImage(true);
   }
 
   /**
@@ -1784,7 +1787,108 @@ public void testRename2PreDescendant() throws Exception {
     final Path foo2 = new Path(bar2, "foo");
     hdfs.rename(foo, foo2);
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
+
+    // delete snap1
+    hdfs.deleteSnapshot(sdir1, snap1);
+
+    restartClusterAndCheckImage(true);
+  }
+
+  /**
+   * move a directory to its prior descendant
+   */
+  @Test
+  public void testRename2PreDescendant_2() throws Exception {
+    final Path root = new Path("/");
+    final Path sdir1 = new Path("/dir1");
+    final Path sdir2 = new Path("/dir2");
+    final Path foo = new Path(sdir1, "foo");
+    final Path bar = new Path(foo, "bar");
+    final Path file1InBar = new Path(bar, "file1");
+    final Path file2InBar = new Path(bar, "file2");
+    hdfs.mkdirs(bar);
+    hdfs.mkdirs(sdir2);
+    DFSTestUtil.createFile(hdfs, file1InBar, BLOCKSIZE, REPL, SEED);
+    DFSTestUtil.createFile(hdfs, file2InBar, BLOCKSIZE, REPL, SEED);
+
+    hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+
+    // create snapshot on root
+    SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
+    // delete file1InBar
+    hdfs.delete(file1InBar, true);
+
+    // create another snapshot on root
+    SnapshotTestHelper.createSnapshot(hdfs, root, snap2);
+    // delete file2InBar
+    hdfs.delete(file2InBar, true);
+
+    // /dir1/foo/bar -> /dir2/bar
+    final Path bar2 = new Path(sdir2, "bar2");
+    hdfs.rename(bar, bar2);
+
+    // /dir1/foo -> /dir2/bar/foo
+    final Path foo2 = new Path(bar2, "foo2");
+    hdfs.rename(foo, foo2);
+
+    restartClusterAndCheckImage(true);
+
+    // delete snapshot snap2
+    hdfs.deleteSnapshot(root, snap2);
+
+    // after deleting snap2, the WithName node "bar", which originally was
+    // stored in the deleted list of "foo" for snap2, is moved to its deleted
+    // list for snap1. In that case, it will not be counted when calculating
+    // quota for "foo". However, we do not update this quota usage change while
+    // deleting snap2.
+    restartClusterAndCheckImage(false);
+  }
+
+  /**
+   * move a directory to its prior descendant
+   */
+  @Test
+  public void testRename2PreDescendant_3() throws Exception {
+    final Path root = new Path("/");
+    final Path sdir1 = new Path("/dir1");
+    final Path sdir2 = new Path("/dir2");
+    final Path foo = new Path(sdir1, "foo");
+    final Path bar = new Path(foo, "bar");
+    final Path fileInBar = new Path(bar, "file");
+    hdfs.mkdirs(bar);
+    hdfs.mkdirs(sdir2);
+    DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED);
+
+    hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+    hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
+
+    // create snapshot on root
+    SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
+    // delete fileInBar
+    hdfs.delete(fileInBar, true);
+    // create another snapshot on root
+    SnapshotTestHelper.createSnapshot(hdfs, root, snap2);
+
+    // /dir1/foo/bar -> /dir2/bar
+    final Path bar2 = new Path(sdir2, "bar2");
+    hdfs.rename(bar, bar2);
+
+    // /dir1/foo -> /dir2/bar/foo
+    final Path foo2 = new Path(bar2, "foo2");
+    hdfs.rename(foo, foo2);
+
+    restartClusterAndCheckImage(true);
+
+    // delete snapshot snap1
+    hdfs.deleteSnapshot(root, snap1);
+
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -1851,7 +1955,7 @@ public void testRenameDirAndDeleteSnapshot_3() throws Exception {
     assertEquals(0, diff.getList(ListType.CREATED).size());
     assertEquals(0, diff.getList(ListType.DELETED).size());
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -1928,7 +2032,7 @@ public void testRenameDirAndDeleteSnapshot_4() throws Exception {
     assertSame(wc, wc2);
     assertSame(fooRef2, wc.getParentReference());
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -2041,7 +2145,7 @@ public void testRenameDirAndDeleteSnapshot_6() throws Exception {
         .isEmpty());
     assertTrue(barNode.getChildrenList(null).isEmpty());
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 
   /**
@@ -2137,6 +2241,6 @@ public void testRenameDirAndDeleteSnapshot_7() throws Exception {
     assertSame(fooNode.asReference().getReferredINode(),
         fooNode_s2.getReferredINode());
 
-    restartClusterAndCheckImage();
+    restartClusterAndCheckImage(true);
   }
 }