From e097f8404b3ffbad5322e0f8381a0b9958c5b589 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Mon, 29 Apr 2013 22:03:23 +0000
Subject: [PATCH] HDFS-4773. Fix bugs in quota usage computation and
 OfflineImageViewer. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1477367 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES.HDFS-2802.txt         |  3 ++
 .../hdfs/server/namenode/INodeFile.java       |  8 ++---
 .../hdfs/server/namenode/INodeReference.java  |  7 ++--
 .../snapshot/AbstractINodeDiffList.java       | 35 +++++++++----------
 .../namenode/snapshot/FileWithSnapshot.java   |  5 ++-
 .../snapshot/INodeDirectoryWithSnapshot.java  | 15 ++------
 .../ImageLoaderCurrent.java                   | 25 +++++++++----
 7 files changed, 53 insertions(+), 45 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index bbfee35ab0..655363ca76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -321,3 +321,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4578. Restrict snapshot IDs to 24-bit wide. (Arpit Agarwal via
   szetszwo)
+
+  HDFS-4773. Fix bugs in quota usage computation and OfflineImageViewer.
+  (Jing Zhao via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 54aa9e0c23..e3e1a000de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -342,9 +342,9 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect
         dsDelta = diskspaceConsumed();
       } else if (last.getId() < lastSnapshotId) {
         dsDelta = computeFileSize(true, false) * getFileReplication();
-      } else {
-        Snapshot s = fileDiffList.searchSnapshotById(lastSnapshotId);
-        dsDelta = diskspaceConsumed(s);
+      } else {
+        Snapshot s = fileDiffList.getSnapshotById(lastSnapshotId);
+        dsDelta = diskspaceConsumed(s);
       }
     } else {
       dsDelta = diskspaceConsumed();
@@ -441,7 +441,7 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect
    * if includesLastUcBlock == false.
    * @return file size
    */
-  private final long computeFileSize(boolean includesLastUcBlock,
+  public final long computeFileSize(boolean includesLastUcBlock,
       boolean usePreferredBlockSize4LastUcBlock) {
    if (blocks == null || blocks.length == 0) {
      return 0;
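
Note: every branch of the dsDelta computation above reduces to "file size
times replication"; the branches differ only in which state (the current file
or a snapshot) supplies the two factors. A standalone sketch of that
arithmetic, with made-up block sizes rather than the HDFS API:

    public class DiskspaceDemo {
      public static void main(String[] args) {
        // Hypothetical layout: one full 128 MB block plus a 64 MB tail block.
        long[] blockSizes = {128L << 20, 64L << 20};
        short replication = 3;

        long fileSize = 0;
        for (long b : blockSizes) {
          fileSize += b;
        }
        // Diskspace consumed counts every replica of every block.
        System.out.println(fileSize * replication); // 603979776 (576 MB)
      }
    }
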
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 428194e827..7735d8ac15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -414,8 +414,11 @@ public abstract class INodeReference extends INode {
   @Override
   public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
       boolean useCache, int lastSnapshotId) {
-    Preconditions.checkState(lastSnapshotId == Snapshot.INVALID_ID
-        || this.lastSnapshotId <= lastSnapshotId);
+    // if this.lastSnapshotId < lastSnapshotId, the rename of the referred
+    // node happened before the rename of its ancestor. This should be
+    // impossible since for WithName node we only count its children at the
+    // time of the rename.
+    Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId);
     final INode referred = this.getReferredINode().asReference()
         .getReferredINode();
     // we cannot use cache for the referred node since its cached quota may
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index b0cc05e639..433f728048 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -150,20 +150,6 @@ abstract class AbstractINodeDiffList<N extends INode,
     }
   }
 
-  /**
-   * Search for the snapshot whose id is 1) no less than the given id,
-   * and 2) most close to the given id.
-   */
-  final Snapshot searchSnapshotById(final int snapshotId) {
-    final int i = Collections.binarySearch(diffs, snapshotId);
-    if (i >= 0) {
-      return diffs.get(i).getSnapshot();
-    } else {
-      final int j = -i - 1;
-      return j < diffs.size()? diffs.get(j).getSnapshot(): null;
-    }
-  }
-
@@ -172,17 +158,26 @@ abstract class AbstractINodeDiffList<N extends INode,
   private final D getDiffById(final int snapshotId) {
     final int i = Collections.binarySearch(diffs, snapshotId);
     if (i >= 0) {
       // exact match
       return diffs.get(i);
     } else {
       // Exact match not found means that there were no changes between
       // given snapshot and the next state so that the diff for the given
-      // snapshot was not recorded. Thus, return the next state. 
+      // snapshot was not recorded. Thus, return the next state.
       final int j = -i - 1;
       return j < diffs.size()? diffs.get(j): null;
     }
   }
 
+  /**
+   * Search for the snapshot whose id is 1) no less than the given id,
+   * and 2) most close to the given id.
+   */
+  public final Snapshot getSnapshotById(final int snapshotId) {
+    D diff = getDiffById(snapshotId);
+    return diff == null ? null : diff.getSnapshot();
+  }
+
   /**
    * Check if changes have happened between two snapshots.
   * @param earlier The snapshot taken earlier
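
Note: getDiffById above relies on the java.util.Collections.binarySearch
contract: a non-negative result is the index of an exact match, while a
negative result encodes the insertion point as -(insertionPoint) - 1, so
-i - 1 recovers the index of the first recorded diff whose snapshot id is
greater than the key. A minimal self-contained demonstration of that
convention, with a plain Integer list standing in for the diff list:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class BinarySearchDemo {
      public static void main(String[] args) {
        // Snapshot ids with recorded diffs, sorted ascending.
        List<Integer> ids = Arrays.asList(3, 7, 12);

        System.out.println(Collections.binarySearch(ids, 7));  // 1: exact match

        int i = Collections.binarySearch(ids, 8);  // no diff recorded for id 8
        int j = -i - 1;                            // insertion point
        // Nothing changed between snapshot 8 and the next recorded state,
        // so the diff for id 12 also describes snapshot 8.
        System.out.println(j < ids.size() ? ids.get(j) : null);  // 12
      }
    }
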
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
index 9cc281bbba..8f96f3917d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
@@ -68,7 +68,10 @@ public interface FileWithSnapshot {
       long oldDiskspace = currentINode.diskspaceConsumed();
       if (removed.snapshotINode != null) {
         short replication = removed.snapshotINode.getFileReplication();
-        if (replication > currentINode.getBlockReplication()) {
+        short currentRepl = currentINode.getBlockReplication();
+        if (currentRepl == 0) {
+          oldDiskspace = currentINode.computeFileSize(true, true) * replication;
+        } else if (replication > currentRepl) {
           oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
               * replication;
         }
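
Note: the new currentRepl == 0 branch above guards the case where the file no
longer exists in the current tree and survives only in snapshots; there the
old code would have divided by zero in
oldDiskspace / currentINode.getBlockReplication(). A toy model of the branch,
with invented values rather than the INode classes:

    public class ReplicationAdjustDemo {
      public static void main(String[] args) {
        long oldDiskspace = 0;        // current state holds no replicas
        short currentRepl = 0;        // file is gone from the current tree
        short snapshotRepl = 3;       // replication recorded in the snapshot
        long fileSize = 100L << 20;   // 100 MB as of the snapshot

        if (currentRepl == 0) {
          // Recompute from the snapshot instead of scaling a zero value.
          oldDiskspace = fileSize * snapshotRepl;
        } else if (snapshotRepl > currentRepl) {
          oldDiskspace = oldDiskspace / currentRepl * snapshotRepl;
        }
        System.out.println(oldDiskspace); // 314572800
      }
    }
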
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
index a62a77c54e..139499a696 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
@@ -914,25 +914,14 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       return super.computeQuotaUsage(counts, useCache, lastSnapshotId);
     }
 
-    final int diffNum = 0;
-    Snapshot lastSnapshot = null;
-    Snapshot lastInDiff = diffs.getLastSnapshot();
-    // if lastSnapshotId > lastInDiff.getId(), the snapshot diff associated with
-    // lastSnapshotId must have been deleted. We should call
-    // getChildrenList(null) to get the children list for the continuous
-    // computation. In the meanwhile, there must be some snapshot diff whose
-    // snapshot id is no less than lastSnapshotId. Otherwise the WithName node
-    // itself should have been deleted.
-    if (lastInDiff != null && lastInDiff.getId() >= lastSnapshotId) {
-      lastSnapshot = diffs.searchSnapshotById(lastSnapshotId);
-    }
+    Snapshot lastSnapshot = diffs.getSnapshotById(lastSnapshotId);
 
     ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
     for (INode child : childrenList) {
       child.computeQuotaUsage(counts, useCache, lastSnapshotId);
     }
 
-    counts.add(Quota.NAMESPACE, diffNum + 1);
+    counts.add(Quota.NAMESPACE, 1);
     return counts;
   }
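
Note: after the simplification above, each directory contributes exactly one
namespace entry for itself plus whatever its children as of lastSnapshot
contribute; per the deleted comment, getChildrenList(null) yields the current
children when no diff at or after the given id exists. A toy model of that
recursion, using invented classes rather than the INode hierarchy:

    import java.util.ArrayList;
    import java.util.List;

    public class NamespaceQuotaDemo {
      static class Node {
        final List<Node> children = new ArrayList<Node>();
      }

      // Every inode, file or directory, counts as 1 namespace entry,
      // mirroring counts.add(Quota.NAMESPACE, 1) in the patch.
      static long namespace(Node node) {
        long count = 1;
        for (Node child : node.children) {
          count += namespace(child);
        }
        return count;
      }

      public static void main(String[] args) {
        Node root = new Node();
        Node dir = new Node();
        root.children.add(dir);
        dir.children.add(new Node()); // a file
        System.out.println(namespace(root)); // 3
      }
    }
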
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index b79870c3c1..201d1f0bc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
 import org.apache.hadoop.io.Text;
@@ -129,7 +130,8 @@ class ImageLoaderCurrent implements ImageLoader {
                                       -40, -41, -42, -43};
   private int imageVersion = 0;
 
-  private final Map<String, String> nodeMap = new HashMap<String, String>();
+  private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
+  private final Map<Long, String> dirNodeMap = new HashMap<Long, String>();
 
   /* (non-Javadoc)
    * @see ImageLoader#canProcessVersion(int)
@@ -196,7 +198,8 @@ class ImageLoaderCurrent implements ImageLoader {
         }
       }
       processINodes(in, v, numInodes, skipBlocks, supportSnapshot);
-      nodeMap.clear();
+      subtreeMap.clear();
+      dirNodeMap.clear();
 
       processINodesUC(in, v, skipBlocks);
 
@@ -441,10 +444,11 @@ class ImageLoaderCurrent implements ImageLoader {
    */
   private void processDirectoryWithSnapshot(DataInputStream in, ImageVisitor v,
       boolean skipBlocks) throws IOException {
-    // 1. load dir name
-    String dirName = FSImageSerialization.readString(in);
+    // 1. load dir node id
+    long inodeId = in.readLong();
 
-    String oldValue = nodeMap.put(dirName, dirName);
+    String dirName = dirNodeMap.get(inodeId);
+    String oldValue = subtreeMap.put(inodeId, dirName);
     if (oldValue != null) { // the subtree has been visited
       return;
     }
@@ -581,6 +585,8 @@ class ImageLoaderCurrent implements ImageLoader {
       throws IOException {
     boolean supportSnapshot =
         LayoutVersion.supports(Feature.SNAPSHOT, imageVersion);
+    boolean supportInodeId =
+        LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion);
 
     v.visitEnclosingElement(ImageElement.INODE);
     String pathName = FSImageSerialization.readString(in);
@@ -591,9 +597,11 @@ class ImageLoaderCurrent implements ImageLoader {
       }
     }
 
+    long inodeId = INodeId.GRANDFATHER_INODE_ID;
     v.visit(ImageElement.INODE_PATH, pathName);
-    if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
-      v.visit(ImageElement.INODE_ID, in.readLong());
+    if (supportInodeId) {
+      inodeId = in.readLong();
+      v.visit(ImageElement.INODE_ID, inodeId);
     }
     v.visit(ImageElement.REPLICATION, in.readShort());
     v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
@@ -619,6 +627,9 @@ class ImageLoaderCurrent implements ImageLoader {
         }
       }
     } else if (numBlocks == -1) { // Directory
+      if (supportSnapshot && supportInodeId) {
+        dirNodeMap.put(inodeId, pathName);
+      }
       v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
       if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
         v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
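
Note: the two maps introduced above split the old single-map scheme keyed by
directory name: dirNodeMap remembers each directory's path keyed by inode id
as INode entries are read, and subtreeMap marks subtrees already visited, so a
subtree reachable more than once (for example through snapshots) is processed
only once. A self-contained sketch of that bookkeeping; the class and method
names here are invented for illustration:

    import java.util.HashMap;
    import java.util.Map;

    public class SubtreeVisitDemo {
      private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
      private final Map<Long, String> dirNodeMap = new HashMap<Long, String>();

      void registerDirectory(long inodeId, String pathName) {
        dirNodeMap.put(inodeId, pathName);
      }

      /** @return true on the first visit, false if the subtree was seen before. */
      boolean visitSubtree(long inodeId) {
        String dirName = dirNodeMap.get(inodeId);
        // Map.put returns the previous value; non-null means already visited.
        return subtreeMap.put(inodeId, dirName) == null;
      }

      public static void main(String[] args) {
        SubtreeVisitDemo demo = new SubtreeVisitDemo();
        demo.registerDirectory(1001L, "/dir");
        System.out.println(demo.visitSubtree(1001L)); // true: first visit
        System.out.println(demo.visitSubtree(1001L)); // false: skip subtree
      }
    }
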