From c0a8957c2bc505789df6be70be8665cdd4e4f649 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Mon, 3 Dec 2012 22:38:33 +0000
Subject: [PATCH] HDFS-4243. When replacing an INodeDirectory, the parent
 pointers of the children of the child have to be updated to the new child.
 Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1416709 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  4 ++
 .../hdfs/server/namenode/FSDirectory.java        | 50 +++++++++++--------
 .../hdfs/server/namenode/FSEditLogLoader.java    |  2 +-
 .../hdfs/server/namenode/INodeDirectory.java     |  6 +++
 .../hdfs/server/namenode/TestINodeFile.java      | 47 +++++++++++++++++
 5 files changed, 87 insertions(+), 22 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9c39f9122a..61ae9947a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -655,6 +655,10 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-4231. BackupNode: Introduce BackupState. (shv)
 
+    HDFS-4243. When replacing an INodeDirectory, the parent pointers of the
+    children of the child have to be updated to the new child. (Jing Zhao
+    via szetszwo)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 737e5d310d..05d3b22791 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1075,31 +1075,39 @@ public void replaceNode(String path, INodeFile oldnode, INodeFile newnode)
       throws IOException, UnresolvedLinkException {
     writeLock();
     try {
-      //
-      // Remove the node from the namespace
-      //
-      if (!oldnode.removeNode()) {
-        NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " +
-            "failed to remove " + path);
-        throw new IOException("FSDirectory.replaceNode: " +
-            "failed to remove " + path);
-      }
-
-      /* Currently oldnode and newnode are assumed to contain the same
-       * blocks. Otherwise, blocks need to be removed from the blocksMap.
-       */
-      rootDir.addINode(path, newnode);
-
-      int index = 0;
-      for (BlockInfo b : newnode.getBlocks()) {
-        BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
-        newnode.setBlock(index, info); // inode refers to the block in BlocksMap
-        index++;
-      }
+      unprotectedReplaceNode(path, oldnode, newnode);
     } finally {
       writeUnlock();
     }
   }
+
+  void unprotectedReplaceNode(String path, INodeFile oldnode, INodeFile newnode)
+      throws IOException, UnresolvedLinkException {
+    assert hasWriteLock();
+    INodeDirectory parent = oldnode.parent;
+    // Remove the node from the namespace
+    if (!oldnode.removeNode()) {
+      NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " +
+          "failed to remove " + path);
+      throw new IOException("FSDirectory.replaceNode: " +
+          "failed to remove " + path);
+    }
+
+    // Parent should be non-null, otherwise oldnode.removeNode() will return
+    // false
+    newnode.setLocalName(oldnode.getLocalNameBytes());
+    parent.addChild(newnode, true);
+
+    /* Currently oldnode and newnode are assumed to contain the same
+     * blocks. Otherwise, blocks need to be removed from the blocksMap.
+     */
+    int index = 0;
+    for (BlockInfo b : newnode.getBlocks()) {
+      BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
+      newnode.setBlock(index, info); // inode refers to the block in BlocksMap
+      index++;
+    }
+  }
 
   /**
    * Get a partial listing of the indicated directory

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index e9ddeb6685..5b5d761a25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -321,7 +321,7 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
         INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
         fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
         INodeFile newFile = ucFile.convertToInodeFile();
-        fsDir.replaceNode(addCloseOp.path, ucFile, newFile);
+        fsDir.unprotectedReplaceNode(addCloseOp.path, ucFile, newFile);
       }
       break;
     }

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 1b193edf42..832ca1af39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -73,6 +73,11 @@ public INodeDirectory(PermissionStatus permissions, long mTime) {
   INodeDirectory(INodeDirectory other) {
     super(other);
     this.children = other.children;
+    if (this.children != null) {
+      for (INode child : children) {
+        child.parent = this;
+      }
+    }
   }
 
   /** @return true unconditionally. */
@@ -106,6 +111,7 @@ void replaceChild(INode newChild) {
     final int low = searchChildren(newChild);
     if (low>=0) { // an old child exists so replace by the newChild
+      children.get(low).parent = null;
       children.set(low, newChild);
     } else {
       throw new IllegalArgumentException("No child exists to be replaced");

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 695490e339..2174f94121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -25,10 +25,15 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.junit.Test;
 
@@ -157,6 +162,48 @@ public void testGetFullPathName() {
   }
 
+  /**
+   * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
+   * replace the original INodeDirectory. Before HDFS-4243, the parent field of
+   * all the children INodes of the target INodeDirectory is not changed to
+   * point to the new INodeDirectoryWithQuota. This testcase tests this
+   * scenario.
+   */
+  @Test
+  public void testGetFullPathNameAfterSetQuota() throws Exception {
+    long fileLen = 1024;
+    replication = 3;
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+        replication).build();
+    cluster.waitActive();
+    FSNamesystem fsn = cluster.getNamesystem();
+    FSDirectory fsdir = fsn.getFSDirectory();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+
+    // Create a file for test
+    final Path dir = new Path("/dir");
+    final Path file = new Path(dir, "file");
+    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);
+
+    // Check the full path name of the INode associating with the file
+    INode fnode = fsdir.getINode(file.toString());
+    assertEquals(file.toString(), fnode.getFullPathName());
+
+    // Call FSDirectory#unprotectedSetQuota which calls
+    // INodeDirectory#replaceChild
+    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
+    final Path newDir = new Path("/newdir");
+    final Path newFile = new Path(newDir, "file");
+    // Also rename dir
+    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
+    // /dir/file now should be renamed to /newdir/file
+    fnode = fsdir.getINode(newFile.toString());
+    // getFullPathName can return correct result only if the parent field of
+    // child node is set correctly
+    assertEquals(newFile.toString(), fnode.getFullPathName());
+  }
+
   @Test
   public void testAppendBlocks() {
     INodeFile origFile = createINodeFiles(1, "origfile")[0];
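
INode#getFullPathName() resolves a path by walking parent pointers, so when
FSDirectory#unprotectedSetQuota swaps an INodeDirectory for an
INodeDirectoryWithQuota, every adopted child has to be re-pointed at the
replacement; otherwise path resolution walks through the detached node. The
following is a minimal standalone sketch of that invariant, not HDFS source:
the names (Inode, replaceChild, fullPath) are invented for illustration, and
this replaceChild folds together the child adoption that the patch performs in
the INodeDirectory copy constructor and the swap it performs in
INodeDirectory#replaceChild.

import java.util.ArrayList;
import java.util.List;

public class ParentPointerSketch {
  static class Inode {
    Inode parent;
    final String name;
    final List<Inode> children = new ArrayList<>();

    Inode(String name) { this.name = name; }

    // Analogous to INode#getFullPathName(): the path is rebuilt by walking
    // parent pointers, so a stale parent field yields a wrong path.
    String fullPath() {
      return parent == null ? name : parent.fullPath() + "/" + name;
    }
  }

  // Swap oldDir for newDir under parent, then re-point every adopted child
  // at the replacement. Without the loop, the children keep referring to
  // the detached oldDir, which is the pre-HDFS-4243 bug.
  static void replaceChild(Inode parent, Inode oldDir, Inode newDir) {
    int i = parent.children.indexOf(oldDir);
    if (i < 0) {
      throw new IllegalArgumentException("No child exists to be replaced");
    }
    oldDir.parent = null;             // detach the replaced node
    parent.children.set(i, newDir);
    newDir.parent = parent;
    newDir.children.addAll(oldDir.children);
    for (Inode child : newDir.children) {
      child.parent = newDir;          // the re-pointing the patch adds
    }
  }

  public static void main(String[] args) {
    Inode root = new Inode("");
    Inode dir = new Inode("dir");
    Inode file = new Inode("file");
    root.children.add(dir);
    dir.parent = root;
    dir.children.add(file);
    file.parent = dir;

    // Replace /dir with a quota-carrying copy, as unprotectedSetQuota does.
    Inode dirWithQuota = new Inode("dir");
    replaceChild(root, dir, dirWithQuota);

    System.out.println(file.fullPath()); // prints "/dir/file"
  }
}

Dropping the re-pointing loop reproduces the failure mode that
testGetFullPathNameAfterSetQuota guards against: file.fullPath() is then
computed through the detached directory and no longer matches the tree.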