HDFS-4243. When replacing an INodeDirectory, the parent pointers of the children of the child have to be updated to the new child. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1416709 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-12-03 22:38:33 +00:00
parent 8f7c92094d
commit c0a8957c2b
5 changed files with 87 additions and 22 deletions
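The bug in a nutshell: FSDirectory#unprotectedSetQuota replaces an INodeDirectory with a new instance whose copy constructor takes over the old node's child list, and before this fix the children kept pointing at the detached old node, so full-path resolution could walk a stale chain. The toy program below models that invariant; it is a sketch in plain Java, not HDFS code, and every name in it is invented for illustration.

    import java.util.ArrayList;
    import java.util.List;

    /** Toy inode with a parent pointer and a child list (not HDFS code). */
    class ToyINode {
      String name;
      ToyINode parent;
      final List<ToyINode> children = new ArrayList<ToyINode>();

      ToyINode(String name) {
        this.name = name;
      }

      /**
       * Copy constructor in the spirit of INodeDirectory(INodeDirectory other):
       * it takes over the child list, so each child must be re-pointed at the
       * copy. Dropping this loop reproduces the pre-HDFS-4243 bug.
       */
      ToyINode(ToyINode other) {
        this.name = other.name;
        this.children.addAll(other.children);
        for (ToyINode child : children) {
          child.parent = this;
        }
      }

      /** Walks parent pointers upward, like INode#getFullPathName. */
      String getFullPathName() {
        return parent == null ? "" : parent.getFullPathName() + "/" + name;
      }

      public static void main(String[] args) {
        ToyINode root = new ToyINode("");
        ToyINode dir = new ToyINode("dir");
        dir.parent = root;
        root.children.add(dir);
        ToyINode file = new ToyINode("file");
        file.parent = dir;
        dir.children.add(file);

        // Replace "dir" with a copy, the way unprotectedSetQuota swaps in an
        // INodeDirectoryWithQuota via INodeDirectory#replaceChild.
        ToyINode quotaDir = new ToyINode(dir);
        quotaDir.parent = root;
        root.children.set(0, quotaDir);
        dir.parent = null;        // detach the old node, as replaceChild now does

        quotaDir.name = "newdir"; // a later rename, like the new test below

        System.out.println(file.getFullPathName()); // prints "/newdir/file"
      }
    }

With the re-pointing loop in the copy constructor this prints /newdir/file; without it, file.parent would still reference the detached directory and the output degrades to /file.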

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -655,6 +655,10 @@ Release 2.0.3-alpha - Unreleased

     HDFS-4231. BackupNode: Introduce BackupState. (shv)

+    HDFS-4243. When replacing an INodeDirectory, the parent pointers of the
+    children of the child have to be updated to the new child. (Jing Zhao
+    via szetszwo)
+
 Release 2.0.2-alpha - 2012-09-07

   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -1075,32 +1075,40 @@ public void replaceNode(String path, INodeFile oldnode, INodeFile newnode)
       throws IOException, UnresolvedLinkException {
     writeLock();
     try {
-      //
-      // Remove the node from the namespace
-      //
-      if (!oldnode.removeNode()) {
-        NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " +
-            "failed to remove " + path);
-        throw new IOException("FSDirectory.replaceNode: " +
-            "failed to remove " + path);
-      }
-
-      /* Currently oldnode and newnode are assumed to contain the same
-       * blocks. Otherwise, blocks need to be removed from the blocksMap.
-       */
-      rootDir.addINode(path, newnode);
-
-      int index = 0;
-      for (BlockInfo b : newnode.getBlocks()) {
-        BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
-        newnode.setBlock(index, info); // inode refers to the block in BlocksMap
-        index++;
-      }
+      unprotectedReplaceNode(path, oldnode, newnode);
     } finally {
       writeUnlock();
     }
   }
+
+  void unprotectedReplaceNode(String path, INodeFile oldnode, INodeFile newnode)
+      throws IOException, UnresolvedLinkException {
+    assert hasWriteLock();
+    INodeDirectory parent = oldnode.parent;
+
+    // Remove the node from the namespace
+    if (!oldnode.removeNode()) {
+      NameNode.stateChangeLog.warn("DIR* FSDirectory.replaceNode: " +
+          "failed to remove " + path);
+      throw new IOException("FSDirectory.replaceNode: " +
+          "failed to remove " + path);
+    }
+
+    // Parent should be non-null, otherwise oldnode.removeNode() will return
+    // false
+    newnode.setLocalName(oldnode.getLocalNameBytes());
+    parent.addChild(newnode, true);
+
+    /* Currently oldnode and newnode are assumed to contain the same
+     * blocks. Otherwise, blocks need to be removed from the blocksMap.
+     */
+    int index = 0;
+    for (BlockInfo b : newnode.getBlocks()) {
+      BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
+      newnode.setBlock(index, info); // inode refers to the block in BlocksMap
+      index++;
+    }
+  }

   /**
    * Get a partial listing of the indicated directory
    *

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -321,7 +321,7 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
         INodeFileUnderConstruction ucFile = (INodeFileUnderConstruction) oldFile;
         fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
         INodeFile newFile = ucFile.convertToInodeFile();
-        fsDir.replaceNode(addCloseOp.path, ucFile, newFile);
+        fsDir.unprotectedReplaceNode(addCloseOp.path, ucFile, newFile);
       }
       break;
     }
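The switch from replaceNode to unprotectedReplaceNode here follows the FSDirectory convention that public methods acquire the directory write lock while unprotected* bodies only assert it; edit-log replay already runs with that lock held, otherwise the new assert hasWriteLock() would trip. Below is a minimal sketch of the convention, assuming a ReentrantReadWriteLock like the one FSDirectory keeps internally; the class and method names are simplified stand-ins, not the real API.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    /** Simplified stand-in for FSDirectory's locked/unprotected split. */
    class DirSketch {
      private final ReentrantReadWriteLock dirLock = new ReentrantReadWriteLock(true);

      boolean hasWriteLock() {
        return dirLock.isWriteLockedByCurrentThread();
      }

      /** Public entry point: takes the write lock, then delegates. */
      void replaceNode(String path) {
        dirLock.writeLock().lock();
        try {
          unprotectedReplaceNode(path);
        } finally {
          dirLock.writeLock().unlock();
        }
      }

      /**
       * Shared body for callers that already hold the lock, such as edit-log
       * replay; the assert documents the precondition instead of re-locking.
       */
      void unprotectedReplaceNode(String path) {
        assert hasWriteLock() : "caller must hold the directory write lock";
        // ... mutate the namespace under the caller's lock ...
      }
    }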

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -73,6 +73,11 @@ public INodeDirectory(PermissionStatus permissions, long mTime) {
   INodeDirectory(INodeDirectory other) {
     super(other);
     this.children = other.children;
+    if (this.children != null) {
+      for (INode child : children) {
+        child.parent = this;
+      }
+    }
   }

   /** @return true unconditionally. */
@@ -106,6 +111,7 @@ void replaceChild(INode newChild) {
     final int low = searchChildren(newChild);
     if (low>=0) { // an old child exists so replace by the newChild
+      children.get(low).parent = null;
       children.set(low, newChild);
     } else {
       throw new IllegalArgumentException("No child exists to be replaced");

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java

@@ -25,10 +25,15 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;

+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;

 import org.junit.Test;
@@ -157,6 +162,48 @@ public void testGetFullPathName() {
   }

+  /**
+   * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
+   * replace the original INodeDirectory. Before HDFS-4243, the parent field of
+   * all the children INodes of the target INodeDirectory is not changed to
+   * point to the new INodeDirectoryWithQuota. This testcase tests this
+   * scenario.
+   */
+  @Test
+  public void testGetFullPathNameAfterSetQuota() throws Exception {
+    long fileLen = 1024;
+    replication = 3;
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+        replication).build();
+    cluster.waitActive();
+    FSNamesystem fsn = cluster.getNamesystem();
+    FSDirectory fsdir = fsn.getFSDirectory();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+
+    // Create a file for test
+    final Path dir = new Path("/dir");
+    final Path file = new Path(dir, "file");
+    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);
+
+    // Check the full path name of the INode associating with the file
+    INode fnode = fsdir.getINode(file.toString());
+    assertEquals(file.toString(), fnode.getFullPathName());
+
+    // Call FSDirectory#unprotectedSetQuota which calls
+    // INodeDirectory#replaceChild
+    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
+    final Path newDir = new Path("/newdir");
+    final Path newFile = new Path(newDir, "file");
+    // Also rename dir
+    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
+    // /dir/file now should be renamed to /newdir/file
+    fnode = fsdir.getINode(newFile.toString());
+    // getFullPathName can return correct result only if the parent field of
+    // child node is set correctly
+    assertEquals(newFile.toString(), fnode.getFullPathName());
+  }
+
   @Test
   public void testAppendBlocks() {
     INodeFile origFile = createINodeFiles(1, "origfile")[0];