HDFS-4785. Concat operation does not remove concatenated files from InodeMap. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1478267 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
ca2c38fba1
commit
03ba436d42
@ -270,6 +270,9 @@ Trunk (Unreleased)
     HDFS-4687. TestDelegationTokenForProxyUser#testWebHdfsDoAs is flaky with
     JDK7. (Andrew Wang via atm)

+    HDFS-4785. Concat operation does not remove concatenated files from
+    InodeMap. (suresh)
+
   BREAKDOWN OF HADOOP-8562 and HDFS-3602 SUBTASKS AND RELATED JIRAS

     HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,
@ -971,6 +971,7 @@ public void unprotectedConcat(String target, String [] srcs, long timestamp)
       nodeToRemove.setBlocks(null);
       trgParent.removeChild(nodeToRemove);
+      inodeMap.remove(nodeToRemove);
       count++;
     }

@ -451,8 +451,27 @@ public void testInodeId() throws IOException {
     assertTrue(fs.delete(renamedPath, true));
     inodeCount -= 2;
     assertEquals(inodeCount, fsn.dir.getInodeMapSize());

+    // Create and concat /test/file1 /test/file2
+    // Create /test1/file1 and /test1/file2
+    String file1 = "/test1/file1";
+    String file2 = "/test1/file2";
+    DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
+    DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
+    inodeCount += 3; // test1, file1 and file2 are created
+    expectedLastInodeId += 3;
+    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
+    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+    // Concat the /test1/file1 /test1/file2 into /test1/file2
+    nnrpc.concat(file2, new String[] {file1});
+    inodeCount--; // file1 and file2 are concatenated to file2
+    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
+    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+    assertTrue(fs.delete(new Path("/test1"), true));
+    inodeCount -= 2; // test1 and file2 is deleted
+    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
+
-    // Make sure empty editlog can be handled
+    // Make sure editlog is loaded correctly
     cluster.restartNameNode();
     cluster.waitActive();
     fsn = cluster.getNamesystem();

Loading…
Reference in New Issue
Block a user