diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1e446de351..7c1685ccc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -270,6 +270,9 @@ Trunk (Unreleased)
     HDFS-4687. TestDelegationTokenForProxyUser#testWebHdfsDoAs is flaky with
     JDK7. (Andrew Wang via atm)
 
+    HDFS-4785. Concat operation does not remove concatenated files from
+    InodeMap. (suresh)
+
   BREAKDOWN OF HADOOP-8562 and HDFS-3602 SUBTASKS AND RELATED JIRAS
 
     HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 8e3a74bb0d..aa7bd609c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -971,6 +971,7 @@ public void unprotectedConcat(String target, String [] srcs, long timestamp)
 
       nodeToRemove.setBlocks(null);
       trgParent.removeChild(nodeToRemove);
+      inodeMap.remove(nodeToRemove);
       count++;
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index ba1c968fce..01003a6671 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -451,8 +451,27 @@ public void testInodeId() throws IOException {
     assertTrue(fs.delete(renamedPath, true));
     inodeCount -= 2;
     assertEquals(inodeCount, fsn.dir.getInodeMapSize());
+
+    // Create /test1/file1 and /test1/file2, concat them into /test1/file2,
+    // and verify that the source inode is removed from the inodeMap
+    String file1 = "/test1/file1";
+    String file2 = "/test1/file2";
+    DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
+    DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
+    inodeCount += 3; // test1, file1 and file2 are created
+    expectedLastInodeId += 3;
+    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
+    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+    // Concat /test1/file1 into /test1/file2
+    nnrpc.concat(file2, new String[] {file1});
+    inodeCount--; // file1 is removed after being concatenated into file2
+    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
+    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
+    assertTrue(fs.delete(new Path("/test1"), true));
+    inodeCount -= 2; // test1 and file2 are deleted
+    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
 
-    // Make sure empty editlog can be handled
+    // Make sure the edit log is loaded correctly
     cluster.restartNameNode();
     cluster.waitActive();
     fsn = cluster.getNamesystem();