diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 34bfe10a59..1682a30fa2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -33,9 +33,11 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
@@ -1058,6 +1060,18 @@ public void addDeleteBlock(BlockInfo toDelete) {
       assert toDelete != null : "toDelete is null";
       toDelete.delete();
       toDeleteList.add(toDelete);
+      // If the file is being truncated, the copy-on-truncate block
+      // should also be collected for deletion.
+      BlockUnderConstructionFeature uc = toDelete.getUnderConstructionFeature();
+      if (uc == null) {
+        return;
+      }
+      Block truncateBlock = uc.getTruncateBlock();
+      if (truncateBlock == null || truncateBlock.equals(toDelete)) {
+        return;
+      }
+      assert truncateBlock instanceof BlockInfo : "should be BlockInfo";
+      addDeleteBlock((BlockInfo) truncateBlock);
     }
 
     public void addUpdateReplicationFactor(BlockInfo block, short targetRepl) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index d4215e8581..51a94e73f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -60,6 +60,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -1155,6 +1156,46 @@ public void testTruncate4Symlink() throws IOException {
     fs.delete(parent, true);
   }
 
+  /**
+   * While a rolling upgrade is in progress, this test truncates a file
+   * such that copy-on-truncate is triggered, deletes the file, and
+   * verifies that no blocks involved in the truncate are left behind.
+   */
+  @Test
+  public void testTruncateWithRollingUpgrade() throws Exception {
+    final DFSAdmin dfsadmin = new DFSAdmin(cluster.getConfiguration(0));
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    // Start the rolling upgrade.
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    int status = dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"});
+    assertEquals("could not prepare for rolling upgrade", 0, status);
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    Path dir = new Path("/testTruncateWithRollingUpgrade");
+    fs.mkdirs(dir);
+    final Path p = new Path(dir, "file");
+    final byte[] data = new byte[3];
+    ThreadLocalRandom.current().nextBytes(data);
+    writeContents(data, data.length, p);
+
+    assertEquals("block num should be 1", 1,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+
+    final boolean isReady = fs.truncate(p, 2);
+    assertFalse("should be copy-on-truncate", isReady);
+    assertEquals("block num should be 2", 2,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    fs.delete(p, true);
+
+    assertEquals("block num should be 0", 0,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    status = dfsadmin.run(new String[]{"-rollingUpgrade", "finalize"});
+    assertEquals("could not finalize rolling upgrade", 0, status);
+  }
+
   static void writeContents(byte[] contents, int fileLength, Path p)
       throws IOException {
     FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,
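
Note on the patch: the test runs under a rolling upgrade because the NameNode then chooses copy-on-truncate (a new recovery block is allocated so the old block survives for a possible rollback), which is exactly the case where the extra block used to leak on delete. The sketch below is a simplified, self-contained model of the control flow added to addDeleteBlock(), not the actual HDFS code: TruncateBlockModel, SimpleBlock, and collectDeleteBlock() are hypothetical stand-ins for BlocksMapUpdateInfo, BlockInfo with BlockUnderConstructionFeature.getTruncateBlock(), and addDeleteBlock().

import java.util.ArrayList;
import java.util.List;

public class TruncateBlockModel {
  // Stand-in for a block: only identity and the truncate pointer matter here.
  static final class SimpleBlock {
    final long id;
    SimpleBlock truncateBlock; // set while a truncate recovery is in progress
    SimpleBlock(long id) { this.id = id; }
    @Override
    public boolean equals(Object o) {
      return o instanceof SimpleBlock && ((SimpleBlock) o).id == id;
    }
    @Override
    public int hashCode() { return Long.hashCode(id); }
  }

  private final List<SimpleBlock> toDeleteList = new ArrayList<>();

  // Mirrors the guard added to addDeleteBlock(): collect the block, and if a
  // distinct copy-on-truncate block exists, collect that one as well.
  void collectDeleteBlock(SimpleBlock toDelete) {
    toDeleteList.add(toDelete);
    SimpleBlock tb = toDelete.truncateBlock;
    if (tb == null || tb.equals(toDelete)) {
      return; // no truncate in progress, or an in-place truncate
    }
    collectDeleteBlock(tb);
  }

  public static void main(String[] args) {
    TruncateBlockModel model = new TruncateBlockModel();

    // In-place truncate: the truncate block is the block itself.
    SimpleBlock inPlace = new SimpleBlock(1);
    inPlace.truncateBlock = inPlace;
    model.collectDeleteBlock(inPlace); // collects 1 block

    // Copy-on-truncate: a distinct new block was allocated for recovery.
    SimpleBlock original = new SimpleBlock(2);
    original.truncateBlock = new SimpleBlock(3);
    model.collectDeleteBlock(original); // collects 2 blocks

    System.out.println(model.toDeleteList.size()); // prints 3
  }
}

The equals() comparison matches the truncateBlock.equals(toDelete) check in the patch: for an in-place truncate the recovery block is the block being truncated itself, so without that guard the recursive call would collect the same block again.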