From 57d2fff84bdf52db29ce44b1cb989b363fcaa8c3 Mon Sep 17 00:00:00 2001
From: Inigo Goiri
Date: Tue, 21 Jul 2020 09:00:07 -0700
Subject: [PATCH] HDFS-15246. ArrayIndexOutOfBoundsException in
 BlockManager#createLocatedBlock. Contributed by Hemanth Boyina.

(cherry picked from commit 8b7695bb2628574b4450bac19c12b29db9ee0628)
---
 .../hdfs/server/namenode/FSDirRenameOp.java   |  7 +++-
 .../server/namenode/TestFileTruncate.java     | 35 +++++++++++++++++++
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 2fd25237f1..c60acaa003 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -655,7 +655,12 @@ private static class RenameOperation {
       // snapshot is taken on the dst tree, changes will be recorded in the
       // latest snapshot of the src tree.
       if (isSrcInSnapshot) {
-        srcChild.recordModification(srcLatestSnapshotId);
+        if (srcChild.isFile()) {
+          INodeFile file = srcChild.asFile();
+          file.recordModification(srcLatestSnapshotId, true);
+        } else {
+          srcChild.recordModification(srcLatestSnapshotId);
+        }
       }
 
       // check srcChild for reference
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 335772bdc2..5c75abb0e6 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -40,6 +40,7 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -70,6 +71,7 @@ import org.apache.hadoop.util.ToolRunner;
 import org.slf4j.event.Level;
 
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -1460,4 +1462,37 @@ public void testQuotaSpaceConsumedWithSnapshots() throws IOException {
         fs.getQuotaUsage(root).getSpaceConsumed());
   }
+
+  /**
+   * Test truncate on a file renamed out of a snapshotted directory.
+   */
+  @Test
+  public void testTruncateWithRenameAndSnapshot() throws Exception {
+    final Path dir = new Path("/dir");
+    fs.mkdirs(dir, new FsPermission((short) 0777));
+    final Path file = new Path(dir, "file");
+    final Path movedFile = new Path("/file");
+
+    // 1. create a file and take a snapshot of dir, which contains it
+    DFSTestUtil.createFile(fs, file, 10, (short) 3, 0);
+    fs.allowSnapshot(dir);
+    Path snapshotPath = fs.createSnapshot(dir, "s0");
+    assertTrue(fs.exists(snapshotPath));
+
+    // 2. move the file out of the snapshotted directory
+    fs.rename(file, new Path("/"));
+
+    // 3. truncate the moved file
+    final boolean isReady = fs.truncate(movedFile, 5);
+    if (!isReady) {
+      checkBlockRecovery(movedFile);
+    }
+    FileStatus fileStatus = fs.getFileStatus(movedFile);
+    assertEquals(5, fileStatus.getLen());
+
+    // 4. get block locations of the file in the snapshot
+    LocatedBlocks locations = fs.getClient().getNamenode()
+        .getBlockLocations("/dir/.snapshot/s0/file", 0, 10);
+    assertEquals(10, locations.get(0).getBlockSize());
+  }
 
 }
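
The core of the fix is the INodeFile special case: when the source of a rename exists in a snapshot, the modification is recorded together with the file's block list (the second argument to INodeFile#recordModification is withBlocks), so the snapshot copy keeps its pre-truncate blocks. Before the change, a later truncate shortened the block list while the snapshot still reported the original file length, and BlockManager#createLocatedBlock could index past the end of the array, producing the ArrayIndexOutOfBoundsException in the summary.

The same sequence can be replayed outside JUnit. The following is a minimal, hypothetical sketch, not part of the patch: it mirrors the regression test against a MiniDFSCluster, assumes the hadoop-hdfs test artifacts (MiniDFSCluster, DFSTestUtil) are on the classpath, and uses an illustrative class name.

// Hypothetical repro sketch (not part of the patch). Mirrors the regression
// test: requires the hadoop-hdfs test artifacts on the classpath for
// MiniDFSCluster and DFSTestUtil; the class name is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class TruncateRenameSnapshotRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();

      // create a 10-byte file under /dir and snapshot /dir
      Path dir = new Path("/dir");
      fs.mkdirs(dir, new FsPermission((short) 0777));
      Path file = new Path(dir, "file");
      DFSTestUtil.createFile(fs, file, 10, (short) 3, 0);
      fs.allowSnapshot(dir);
      fs.createSnapshot(dir, "s0");

      // move the file out of the snapshotted directory, then truncate it;
      // truncate() returns false while block recovery is still pending
      fs.rename(file, new Path("/"));
      boolean isReady = fs.truncate(new Path("/file"), 5);
      System.out.println("truncate completed immediately: " + isReady);

      // before the FSDirRenameOp change, this lookup threw
      // ArrayIndexOutOfBoundsException in BlockManager#createLocatedBlock
      LocatedBlocks blocks = fs.getClient().getNamenode()
          .getBlockLocations("/dir/.snapshot/s0/file", 0, 10);
      System.out.println("snapshot block size: "
          + blocks.get(0).getBlockSize());
    } finally {
      cluster.shutdown();
    }
  }
}

A truncate that ends at a mid-block offset completes asynchronously; the regression test handles this by waiting in checkBlockRecovery(movedFile) whenever truncate() returns false, which the sketch above only reports.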