From 8faf0b50d435039f69ea35f592856ca04d378809 Mon Sep 17 00:00:00 2001
From: Xiaoyu Yao
Date: Thu, 8 Feb 2018 08:59:48 -0800
Subject: [PATCH] HDFS-13120. Snapshot diff could be corrupted after concat.
 Contributed by Xiaoyu Yao.

---
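Notes (below the "---" marker, so git am drops them from the commit):

unprotectedConcat() clears each source file's blocks and then removes the
file from its parent directory. When the parent is snapshottable, that
removal has to be folded into the latest snapshot diff; the one-argument
removeChild() skips the diff bookkeeping, so the latest directory diff can
keep referencing an inode whose blocks were already cleared, and a later
snapshot diff report (or an FSImage load after saveNamespace) walks into
the gutted inode. A toy model of the two overloads follows; the class and
field names are illustrative only, not the real HDFS internals:

    import java.util.ArrayList;
    import java.util.List;

    /** Toy snapshottable directory: the current child list plus the
     *  created/deleted lists that form the latest snapshot diff. */
    class ToyDir {
      static final int NO_SNAPSHOT_ID = -1;

      final List<String> children = new ArrayList<>();
      final List<String> created = new ArrayList<>();  // added since snapshot
      final List<String> deleted = new ArrayList<>();  // removed since snapshot

      /** Mirrors removeChild(child): the diff lists are never touched. */
      void removeChild(String child) {
        children.remove(child);
      }

      /** Mirrors removeChild(child, latestSnapshotId): a child created
       *  after the snapshot simply leaves the created list, while one
       *  predating the snapshot is recorded as deleted. */
      void removeChild(String child, int latestSnapshotId) {
        if (latestSnapshotId != NO_SNAPSHOT_ID && !created.remove(child)) {
          deleted.add(child);
        }
        children.remove(child);
      }
    }

In the concat case the sources were created after the earlier snapshot, so
passing targetIIP.getLatestSnapshotId() makes their removal clean up the
created list instead of leaving dangling entries there.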
 .../hdfs/server/namenode/FSDirConcatOp.java   |   4 +-
 .../snapshot/TestSnapshotDeletion.java        | 126 ++++++++++++++++++
 2 files changed, 129 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 6a41cd8743..4cc5389a09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -253,7 +253,9 @@ static void unprotectedConcat(FSDirectory fsd, INodesInPath targetIIP,
     for (INodeFile nodeToRemove : srcList) {
       if(nodeToRemove != null) {
         nodeToRemove.clearBlocks();
-        nodeToRemove.getParent().removeChild(nodeToRemove);
+        // Ensure the nodeToRemove is cleared from snapshot diff list
+        nodeToRemove.getParent().removeChild(nodeToRemove,
+            targetIIP.getLatestSnapshotId());
         fsd.getINodeMap().remove(nodeToRemove);
         count++;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index ca53788e98..8bd796718c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -26,18 +26,22 @@
 import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -61,11 +65,15 @@
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests snapshot deletion.
  */
 public class TestSnapshotDeletion {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSnapshotDeletion.class);
   protected static final long seed = 0;
   protected static final short REPLICATION = 3;
   protected static final short REPLICATION_1 = 2;
@@ -1232,4 +1240,122 @@ public void testRenameAndDelete() throws IOException {
     // make sure bar has been cleaned from inodeMap
     Assert.assertNull(fsdir.getInode(fileId));
   }
+
+  @Test
+  public void testSnapshotWithConcatException() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i + ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int j = 0; j < 3; j++) {
+      FileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, files[j], false, 1024,
+          1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+    }
+
+    hdfs.createSnapshot(st, "s0");
+
+    // Verify the SnapshotException is thrown as expected for HDFS-4529
+    exception.expect(RemoteException.class);
+    String error = "Concat: the source file /st/0.txt is in snapshot";
+    exception.expectMessage(error);
+    hdfs.concat(dest, files);
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
+  @Test
+  public void testSnapshotDeleteWithConcat() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i + ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 3; j++) {
+        FileSystem fs = cluster.getFileSystem();
+        DFSTestUtil.createFile(fs, files[j], false, 1024,
+            1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+      }
+
+      hdfs.concat(dest, files);
+
+      hdfs.createSnapshot(st, "s" + i);
+    }
+
+
+    hdfs.deleteSnapshot(st, "s1");
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
+  @Test
+  public void testSnapshotDiffReportWithConcat() throws Exception {
+    final Path st = new Path("/st");
+    hdfs.mkdirs(st);
+    hdfs.allowSnapshot(st);
+
+    Path[] files = new Path[3];
+    for (int i = 0; i < 3; i++) {
+      files[i] = new Path(st, i + ".txt");
+    }
+
+    Path dest = new Path(st, "dest.txt");
+    hdfs.createNewFile(dest);
+    hdfs.createSnapshot(st, "ss");
+
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 3; j++) {
+        FileSystem fs = cluster.getFileSystem();
+        DFSTestUtil.createFile(fs, files[j], false, 1024,
+            1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
+      }
+
+      hdfs.concat(dest, files);
+
+      hdfs.createSnapshot(st, "s" + i);
+
+      SnapshotDiffReport sdr = hdfs.getSnapshotDiffReport(st, "s" + i, "ss");
+      LOG.info("Snapshot Diff s{} to ss : {}", i, sdr);
+      Assert.assertEquals(1, sdr.getDiffList().size());
+      Assert.assertEquals(SnapshotDiffReport.DiffType.MODIFY,
+          sdr.getDiffList().get(0).getType());
+      Assert.assertEquals(dest, new Path(st, DFSUtilClient.bytes2String(
+          sdr.getDiffList().get(0).getSourcePath())));
+    }
+
+    hdfs.deleteSnapshot(st, "s1");
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+
+    cluster.restartNameNodes();
+  }
 }
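
The three new tests exercise the fix through JUnit. For poking at the same
scenario standalone, here is a minimal sketch: the class name
ConcatSnapshotDiffRepro is made up for illustration, the file shape (1024
bytes over 512-byte blocks, i.e. full blocks, as in the tests) is copied
from the tests, and the hadoop-hdfs test artifacts (MiniDFSCluster,
DFSTestUtil) plus commons-lang3 are assumed on the classpath:

    import org.apache.commons.lang3.RandomUtils;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

    public class ConcatSnapshotDiffRepro {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration())
                .numDataNodes(1).build();
        try {
          cluster.waitActive();
          DistributedFileSystem dfs = cluster.getFileSystem();

          Path dir = new Path("/st");
          dfs.mkdirs(dir);
          dfs.allowSnapshot(dir);

          // Empty concat target, snapshotted before the sources exist.
          Path dest = new Path(dir, "dest.txt");
          dfs.createNewFile(dest);
          dfs.createSnapshot(dir, "ss");

          Path[] srcs = new Path[3];
          for (int i = 0; i < srcs.length; i++) {
            srcs[i] = new Path(dir, i + ".txt");
            DFSTestUtil.createFile(dfs, srcs[i], false, 1024,
                1024, 512, (short) 1, RandomUtils.nextLong(1, 512), true);
          }

          // The sources are in no snapshot yet, so concat is allowed;
          // with the sources captured first it would fail (HDFS-4529).
          dfs.concat(dest, srcs);
          dfs.createSnapshot(dir, "s0");

          // With the fix the report holds a single MODIFY entry for
          // dest.txt; the removed sources no longer corrupt the diff.
          SnapshotDiffReport report =
              dfs.getSnapshotDiffReport(dir, "s0", "ss");
          System.out.println(report);
        } finally {
          cluster.shutdown();
        }
      }
    }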