From 52d7bafcf49916887197436ddb0f08f021d248d9 Mon Sep 17 00:00:00 2001
From: Manoj Govindassamy
Date: Tue, 1 Aug 2017 16:28:20 -0700
Subject: [PATCH] HDFS-12217. HDFS snapshots doesn't capture all open files when one of the open files is deleted.

---
 .../hdfs/protocol/SnapshotException.java       |   4 +
 .../hdfs/server/namenode/FSNamesystem.java     |   2 +-
 .../hdfs/server/namenode/LeaseManager.java     |  23 ++--
 .../DirectorySnapshottableFeature.java         |  16 ++-
 .../snapshot/TestOpenFilesWithSnapshot.java    | 126 ++++++++++++++++++
 5 files changed, 157 insertions(+), 14 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java
index e9c5b2a4b1..49f3eaaba2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java
@@ -30,4 +30,8 @@ public SnapshotException(final String message) {
   public SnapshotException(final Throwable cause) {
     super(cause);
   }
+
+  public SnapshotException(final String message, final Throwable cause) {
+    super(message, cause);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fd4ab8d741..f0ebcbba68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4971,7 +4971,7 @@ private long nextBlockId(BlockType blockType) throws IOException {
     return blockId;
   }
 
-  private boolean isFileDeleted(INodeFile file) {
+  boolean isFileDeleted(INodeFile file) {
     // Not in the inodeMap or in the snapshot but marked deleted.
     if (dir.getInode(file.getId()) == null) {
       return true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 38cdbb30ef..6578ba97fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -162,18 +162,25 @@ synchronized long getNumUnderConstructionBlocks() {
    *
    * @return Set<INodesInPath>
    */
-  public Set<INodesInPath> getINodeWithLeases() {
+  @VisibleForTesting
+  Set<INodesInPath> getINodeWithLeases() throws IOException {
     return getINodeWithLeases(null);
   }
 
   private synchronized INode[] getINodesWithLease() {
-    int inodeCount = 0;
-    INode[] inodes = new INode[leasesById.size()];
+    List<INode> inodes = new ArrayList<>(leasesById.size());
+    INode currentINode;
     for (long inodeId : leasesById.keySet()) {
-      inodes[inodeCount] = fsnamesystem.getFSDirectory().getInode(inodeId);
-      inodeCount++;
+      currentINode = fsnamesystem.getFSDirectory().getInode(inodeId);
+      // A file with an active lease could get deleted, or its
+      // parent directories could get recursively deleted.
+      if (currentINode != null &&
+          currentINode.isFile() &&
+          !fsnamesystem.isFileDeleted(currentINode.asFile())) {
+        inodes.add(currentINode);
+      }
     }
-    return inodes;
+    return inodes.toArray(new INode[0]);
   }
 
   /**
@@ -186,7 +193,7 @@ private synchronized INode[] getINodesWithLease() {
    * @return Set<INodesInPath>
    */
   public Set<INodesInPath> getINodeWithLeases(final INodeDirectory
-      ancestorDir) {
+      ancestorDir) throws IOException {
     assert fsnamesystem.hasReadLock();
     final long startTimeMs = Time.monotonicNow();
     Set<INodesInPath> iipSet = new HashSet<>();
@@ -233,7 +240,7 @@ public List<INodesInPath> call() {
       try {
         iipSet.addAll(f.get());
       } catch (Exception e) {
-        LOG.warn("INode filter task encountered exception: ", e);
+        throw new IOException("Failed to get files with active leases", e);
       }
     }
     final long endTimeMs = Time.monotonicNow();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 0ab928d04d..23dcbe8c9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -195,11 +195,17 @@ public Snapshot addSnapshot(INodeDirectory snapshotRoot, int id, String name,
     s.getRoot().setModificationTime(now, Snapshot.CURRENT_STATE_ID);
 
     if (captureOpenFiles) {
-      Set<INodesInPath> openFilesIIP =
-          leaseManager.getINodeWithLeases(snapshotRoot);
-      for (INodesInPath openFileIIP : openFilesIIP) {
-        INodeFile openFile = openFileIIP.getLastINode().asFile();
-        openFile.recordModification(openFileIIP.getLatestSnapshotId());
+      try {
+        Set<INodesInPath> openFilesIIP =
+            leaseManager.getINodeWithLeases(snapshotRoot);
+        for (INodesInPath openFileIIP : openFilesIIP) {
+          INodeFile openFile = openFileIIP.getLastINode().asFile();
+          openFile.recordModification(openFileIIP.getLatestSnapshotId());
+        }
+      } catch (Exception e) {
+        throw new SnapshotException("Failed to add snapshot: Unable to " +
+            "capture all open files under the snapshot dir " +
+            snapshotRoot.getFullPathName() + " for snapshot '" + name + "'", e);
       }
     }
     return s;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index 7aaadf85d0..fb83a3e3ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -496,6 +496,132 @@ public void testSnapshotsForOpenFilesWithNNRestart() throws Exception {
     flumeOutputStream.close();
   }
 
+  /**
+   * Test snapshot capturing open files when an open file with an active
+   * lease is deleted by the client.
+   */
+  @Test (timeout = 120000)
+  public void testSnapshotsForOpenFilesAndDeletion() throws Exception {
+    // Construct the directory tree
+    final Path snapRootDir = new Path("/level_0_A");
+    final String flumeFileName = "flume.log";
+    final String hbaseFileName = "hbase.log";
+    final String snap1Name = "snap_1";
+    final String snap2Name = "snap_2";
+    final String snap3Name = "snap_3";
+
+    // Create files and open streams
+    final Path flumeFile = new Path(snapRootDir, flumeFileName);
+    createFile(flumeFile);
+    final Path hbaseFile = new Path(snapRootDir, hbaseFileName);
+    createFile(hbaseFile);
+    FSDataOutputStream flumeOutputStream = fs.append(flumeFile);
+    FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile);
+
+    // Create Snapshot S1
+    final Path snap1Dir = SnapshotTestHelper.createSnapshot(
+        fs, snapRootDir, snap1Name);
+    final Path flumeS1Path = new Path(snap1Dir, flumeFileName);
+    final long flumeFileLengthAfterS1 = fs.getFileStatus(flumeFile).getLen();
+    final Path hbaseS1Path = new Path(snap1Dir, hbaseFileName);
+    final long hbaseFileLengthAfterS1 = fs.getFileStatus(hbaseFile).getLen();
+
+    // Verify if Snap S1 file lengths are same as the current versions
+    Assert.assertEquals(flumeFileLengthAfterS1,
+        fs.getFileStatus(flumeS1Path).getLen());
+    Assert.assertEquals(hbaseFileLengthAfterS1,
+        fs.getFileStatus(hbaseS1Path).getLen());
+
+    long flumeFileWrittenDataLength = flumeFileLengthAfterS1;
+    long hbaseFileWrittenDataLength = hbaseFileLengthAfterS1;
+    int newWriteLength = (int) (BLOCKSIZE * 1.5);
+    byte[] buf = new byte[newWriteLength];
+    Random random = new Random();
+    random.nextBytes(buf);
+
+    // Write more data to files
+    flumeFileWrittenDataLength += writeToStream(flumeOutputStream, buf);
+    hbaseFileWrittenDataLength += writeToStream(hbaseOutputStream, buf);
+
+    // Create Snapshot S2
+    final Path snap2Dir = SnapshotTestHelper.createSnapshot(
+        fs, snapRootDir, snap2Name);
+    final Path flumeS2Path = new Path(snap2Dir, flumeFileName);
+    final Path hbaseS2Path = new Path(snap2Dir, hbaseFileName);
+
+    // Verify current file lengths are same as all data written till now
+    final long flumeFileLengthAfterS2 = fs.getFileStatus(flumeFile).getLen();
+    Assert.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
+    final long hbaseFileLengthAfterS2 = fs.getFileStatus(hbaseFile).getLen();
+    Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS2);
+
+    // Verify if Snap S2 file lengths are same as the current versions
+    Assert.assertEquals(flumeFileLengthAfterS2,
+        fs.getFileStatus(flumeS2Path).getLen());
+    Assert.assertEquals(hbaseFileLengthAfterS2,
+        fs.getFileStatus(hbaseS2Path).getLen());
+
+    // Write more data to open files
+    writeToStream(flumeOutputStream, buf);
+    hbaseFileWrittenDataLength += writeToStream(hbaseOutputStream, buf);
+
+    // Verify old snapshots have point-in-time/frozen file
+    // lengths even after the current versions have moved forward.
+    Assert.assertEquals(flumeFileLengthAfterS1,
+        fs.getFileStatus(flumeS1Path).getLen());
+    Assert.assertEquals(flumeFileLengthAfterS2,
+        fs.getFileStatus(flumeS2Path).getLen());
+    Assert.assertEquals(hbaseFileLengthAfterS1,
+        fs.getFileStatus(hbaseS1Path).getLen());
+    Assert.assertEquals(hbaseFileLengthAfterS2,
+        fs.getFileStatus(hbaseS2Path).getLen());
+
+    // Delete the current flume file. Snapshots should
+    // still have references to the flume file.
+    boolean flumeFileDeleted = fs.delete(flumeFile, true);
+    Assert.assertTrue(flumeFileDeleted);
+    Assert.assertFalse(fs.exists(flumeFile));
+    Assert.assertTrue(fs.exists(flumeS1Path));
+    Assert.assertTrue(fs.exists(flumeS2Path));
+
+    SnapshotTestHelper.createSnapshot(fs, snapRootDir, "tmp_snap");
+    fs.deleteSnapshot(snapRootDir, "tmp_snap");
+
+    // Delete snap_2. snap_1 still has a reference to
+    // the flume file.
+    fs.deleteSnapshot(snapRootDir, snap2Name);
+    Assert.assertFalse(fs.exists(flumeS2Path));
+    Assert.assertTrue(fs.exists(flumeS1Path));
+
+    // Delete snap_1. Now all traces of the flume file
+    // are gone.
+    fs.deleteSnapshot(snapRootDir, snap1Name);
+    Assert.assertFalse(fs.exists(flumeS2Path));
+    Assert.assertFalse(fs.exists(flumeS1Path));
+
+    // Create Snapshot S3
+    final Path snap3Dir = SnapshotTestHelper.createSnapshot(
+        fs, snapRootDir, snap3Name);
+    final Path hbaseS3Path = new Path(snap3Dir, hbaseFileName);
+
+    // Verify the live file length is same as all data written till now
+    final long hbaseFileLengthAfterS3 = fs.getFileStatus(hbaseFile).getLen();
+    Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS3);
+
+    // Write more data to open files
+    hbaseFileWrittenDataLength += writeToStream(hbaseOutputStream, buf);
+
+    // Verify old snapshots have point-in-time/frozen file
+    // lengths even after the flume open file is deleted and
+    // the hbase live file has moved forward.
+    Assert.assertEquals(hbaseFileLengthAfterS3,
+        fs.getFileStatus(hbaseS3Path).getLen());
+    Assert.assertEquals(hbaseFileWrittenDataLength,
+        fs.getFileStatus(hbaseFile).getLen());
+
+    hbaseOutputStream.close();
+  }
+
   private void restartNameNode() throws Exception {
     cluster.triggerBlockReports();
     NameNode nameNode = cluster.getNameNode();