From 451265a83d8798624ae2a144bc58fa41db826704 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Mon, 26 Feb 2018 10:28:04 -0600
Subject: [PATCH] HDFS-12070. Failed block recovery leaves files open
 indefinitely and at risk for data loss. Contributed by Kihwal Lee.

---
 .../server/datanode/BlockRecoveryWorker.java  |  6 +--
 .../apache/hadoop/hdfs/TestLeaseRecovery.java | 44 +++++++++++++++++++
 2 files changed, 46 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 2ecd986fac..94835e2208 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -307,10 +307,8 @@ void syncBlock(List<BlockRecord> syncList) throws IOException {
         }
       }
 
-      // If any of the data-nodes failed, the recovery fails, because
-      // we never know the actual state of the replica on failed data-nodes.
-      // The recovery should be started over.
-      if (!failedList.isEmpty()) {
+      // Abort if all failed.
+      if (successList.isEmpty()) {
         throw new IOException("Cannot recover " + block
             + ", the following datanodes failed: " + failedList);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index d62194c16a..c82b47cec9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -227,6 +227,50 @@ public void testBlockRecoveryWithLessMetafile() throws Exception {
     assertEquals(newFileLen, expectedNewFileLen);
   }
 
+  /**
+   * Block/lease recovery should be retried with failed nodes from the second
+   * stage removed to avoid perpetual recovery failures.
+   */
+  @Test
+  public void testBlockRecoveryRetryAfterFailedRecovery() throws Exception {
+    Configuration conf = new Configuration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    Path file = new Path("/testBlockRecoveryRetryAfterFailedRecovery");
+    DistributedFileSystem dfs = cluster.getFileSystem();
+
+    // Create a file.
+    FSDataOutputStream out = dfs.create(file);
+    final int FILE_SIZE = 128 * 1024;
+    int count = 0;
+    while (count < FILE_SIZE) {
+      out.writeBytes("DE K9SUL");
+      count += 8;
+    }
+    out.hsync();
+
+    // Abort the original stream.
+    ((DFSOutputStream) out.getWrappedStream()).abort();
+
+    LocatedBlocks locations = cluster.getNameNodeRpc().getBlockLocations(
+        file.toString(), 0, count);
+    ExtendedBlock block = locations.get(0).getBlock();
+
+    // Finalize one replica to simulate a partial close failure.
+    cluster.getDataNodes().get(0).getFSDataset().finalizeBlock(block, false);
+    // Delete the meta file to simulate a rename/move failure.
+    cluster.deleteMeta(0, block);
+
+    // Try to recover the lease.
+    DistributedFileSystem newDfs = (DistributedFileSystem) FileSystem
+        .newInstance(cluster.getConfiguration(0));
+    count = 0;
+    while (count++ < 15 && !newDfs.recoverLease(file)) {
+      Thread.sleep(1000);
+    }
+    // The lease should have been recovered.
+    assertTrue("File should be closed", newDfs.recoverLease(file));
+  }
+
   /**
    * Recover the lease on a file and append file from another client.
    */