diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 6530720b36..4564595c99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -894,6 +894,9 @@ public void logDelaySendingAckToUpstream(
           1, fsVolumeReferences.size());
     }
 
+    // Add a new DataNode to help with the pipeline recover.
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
@@ -925,6 +928,32 @@ public void logDelaySendingAckToUpstream(
       assertTrue(String.format("DataNode(%d) should have more than 1 blocks",
           dataNodeIdx), blockCount > 1);
     }
+
+    // Write more files to make sure that the DataNode that has removed volume
+    // is still alive to receive data.
+    for (int i = 0; i < 10; i++) {
+      final Path file = new Path("/after-" + i);
+      try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
+        rb.nextBytes(writeBuf);
+        fout.write(writeBuf);
+      }
+    }
+
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+      FsVolumeSpi volume = fsVolumeReferences.get(0);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
+      int blockCount = 0;
+      while (!blkIter.atEnd()) {
+        blkIter.nextBlock();
+        blockCount++;
+      }
+      assertTrue(String.format("DataNode(%d) should have more than 1 blocks",
+          dataNodeIdx), blockCount > 1);
+    }
   }
 
   @Test(timeout=60000)