HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fail due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.

Lei Xu 2017-05-30 11:09:03 -07:00 committed by Owen O'Malley
parent 665f7c8b69
commit 08460f1559


@@ -894,6 +894,9 @@ public void logDelaySendingAckToUpstream(
1, fsVolumeReferences.size());
}
// Add a new DataNode to help with the pipeline recovery.
cluster.startDataNodes(conf, 1, true, null, null, null);
// Verify that the file has a sufficient number of replicas.
DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
// Read the content back
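
A minimal sketch of the pattern the first hunk applies, assuming a test that already holds a running MiniDFSCluster (cluster), its Configuration (conf), a FileSystem (fs), and the test file's Path plus replication factor; the helper name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical helper: give pipeline recovery a spare DataNode to transfer
// the block to, then wait until the NameNode reports full replication.
static void addSpareDataNodeAndAwaitReplication(
    MiniDFSCluster cluster, Configuration conf, FileSystem fs,
    Path testFile, short replication) throws Exception {
  // One extra DataNode beyond the original pipeline members.
  cluster.startDataNodes(conf, 1, true, null, null, null);
  // Blocks until the file reaches the expected replica count.
  DFSTestUtil.waitReplication(fs, testFile, replication);
}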
@@ -925,6 +928,32 @@ public void logDelaySendingAckToUpstream(
assertTrue(String.format("DataNode(%d) should have more than 1 block",
dataNodeIdx), blockCount > 1);
}
// Write more files to make sure that the DataNode whose volume was removed
// is still alive and able to receive data.
for (int i = 0; i < 10; i++) {
final Path file = new Path("/after-" + i);
try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
rb.nextBytes(writeBuf);
fout.write(writeBuf);
}
}
try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
.getFsVolumeReferences()) {
assertEquals("Volume removal wasn't successful.",
1, fsVolumeReferences.size());
FsVolumeSpi volume = fsVolumeReferences.get(0);
String bpid = cluster.getNamesystem().getBlockPoolId();
FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
int blockCount = 0;
while (!blkIter.atEnd()) {
blkIter.nextBlock();
blockCount++;
}
assertTrue(String.format("DataNode(%d) should have more than 1 block",
dataNodeIdx), blockCount > 1);
}
}
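
The second hunk counts blocks by walking the remaining volume's BlockIterator inline; a sketch of the same pattern as a reusable helper, assuming only the FsVolumeSpi APIs used in the patch (the helper name and the iterator name "count" are hypothetical):

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

// Hypothetical helper: count the blocks of one block pool stored on a volume
// by stepping its BlockIterator until it reports the end.
static int countBlocksOnVolume(FsVolumeSpi volume, String bpid)
    throws IOException {
  int blockCount = 0;
  try (FsVolumeSpi.BlockIterator iter = volume.newBlockIterator(bpid, "count")) {
    while (!iter.atEnd()) {
      iter.nextBlock();
      blockCount++;
    }
  }
  return blockCount;
}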
@Test(timeout=60000)