HDFS-10960. TestDataNodeHotSwapVolumes#testRemoveVolumeBeingWritten fails at disk error verification after volume remove. (Manoj Govindassamy via lei)
commit 8c520a27cb
parent adb96e109f
@@ -642,8 +642,6 @@ private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
     final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
     final FileSystem fs = cluster.getFileSystem();
     final Path testFile = new Path("/test");
-    final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
-
     FSDataOutputStream out = fs.create(testFile, REPLICATION);
 
     Random rb = new Random(0);
@@ -699,17 +697,24 @@ public void run() {
     reconfigThread.join();
 
+    // Verify if the data directory reconfigure was successful
+    FsDatasetSpi<? extends FsVolumeSpi> fsDatasetSpi = dn.getFSDataset();
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      for (int i =0; i < fsVolumeReferences.size(); i++) {
+        System.out.println("Vol: " +
+            fsVolumeReferences.get(i).getBaseURI().toString());
+      }
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+    }
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
     byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
     assertEquals(BLOCK_SIZE, content.length);
 
-    // If an IOException thrown from BlockReceiver#run, it triggers
-    // DataNode#checkDiskError(). So we can test whether checkDiskError() is called,
-    // to see whether there is IOException in BlockReceiver#run().
-    assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
-
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }
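
Illustrative sketch (not part of this commit): the block added above works because FsDatasetSpi.FsVolumeReferences is Closeable, so the try-with-resources statement releases the per-volume references once the count has been checked. The helper below shows how that same check could be factored out for reuse in tests; the class and method names (VolumeCountCheck, assertVolumeCount) are hypothetical, while DataNode#getFSDataset(), FsDatasetSpi#getFsVolumeReferences(), and FsVolumeReferences#size() are the calls already exercised in the diff.

    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
    import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

    import static org.junit.Assert.assertEquals;

    // Hypothetical test helper mirroring the volume-count verification above.
    public final class VolumeCountCheck {
      private VolumeCountCheck() {}

      /**
       * Asserts that the DataNode currently exposes the expected number of
       * volumes. FsVolumeReferences is Closeable, so try-with-resources
       * releases the per-volume references once the check completes.
       */
      public static void assertVolumeCount(DataNode dn, int expected)
          throws IOException {
        FsDatasetSpi<? extends FsVolumeSpi> dataset = dn.getFSDataset();
        try (FsDatasetSpi.FsVolumeReferences volumes =
            dataset.getFsVolumeReferences()) {
          assertEquals("Unexpected number of volumes on the DataNode.",
              expected, volumes.size());
        }
      }
    }

With such a helper, the check in the hunk above would reduce to assertVolumeCount(dn, 1); immediately after reconfigThread.join().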