diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 06387c5968..83c231d233 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -642,8 +642,6 @@ private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
     final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
     final FileSystem fs = cluster.getFileSystem();
     final Path testFile = new Path("/test");
-    final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
-
     FSDataOutputStream out = fs.create(testFile, REPLICATION);
 
     Random rb = new Random(0);
@@ -699,17 +697,24 @@ public void run() {
 
     reconfigThread.join();
 
+    // Verify if the data directory reconfigure was successful
+    FsDatasetSpi fsDatasetSpi = dn.getFSDataset();
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      for (int i = 0; i < fsVolumeReferences.size(); i++) {
+        System.out.println("Vol: " +
+            fsVolumeReferences.get(i).getBaseURI().toString());
+      }
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+    }
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
     byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
     assertEquals(BLOCK_SIZE, content.length);
 
-    // If an IOException thrown from BlockReceiver#run, it triggers
-    // DataNode#checkDiskError(). So we can test whether checkDiskError() is called,
-    // to see whether there is IOException in BlockReceiver#run().
-    assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
-
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }