HDFS-11399. Many tests fail on Windows due to injecting disk failures. Contributed by Yiqun Lin.

This commit is contained in:
Inigo Goiri 2018-03-12 09:58:56 -07:00
parent dd05871b8b
commit ac627f561f
2 changed files with 17 additions and 0 deletions

View File

@ -17,6 +17,7 @@
*/ */
package org.apache.hadoop.hdfs.server.blockmanagement; package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotNull;
@ -160,6 +161,10 @@ public void testStorageTypeStatsJMX() throws Exception {
@Test @Test
public void testStorageTypeStatsWhenStorageFailed() throws Exception { public void testStorageTypeStatsWhenStorageFailed() throws Exception {
// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
// volume failures which is currently not supported on Windows.
assumeNotWindows();
DFSTestUtil.createFile(cluster.getFileSystem(), DFSTestUtil.createFile(cluster.getFileSystem(),
new Path("/blockStatsFile1"), 1024, (short) 1, 0L); new Path("/blockStatsFile1"), 1024, (short) 1, 0L);
Map<StorageType, StorageTypeStats> storageTypeStatsMap = cluster Map<StorageType, StorageTypeStats> storageTypeStatsMap = cluster

View File

@ -293,6 +293,10 @@ public void testFailedVolumeBeingRemovedFromDataNode()
@Test(timeout=10000) @Test(timeout=10000)
public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated() public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
throws Exception { throws Exception {
// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
// volume failures which is currently not supported on Windows.
assumeNotWindows();
// make both data directories to fail on dn0 // make both data directories to fail on dn0
final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1)); final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2)); final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
@ -314,6 +318,10 @@ public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
@Test @Test
public void testVolumeFailureRecoveredByHotSwappingVolume() public void testVolumeFailureRecoveredByHotSwappingVolume()
throws Exception { throws Exception {
// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
// volume failures which is currently not supported on Windows.
assumeNotWindows();
final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1)); final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2)); final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
final DataNode dn0 = cluster.getDataNodes().get(0); final DataNode dn0 = cluster.getDataNodes().get(0);
@ -354,6 +362,10 @@ public void testVolumeFailureRecoveredByHotSwappingVolume()
@Test @Test
public void testTolerateVolumeFailuresAfterAddingMoreVolumes() public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
throws Exception { throws Exception {
// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
// volume failures which is currently not supported on Windows.
assumeNotWindows();
final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1)); final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2)); final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
final File dn0VolNew = new File(dataDir, "data_new"); final File dn0VolNew = new File(dataDir, "data_new");