diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index 11bfff8ef0..81549a6d5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -179,10 +179,9 @@ public void testStorageTypeStatsWhenStorageFailed() throws Exception {
 
     storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
     assertEquals(3, storageTypeStats.getNodesInService());
-    String dataDir = cluster.getDataDirectory();
-    File dn1ArcVol1 = new File(dataDir, "data" + (3 * 0 + 2));
-    File dn2ArcVol1 = new File(dataDir, "data" + (3 * 1 + 2));
-    File dn3ArcVol1 = new File(dataDir, "data" + (3 * 2 + 2));
+    File dn1ArcVol1 = cluster.getInstanceStorageDir(0, 1);
+    File dn2ArcVol1 = cluster.getInstanceStorageDir(1, 1);
+    File dn3ArcVol1 = cluster.getInstanceStorageDir(2, 1);
     DataNodeTestUtils.injectDataDirFailure(dn1ArcVol1);
     DataNodeTestUtils.injectDataDirFailure(dn2ArcVol1);
     DataNodeTestUtils.injectDataDirFailure(dn3ArcVol1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index df5e29775a..6530720b36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -305,7 +305,6 @@ private void addVolumes(int numNewVolumes)
 
   private void addVolumes(int numNewVolumes, CountDownLatch waitLatch)
       throws ReconfigurationException, IOException, InterruptedException {
-    File dataDir = new File(cluster.getDataDirectory());
     DataNode dn = cluster.getDataNodes().get(0);  // First DataNode.
     Configuration conf = dn.getConf();
     String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
@@ -315,14 +314,14 @@ private void addVolumes(int numNewVolumes, CountDownLatch waitLatch)
     int startIdx = oldDataDir.split(",").length + 1;
     // Find the first available (non-taken) directory name for data volume.
     while (true) {
-      File volumeDir = new File(dataDir, "data" + startIdx);
+      File volumeDir = cluster.getInstanceStorageDir(0, startIdx);
       if (!volumeDir.exists()) {
         break;
       }
       startIdx++;
     }
     for (int i = startIdx; i < startIdx + numNewVolumes; i++) {
-      File volumeDir = new File(dataDir, "data" + String.valueOf(i));
+      File volumeDir = cluster.getInstanceStorageDir(0, i);
       newVolumeDirs.add(volumeDir);
       volumeDir.mkdirs();
       newDataDirBuf.append(",");
@@ -985,7 +984,7 @@ public void testDirectlyReloadAfterCheckDiskError()
     DataNode dn = cluster.getDataNodes().get(0);
     final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
 
-    File dirToFail = new File(cluster.getDataDirectory(), "data1");
+    File dirToFail = cluster.getInstanceStorageDir(0, 0);
     FsVolumeImpl failedVolume = DataNodeTestUtils.getVolume(dn, dirToFail);
     assertTrue("No FsVolume was found for " + dirToFail,
@@ -1037,7 +1036,7 @@ public void testFullBlockReportAfterRemovingVolumes()
         InternalDataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
 
     // Remove a data dir from datanode
-    File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
+    File dataDirToKeep = cluster.getInstanceStorageDir(0, 0);
     assertThat(
         "DN did not update its own config",
         dn.reconfigurePropertyImpl(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 6385367c1d..c116ce0fa1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -168,7 +168,7 @@ public void testVolumeFailure() throws Exception {
 
     // fail the volume
     // delete/make non-writable one of the directories (failed volume)
-    data_fail = new File(dataDir, "data3");
+    data_fail = cluster.getInstanceStorageDir(1, 0);
     failedDir = MiniDFSCluster.getFinalizedDir(data_fail,
         cluster.getNamesystem().getBlockPoolId());
     if (failedDir.exists() &&
@@ -235,7 +235,7 @@ public void testFailedVolumeBeingRemovedFromDataNode()
     DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
     DFSTestUtil.waitReplication(fs, file1, (short) 2);
 
-    File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
+    File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -298,8 +298,8 @@ public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
     assumeNotWindows();
 
     // make both data directories to fail on dn0
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1, dn0Vol2);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -322,8 +322,8 @@ public void testVolumeFailureRecoveredByHotSwappingVolume()
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();
 
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -366,8 +366,8 @@ public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();
 
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final File dn0VolNew = new File(dataDir, "data_new");
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
@@ -413,8 +413,8 @@ public void testUnderReplicationAfterVolFailure() throws Exception {
     DFSTestUtil.waitReplication(fs, file1, (short)3);
 
     // Fail the first volume on both datanodes
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
 
     Path file2 = new Path("/test2");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index a3850efc98..f7c716d871 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -78,7 +78,6 @@ public class TestDataNodeVolumeFailureReporting {
   private FileSystem fs;
   private MiniDFSCluster cluster;
   private Configuration conf;
-  private String dataDir;
   private long volumeCapacity;
 
   // Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
@@ -134,10 +133,10 @@ public void testSuccessiveVolumeFailures() throws Exception {
     final long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
 
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    File dn3Vol1 = new File(dataDir, "data"+(2*2+1));
-    File dn3Vol2 = new File(dataDir, "data"+(2*2+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn3Vol1 = cluster.getInstanceStorageDir(2, 0);
+    File dn3Vol2 = cluster.getInstanceStorageDir(2, 1);
 
     /*
      * Make the 1st volume directories on the first two datanodes
@@ -275,8 +274,8 @@ public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
 
     // Fail the first volume on both datanodes (we have to keep the
     // third healthy so one node in the pipeline will not fail).
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
 
     Path file1 = new Path("/test1");
@@ -317,10 +316,10 @@ public void testMultipleVolFailuresOnNode() throws Exception {
     long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
 
-    File dn1Vol1 = new File(dataDir, "data"+(4*0+1));
-    File dn1Vol2 = new File(dataDir, "data"+(4*0+2));
-    File dn2Vol1 = new File(dataDir, "data"+(4*1+1));
-    File dn2Vol2 = new File(dataDir, "data"+(4*1+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn2Vol2 = cluster.getInstanceStorageDir(1, 1);
 
     // Make the first two volume directories on the first two datanodes
     // non-accessible.
@@ -376,10 +375,10 @@ public void testDataNodeReconfigureWithVolumeFailures() throws Exception {
 
     // Fail the first volume on both datanodes (we have to keep the
     // third healthy so one node in the pipeline will not fail).
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn1Vol2 = new File(dataDir, "data"+(2*0+2));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    File dn2Vol2 = new File(dataDir, "data"+(2*1+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn2Vol2 = cluster.getInstanceStorageDir(1, 1);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1);
     DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
 
@@ -528,8 +527,8 @@ public void testAutoFormatEmptyBlockPoolDirectory() throws Exception {
 
   @Test
   public void testHotSwapOutFailedVolumeAndReporting() throws Exception {
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -777,7 +776,6 @@ private void initCluster(int numDataNodes, int storagesPerDatanode,
         .storagesPerDatanode(storagesPerDatanode).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    dataDir = cluster.getDataDirectory();
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(
         cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
     volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index 46f9bf792f..9a09570a6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -50,7 +50,6 @@ public class TestDataNodeVolumeFailureToleration {
   private FileSystem fs;
   private MiniDFSCluster cluster;
   private Configuration conf;
-  private String dataDir;
 
   // Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
   // for heartbeats to propagate from the datanodes to the namenode.
@@ -80,7 +79,6 @@ public void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    dataDir = cluster.getDataDirectory();
   }
 
   @After
@@ -161,7 +159,7 @@ public void testConfigureMinValidVolumes() throws Exception {
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
 
     // Fail a volume on the 2nd DN
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn2Vol1 = cluster.getStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
 
     // Should only get two replicas (the first DN and the 3rd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
index 0f41d2394b..aa9a70728d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -102,9 +102,7 @@ public void testVolumeMetricsWithVolumeDepartureArrival() throws Exception {
 
       ArrayList<DataNode> dns = cluster.getDataNodes();
       assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
-
-      final String dataDir = cluster.getDataDirectory();
-      final File dn1Vol2 = new File(dataDir, "data2");
+      final File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
       DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
       verifyDataNodeVolumeMetrics(fs, cluster, fileName);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index f3100eae56..8266c1f5b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -615,13 +615,15 @@ public void testReportNodeWithoutJson() throws Exception {
     assertThat(
         outputs.get(3),
         is(allOf(containsString("DISK"),
-            containsString("/dfs/data/data1"),
+            containsString(cluster.getInstanceStorageDir(0, 0)
+                .getAbsolutePath()),
             containsString("0.00"),
             containsString("1.00"))));
     assertThat(
         outputs.get(4),
         is(allOf(containsString("DISK"),
-            containsString("/dfs/data/data2"),
+            containsString(cluster.getInstanceStorageDir(0, 1)
+                .getAbsolutePath()),
             containsString("0.00"),
             containsString("1.00"))));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 1365b1a27f..34d34b8851 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -349,11 +349,14 @@ private void testDataNodeGetReconfigurationStatus(boolean expectedSuccuss)
           containsString("FAILED: Change property " + DFS_DATANODE_DATA_DIR_KEY));
     }
 
+    File dnDir0 = cluster.getInstanceStorageDir(0, 0);
+    File dnDir1 = cluster.getInstanceStorageDir(0, 1);
     assertThat(outs.get(offset + 1),
-        is(allOf(containsString("From:"), containsString("data1"),
-            containsString("data2"))));
-    assertThat(outs.get(offset + 2),
-        is(not(anyOf(containsString("data1"), containsString("data2")))));
+        is(allOf(containsString("From:"), containsString(dnDir0.getName()),
+            containsString(dnDir1.getName()))));
+    assertThat(outs.get(offset + 2), is(not(
+        anyOf(containsString(dnDir0.getName()),
+            containsString(dnDir1.getName())))));
     assertThat(outs.get(offset + 2),
         is(allOf(containsString("To"), containsString("data_new"))));
   }