diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 90a46698a8..fd90ae921a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -151,7 +151,7 @@ private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
       throws IOException {
     StorageDirectory sd = new StorageDirectory(dataDir, null, true);
     try {
-      StorageState curState = sd.analyzeStorage(startOpt, this);
+      StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened
       switch (curState) {
       case NORMAL:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index 6c58743194..8d021cd002 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
@@ -480,6 +481,25 @@ public void testAutoFormatEmptyDirectory() throws Exception {
     checkFailuresAtNameNode(dm, dns.get(0), false, dn1Vol1.getAbsolutePath());
   }
 
+  @Test
+  public void testAutoFormatEmptyBlockPoolDirectory() throws Exception {
+    // remove the version file
+    DataNode dn = cluster.getDataNodes().get(0);
+    String bpid = cluster.getNamesystem().getBlockPoolId();
+    BlockPoolSliceStorage bps = dn.getStorage().getBPStorage(bpid);
+    Storage.StorageDirectory dir = bps.getStorageDir(0);
+    File current = dir.getCurrentDir();
+
+    File currentVersion = new File(current, "VERSION");
+    currentVersion.delete();
+    // restart the data node
+    assertTrue(cluster.restartDataNodes(true));
+    // the DN should tolerate one volume failure.
+    cluster.waitActive();
+    assertFalse("DataNode should not reformat if VERSION is missing",
+        currentVersion.exists());
+  }
+
   /**
    * Checks the NameNode for correct values of aggregate counters tracking failed
    * volumes across all DataNodes.
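Note (not part of the patch): a minimal sketch of the behavior the extra boolean argument to analyzeStorage appears to select. With the flag set, a block pool storage directory whose current/ still contains data but has lost its VERSION file is reported as inconsistent (and surfaced as a volume failure) rather than being treated as unformatted and silently reformatted, which is what the new test asserts. The StorageStateSketch class, its analyze method, and the simplified State enum below are illustrative stand-ins, not the real org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory API.

import java.io.File;

// Simplified stand-in for the analyzeStorage decision; the real Hadoop method
// takes a StartupOption and a Storage and returns a StorageState.
public class StorageStateSketch {
  enum State { NORMAL, NON_EXISTENT, NOT_FORMATTED, INCONSISTENT }

  // The boolean mirrors the `true` the patch now passes: only an empty
  // current/ directory may be treated as "not formatted" (and thus formatted).
  static State analyze(File root, boolean requireEmptyCurrentToFormat) {
    if (!root.exists()) {
      return State.NON_EXISTENT;
    }
    File current = new File(root, "current");
    File version = new File(current, "VERSION");
    if (version.isFile()) {
      return State.NORMAL;              // VERSION present: usable as-is
    }
    String[] contents = current.list();
    boolean currentIsEmpty = contents == null || contents.length == 0;
    if (requireEmptyCurrentToFormat && !currentIsEmpty) {
      // current/ has block data but no VERSION: do not auto-format;
      // report the directory as inconsistent so the volume is marked failed.
      return State.INCONSISTENT;
    }
    return State.NOT_FORMATTED;         // safe to format from scratch
  }
}

Under this reading, the old two-argument call effectively behaved as if the flag were false, so deleting VERSION led to the block pool directory being reformatted on restart; the test deletes VERSION, restarts the DataNode, and checks that no new VERSION file appears.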