diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 0cbfd9f35f..6b90146753 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -37,6 +37,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.Properties;
+import java.util.UUID;
 
 /**
  * HddsVolume represents volume in a datanode. {@link VolumeSet} maitains a
@@ -84,6 +85,7 @@ public static class Builder {
 
     private String datanodeUuid;
     private String clusterID;
+    private boolean failedVolume = false;
 
     public Builder(String rootDirStr) {
       this.volumeRootStr = rootDirStr;
@@ -114,29 +116,47 @@ public Builder clusterID(String cid) {
       return this;
     }
 
+    // This is added only to create a failed-volume HddsVolume object when
+    // building a regular HddsVolume object throws an exception, so that the
+    // failed volume can still be tracked and reported.
+    public Builder failedVolume(boolean failed) {
+      this.failedVolume = failed;
+      return this;
+    }
+
     public HddsVolume build() throws IOException {
       return new HddsVolume(this);
     }
   }
 
   private HddsVolume(Builder b) throws IOException {
-    StorageLocation location = StorageLocation.parse(b.volumeRootStr);
-    hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR);
-    this.state = VolumeState.NOT_INITIALIZED;
-    this.clusterID = b.clusterID;
-    this.datanodeUuid = b.datanodeUuid;
-    this.volumeIOStats = new VolumeIOStats();
+    if (!b.failedVolume) {
+      StorageLocation location = StorageLocation.parse(b.volumeRootStr);
+      hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR);
+      this.state = VolumeState.NOT_INITIALIZED;
+      this.clusterID = b.clusterID;
+      this.datanodeUuid = b.datanodeUuid;
+      this.volumeIOStats = new VolumeIOStats();
 
-    VolumeInfo.Builder volumeBuilder =
-        new VolumeInfo.Builder(b.volumeRootStr, b.conf)
-        .storageType(b.storageType)
-        .configuredCapacity(b.configuredCapacity);
-    this.volumeInfo = volumeBuilder.build();
+      VolumeInfo.Builder volumeBuilder =
+          new VolumeInfo.Builder(b.volumeRootStr, b.conf)
+          .storageType(b.storageType)
+          .configuredCapacity(b.configuredCapacity);
+      this.volumeInfo = volumeBuilder.build();
 
-    LOG.info("Creating Volume: " + this.hddsRootDir + " of storage type : " +
-        b.storageType + " and capacity : " + volumeInfo.getCapacity());
+      LOG.info("Creating Volume: " + this.hddsRootDir + " of storage type : " +
+          b.storageType + " and capacity : " + volumeInfo.getCapacity());
 
-    initialize();
+      initialize();
+    } else {
+      // Builder was invoked with failedVolume set, so create a minimal
+      // HddsVolume object representing a failed volume.
+      hddsRootDir = new File(b.volumeRootStr);
+      volumeIOStats = null;
+      volumeInfo = null;
+      storageID = UUID.randomUUID().toString();
+      state = VolumeState.FAILED;
+    }
   }
 
   public VolumeInfo getVolumeInfo() {
@@ -285,7 +305,10 @@ public File getHddsRootDir() {
   }
 
   public StorageType getStorageType() {
-    return volumeInfo.getStorageType();
+    if (volumeInfo != null) {
+      return volumeInfo.getStorageType();
+    }
+    return StorageType.DEFAULT;
   }
 
   public String getStorageID() {
@@ -313,11 +336,17 @@ public VolumeState getStorageState() {
   }
 
   public long getCapacity() throws IOException {
-    return volumeInfo.getCapacity();
+    if (volumeInfo != null) {
+      return volumeInfo.getCapacity();
+    }
+    return 0;
   }
 
   public long getAvailable() throws IOException {
-    return volumeInfo.getAvailable();
+    if (volumeInfo != null) {
+      return volumeInfo.getAvailable();
+    }
+    return 0;
   }
 
   public void setState(VolumeState state) {
@@ -334,12 +363,16 @@ public VolumeIOStats getVolumeIOStats() {
 
   public void failVolume() {
     setState(VolumeState.FAILED);
-    volumeInfo.shutdownUsageThread();
+    if (volumeInfo != null) {
+      volumeInfo.shutdownUsageThread();
+    }
   }
 
   public void shutdown() {
     this.state = VolumeState.NON_EXISTENT;
-    volumeInfo.shutdownUsageThread();
+    if (volumeInfo != null) {
+      volumeInfo.shutdownUsageThread();
+    }
   }
 
   /**
@@ -368,6 +401,8 @@ public enum VolumeState {
    */
   @VisibleForTesting
  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    volumeInfo.setScmUsageForTesting(scmUsageForTest);
+    if (volumeInfo != null) {
+      volumeInfo.setScmUsageForTesting(scmUsageForTest);
+    }
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 4dfde37ce3..4a1487b1de 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -76,6 +76,7 @@ public class VolumeSet {
    * mutually exclusive.
    */
   private Map<String, HddsVolume> failedVolumeMap;
+
   /**
   * {@link VolumeSet#volumeStateMap} maintains a list of active volumes per
   * StorageType.
@@ -95,12 +96,12 @@
   private Runnable shutdownHook;
 
   public VolumeSet(String dnUuid, Configuration conf)
-      throws DiskOutOfSpaceException {
+      throws IOException {
     this(dnUuid, null, conf);
   }
 
   public VolumeSet(String dnUuid, String clusterID, Configuration conf)
-      throws DiskOutOfSpaceException {
+      throws IOException {
     this.datanodeUuid = dnUuid;
     this.clusterID = clusterID;
     this.conf = conf;
@@ -120,7 +121,7 @@ public VolumeSet(String dnUuid, String clusterID, Configuration conf)
   }
 
   // Add DN volumes configured through ConfigKeys to volumeMap.
-  private void initializeVolumeSet() throws DiskOutOfSpaceException {
+  private void initializeVolumeSet() throws IOException {
     volumeMap = new ConcurrentHashMap<>();
     failedVolumeMap = new ConcurrentHashMap<>();
     volumeStateMap = new EnumMap<>(StorageType.class);
@@ -153,6 +154,9 @@ private void initializeVolumeSet() throws DiskOutOfSpaceException {
         LOG.info("Added Volume : {} to VolumeSet",
             hddsVolume.getHddsRootDir().getPath());
       } catch (IOException e) {
+        HddsVolume volume = new HddsVolume.Builder(locationString)
+            .failedVolume(true).build();
+        failedVolumeMap.put(locationString, volume);
         LOG.error("Failed to parse the storage location: " + locationString, e);
       }
     }
@@ -337,11 +341,12 @@ public Map<StorageType, List<HddsVolume>> getVolumeStateMap() {
   public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
       throws IOException {
     boolean failed;
-    StorageLocationReport[] reports =
-        new StorageLocationReport[volumeMap.size()];
+    StorageLocationReport[] reports = new StorageLocationReport[volumeMap
+        .size() + failedVolumeMap.size()];
     int counter = 0;
+    HddsVolume hddsVolume;
     for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
-      HddsVolume hddsVolume = entry.getValue();
+      hddsVolume = entry.getValue();
       VolumeInfo volumeInfo = hddsVolume.getVolumeInfo();
       long scmUsed = 0;
       long remaining = 0;
@@ -370,6 +375,17 @@ public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
       StorageLocationReport r = builder.build();
       reports[counter++] = r;
     }
+    for (Map.Entry<String, HddsVolume> entry : failedVolumeMap.entrySet()) {
+      hddsVolume = entry.getValue();
+      StorageLocationReport.Builder builder = StorageLocationReport
+          .newBuilder();
+      builder.setStorageLocation(hddsVolume.getHddsRootDir()
+          .getAbsolutePath()).setId(hddsVolume.getStorageID()).setFailed(true)
+          .setCapacity(0).setRemaining(0).setScmUsed(0).setStorageType(
+              hddsVolume.getStorageType());
+      StorageLocationReport r = builder.build();
+      reports[counter++] = r;
+    }
     NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
     for (int i = 0; i < reports.length; i++) {
       nrb.addStorageReport(reports[i].getProtoBufMessage());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 3ee9343a4b..fca68b19ac 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -27,8 +27,10 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
     .HDDS_VOLUME_DIR;
 import static org.junit.Assert.assertEquals;
@@ -82,14 +84,16 @@ public void setup() throws Exception {
   @After
   public void shutdown() throws IOException {
     // Delete the hdds volume root dir
-    List<HddsVolume> volumes = new ArrayList<>();
-    volumes.addAll(volumeSet.getVolumesList());
-    volumes.addAll(volumeSet.getFailedVolumesList());
+    List<HddsVolume> hddsVolumes = new ArrayList<>();
+    hddsVolumes.addAll(volumeSet.getVolumesList());
+    hddsVolumes.addAll(volumeSet.getFailedVolumesList());
 
-    for (HddsVolume volume : volumes) {
+    for (HddsVolume volume : hddsVolumes) {
       FileUtils.deleteDirectory(volume.getHddsRootDir());
     }
     volumeSet.shutdown();
+
+    FileUtil.fullyDelete(new File(baseDir));
   }
 
   private boolean checkVolumeExistsInVolumeSet(String volume) {
@@ -222,6 +226,29 @@ public void testShutdown() throws Exception {
         // Do Nothing. Exception is expected.
       }
     }
+  }
+
+  @Test
+  public void testFailVolumes() throws Exception {
+    VolumeSet volSet = null;
+    File readOnlyVolumePath = new File(baseDir);
+    // Set to read-only, so that this volume will be failed
+    readOnlyVolumePath.setReadOnly();
+    File volumePath = GenericTestUtils.getRandomizedTestDir();
+    OzoneConfiguration ozoneConfig = new OzoneConfiguration();
+    ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath()
+        + "," + volumePath.getAbsolutePath());
+    volSet = new VolumeSet(UUID.randomUUID().toString(), ozoneConfig);
+    assertTrue(volSet.getFailedVolumesList().size() == 1);
+    assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0)
+        .getHddsRootDir());
+
+    // Set back to writable
+    try {
+      readOnlyVolumePath.setWritable(true);
+    } finally {
+      FileUtil.fullyDelete(volumePath);
+    }
   }
 
 }
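A minimal usage sketch of the failed-volume path introduced by this patch (illustrative only, not part of the patch): the volume path "/data/disk1" and the class name FailedVolumeSketch are placeholders, and the expected outputs in the comments follow from the accessor fallbacks added above.

import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

public final class FailedVolumeSketch {

  private FailedVolumeSketch() {
  }

  public static void main(String[] args) throws Exception {
    // Mirrors what initializeVolumeSet() now does in its catch block: when a
    // storage location cannot be handled, keep a lightweight HddsVolume that
    // is marked FAILED instead of dropping it silently.
    HddsVolume failedVolume = new HddsVolume.Builder("/data/disk1")
        .failedVolume(true)
        .build();

    // With volumeInfo == null, the patched accessors fall back to defaults,
    // so the failed volume can still be included in the node report.
    System.out.println(failedVolume.getStorageState());  // FAILED
    System.out.println(failedVolume.getCapacity());      // 0
    System.out.println(failedVolume.getAvailable());     // 0
    System.out.println(failedVolume.getStorageType());   // StorageType.DEFAULT
  }
}

Because such a failed volume still carries a root directory, a random storage ID, and a default storage type, getNodeReport() can report it with setFailed(true) alongside the healthy volumes.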