From e1dfc060f8f0247f97127c75c9284a068fc93907 Mon Sep 17 00:00:00 2001
From: Inigo Goiri
Date: Thu, 6 Jun 2019 11:59:53 -0700
Subject: [PATCH] HDFS-14486. The exception classes in some throw statements
 do not accurately describe why they are thrown. Contributed by Ayush Saxena.

---
 .../hdfs/server/datanode/BlockReceiver.java        |  3 +-
 .../hadoop/hdfs/server/datanode/DataNode.java      |  3 +-
 .../checker/DatasetVolumeChecker.java              | 10 +++--
 .../checker/StorageLocationChecker.java            |  8 ++--
 .../fsdataset/impl/FsDatasetImpl.java              |  4 +-
 .../TestDataNodeVolumeFailureToleration.java       |  4 +-
 .../server/datanode/TestDatanodeRegister.java      | 12 ++++++
 .../checker/TestDatasetVolumeChecker.java          | 37 +++++++++++++++++++
 .../checker/TestStorageLocationChecker.java        | 33 ++++++++++++++++-
 9 files changed, 101 insertions(+), 13 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 9509184043..dad964c3d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -25,6 +25,7 @@
 import java.io.DataOutputStream;
 import java.io.EOFException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.nio.ByteBuffer;
@@ -1074,7 +1075,7 @@ void receiveBlock(
         responder.interrupt();
         // do not throw if shutting down for restart.
         if (!datanode.isRestarting()) {
-          throw new IOException("Interrupted receiveBlock");
+          throw new InterruptedIOException("Interrupted receiveBlock");
         }
       }
       responder = null;
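The BlockReceiver change narrows the thrown type rather than the message: InterruptedIOException is a subclass of IOException, so existing callers keep compiling, but a caller can now tell an interrupted transfer apart from a genuine I/O failure by type instead of by message text. A minimal sketch of such a caller (hypothetical, not part of this patch; the BlockReceive interface is invented for the example):

import java.io.IOException;
import java.io.InterruptedIOException;

public class ReceiveBlockCaller {

  /** Stand-in for the receive operation; assumed for this sketch. */
  interface BlockReceive {
    void run() throws IOException;
  }

  void receive(BlockReceive receiveBlock) throws IOException {
    try {
      receiveBlock.run();
    } catch (InterruptedIOException e) {
      // Interrupted (e.g. the DataNode is shutting down or restarting):
      // restore the interrupt status and return without treating the
      // block as failed.
      Thread.currentThread().interrupt();
    }
    // Any other IOException still propagates as a real transfer failure.
  }
}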
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f9ef83d934..09c99fdfec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -96,6 +96,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurableBase;
@@ -1420,7 +1421,7 @@ void startDataNode(List<StorageLocation> dataDirectories,
     int volsConfigured = dnConf.getVolsConfigured();
     if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT
         || volFailuresTolerated >= volsConfigured) {
-      throw new DiskErrorException("Invalid value configured for "
+      throw new HadoopIllegalArgumentException("Invalid value configured for "
           + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
           + ". Value configured is either less than -1 or >= "
           + "to the number of configured volumes (" + volsConfigured + ").");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index b4922875bd..91582fe055 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -26,6 +26,8 @@
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -120,7 +122,7 @@ public DatasetVolumeChecker(Configuration conf, Timer timer)
         TimeUnit.MILLISECONDS);

     if (maxAllowedTimeForCheckMs <= 0) {
-      throw new DiskErrorException("Invalid value configured for "
+      throw new HadoopIllegalArgumentException("Invalid value configured for "
           + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
           + maxAllowedTimeForCheckMs + " (should be > 0)");
     }
@@ -137,7 +139,7 @@ public DatasetVolumeChecker(Configuration conf, Timer timer)
         TimeUnit.MILLISECONDS);

     if (minDiskCheckGapMs < 0) {
-      throw new DiskErrorException("Invalid value configured for "
+      throw new HadoopIllegalArgumentException("Invalid value configured for "
           + DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY + " - "
           + minDiskCheckGapMs + " (should be >= 0)");
     }
@@ -148,7 +150,7 @@ public DatasetVolumeChecker(Configuration conf, Timer timer)
         TimeUnit.MILLISECONDS);

     if (diskCheckTimeout < 0) {
-      throw new DiskErrorException("Invalid value configured for "
+      throw new HadoopIllegalArgumentException("Invalid value configured for "
           + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
           + diskCheckTimeout + " (should be >= 0)");
     }
@@ -156,7 +158,7 @@ public DatasetVolumeChecker(Configuration conf, Timer timer)
     lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;

     if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
-      throw new DiskErrorException("Invalid value configured for "
+      throw new HadoopIllegalArgumentException("Invalid value configured for "
           + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
           + maxVolumeFailuresTolerated + " "
           + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
index c5de065f4f..0332bc8633 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
@@ -23,6 +23,8 @@
 import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -93,7 +95,7 @@ public StorageLocationChecker(Configuration conf, Timer timer)
         TimeUnit.MILLISECONDS);

     if (maxAllowedTimeForCheckMs <= 0) {
-      throw new DiskErrorException("Invalid value configured for "
+      throw new HadoopIllegalArgumentException("Invalid value configured for "
           + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
           + maxAllowedTimeForCheckMs + " (should be > 0)");
     }
@@ -107,7 +109,7 @@ public StorageLocationChecker(Configuration conf, Timer timer)
         DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

     if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
-      throw new DiskErrorException("Invalid value configured for "
+      throw new HadoopIllegalArgumentException("Invalid value configured for "
           + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
           + maxVolumeFailuresTolerated + " "
           + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
@@ -170,7 +172,7 @@ public List<StorageLocation> check(
     }

     if (maxVolumeFailuresTolerated >= dataDirs.size()) {
-      throw new DiskErrorException("Invalid value configured for "
+      throw new HadoopIllegalArgumentException("Invalid value configured for "
           + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
           + maxVolumeFailuresTolerated + ". Value configured is >= "
           + "to the number of configured volumes (" + dataDirs.size() + ").");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 76110d68b8..f8507633fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -48,6 +48,8 @@
 import javax.management.StandardMBean;

 import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -292,7 +294,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)

     if (volFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT
         || volFailuresTolerated >= volsConfigured) {
-      throw new DiskErrorException("Invalid value configured for "
+      throw new HadoopIllegalArgumentException("Invalid value configured for "
           + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
           + ". Value configured is either less than maxVolumeFailureLimit or greater than "
           + "to the number of configured volumes (" + volsConfigured + ").");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index 825887c1af..a9e4096df4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -26,6 +26,7 @@
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;

+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -36,7 +37,6 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -242,7 +242,7 @@ private void testVolumeConfig(int volumesTolerated, int volumesFailed,
         prepareDirToFail(dirs[i]);
       }
       restartDatanodes(volumesTolerated, manageDfsDirs);
-    } catch (DiskErrorException e) {
+    } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("Invalid value configured for "
           + "dfs.datanode.failed.volumes.tolerated", e);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
index 95a361a65d..bffdaae369 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;

+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.doReturn;
@@ -36,6 +37,7 @@
 import org.junit.Assert;

+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -162,4 +164,14 @@ public void testDNShutdwonBeforeRegister() throws Exception {
           e.getMessage());
     }
   }
+
+  @Test
+  public void testInvalidConfigurationValue() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2);
+    intercept(HadoopIllegalArgumentException.class,
+        "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+            + " - -2 should be greater than or equal to -1",
+        () -> new DataNode(conf, new ArrayList<>(), null, null));
+  }
 }
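The test changes so far use two assertion styles: TestDataNodeVolumeFailureToleration keeps the try/catch plus GenericTestUtils.assertExceptionContains pattern, while the new tests use LambdaTestUtils.intercept. A hedged side-by-side sketch of the two (hypothetical test class, not part of the patch; the validate() helper is invented to stand in for the DataNode and checker constructors):

import static org.apache.hadoop.test.LambdaTestUtils.intercept;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;

public class ExceptionAssertionStyles {

  /** Toy stand-in for the configuration checks changed in this patch. */
  private static int validate(int tolerated) {
    if (tolerated < -1) {
      throw new HadoopIllegalArgumentException(
          "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
              + " - " + tolerated);
    }
    return tolerated;
  }

  @Test
  public void tryCatchStyle() {
    try {
      validate(-2);
      Assert.fail("expected HadoopIllegalArgumentException");
    } catch (HadoopIllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("Invalid value configured", e);
    }
  }

  @Test
  public void interceptStyle() throws Exception {
    // intercept() fails the test by itself if nothing is thrown, so no
    // explicit fail() call is needed.
    intercept(HadoopIllegalArgumentException.class,
        "Invalid value configured", () -> validate(-2));
  }
}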
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
index 9a45e62274..9b0636fada 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
@@ -20,6 +20,8 @@
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.*;
@@ -44,6 +46,10 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;

+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
 import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.*;
@@ -226,4 +232,35 @@ static List<FsVolumeSpi> makeVolumes(
     }
     return volumes;
   }
+
+  @Test
+  public void testInvalidConfigurationValues() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0);
+    intercept(HadoopIllegalArgumentException.class,
+        "Invalid value configured for dfs.datanode.disk.check.timeout"
+            + " - 0 (should be > 0)",
+        () -> new DatasetVolumeChecker(conf, new FakeTimer()));
+    conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY);
+
+    conf.setInt(DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, -1);
+    intercept(HadoopIllegalArgumentException.class,
+        "Invalid value configured for dfs.datanode.disk.check.min.gap"
+            + " - -1 (should be >= 0)",
+        () -> new DatasetVolumeChecker(conf, new FakeTimer()));
+    conf.unset(DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY);
+
+    conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, -1);
+    intercept(HadoopIllegalArgumentException.class,
+        "Invalid value configured for dfs.datanode.disk.check.timeout"
+            + " - -1 (should be > 0)",
+        () -> new DatasetVolumeChecker(conf, new FakeTimer()));
+    conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY);
+
+    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2);
+    intercept(HadoopIllegalArgumentException.class,
+        "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+            + " - -2 should be greater than or equal to -1",
+        () -> new DatasetVolumeChecker(conf, new FakeTimer()));
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
index 169a1b9333..80f0396c6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestStorageLocationChecker.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode.checker;

+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -36,6 +37,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;

+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
 import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*;
@@ -129,7 +131,7 @@ public void testBadConfiguration() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 3);

-    thrown.expect(IOException.class);
+    thrown.expect(HadoopIllegalArgumentException.class);
     thrown.expectMessage("Invalid value configured");
     StorageLocationChecker checker =
         new StorageLocationChecker(conf, new FakeTimer());
@@ -214,4 +216,33 @@ public VolumeCheckResult answer(InvocationOnMock invocation)
     }
     return locations;
   }
+
+  @Test
+  public void testInvalidConfigurationValues() throws Exception {
+    final List<StorageLocation> locations =
+        makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
+    Configuration conf = new HdfsConfiguration();
+
+    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 4);
+    intercept(HadoopIllegalArgumentException.class,
+        "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+            + " - 4. Value configured is >= to the "
+            + "number of configured volumes (3).",
+        () -> new StorageLocationChecker(conf, new FakeTimer()).check(conf,
+            locations));
+    conf.unset(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY);
+
+    conf.setInt(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, 0);
+    intercept(HadoopIllegalArgumentException.class,
+        "Invalid value configured for dfs.datanode.disk.check.timeout"
+            + " - 0 (should be > 0)",
+        () -> new StorageLocationChecker(conf, new FakeTimer()));
+    conf.unset(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY);
+
+    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, -2);
+    intercept(HadoopIllegalArgumentException.class,
+        "Invalid value configured for dfs.datanode.failed.volumes.tolerated"
+            + " - -2 should be greater than or equal to -1",
+        () -> new StorageLocationChecker(conf, new FakeTimer()));
+  }
 }
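Taken together, the production changes converge on one idiom: configuration values are range-checked up front and rejected with an unchecked HadoopIllegalArgumentException instead of DiskErrorException, since a bad setting is a caller error rather than a failing disk. A minimal sketch of that idiom, assuming a simplified helper (the class and method names below are invented for illustration; the real checks live in DataNode, FsDatasetImpl, DatasetVolumeChecker and StorageLocationChecker):

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;

public final class VolumeConfigValidation {

  private VolumeConfigValidation() {
  }

  /**
   * Range-check dfs.datanode.failed.volumes.tolerated the way the patched
   * classes do: values below -1 and values >= the number of configured
   * volumes are rejected as configuration errors.
   */
  static int validateFailedVolumesTolerated(Configuration conf,
      int volsConfigured) {
    int tolerated = conf.getInt("dfs.datanode.failed.volumes.tolerated", 0);
    if (tolerated < -1 || tolerated >= volsConfigured) {
      // A misconfiguration is an illegal argument, not a disk failure, so
      // the unchecked HadoopIllegalArgumentException describes it better
      // than the old checked DiskErrorException.
      throw new HadoopIllegalArgumentException("Invalid value configured for "
          + "dfs.datanode.failed.volumes.tolerated - " + tolerated);
    }
    return tolerated;
  }
}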