From 730cb0cff6a6e2f1a6eef3593568e8a1b5172cf7 Mon Sep 17 00:00:00 2001
From: Anu Engineer
Date: Wed, 2 Nov 2016 17:46:56 -0700
Subject: [PATCH] HDFS-10566. Submit plan request should throw exception if
 Datanode is in non-REGULAR status. Contributed by Xiaobing Zhou.

---
 .../hadoop/hdfs/server/datanode/DataNode.java |   6 +
 .../diskbalancer/DiskBalancerException.java   |   1 +
 .../diskbalancer/DiskBalancerTestUtil.java    |  19 ++++
 .../command/TestDiskBalancerCommand.java      | 103 ++++++++++++++----
 4 files changed, 105 insertions(+), 24 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f89d38c5e7..de9e48e990 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3414,6 +3414,12 @@ public void submitDiskBalancerPlan(String planID, long planVersion,
       String planFile, String planData, boolean skipDateCheck)
       throws IOException {
     checkSuperuserPrivilege();
+    if (getStartupOption(getConf()) != StartupOption.REGULAR) {
+      throw new DiskBalancerException(
+          "Datanode is in special state, e.g. Upgrade/Rollback etc."
+              + " Disk balancing not permitted.",
+          DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR);
+    }
     // TODO : Support force option
     this.diskBalancer.submitPlan(planID, planVersion, planFile, planData,
         skipDateCheck);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
index 95ff722a2f..a730a57924 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
@@ -40,6 +40,7 @@ public enum Result {
     NO_SUCH_PLAN,
     UNKNOWN_KEY,
     INVALID_NODE,
+    DATANODE_STATUS_NOT_REGULAR,
   }
 
   private final Result result;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
index c60fe2104a..bc4181bc68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -262,6 +263,23 @@ public static MiniDFSCluster newImbalancedCluster(
       final int defaultBlockSize,
       final int fileLen)
       throws IOException, InterruptedException, TimeoutException {
+    return newImbalancedCluster(
+        conf,
+        numDatanodes,
+        storageCapacities,
+        defaultBlockSize,
+        fileLen,
+        null);
+  }
+
+  public static MiniDFSCluster newImbalancedCluster(
+      final Configuration conf,
+      final int numDatanodes,
+      final long[] storageCapacities,
+      final int defaultBlockSize,
+      final int fileLen,
+      final StartupOption dnOption)
+      throws IOException, InterruptedException, TimeoutException {
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
@@ -281,6 +299,7 @@ public static MiniDFSCluster newImbalancedCluster(
         .storageCapacities(storageCapacities)
         .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
         .storagesPerDatanode(2)
+        .dnStartupOption(dnOption)
         .build();
     FsVolumeImpl source = null;
     FsVolumeImpl dest = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 0f65f256de..ad16cfa7a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
@@ -53,6 +54,7 @@
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Tool;
@@ -103,6 +105,50 @@ public void tearDown() throws Exception {
     }
   }
 
+  /**
+   * Tests that a submit plan request is rejected when the Datanode is in a
+   * status other than REGULAR.
+   */
+  @Test(timeout = 60000)
+  public void testSubmitPlanInNonRegularStatus() throws Exception {
+    final int numDatanodes = 1;
+    MiniDFSCluster miniCluster = null;
+    final Configuration hdfsConf = new HdfsConfiguration();
+
+    try {
+      /* new cluster with imbalanced capacity */
+      miniCluster = DiskBalancerTestUtil.newImbalancedCluster(
+          hdfsConf,
+          numDatanodes,
+          CAPACITIES,
+          DEFAULT_BLOCK_SIZE,
+          FILE_LEN,
+          StartupOption.ROLLBACK);
+
+      /* get full path of plan */
+      final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
+
+      try {
+        /* run execute command */
+        final String cmdLine = String.format(
+            "hdfs diskbalancer -%s %s",
+            EXECUTE,
+            planFileFullName);
+        runCommand(cmdLine, hdfsConf, miniCluster);
+      } catch (RemoteException e) {
+        assertThat(e.getClassName(), containsString("DiskBalancerException"));
+        assertThat(e.toString(),
+            is(allOf(
+                containsString("Datanode is in special state"),
+                containsString("Disk balancing not permitted."))));
+      }
+    } finally {
+      if (miniCluster != null) {
+        miniCluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Tests running multiple commands under one setup. This mainly covers
    * {@link org.apache.hadoop.hdfs.server.diskbalancer.command.Command#close}
@@ -122,36 +168,16 @@ public void testRunMultipleCommandsUnderOneSetup() throws Exception {
           CAPACITIES,
           DEFAULT_BLOCK_SIZE,
           FILE_LEN);
 
-      String cmdLine = "";
-      List<String> outputs = null;
-      final DataNode dn = miniCluster.getDataNodes().get(0);
-      /* run plan command */
-      cmdLine = String.format(
-          "hdfs diskbalancer -%s %s",
-          PLAN,
-          dn.getDatanodeUuid());
-      outputs = runCommand(cmdLine, hdfsConf, miniCluster);
-
-      /* get path of plan file*/
-      final String planFileName = dn.getDatanodeUuid();
-
-      /* verify plan command */
-      assertEquals(
-          "There must be two lines: the 1st is writing plan to...,"
-              + " the 2nd is actual full path of plan file.",
-          2, outputs.size());
-      assertThat(outputs.get(1), containsString(planFileName));
-
-      /* get full path of plan file*/
-      final String planFileFullName = outputs.get(1);
+      /* get full path of plan */
+      final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
 
       /* run execute command */
-      cmdLine = String.format(
+      final String cmdLine = String.format(
           "hdfs diskbalancer -%s %s",
           EXECUTE,
           planFileFullName);
-      outputs = runCommand(cmdLine, hdfsConf, miniCluster);
+      runCommand(cmdLine, hdfsConf, miniCluster);
     } finally {
       if (miniCluster != null) {
         miniCluster.shutdown();
@@ -159,6 +185,35 @@ public void testRunMultipleCommandsUnderOneSetup() throws Exception {
     }
   }
 
+  private String runAndVerifyPlan(
+      final MiniDFSCluster miniCluster,
+      final Configuration hdfsConf) throws Exception {
+    String cmdLine = "";
+    List<String> outputs = null;
+    final DataNode dn = miniCluster.getDataNodes().get(0);
+
+    /* run plan command */
+    cmdLine = String.format(
+        "hdfs diskbalancer -%s %s",
+        PLAN,
+        dn.getDatanodeUuid());
+    outputs = runCommand(cmdLine, hdfsConf, miniCluster);
+
+    /* get path of plan file*/
+    final String planFileName = dn.getDatanodeUuid();
+
+    /* verify plan command */
+    assertEquals(
+        "There must be two lines: the 1st is writing plan to...,"
+            + " the 2nd is actual full path of plan file.",
+        2, outputs.size());
+    assertThat(outputs.get(1), containsString(planFileName));
+
+    /* get full path of plan file*/
+    final String planFileFullName = outputs.get(1);
+    return planFileFullName;
+  }
+
   /* test basic report */
   @Test(timeout = 60000)
   public void testReportSimple() throws Exception {
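
Note on client-side handling (not part of the patch): the new guard runs in
DataNode#submitDiskBalancerPlan on the server, so a caller observes it as an
org.apache.hadoop.ipc.RemoteException wrapping a DiskBalancerException, which
is exactly what the new test asserts. The sketch below shows one way a caller
might detect this specific rejection. The class and helper names are
hypothetical; RemoteException#unwrapRemoteException and
DiskBalancerException#getResult are existing Hadoop APIs.

    import java.io.IOException;

    import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
    import org.apache.hadoop.ipc.RemoteException;

    /** Hypothetical helper illustrating how the new error code surfaces. */
    public final class SubmitPlanErrorHandling {

      /**
       * Returns true if the given failure is the DATANODE_STATUS_NOT_REGULAR
       * rejection introduced by HDFS-10566.
       */
      static boolean isNonRegularRejection(IOException e) {
        IOException unwrapped = e;
        if (e instanceof RemoteException) {
          // Server-side exceptions travel over IPC wrapped in a
          // RemoteException; recover the typed DiskBalancerException.
          unwrapped = ((RemoteException) e)
              .unwrapRemoteException(DiskBalancerException.class);
        }
        return unwrapped instanceof DiskBalancerException
            && ((DiskBalancerException) unwrapped).getResult()
                == DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR;
      }

      private SubmitPlanErrorHandling() {
      }
    }

A caller that sees this result can restart the Datanode with the regular
startup option and resubmit the plan, since the rejection reflects the node's
startup state (e.g. Upgrade/Rollback) rather than a problem with the plan
itself.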