HDFS-10566. Submit plan request should throw exception if Datanode is in non-REGULAR status. Contributed by Xiaobing Zhou.

Anu Engineer 2016-11-02 17:46:56 -07:00
parent b59206190e
commit 730cb0cff6
4 changed files with 105 additions and 24 deletions
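In short: DataNode#submitDiskBalancerPlan now fails fast with a DiskBalancerException carrying the new result code DATANODE_STATUS_NOT_REGULAR whenever the datanode was started with a non-REGULAR startup option (for example -rollback). Below is a minimal, self-contained sketch of the guard pattern, not the actual Hadoop code; the enum and class are stand-ins for HdfsServerConstants.StartupOption and DataNode:

import java.io.IOException;

public class SubmitGuardSketch {
  // Stand-in for HdfsServerConstants.StartupOption.
  enum StartupOption { REGULAR, UPGRADE, ROLLBACK }

  private final StartupOption startupOption;

  SubmitGuardSketch(StartupOption startupOption) {
    this.startupOption = startupOption;
  }

  // Mirrors the check this commit adds to submitDiskBalancerPlan.
  void submitDiskBalancerPlan(String planID) throws IOException {
    if (startupOption != StartupOption.REGULAR) {
      throw new IOException("Datanode is in special state, e.g. "
          + "Upgrade/Rollback etc. Disk balancing not permitted.");
    }
    System.out.println("plan " + planID + " accepted");
  }

  public static void main(String[] args) throws IOException {
    new SubmitGuardSketch(StartupOption.REGULAR)
        .submitDiskBalancerPlan("plan-1"); // accepted
    new SubmitGuardSketch(StartupOption.ROLLBACK)
        .submitDiskBalancerPlan("plan-2"); // throws IOException
  }
}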

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -3414,6 +3414,12 @@ public void submitDiskBalancerPlan(String planID, long planVersion,
       String planFile, String planData, boolean skipDateCheck)
       throws IOException {
     checkSuperuserPrivilege();
+    if (getStartupOption(getConf()) != StartupOption.REGULAR) {
+      throw new DiskBalancerException(
+          "Datanode is in special state, e.g. Upgrade/Rollback etc."
+              + " Disk balancing not permitted.",
+          DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR);
+    }
     // TODO : Support force option
     this.diskBalancer.submitPlan(planID, planVersion, planFile, planData,
         skipDateCheck);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java

@@ -40,6 +40,7 @@ public enum Result {
     NO_SUCH_PLAN,
     UNKNOWN_KEY,
     INVALID_NODE,
+    DATANODE_STATUS_NOT_REGULAR,
   }
 
   private final Result result;
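With the new enum constant in place, a caller that reaches the exception directly (rather than through RPC) could branch on it. A hedged sketch, assuming the class's getResult() accessor over the result field shown above; the surrounding client code is hypothetical:

try {
  dnClient.submitDiskBalancerPlan(planID, planVersion, planFile, planData,
      false);
} catch (DiskBalancerException e) {
  if (e.getResult()
      == DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR) {
    // The datanode is mid-upgrade/rollback; retry after it has been
    // restarted with the REGULAR startup option.
  }
}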

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java

@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -262,6 +263,23 @@ public static MiniDFSCluster newImbalancedCluster(
       final int defaultBlockSize,
       final int fileLen)
       throws IOException, InterruptedException, TimeoutException {
+    return newImbalancedCluster(
+        conf,
+        numDatanodes,
+        storageCapacities,
+        defaultBlockSize,
+        fileLen,
+        null);
+  }
+
+  public static MiniDFSCluster newImbalancedCluster(
+      final Configuration conf,
+      final int numDatanodes,
+      final long[] storageCapacities,
+      final int defaultBlockSize,
+      final int fileLen,
+      final StartupOption dnOption)
+      throws IOException, InterruptedException, TimeoutException {
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
@@ -281,6 +299,7 @@ public static MiniDFSCluster newImbalancedCluster(
         .storageCapacities(storageCapacities)
         .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
         .storagesPerDatanode(2)
+        .dnStartupOption(dnOption)
         .build();
     FsVolumeImpl source = null;
     FsVolumeImpl dest = null;
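The shorter overload simply delegates with a null option, so existing call sites keep their behavior; passing a non-null StartupOption threads it through to the MiniDFSCluster builder. A hedged usage sketch, assuming CAPACITIES, DEFAULT_BLOCK_SIZE, and FILE_LEN are constants defined by the calling test:

// Start a one-datanode imbalanced cluster whose datanode comes up in
// rollback state instead of the usual REGULAR state.
MiniDFSCluster cluster = DiskBalancerTestUtil.newImbalancedCluster(
    new HdfsConfiguration(),
    1,                       // numDatanodes
    CAPACITIES,              // per-volume capacities
    DEFAULT_BLOCK_SIZE,
    FILE_LEN,
    StartupOption.ROLLBACK);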

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java

@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerTestUtil;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
@@ -53,6 +54,7 @@
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Tool;
@@ -103,6 +105,50 @@ public void tearDown() throws Exception {
     }
   }
 
+  /**
+   * Tests that submitting and executing a plan is rejected when the
+   * Datanode is in a status other than REGULAR.
+   */
+  @Test(timeout = 60000)
+  public void testSubmitPlanInNonRegularStatus() throws Exception {
+    final int numDatanodes = 1;
+    MiniDFSCluster miniCluster = null;
+    final Configuration hdfsConf = new HdfsConfiguration();
+
+    try {
+      /* new cluster with imbalanced capacity */
+      miniCluster = DiskBalancerTestUtil.newImbalancedCluster(
+          hdfsConf,
+          numDatanodes,
+          CAPACITIES,
+          DEFAULT_BLOCK_SIZE,
+          FILE_LEN,
+          StartupOption.ROLLBACK);
+
+      /* get full path of plan */
+      final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
+
+      try {
+        /* run execute command */
+        final String cmdLine = String.format(
+            "hdfs diskbalancer -%s %s",
+            EXECUTE,
+            planFileFullName);
+        runCommand(cmdLine, hdfsConf, miniCluster);
+      } catch (RemoteException e) {
+        assertThat(e.getClassName(), containsString("DiskBalancerException"));
+        assertThat(e.toString(),
+            is(allOf(
+                containsString("Datanode is in special state"),
+                containsString("Disk balancing not permitted."))));
+      }
+    } finally {
+      if (miniCluster != null) {
+        miniCluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Tests running multiple commands under one setup. This mainly covers
    * {@link org.apache.hadoop.hdfs.server.diskbalancer.command.Command#close}
@@ -122,36 +168,16 @@ public void testRunMultipleCommandsUnderOneSetup() throws Exception {
           CAPACITIES,
           DEFAULT_BLOCK_SIZE,
           FILE_LEN);
 
-      String cmdLine = "";
-      List<String> outputs = null;
-      final DataNode dn = miniCluster.getDataNodes().get(0);
-
-      /* run plan command */
-      cmdLine = String.format(
-          "hdfs diskbalancer -%s %s",
-          PLAN,
-          dn.getDatanodeUuid());
-      outputs = runCommand(cmdLine, hdfsConf, miniCluster);
-
-      /* get path of plan file*/
-      final String planFileName = dn.getDatanodeUuid();
-
-      /* verify plan command */
-      assertEquals(
-          "There must be two lines: the 1st is writing plan to...,"
-              + " the 2nd is actual full path of plan file.",
-          2, outputs.size());
-      assertThat(outputs.get(1), containsString(planFileName));
-      /* get full path of plan file*/
-      final String planFileFullName = outputs.get(1);
+      /* get full path of plan */
+      final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
 
       /* run execute command */
-      cmdLine = String.format(
+      final String cmdLine = String.format(
           "hdfs diskbalancer -%s %s",
           EXECUTE,
           planFileFullName);
-      outputs = runCommand(cmdLine, hdfsConf, miniCluster);
+      runCommand(cmdLine, hdfsConf, miniCluster);
     } finally {
       if (miniCluster != null) {
         miniCluster.shutdown();
@@ -159,6 +185,35 @@ public void testRunMultipleCommandsUnderOneSetup() throws Exception {
     }
   }
 
+  private String runAndVerifyPlan(
+      final MiniDFSCluster miniCluster,
+      final Configuration hdfsConf) throws Exception {
+    String cmdLine = "";
+    List<String> outputs = null;
+    final DataNode dn = miniCluster.getDataNodes().get(0);
+
+    /* run plan command */
+    cmdLine = String.format(
+        "hdfs diskbalancer -%s %s",
+        PLAN,
+        dn.getDatanodeUuid());
+    outputs = runCommand(cmdLine, hdfsConf, miniCluster);
+
+    /* get path of plan file*/
+    final String planFileName = dn.getDatanodeUuid();
+
+    /* verify plan command */
+    assertEquals(
+        "There must be two lines: the 1st is writing plan to...,"
+            + " the 2nd is actual full path of plan file.",
+        2, outputs.size());
+    assertThat(outputs.get(1), containsString(planFileName));
+
+    /* get full path of plan file*/
+    final String planFileFullName = outputs.get(1);
+
+    return planFileFullName;
+  }
+
   /* test basic report */
   @Test(timeout = 60000)
   public void testReportSimple() throws Exception {
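One detail worth noting in testSubmitPlanInNonRegularStatus: the server-side DiskBalancerException reaches the client wrapped in an org.apache.hadoop.ipc.RemoteException, which is why the test catches RemoteException and matches on getClassName() rather than catching DiskBalancerException itself. A hedged sketch of the usual alternative, unwrapping via RemoteException#unwrapRemoteException in the same test-helper context:

try {
  runCommand(cmdLine, hdfsConf, miniCluster);
} catch (RemoteException e) {
  // unwrapRemoteException returns the original server-side exception when
  // its class is among the lookup types, otherwise the RemoteException.
  throw e.unwrapRemoteException(DiskBalancerException.class);
}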