HDFS-10550. DiskBalancer: fix issue of order dependency in iteration in ReportCommand test. Contributed by Xiaobing Zhou.
commit 7b23ad1ef7
parent c6ed54808d
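Before this change, ReportCommand.handleNodeReport appended each volume line to the report while iterating dbdn.getVolumeSets().values(), so the order of volume lines (and any test assertion tied to a specific output index) depended on the map's iteration order. The fix collects the formatted lines, sorts them, and joins them with the platform line separator. A minimal standalone sketch of that idea follows; the class, method, and sample data below are illustrative only (the actual patch uses Guava's Lists.newArrayList and Commons Lang's StringUtils.join, as shown in the diff).

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

// Sketch: make report output deterministic by collecting formatted lines,
// sorting them, and joining them once, instead of appending lines in whatever
// order the map happens to iterate.
public class SortedReportSketch {
  static String formatVolumes(Map<String, List<String>> volumeSets) {
    List<String> lines = new ArrayList<>();
    for (List<String> volumes : volumeSets.values()) { // iteration order not guaranteed
      for (String vol : volumes) {
        lines.add(String.format("[%s]", vol));         // stand-in for the real volumeFormat
      }
    }
    Collections.sort(lines);                           // removes the order dependency
    return String.join(System.lineSeparator(), lines);
  }

  public static void main(String[] args) {
    System.out.println(formatVolumes(
        Map.of("DISK", List.of("/tmp/disk/b", "/tmp/disk/a"))));
  }
}

Sorting once at the end keeps the per-volume formatting unchanged while making the emitted report stable across JVMs and hash seeds.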
ReportCommand.java
@@ -19,6 +19,7 @@
 import java.io.PrintStream;
+import java.util.Collections;
 import java.util.List;
 import java.util.ListIterator;
 
 import org.apache.commons.cli.CommandLine;
@@ -32,6 +33,7 @@
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 
 /**
  * Executes the report command.
@@ -164,9 +166,10 @@ private void handleNodeReport(final CommandLine cmd, StrBuilder result,
           dbdn.getVolumeCount(),
           dbdn.getNodeDataDensity()));
 
+        List<String> volumeList = Lists.newArrayList();
         for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
           for (DiskBalancerVolume vol : vset.getVolumes()) {
-            result.appendln(String.format(volumeFormat,
+            volumeList.add(String.format(volumeFormat,
                 vol.getStorageType(),
                 vol.getPath(),
                 vol.getUsedRatio(),
@@ -181,6 +184,10 @@ private void handleNodeReport(final CommandLine cmd, StrBuilder result,
                 vol.isTransient() ? trueStr : falseStr));
           }
         }
+
+        Collections.sort(volumeList);
+        result.appendln(
+            StringUtils.join(volumeList.toArray(), System.lineSeparator()));
       }
     }
   }
@@ -194,13 +201,13 @@ public void printHelp() {
         " datanode, or prints out the list of nodes that will benefit from " +
         "running disk balancer. Top defaults to " + getDefaultTop();
     String footer = ". E.g.:\n"
-        + "hdfs diskbalancer -uri http://namenode.uri -report\n"
-        + "hdfs diskbalancer -uri http://namenode.uri -report -top 5\n"
-        + "hdfs diskbalancer -uri http://namenode.uri -report "
+        + "hdfs diskbalancer -fs http://namenode.uri -report\n"
+        + "hdfs diskbalancer -fs http://namenode.uri -report -top 5\n"
+        + "hdfs diskbalancer -fs http://namenode.uri -report "
         + "-node {DataNodeID | IP | Hostname}";
 
     HelpFormatter helpFormatter = new HelpFormatter();
-    helpFormatter.printHelp("hdfs diskbalancer -uri http://namenode.uri " +
+    helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
         "-report [options]",
         header, DiskBalancer.getReportOptions(), footer);
   }
TestDiskBalancerCommand.java
@@ -71,8 +71,10 @@ public void tearDown() throws Exception {
     }
   }
 
-  private void testReportSimple() throws Exception {
-    final String cmdLine = String.format("hdfs diskbalancer -uri %s -report",
+  /* test basic report */
+  @Test(timeout=60000)
+  public void testReportSimple() throws Exception {
+    final String cmdLine = String.format("hdfs diskbalancer -fs %s -report",
         clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
 
@@ -98,9 +100,11 @@ private void testReportSimple() throws Exception {
 
   }
 
-  private void testReportLessThanTotal() throws Exception {
+  /* test less than 64 DataNode(s) as total, e.g., -report -top 32 */
+  @Test(timeout=60000)
+  public void testReportLessThanTotal() throws Exception {
     final String cmdLine = String.format(
-        "hdfs diskbalancer -uri %s -report -top 32", clusterJson.toString());
+        "hdfs diskbalancer -fs %s -report -top 32", clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
 
     assertThat(
@@ -120,9 +124,11 @@ private void testReportLessThanTotal() throws Exception {
         containsString("9 volumes with node data density 1.97"))));
   }
 
-  private void testReportMoreThanTotal() throws Exception {
+  /* test more than 64 DataNode(s) as total, e.g., -report -top 128 */
+  @Test(timeout=60000)
+  public void testReportMoreThanTotal() throws Exception {
     final String cmdLine = String.format(
-        "hdfs diskbalancer -uri %s -report -top 128", clusterJson.toString());
+        "hdfs diskbalancer -fs %s -report -top 128", clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
 
     assertThat(
@@ -143,9 +149,11 @@ private void testReportMoreThanTotal() throws Exception {
 
   }
 
-  private void testReportInvalidTopLimit() throws Exception {
+  /* test invalid top limit, e.g., -report -top xx */
+  @Test(timeout=60000)
+  public void testReportInvalidTopLimit() throws Exception {
     final String cmdLine = String.format(
-        "hdfs diskbalancer -uri %s -report -top xx", clusterJson.toString());
+        "hdfs diskbalancer -fs %s -report -top xx", clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
 
     assertThat(
@@ -169,10 +177,12 @@ private void testReportInvalidTopLimit() throws Exception {
         containsString("9 volumes with node data density 1.97"))));
   }
 
-  private void testReportNode() throws Exception {
+  /* test -report -node DataNodeID */
+  @Test(timeout=60000)
+  public void testReportNode() throws Exception {
     final String cmdLine = String
         .format(
-            "hdfs diskbalancer -uri %s -report -node "
+            "hdfs diskbalancer -fs %s -report -node "
                 + "a87654a9-54c7-4693-8dd9-c9c7021dc340",
             clusterJson.toString());
     final List<String> outputs = runCommand(cmdLine);
@@ -192,9 +202,9 @@ private void testReportNode() throws Exception {
     assertThat(
         outputs.get(3),
         is(allOf(containsString("DISK"),
-            containsString("/tmp/disk/xx3j3ph3zd"),
-            containsString("0.72 used: 289544224916/400000000000"),
-            containsString("0.28 free: 110455775084/400000000000"))));
+            containsString("/tmp/disk/KmHefYNURo"),
+            containsString("0.20 used: 39160240782/200000000000"),
+            containsString("0.80 free: 160839759218/200000000000"))));
     assertThat(
         outputs.get(4),
         is(allOf(containsString("DISK"),
@@ -204,16 +214,15 @@ private void testReportNode() throws Exception {
     assertThat(
         outputs.get(5),
         is(allOf(containsString("DISK"),
-            containsString("DISK"),
-            containsString("/tmp/disk/KmHefYNURo"),
-            containsString("0.20 used: 39160240782/200000000000"),
-            containsString("0.80 free: 160839759218/200000000000"))));
+            containsString("/tmp/disk/xx3j3ph3zd"),
+            containsString("0.72 used: 289544224916/400000000000"),
+            containsString("0.28 free: 110455775084/400000000000"))));
     assertThat(
         outputs.get(6),
         is(allOf(containsString("RAM_DISK"),
-            containsString("/tmp/disk/MXRyYsCz3U"),
-            containsString("0.55 used: 438102096853/800000000000"),
-            containsString("0.45 free: 361897903147/800000000000"))));
+            containsString("/tmp/disk/BoBlQFxhfw"),
+            containsString("0.60 used: 477590453390/800000000000"),
+            containsString("0.40 free: 322409546610/800000000000"))));
     assertThat(
         outputs.get(7),
         is(allOf(containsString("RAM_DISK"),
@@ -223,9 +232,9 @@ private void testReportNode() throws Exception {
     assertThat(
         outputs.get(8),
         is(allOf(containsString("RAM_DISK"),
-            containsString("/tmp/disk/BoBlQFxhfw"),
-            containsString("0.60 used: 477590453390/800000000000"),
-            containsString("0.40 free: 322409546610/800000000000"))));
+            containsString("/tmp/disk/MXRyYsCz3U"),
+            containsString("0.55 used: 438102096853/800000000000"),
+            containsString("0.45 free: 361897903147/800000000000"))));
     assertThat(
         outputs.get(9),
         is(allOf(containsString("SSD"),
@@ -247,25 +256,6 @@ private void testReportNode() throws Exception {
   }
 
-  @Test(timeout=60000)
-  public void testReportCommmand() throws Exception {
-
-    /* test basic report */
-    testReportSimple();
-
-    /* test less than 64 DataNode(s) as total, e.g., -report -top 32 */
-    testReportLessThanTotal();
-
-    /* test more than 64 DataNode(s) as total, e.g., -report -top 128 */
-    testReportMoreThanTotal();
-
-    /* test invalid top limit, e.g., -report -top xx */
-    testReportInvalidTopLimit();
-
-    /* test -report -node DataNodeID */
-    testReportNode();
-  }
-
   @Test
   public void testReadClusterFromJson() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
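On the test side, the single testReportCommmand driver, which invoked the private scenario helpers in a fixed sequence, is replaced by one @Test(timeout=60000) method per scenario, so each report scenario runs independently and a failure in one no longer masks the others. A minimal JUnit 4 sketch of that pattern follows; the class and helper names are illustrative, not the Hadoop test code.

import static org.junit.Assert.assertTrue;

import org.junit.Test;

// Sketch: promote each scenario to its own @Test with a timeout instead of
// chaining private helpers inside a single test method.
public class IndependentScenarioTestSketch {

  /* test basic report */
  @Test(timeout = 60000)
  public void testBasicScenario() throws Exception {
    assertTrue(runScenario("basic"));
  }

  /* test a second scenario; it no longer depends on the first one running */
  @Test(timeout = 60000)
  public void testTopLimitScenario() throws Exception {
    assertTrue(runScenario("top-limit"));
  }

  // Hypothetical helper standing in for runCommand(...) plus its assertions.
  private boolean runScenario(String name) {
    return name != null && !name.isEmpty();
  }
}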