diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index a1c15aed68..5acd0aca3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.hdfs.server.diskbalancer.command;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.lang.StringUtils;
@@ -221,7 +224,7 @@ DiskBalancerDataNode getNode(String nodeName) {
    * @return Set of node names
    * @throws IOException
    */
-  private Set<String> getNodeList(String listArg) throws IOException {
+  protected Set<String> getNodeList(String listArg) throws IOException {
     URL listURL;
     String nodeData;
     Set<String> resultSet = new TreeSet<>();
@@ -242,6 +245,37 @@ private Set<String> getNodeList(String listArg) throws IOException {
     return resultSet;
   }
 
+  /**
+   * Returns a DiskBalancer node list from the cluster; empty if none found.
+   *
+   * @param listArg String File URL or a comma separated list of node names.
+   * @return List of DiskBalancerDataNode
+   * @throws IOException
+   */
+  protected List<DiskBalancerDataNode> getNodes(String listArg)
+      throws IOException {
+    Set<String> nodeNames = null;
+    List<DiskBalancerDataNode> nodeList = Lists.newArrayList();
+
+    if ((listArg == null) || listArg.isEmpty()) {
+      return nodeList;
+    }
+    nodeNames = getNodeList(listArg);
+
+    DiskBalancerDataNode node = null;
+    if (!nodeNames.isEmpty()) {
+      for (String name : nodeNames) {
+        node = getNode(name);
+
+        if (node != null) {
+          nodeList.add(node);
+        }
+      }
+    }
+
+    return nodeList;
+  }
+
   /**
    * Verifies if the command line options are sane.
    *
@@ -471,4 +505,12 @@ public void setTopNodes(int topNodes) {
   public int getTopNodes() {
     return topNodes;
   }
+
+  /**
+   * Sets the DiskBalancer cluster.
+   */
+  @VisibleForTesting
+  public void setCluster(DiskBalancerCluster newCluster) {
+    this.cluster = newCluster;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 0d24f28bc8..7d659af54c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -435,4 +436,25 @@ public void testDiskBalancerQueryWithoutSubmit() throws Exception {
       miniDFSCluster.shutdown();
     }
   }
+
+  @Test(timeout = 60000)
+  public void testGetNodeList() throws Exception {
+    ClusterConnector jsonConnector =
+        ConnectorFactory.getCluster(clusterJson, conf);
+    DiskBalancerCluster diskBalancerCluster =
+        new DiskBalancerCluster(jsonConnector);
+    diskBalancerCluster.readClusterInfo();
+
+    int nodeNum = 5;
+    StringBuilder listArg = new StringBuilder();
+    for (int i = 0; i < nodeNum; i++) {
+      listArg.append(diskBalancerCluster.getNodes().get(i).getDataNodeUUID())
+          .append(",");
+    }
+
+    ReportCommand command = new ReportCommand(conf, null);
+    command.setCluster(diskBalancerCluster);
+    List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
+    assertEquals(nodeNum, nodeList.size());
+  }
 }
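Taken together, the patch widens getNodeList() to protected, adds a getNodes() helper that resolves a file URL or a comma-separated list of node names into DiskBalancerDataNode objects, and exposes setCluster() so tests can inject a cluster. Below is a minimal sketch of how a command subclass might consume the helper. It is illustrative only: the subclass, its "node" option, and the printHelp() stub are hypothetical, and the Command(Configuration) constructor plus the abstract execute()/printHelp() methods are assumed from the surrounding module; only getNodes(), getNodeList(), setCluster(), and getDataNodeUUID() come from the patch itself.

package org.apache.hadoop.hdfs.server.diskbalancer.command;

import java.util.List;

import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;

/**
 * Hypothetical command, used only to illustrate the new getNodes() helper.
 */
public class NodeListCommand extends Command {

  public NodeListCommand(Configuration conf) {
    super(conf); // assumes the Command(Configuration) constructor
  }

  @Override
  public void execute(CommandLine cmd) throws Exception {
    // Same input formats getNodeList() accepts: a file URL or a
    // comma-separated list of node names/UUIDs. The "node" option name
    // is hypothetical.
    String listArg = cmd.getOptionValue("node");

    // Assumes this.cluster has already been populated (via the normal
    // cluster read path in production, or setCluster() in tests).
    // getNodes() skips names it cannot resolve, and a null or empty
    // argument yields an empty list rather than null.
    List<DiskBalancerDataNode> nodes = getNodes(listArg);
    for (DiskBalancerDataNode node : nodes) {
      System.out.println(node.getDataNodeUUID());
    }
  }

  @Override
  public void printHelp() {
    // Help text omitted in this sketch.
  }
}

Because getNodes() returns an empty list instead of null for missing or unresolvable input, callers like the loop above need no null check, which is the same property the testGetNodeList() test relies on when it asserts the list size.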