From 8a6e3541226fb1b6798cedecc56f1f160012becf Mon Sep 17 00:00:00 2001
From: Anu Engineer
Date: Wed, 22 Jun 2016 17:35:55 -0700
Subject: [PATCH] HDFS-10552. DiskBalancer "-query" results in NPE if no plan
 for the node. Contributed by Anu Engineer.

---
 .../hdfs/server/datanode/DiskBalancer.java    |  5 +++-
 .../diskbalancer/command/QueryCommand.java    |  2 +-
 .../command/TestDiskBalancerCommand.java      | 25 +++++++++++++++++++
 3 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index b31b9973fb..5a1fb9ec40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -104,6 +104,7 @@ public DiskBalancer(String dataNodeUUID,
     scheduler = Executors.newSingleThreadExecutor();
     lock = new ReentrantLock();
     workMap = new ConcurrentHashMap<>();
+    this.planID = "";  // to keep protobuf happy.
     this.isDiskBalancerEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_DISK_BALANCER_ENABLED,
         DFSConfigKeys.DFS_DISK_BALANCER_ENABLED_DEFAULT);
@@ -223,7 +224,9 @@ public void cancelPlan(String planID) throws DiskBalancerException {
     lock.lock();
     try {
       checkDiskBalancerEnabled();
-      if ((this.planID == null) || (!this.planID.equals(planID))) {
+      if (this.planID == null ||
+          !this.planID.equals(planID) ||
+          this.planID.isEmpty()) {
         LOG.error("Disk Balancer - No such plan. Cancel plan failed. PlanID: " +
             planID);
         throw new DiskBalancerException("No such plan.",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index 6c759e2eaa..fac1e51bfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -63,7 +63,7 @@ public void execute(CommandLine cmd) throws Exception {
     String nodeAddress = nodeName;
 
     // if the string is not name:port format use the default port.
-    if (!nodeName.matches("^.*:\\d$")) {
+    if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
       int defaultIPC = NetUtils.createSocketAddr(
           getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
               DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index ceb762f647..b0821e2e8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
@@ -351,4 +352,28 @@ private List runCommand(final String cmdLine) throws Exception {
     }
     return outputs;
   }
+
+  /**
+   * Making sure that we can query the node without having done a submit.
+   * @throws Exception
+   */
+  @Test
+  public void testDiskBalancerQueryWithoutSubmit() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
+    final int numDatanodes = 2;
+    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDatanodes).build();
+    try {
+      miniDFSCluster.waitActive();
+      DataNode dataNode = miniDFSCluster.getDataNodes().get(0);
+      final String queryArg = String.format("-query localhost:%d", dataNode
+          .getIpcPort());
+      final String cmdLine = String.format("hdfs diskbalancer %s",
+          queryArg);
+      runCommand(cmdLine);
+    } finally {
+      miniDFSCluster.shutdown();
+    }
+  }
 }
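For reference, the standalone sketch below (not part of the patch) illustrates how the revised host:port check used in QueryCommand.execute() classifies node arguments; the host names and port numbers are arbitrary examples chosen for illustration only.

// Standalone sketch: behavior of the new "[^\\:]+:[0-9]{2,5}" pattern.
// Hosts and ports below are illustrative, not taken from the patch.
public class HostPortRegexSketch {
  private static final String HOST_PORT = "[^\\:]+:[0-9]{2,5}";

  public static void main(String[] args) {
    // host:port form -> the command can use the address as given.
    System.out.println("localhost:9867".matches(HOST_PORT));   // true
    // bare host name -> the command falls back to the default IPC port.
    System.out.println("localhost".matches(HOST_PORT));        // false
    // a single-digit port is rejected by the {2,5} quantifier.
    System.out.println("localhost:7".matches(HOST_PORT));      // false
  }
}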