HDFS-10552. DiskBalancer "-query" results in NPE if no plan for the node. Contributed by Anu Engineer.

Anu Engineer 2016-06-22 17:35:55 -07:00 committed by Arpit Agarwal
parent e8de28181a
commit 8a6e354122
3 changed files with 30 additions and 2 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java

@@ -104,6 +104,7 @@ public DiskBalancer(String dataNodeUUID,
     scheduler = Executors.newSingleThreadExecutor();
     lock = new ReentrantLock();
     workMap = new ConcurrentHashMap<>();
+    this.planID = ""; // to keep protobuf happy.
     this.isDiskBalancerEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_DISK_BALANCER_ENABLED,
         DFSConfigKeys.DFS_DISK_BALANCER_ENABLED_DEFAULT);
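
The "to keep protobuf happy" comment points at why the field can no longer stay null: protobuf-generated Java builders throw a NullPointerException when a string field is set to null, so a DataNode that never received a plan used to fail while building the -query response. A minimal sketch of the failure mode (ResponseProto is a hypothetical stand-in, not the actual generated class):

    // Sketch only: protobuf-generated builders reject null string values.
    ResponseProto.Builder builder = ResponseProto.newBuilder();
    builder.setPlanID("");    // fine: empty string is a legal field value
    builder.setPlanID(null);  // throws NullPointerException in generated code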
@@ -223,7 +224,9 @@ public void cancelPlan(String planID) throws DiskBalancerException {
     lock.lock();
     try {
       checkDiskBalancerEnabled();
-      if ((this.planID == null) || (!this.planID.equals(planID))) {
+      if (this.planID == null ||
+          !this.planID.equals(planID) ||
+          this.planID.isEmpty()) {
         LOG.error("Disk Balancer - No such plan. Cancel plan failed. PlanID: " +
             planID);
         throw new DiskBalancerException("No such plan.",

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java

@@ -63,7 +63,7 @@ public void execute(CommandLine cmd) throws Exception {
     String nodeAddress = nodeName;
     // if the string is not name:port format use the default port.
-    if (!nodeName.matches("^.*:\\d$")) {
+    if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
       int defaultIPC = NetUtils.createSocketAddr(
           getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
               DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
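
The old pattern ^.*:\d$ anchored on a single trailing digit, so an address that already carried a multi-digit port was treated as having none; the new pattern accepts two- to five-digit ports. A quick check (host and port are arbitrary examples):

    // Sketch: why the address regex changed, with both patterns as Java literals.
    String node = "localhost:9867";                         // arbitrary example
    System.out.println(node.matches("^.*:\\d$"));           // false: one digit only
    System.out.println(node.matches("[^\\:]+:[0-9]{2,5}")); // true: 2-5 digit port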

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java

@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;

@@ -351,4 +352,28 @@ private List<String> runCommand(final String cmdLine) throws Exception {
     }
     return outputs;
   }
+
+  /**
+   * Making sure that we can query the node without having done a submit.
+   * @throws Exception
+   */
+  @Test
+  public void testDiskBalancerQueryWithoutSubmit() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
+    final int numDatanodes = 2;
+    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDatanodes).build();
+    try {
+      miniDFSCluster.waitActive();
+      DataNode dataNode = miniDFSCluster.getDataNodes().get(0);
+      final String queryArg = String.format("-query localhost:%d", dataNode
+          .getIpcPort());
+      final String cmdLine = String.format("hdfs diskbalancer %s",
+          queryArg);
+      runCommand(cmdLine);
+    } finally {
+      miniDFSCluster.shutdown();
+    }
+  }
 }
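
With the fix, querying a DataNode that has never had a plan submitted reports its status instead of failing with an NPE. The scenario the new test exercises corresponds to this invocation (substitute the DataNode's actual host and IPC port):

    hdfs diskbalancer -query <datanode-host:ipc-port>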