From 121e1e1280c7b019f6d2cc3ba9eae1ead0dd8408 Mon Sep 17 00:00:00 2001
From: Anu Engineer
Date: Tue, 20 Feb 2018 19:16:30 -0800
Subject: [PATCH] HDFS-13175. Add more information for checking argument in
 DiskBalancerVolume. Contributed by Lei (Eddy) Xu.

---
 .../server/diskbalancer/command/PlanCommand.java | 16 ++++++++--------
 .../connectors/DBNameNodeConnector.java          |  2 --
 .../datamodel/DiskBalancerVolume.java            |  4 +++-
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 6e45b9672a..b765885e0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -124,6 +124,14 @@ public void execute(CommandLine cmd) throws Exception {
       throw new IllegalArgumentException("Unable to find the specified node. " +
           cmd.getOptionValue(DiskBalancerCLI.PLAN));
     }
+
+    try (FSDataOutputStream beforeStream = create(String.format(
+        DiskBalancerCLI.BEFORE_TEMPLATE,
+        cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
+      beforeStream.write(getCluster().toJson()
+          .getBytes(StandardCharsets.UTF_8));
+    }
+
     this.thresholdPercentage = getThresholdPercentage(cmd);
     LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
 
@@ -138,14 +146,6 @@ public void execute(CommandLine cmd) throws Exception {
       plan = plans.get(0);
     }
-
-    try (FSDataOutputStream beforeStream = create(String.format(
-        DiskBalancerCLI.BEFORE_TEMPLATE,
-        cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
-      beforeStream.write(getCluster().toJson()
-          .getBytes(StandardCharsets.UTF_8));
-    }
-
     try {
       if (plan != null && plan.getVolumeSetPlans().size() > 0) {
         outputLine = String.format("Writing plan to:");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
index b044baf478..2d8ba8a0a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
@@ -144,8 +144,6 @@ private void getVolumeInfoFromStorageReports(DiskBalancerDataNode node,
       //         Does it make sense ? Balancer does do that. Right now
       //         we only deal with volumes and not blockPools
 
-      volume.setUsed(report.getDfsUsed());
-      volume.setUuid(storage.getStorageID());
 
       // we will skip this volume for disk balancer if
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index 47a925c152..a9fd7f0e22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -269,7 +269,9 @@ public long getUsed() {
    * @param dfsUsedSpace - dfsUsedSpace for this volume.
    */
   public void setUsed(long dfsUsedSpace) {
-    Preconditions.checkArgument(dfsUsedSpace < this.getCapacity());
+    Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
+        "DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
+        dfsUsedSpace, getCapacity());
     this.used = dfsUsedSpace;
   }
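
Note (not part of the patch): the DiskBalancerVolume change uses Guava's three-argument
Preconditions.checkArgument(boolean, String, Object...) overload, which substitutes each
%s in the message template with the corresponding argument. Below is a minimal standalone
sketch of the before/after failure messages; the class name and sample values are made up
for illustration.

// Illustration only -- not part of the patch. CheckArgumentDemo and the
// sample values are hypothetical; Preconditions is com.google.common.base.Preconditions.
import com.google.common.base.Preconditions;

public class CheckArgumentDemo {
  public static void main(String[] args) {
    long capacity = 500L;
    long dfsUsedSpace = 600L;   // violates dfsUsedSpace < capacity

    try {
      // Old form: throws IllegalArgumentException with no message,
      // so the log gives no hint about the values involved.
      Preconditions.checkArgument(dfsUsedSpace < capacity);
    } catch (IllegalArgumentException e) {
      System.out.println("old message: " + e.getMessage());   // old message: null
    }

    try {
      // New form: each %s in the template is replaced by the matching
      // argument, mirroring the message added in DiskBalancerVolume.setUsed.
      Preconditions.checkArgument(dfsUsedSpace < capacity,
          "DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
          dfsUsedSpace, capacity);
    } catch (IllegalArgumentException e) {
      // prints: DiskBalancerVolume.setUsed: dfsUsedSpace(600) < capacity(500)
      System.out.println("new message: " + e.getMessage());
    }
  }
}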