diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1d2a76afa1..ed5db9b66f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -139,6 +139,9 @@ Trunk (Unreleased)
HDFS-7546. Document, and set an accepting default for
dfs.namenode.kerberos.principal.pattern (Harsh J via aw)
+ HDFS-316. Balancer should run for a configurable # of iterations (Xiaoyu
+ Yao via aw)
+
OPTIMIZATIONS
BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index dba1e2d08c..5b87cb54ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -74,6 +74,10 @@
* start the balancer with a default threshold of 10%
* bin/ start-balancer.sh -threshold 5
* start the balancer with a threshold of 5%
+ * bin/ start-balancer.sh -idleiterations 20
+ * start the balancer with maximum 20 consecutive idle iterations
+ * bin/ start-balancer.sh -idleiterations -1
+ * run the balancer with default threshold infinitely
* To stop:
* bin/ stop-balancer.sh
*
@@ -136,7 +140,7 @@
* <ol>
* <li>The cluster is balanced;
* <li>No block can be moved;
- * <li>No block has been moved for five consecutive iterations;
+ * <li>No block has been moved for specified consecutive iterations (5 by default);
* <li>An IOException occurs while communicating with the namenode;
* <li>Another balancer is running.
* </ol>
@@ -147,7 +151,7 @@
* <ol>
* <li>The cluster is balanced. Exiting
* <li>No block can be moved. Exiting...
- * <li>No block has been moved for 5 iterations. Exiting...
+ * <li>No block has been moved for specified iterations (5 by default). Exiting...
* <li>Received an IO exception: failure reason. Exiting...
* <li>Another balancer is running. Exiting...
* </ol>
@@ -175,7 +179,9 @@ public class Balancer {
+ "\n\t[-exclude [-f | comma-sperated list of hosts]]"
+ "\tExcludes the specified datanodes."
+ "\n\t[-include [-f | comma-sperated list of hosts]]"
- + "\tIncludes only the specified datanodes.";
+ + "\tIncludes only the specified datanodes."
+ + "\n\t[-idleiterations <idleiterations>]"
+ + "\tNumber of consecutive idle iterations (-1 for Infinite) before exit.";
private final Dispatcher dispatcher;
private final BalancingPolicy policy;
@@ -572,7 +578,7 @@ static int run(Collection<URI> namenodes, final Parameters p,
List<NameNodeConnector> connectors = Collections.emptyList();
try {
connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
- Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf);
+ Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf, p.maxIdleIteration);
boolean done = false;
for(int iteration = 0; !done; iteration++) {
@@ -628,19 +634,22 @@ private static String time2Str(long elapsedTime) {
static class Parameters {
static final Parameters DEFAULT = new Parameters(
BalancingPolicy.Node.INSTANCE, 10.0,
+ NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
Collections.<String> emptySet(), Collections.<String> emptySet());
final BalancingPolicy policy;
final double threshold;
+ final int maxIdleIteration;
// exclude the nodes in this set from balancing operations
Set<String> nodesToBeExcluded;
//include only these nodes in balancing operations
Set<String> nodesToBeIncluded;
- Parameters(BalancingPolicy policy, double threshold,
+ Parameters(BalancingPolicy policy, double threshold, int maxIdleIteration,
Set<String> nodesToBeExcluded, Set<String> nodesToBeIncluded) {
this.policy = policy;
this.threshold = threshold;
+ this.maxIdleIteration = maxIdleIteration;
this.nodesToBeExcluded = nodesToBeExcluded;
this.nodesToBeIncluded = nodesToBeIncluded;
}
@@ -649,6 +658,7 @@ static class Parameters {
public String toString() {
return Balancer.class.getSimpleName() + "." + getClass().getSimpleName()
+ "[" + policy + ", threshold=" + threshold +
+ ", max idle iteration = " + maxIdleIteration +
", number of nodes to be excluded = "+ nodesToBeExcluded.size() +
", number of nodes to be included = "+ nodesToBeIncluded.size() +"]";
}
@@ -687,6 +697,7 @@ public int run(String[] args) {
static Parameters parse(String[] args) {
BalancingPolicy policy = Parameters.DEFAULT.policy;
double threshold = Parameters.DEFAULT.threshold;
+ int maxIdleIteration = Parameters.DEFAULT.maxIdleIteration;
Set<String> nodesTobeExcluded = Parameters.DEFAULT.nodesToBeExcluded;
Set<String> nodesTobeIncluded = Parameters.DEFAULT.nodesToBeIncluded;
@@ -742,6 +753,11 @@ static Parameters parse(String[] args) {
} else {
nodesTobeIncluded = Util.parseHostList(args[i]);
}
+ } else if ("-idleiterations".equalsIgnoreCase(args[i])) {
+ checkArgument(++i < args.length,
+ "idleiterations value is missing: args = " + Arrays.toString(args));
+ maxIdleIteration = Integer.parseInt(args[i]);
+ LOG.info("Using idleiterations of " + maxIdleIteration);
} else {
throw new IllegalArgumentException("args = "
+ Arrays.toString(args));
@@ -755,7 +771,7 @@ static Parameters parse(String[] args) {
}
}
- return new Parameters(policy, threshold, nodesTobeExcluded, nodesTobeIncluded);
+ return new Parameters(policy, threshold, maxIdleIteration, nodesTobeExcluded, nodesTobeIncluded);
}
private static void printUsage(PrintStream out) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index e01d57d85a..cf5f36f9e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -60,18 +60,18 @@
public class NameNodeConnector implements Closeable {
private static final Log LOG = LogFactory.getLog(NameNodeConnector.class);
- private static final int MAX_NOT_CHANGED_ITERATIONS = 5;
+ public static final int DEFAULT_MAX_IDLE_ITERATIONS = 5;
private static boolean write2IdFile = true;
/** Create {@link NameNodeConnector} for the given namenodes. */
public static List<NameNodeConnector> newNameNodeConnectors(
- Collection<URI> namenodes, String name, Path idPath, Configuration conf)
- throws IOException {
+ Collection<URI> namenodes, String name, Path idPath, Configuration conf,
+ int maxIdleIterations) throws IOException {
final List<NameNodeConnector> connectors = new ArrayList<NameNodeConnector>(
namenodes.size());
for (URI uri : namenodes) {
NameNodeConnector nnc = new NameNodeConnector(name, uri, idPath,
- null, conf);
+ null, conf, maxIdleIterations);
nnc.getKeyManager().startBlockKeyUpdater();
connectors.add(nnc);
}
@@ -80,12 +80,12 @@ public static List<NameNodeConnector> newNameNodeConnectors(
public static List<NameNodeConnector> newNameNodeConnectors(
Map<URI, List<Path>> namenodes, String name, Path idPath,
- Configuration conf) throws IOException {
+ Configuration conf, int maxIdleIterations) throws IOException {
final List<NameNodeConnector> connectors = new ArrayList<NameNodeConnector>(
namenodes.size());
for (Map.Entry<URI, List<Path>> entry : namenodes.entrySet()) {
NameNodeConnector nnc = new NameNodeConnector(name, entry.getKey(),
- idPath, entry.getValue(), conf);
+ idPath, entry.getValue(), conf, maxIdleIterations);
nnc.getKeyManager().startBlockKeyUpdater();
connectors.add(nnc);
}
@@ -111,15 +111,18 @@ public static void setWrite2IdFile(boolean write2IdFile) {
private final List<Path> targetPaths;
private final AtomicLong bytesMoved = new AtomicLong();
+ private final int maxNotChangedIterations;
private int notChangedIterations = 0;
public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
- List<Path> targetPaths, Configuration conf)
+ List<Path> targetPaths, Configuration conf,
+ int maxNotChangedIterations)
throws IOException {
this.nameNodeUri = nameNodeUri;
this.idPath = idPath;
this.targetPaths = targetPaths == null || targetPaths.isEmpty() ? Arrays
.asList(new Path("/")) : targetPaths;
+ this.maxNotChangedIterations = maxNotChangedIterations;
this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
NamenodeProtocol.class).getProxy();
@@ -182,7 +185,14 @@ public boolean shouldContinue(long dispatchBlockMoveBytes) {
notChangedIterations = 0;
} else {
notChangedIterations++;
- if (notChangedIterations >= MAX_NOT_CHANGED_ITERATIONS) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("No block has been moved for " +
+ notChangedIterations + " iterations, " +
+ "maximum notChangedIterations before exit is: " +
+ ((maxNotChangedIterations >= 0) ? maxNotChangedIterations : "Infinite"));
+ }
+ if ((maxNotChangedIterations >= 0) &&
+ (notChangedIterations >= maxNotChangedIterations)) {
System.out.println("No block has been moved for "
+ notChangedIterations + " iterations. Exiting...");
return false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index a22f920099..6fa6963d23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -530,7 +530,8 @@ static int run(Map<URI, List<Path>> namenodes, Configuration conf)
List<NameNodeConnector> connectors = Collections.emptyList();
try {
connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
- Mover.class.getSimpleName(), MOVER_ID_PATH, conf);
+ Mover.class.getSimpleName(), MOVER_ID_PATH, conf,
+ NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
while (connectors.size() > 0) {
Collections.shuffle(connectors);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
index 941a8eef7a..846b0b452d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSCommands.apt.vm
@@ -324,7 +324,7 @@ HDFS Commands Guide
** <<>>
- Usage: <<<hdfs balancer [-threshold <threshold>] [-policy <policy>]>>>
+ Usage: <<<hdfs balancer [-threshold <threshold>] [-policy <policy>] [-idleiterations <idleiterations>]>>>
*------------------------+----------------------------------------------------+
|| COMMAND_OPTION | Description
@@ -336,6 +336,9 @@ HDFS Commands Guide
*------------------------+----------------------------------------------------+
| -threshold | Percentage of disk capacity. This overwrites the
| | default threshold.
+*------------------------+----------------------------------------------------+
+| -idleiterations <idleiterations> | Maximum number of idle iterations before exit.
+| | This overwrites the default idleiterations (5).
*------------------------+----------------------------------------------------+
Runs a cluster balancing utility. An administrator can simply press Ctrl-C
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 6955fcdce7..153baeb13d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -564,6 +564,7 @@ private void doTest(Configuration conf, long[] capacities,
p = new Balancer.Parameters(
Balancer.Parameters.DEFAULT.policy,
Balancer.Parameters.DEFAULT.threshold,
+ Balancer.Parameters.DEFAULT.maxIdleIteration,
nodes.getNodesToBeExcluded(), nodes.getNodesToBeIncluded());
}
@@ -629,7 +630,8 @@ private static int runBalancer(Collection<URI> namenodes, final Parameters p,
List<NameNodeConnector> connectors = Collections.emptyList();
try {
connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
- Balancer.class.getSimpleName(), Balancer.BALANCER_ID_PATH, conf);
+ Balancer.class.getSimpleName(), Balancer.BALANCER_ID_PATH, conf,
+ Balancer.Parameters.DEFAULT.maxIdleIteration);
boolean done = false;
for(int iteration = 0; !done; iteration++) {
@@ -801,6 +803,7 @@ public void testUnknownDatanode() throws Exception {
Balancer.Parameters p = new Balancer.Parameters(
Balancer.Parameters.DEFAULT.policy,
Balancer.Parameters.DEFAULT.threshold,
+ Balancer.Parameters.DEFAULT.maxIdleIteration,
datanodes, Balancer.Parameters.DEFAULT.nodesToBeIncluded);
final int r = Balancer.run(namenodes, p, conf);
assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
@@ -1233,6 +1236,7 @@ public void testBalancerWithRamDisk() throws Exception {
Balancer.Parameters p = new Balancer.Parameters(
Parameters.DEFAULT.policy,
Parameters.DEFAULT.threshold,
+ Balancer.Parameters.DEFAULT.maxIdleIteration,
Parameters.DEFAULT.nodesToBeExcluded,
Parameters.DEFAULT.nodesToBeIncluded);
final int r = Balancer.run(namenodes, p, conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index c9fc5bafbf..f35e1c88bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -21,6 +21,7 @@
import java.net.URI;
import java.util.*;
+import com.google.common.collect.Maps;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
@@ -39,9 +40,14 @@ public class TestMover {
static Mover newMover(Configuration conf) throws IOException {
final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
Assert.assertEquals(1, namenodes.size());
+ Map<URI, List<Path>> nnMap = Maps.newHashMap();
+ for (URI nn : namenodes) {
+ nnMap.put(nn, null);
+ }
final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
- namenodes, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf);
+ nnMap, Mover.class.getSimpleName(), Mover.MOVER_ID_PATH, conf,
+ NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
return new Mover(nncs.get(0), conf);
}