diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b3834ae96b..66e3c8b71e 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -735,7 +735,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY =
       "dfs.balancer.getBlocks.hot-time-interval";
   public static final long DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT =
-      0;
+      0L;
   public static final String DFS_BALANCER_KEYTAB_ENABLED_KEY = "dfs.balancer.keytab.enabled";
   public static final boolean DFS_BALANCER_KEYTAB_ENABLED_DEFAULT = false;
   public static final String DFS_BALANCER_ADDRESS_KEY = "dfs.balancer.address";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 73a8e68191..67ecd5a4d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -324,10 +324,12 @@ static int getFailedTimesSinceLastSuccessfulBalance() {
      * Balancer prefer to get blocks which are belong to the cold files
      * created before this time period.
      */
-    final long hotBlockTimeInterval = conf.getTimeDuration(
-        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY,
-        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
+    final long hotBlockTimeInterval =
+        p.getHotBlockTimeInterval() != 0L ? p.getHotBlockTimeInterval() :
+        conf.getTimeDuration(
+            DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY,
+            DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT,
+            TimeUnit.MILLISECONDS);
 
     // DataNode configuration parameters for balancing
     final int maxConcurrentMovesPerNode = getInt(conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java
index 3d9cbb1802..2b53c15d1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java
@@ -110,6 +110,10 @@ boolean getSortTopNodes() {
     return this.sortTopNodes;
   }
 
+  long getHotBlockTimeInterval() {
+    return this.hotBlockTimeInterval;
+  }
+
   @Override
   public String toString() {
     return String.format("%s.%s [%s," + " threshold = %s,"
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 9af8ade0ea..2070a332b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -38,6 +38,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 
+import java.lang.reflect.Field;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.junit.AfterClass;
 import static org.junit.Assert.assertEquals;
@@ -67,6 +68,7 @@
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.lang3.StringUtils;
+import org.junit.Assert;
 import org.junit.Before;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -1219,6 +1221,52 @@ public void testBalancerCliParseBlockpools() {
     assertEquals(1, p.getBlockPools().size());
   }
 
+  @Test
+  public void testBalancerCliParseHotBlockTimeInterval() {
+    String[] parameters = new String[]{"-hotBlockTimeInterval", "1000"};
+    BalancerParameters p = Balancer.Cli.parse(parameters);
+    assertEquals(1000, p.getHotBlockTimeInterval());
+  }
+
+  @Test
+  public void testBalancerDispatchHotBlockTimeInterval() {
+    String[] parameters = new String[]{"-hotBlockTimeInterval", "1000"};
+    BalancerParameters p = Balancer.Cli.parse(parameters);
+    Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    try {
+      cluster = new MiniDFSCluster
+          .Builder(conf)
+          .numDataNodes(0)
+          .setNNRedundancyConsiderLoad(false)
+          .build();
+      cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+          DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+      conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+          DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+      cluster.waitClusterUp();
+      cluster.waitActive();
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
+      List<NameNodeConnector> connectors =
+          NameNodeConnector.newNameNodeConnectors(namenodes,
+              Balancer.class.getSimpleName(),
+              Balancer.BALANCER_ID_PATH, conf,
+              BalancerParameters.DEFAULT.getMaxIdleIteration());
+      Balancer run = new Balancer(
+          connectors.get(0), p, new HdfsConfiguration());
+      Field field = run.getClass().getDeclaredField("dispatcher");
+      field.setAccessible(true);
+      Object dispatcher = field.get(run);
+      Field field1 =
+          dispatcher.getClass().getDeclaredField("hotBlockTimeInterval");
+      field1.setAccessible(true);
+      Object hotBlockTimeInterval = field1.get(dispatcher);
+      assertEquals(1000, (long) hotBlockTimeInterval);
+    } catch (Exception e) {
+      Assert.fail(e.getMessage());
+    }
+  }
+
   /**
    * Verify balancer exits 0 on success.
    */
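
For readers skimming the patch, here is a minimal, dependency-free sketch of the precedence rule the Balancer.java hunk introduces: a non-zero interval passed on the command line via -hotBlockTimeInterval (carried in BalancerParameters) takes priority, and the dfs.balancer.getBlocks.hot-time-interval configuration value (default 0L, meaning disabled) is only consulted as a fallback. The class and method names below are illustrative only and are not part of the patch.

/** Illustrative only: mirrors the CLI-over-config precedence added in Balancer.java. */
public class HotBlockIntervalPrecedenceSketch {

  /**
   * @param cliValueMs  value parsed from -hotBlockTimeInterval; 0 means "not set"
   * @param confValueMs value resolved from dfs.balancer.getBlocks.hot-time-interval
   * @return the interval, in milliseconds, that the dispatcher would use
   */
  static long resolveHotBlockTimeInterval(long cliValueMs, long confValueMs) {
    // The CLI parameter wins whenever it was explicitly set to a non-zero value;
    // otherwise fall back to the configuration value.
    return cliValueMs != 0L ? cliValueMs : confValueMs;
  }

  public static void main(String[] args) {
    System.out.println(resolveHotBlockTimeInterval(1000L, 600000L)); // 1000 (CLI overrides config)
    System.out.println(resolveHotBlockTimeInterval(0L, 600000L));    // 600000 (config fallback)
  }
}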