HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid. (#3185)

* HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid

* HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid

* HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid

* HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid

* HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid

Co-authored-by: jiaguodong5 <jiaguodong5@jd.com>
This commit is contained in:
JiaguodongF 2021-07-28 00:55:21 +08:00 committed by GitHub
parent fa0289b022
commit aecfcf165f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 59 additions and 5 deletions

View File

@@ -735,7 +735,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY = public static final String DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY =
"dfs.balancer.getBlocks.hot-time-interval"; "dfs.balancer.getBlocks.hot-time-interval";
public static final long DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT = public static final long DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT =
0; 0L;
public static final String DFS_BALANCER_KEYTAB_ENABLED_KEY = "dfs.balancer.keytab.enabled"; public static final String DFS_BALANCER_KEYTAB_ENABLED_KEY = "dfs.balancer.keytab.enabled";
public static final boolean DFS_BALANCER_KEYTAB_ENABLED_DEFAULT = false; public static final boolean DFS_BALANCER_KEYTAB_ENABLED_DEFAULT = false;
public static final String DFS_BALANCER_ADDRESS_KEY = "dfs.balancer.address"; public static final String DFS_BALANCER_ADDRESS_KEY = "dfs.balancer.address";

View File

@@ -324,10 +324,12 @@ static int getFailedTimesSinceLastSuccessfulBalance() {
* Balancer prefer to get blocks which are belong to the cold files * Balancer prefer to get blocks which are belong to the cold files
* created before this time period. * created before this time period.
*/ */
final long hotBlockTimeInterval = conf.getTimeDuration( final long hotBlockTimeInterval =
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY, p.getHotBlockTimeInterval() != 0L ? p.getHotBlockTimeInterval() :
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT, conf.getTimeDuration(
TimeUnit.MILLISECONDS); DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY,
DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS);
// DataNode configuration parameters for balancing // DataNode configuration parameters for balancing
final int maxConcurrentMovesPerNode = getInt(conf, final int maxConcurrentMovesPerNode = getInt(conf,

View File

@@ -110,6 +110,10 @@ boolean getSortTopNodes() {
return this.sortTopNodes; return this.sortTopNodes;
} }
/**
 * Returns the hot block time interval carried by these balancer parameters.
 * A value of {@code 0} means the option was not supplied on the command
 * line, in which case the Balancer falls back to the
 * {@code dfs.balancer.getBlocks.hot-time-interval} configuration value
 * (presumably milliseconds — confirm against Balancer's
 * {@code conf.getTimeDuration} call).
 */
long getHotBlockTimeInterval() {
return this.hotBlockTimeInterval;
}
@Override @Override
public String toString() { public String toString() {
return String.format("%s.%s [%s," + " threshold = %s," return String.format("%s.%s [%s," + " threshold = %s,"

View File

@@ -38,6 +38,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
import java.lang.reflect.Field;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.junit.AfterClass; import org.junit.AfterClass;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
@@ -67,6 +68,7 @@
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
import org.junit.Assert;
import org.junit.Before; import org.junit.Before;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@@ -1219,6 +1221,52 @@ public void testBalancerCliParseBlockpools() {
assertEquals(1, p.getBlockPools().size()); assertEquals(1, p.getBlockPools().size());
} }
/** CLI parsing: -hotBlockTimeInterval must land on BalancerParameters. */
@Test
public void testBalancerCliParseHotBlockTimeInterval() {
BalancerParameters parsed =
    Balancer.Cli.parse(new String[] {"-hotBlockTimeInterval", "1000"});
assertEquals(1000, parsed.getHotBlockTimeInterval());
}
/**
 * End-to-end check that a -hotBlockTimeInterval value supplied on the
 * command line reaches the Dispatcher's private hotBlockTimeInterval
 * field (read reflectively, since Dispatcher does not expose a getter).
 *
 * Fixes over the previous version: the MiniDFSCluster is now shut down in
 * a finally block (it was leaked before and could poison later tests),
 * and failures propagate via {@code throws Exception} instead of
 * {@code Assert.fail(e.getMessage())}, which discarded the stack trace
 * and failed with a null message for message-less exceptions.
 */
@Test
public void testBalancerDispatchHotBlockTimeInterval() throws Exception {
String[] parameters = new String[]{"-hotBlockTimeInterval", "1000"};
BalancerParameters p = Balancer.Cli.parse(parameters);
Configuration conf = new HdfsConfiguration();
initConf(conf);
cluster = new MiniDFSCluster
    .Builder(conf)
    .numDataNodes(0)
    .setNNRedundancyConsiderLoad(false)
    .build();
try {
  cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_REPLICATION_KEY,
      DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,
      DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  cluster.waitClusterUp();
  cluster.waitActive();
  Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
  List<NameNodeConnector> connectors =
      NameNodeConnector.newNameNodeConnectors(namenodes,
          Balancer.class.getSimpleName(),
          Balancer.BALANCER_ID_PATH, conf,
          BalancerParameters.DEFAULT.getMaxIdleIteration());
  Balancer run = new Balancer(
      connectors.get(0), p, new HdfsConfiguration());
  // Dispatcher is a private field of Balancer; dig it out reflectively.
  Field dispatcherField = run.getClass().getDeclaredField("dispatcher");
  dispatcherField.setAccessible(true);
  Object dispatcher = dispatcherField.get(run);
  Field intervalField =
      dispatcher.getClass().getDeclaredField("hotBlockTimeInterval");
  intervalField.setAccessible(true);
  // The CLI value (1000) must override the configuration default.
  assertEquals(1000L, ((Long) intervalField.get(dispatcher)).longValue());
} finally {
  // Always tear the mini cluster down so later tests start clean.
  cluster.shutdown();
}
}
/** /**
* Verify balancer exits 0 on success. * Verify balancer exits 0 on success.
*/ */