HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid. (#3185)
HDFS-16119. start balancer with parameters -hotBlockTimeInterval xxx is invalid

Co-authored-by: jiaguodong5 <jiaguodong5@jd.com>
commit aecfcf165f
parent fa0289b022
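As the title suggests, passing -hotBlockTimeInterval on the command line had no effect because the Balancer only consulted dfs.balancer.getBlocks.hot-time-interval from the configuration; this patch adds a getHotBlockTimeInterval() accessor to BalancerParameters and makes a non-zero command-line value take precedence over the configured one. The sketch below isolates that precedence rule in a hypothetical helper (the class and method names are made up for illustration; the real logic sits inline in Balancer, shown in the second hunk below).

// Minimal, self-contained sketch of the precedence rule introduced by this
// patch (hypothetical helper, not part of the actual change).
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class HotBlockTimeIntervalExample {

  // A non-zero value supplied via -hotBlockTimeInterval wins; otherwise fall
  // back to dfs.balancer.getBlocks.hot-time-interval from the configuration.
  static long resolveHotBlockTimeInterval(long cliValue, Configuration conf) {
    return cliValue != 0L
        ? cliValue
        : conf.getTimeDuration(
            DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY,
            DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT,
            TimeUnit.MILLISECONDS);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY, 300000L);
    // A CLI value of 1000 ms takes precedence over the configured 300000 ms.
    System.out.println(resolveHotBlockTimeIntervalDemo(1000L, conf)); // 1000
    // With no CLI value (0), the configured value is used.
    System.out.println(resolveHotBlockTimeIntervalDemo(0L, conf));    // 300000
  }

  private static long resolveHotBlockTimeIntervalDemo(long cli, Configuration c) {
    return resolveHotBlockTimeInterval(cli, c);
  }
}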
DFSConfigKeys.java
@@ -735,7 +735,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY =
       "dfs.balancer.getBlocks.hot-time-interval";
   public static final long DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT =
-      0;
+      0L;
   public static final String DFS_BALANCER_KEYTAB_ENABLED_KEY = "dfs.balancer.keytab.enabled";
   public static final boolean DFS_BALANCER_KEYTAB_ENABLED_DEFAULT = false;
   public static final String DFS_BALANCER_ADDRESS_KEY = "dfs.balancer.address";
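The only change here is declaring the default as 0L to match the long-typed constant. As an illustration only (not part of the patch; the class name below is made up), the key can be read through Configuration.getTimeDuration, which accepts plain millisecond values as well as unit suffixes such as "10m"; the same property can be set cluster-wide in hdfs-site.xml under dfs.balancer.getBlocks.hot-time-interval.

// Illustration only: reading the key with Configuration.getTimeDuration,
// which resolves unit suffixes ("10m" == 600000 ms).
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class HotTimeIntervalConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Same effect as setting dfs.balancer.getBlocks.hot-time-interval=10m
    // in hdfs-site.xml.
    conf.set(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY, "10m");
    long intervalMs = conf.getTimeDuration(
        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY,
        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT,
        TimeUnit.MILLISECONDS);
    System.out.println(intervalMs); // 600000
  }
}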
Balancer.java
@@ -324,10 +324,12 @@ static int getFailedTimesSinceLastSuccessfulBalance() {
      * Balancer prefer to get blocks which are belong to the cold files
      * created before this time period.
      */
-    final long hotBlockTimeInterval = conf.getTimeDuration(
-        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY,
-        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
+    final long hotBlockTimeInterval =
+        p.getHotBlockTimeInterval() != 0L ? p.getHotBlockTimeInterval() :
+            conf.getTimeDuration(
+                DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_KEY,
+                DFSConfigKeys.DFS_BALANCER_GETBLOCKS_HOT_TIME_INTERVAL_DEFAULT,
+                TimeUnit.MILLISECONDS);

     // DataNode configuration parameters for balancing
     final int maxConcurrentMovesPerNode = getInt(conf,
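The comment in this hunk describes the semantics: with a non-zero hotBlockTimeInterval, the Balancer prefers blocks belonging to cold files, i.e. files created before the interval window, and 0 leaves the behavior unchanged. A rough, hypothetical sketch of that cutoff check (this helper is not part of the patch; the real check is applied when blocks are fetched for balancing, not in a method like this):

// Hypothetical illustration of the hot/cold cutoff described in the comment.
public class ColdFileCheck {
  static boolean isColdFile(long fileCreationTimeMs, long hotBlockTimeIntervalMs) {
    if (hotBlockTimeIntervalMs == 0L) {
      return true; // 0 disables the preference; every file is eligible
    }
    long cutoff = System.currentTimeMillis() - hotBlockTimeIntervalMs;
    return fileCreationTimeMs < cutoff; // created before the window => cold
  }
}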
BalancerParameters.java
@@ -110,6 +110,10 @@ boolean getSortTopNodes() {
     return this.sortTopNodes;
   }

+  long getHotBlockTimeInterval() {
+    return this.hotBlockTimeInterval;
+  }
+
   @Override
   public String toString() {
     return String.format("%s.%s [%s," + " threshold = %s,"
TestBalancer.java
@@ -38,6 +38,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;

+import java.lang.reflect.Field;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.junit.AfterClass;
 import static org.junit.Assert.assertEquals;
@@ -67,6 +68,7 @@
 import java.util.concurrent.atomic.AtomicLong;

 import org.apache.commons.lang3.StringUtils;
+import org.junit.Assert;
 import org.junit.Before;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -1219,6 +1221,52 @@ public void testBalancerCliParseBlockpools() {
     assertEquals(1, p.getBlockPools().size());
   }

+  @Test
+  public void testBalancerCliParseHotBlockTimeInterval() {
+    String[] parameters = new String[]{"-hotBlockTimeInterval", "1000"};
+    BalancerParameters p = Balancer.Cli.parse(parameters);
+    assertEquals(1000, p.getHotBlockTimeInterval());
+  }
+
+  @Test
+  public void testBalancerDispatchHotBlockTimeInterval() {
+    String[] parameters = new String[]{"-hotBlockTimeInterval", "1000"};
+    BalancerParameters p = Balancer.Cli.parse(parameters);
+    Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    try {
+      cluster = new MiniDFSCluster
+          .Builder(conf)
+          .numDataNodes(0)
+          .setNNRedundancyConsiderLoad(false)
+          .build();
+      cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+          DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+      conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+          DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+      cluster.waitClusterUp();
+      cluster.waitActive();
+      Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
+      List<NameNodeConnector> connectors =
+          NameNodeConnector.newNameNodeConnectors(namenodes,
+              Balancer.class.getSimpleName(),
+              Balancer.BALANCER_ID_PATH, conf,
+              BalancerParameters.DEFAULT.getMaxIdleIteration());
+      Balancer run = new Balancer(
+          connectors.get(0), p, new HdfsConfiguration());
+      Field field = run.getClass().getDeclaredField("dispatcher");
+      field.setAccessible(true);
+      Object dispatcher = field.get(run);
+      Field field1 =
+          dispatcher.getClass().getDeclaredField("hotBlockTimeInterval");
+      field1.setAccessible(true);
+      Object hotBlockTimeInterval = field1.get(dispatcher);
+      assertEquals(1000, (long)hotBlockTimeInterval);
+    } catch (Exception e) {
+      Assert.fail(e.getMessage());
+    }
+  }
+
   /**
    * Verify balancer exits 0 on success.
    */