YARN-11393. Fs2cs could be extended to set ULF to -1 upon conversion (#5201)
parent: b93b1c69cc
commit: c44c9f984b
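Summary (fs2cs = the Fair Scheduler to Capacity Scheduler configuration converter; ULF = the user-limit-factor queue property): the converter now writes user-limit-factor = -1 for every leaf queue it emits. In Capacity Scheduler a value of -1 effectively disables the user-limit-factor cap, letting a single user occupy the queue's full capacity, which is closer to Fair Scheduler behavior. An illustrative sketch of the emitted property follows the first file's diff below.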
FSQueueConverter.java
@@ -17,12 +17,15 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.converter;
 
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.DOT;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.USER_LIMIT_FACTOR;
 
 import java.util.List;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.ConfigurableResource;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
@@ -79,6 +82,7 @@ public void convertQueueHierarchy(FSQueue queue) {
     emitMaxParallelApps(queueName, queue);
     emitMaxAllocations(queueName, queue);
     emitPreemptionDisabled(queueName, queue);
+    emitDefaultUserLimitFactor(queueName, children);
 
     emitChildCapacity(queue);
     emitMaximumCapacity(queueName, queue);
@@ -215,6 +219,15 @@ private void emitPreemptionDisabled(String queueName, FSQueue queue) {
     }
   }
 
+  public void emitDefaultUserLimitFactor(String queueName, List<FSQueue> children) {
+    if (children.isEmpty()) {
+      capacitySchedulerConfig.setFloat(
+          CapacitySchedulerConfiguration.
+              PREFIX + queueName + DOT + USER_LIMIT_FACTOR,
+          -1.0f);
+    }
+  }
+
   /**
    * yarn.scheduler.fair.sizebasedweight ==>
    * yarn.scheduler.capacity.<queue-path>
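For illustration only (not part of the patch): a minimal, runnable sketch of the key/value pair emitDefaultUserLimitFactor() writes for a childless (leaf) queue. The class name UlfSketch and the queue path root.users.joe are invented for the example; it assumes hadoop-common is on the classpath and spells out the literal values behind PREFIX ("yarn.scheduler.capacity."), DOT ("."), and USER_LIMIT_FACTOR ("user-limit-factor").

import org.apache.hadoop.conf.Configuration;

// Sketch: mirrors the property fs2cs now emits for each leaf queue.
public class UlfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    String queuePath = "root.users.joe"; // hypothetical leaf-queue path
    // Same key shape as PREFIX + queueName + DOT + USER_LIMIT_FACTOR:
    conf.setFloat(
        "yarn.scheduler.capacity." + queuePath + ".user-limit-factor", -1.0f);
    // Prints "-1.0", the value the new unit test asserts for leaf queues.
    System.out.println(
        conf.get("yarn.scheduler.capacity." + queuePath + ".user-limit-factor"));
  }
}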
TestFSConfigToCSConfigConverter.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.converter;
 
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.USER_LIMIT_FACTOR;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.converter.FSConfigToCSConfigRuleHandler.DYNAMIC_MAX_ASSIGN;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.converter.FSConfigToCSConfigRuleHandler.MAX_CAPACITY_PERCENTAGE;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.converter.FSConfigToCSConfigRuleHandler.MAX_CHILD_CAPACITY;
@@ -182,7 +183,26 @@ public void testDefaultMaxAMShare() throws Exception {
         conf.get(PREFIX + "root.admins.alice.maximum-am-resource-percent"));
 
     assertNull("root.users.joe maximum-am-resource-percent should be null",
-        conf.get(PREFIX + "root.users.joe maximum-am-resource-percent"));
+        conf.get(PREFIX + "root.users.joe.maximum-am-resource-percent"));
   }
 
+  @Test
+  public void testDefaultUserLimitFactor() throws Exception {
+    converter.convert(config);
+
+    Configuration conf = converter.getCapacitySchedulerConfig();
+
+    assertNull("root.users user-limit-factor should be null",
+        conf.get(PREFIX + "root.users." + USER_LIMIT_FACTOR));
+
+    assertEquals("root.default user-limit-factor", "-1.0",
+        conf.get(PREFIX + "root.default.user-limit-factor"));
+
+    assertEquals("root.users.joe user-limit-factor", "-1.0",
+        conf.get(PREFIX + "root.users.joe.user-limit-factor"));
+
+    assertEquals("root.admins.bob user-limit-factor", "-1.0",
+        conf.get(PREFIX + "root.admins.bob.user-limit-factor"));
+  }
+
   @Test