YARN-7632. Effective min and max resource need to be set for auto created leaf queues upon creation and capacity management. Contributed by Suma Shivaprasad.
parent a2edc4cbf5
commit 312ceebde8
@@ -21,6 +21,8 @@
+import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;

+import org.apache.hadoop.yarn.util.resource.Resources;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -101,14 +103,23 @@ public void reinitializeFromTemplate(AutoCreatedLeafQueueConfig

  private void mergeCapacities(QueueCapacities capacities) {
    for ( String nodeLabel : capacities.getExistingNodeLabels()) {
-      this.queueCapacities.setCapacity(nodeLabel,
+      queueCapacities.setCapacity(nodeLabel,
          capacities.getCapacity(nodeLabel));
-      this.queueCapacities.setAbsoluteCapacity(nodeLabel, capacities
+      queueCapacities.setAbsoluteCapacity(nodeLabel, capacities
          .getAbsoluteCapacity(nodeLabel));
-      this.queueCapacities.setMaximumCapacity(nodeLabel, capacities
+      queueCapacities.setMaximumCapacity(nodeLabel, capacities
          .getMaximumCapacity(nodeLabel));
-      this.queueCapacities.setAbsoluteMaximumCapacity(nodeLabel, capacities
+      queueCapacities.setAbsoluteMaximumCapacity(nodeLabel, capacities
          .getAbsoluteMaximumCapacity(nodeLabel));
+
+      Resource resourceByLabel = labelManager.getResourceByLabel(nodeLabel,
+          csContext.getClusterResource());
+      getQueueResourceQuotas().setEffectiveMinResource(nodeLabel,
+          Resources.multiply(resourceByLabel,
+              queueCapacities.getAbsoluteCapacity(nodeLabel)));
+      getQueueResourceQuotas().setEffectiveMaxResource(nodeLabel,
+          Resources.multiply(resourceByLabel, queueCapacities
+              .getAbsoluteMaximumCapacity(nodeLabel)));
    }
  }

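For reference, the added logic scales each node label's total resource by the queue's absolute capacity to get the effective minimum, and by the absolute maximum capacity to get the effective maximum. A minimal, self-contained sketch of that arithmetic, using made-up numbers and a hypothetical class rather than the real CapacityScheduler API:

// Hypothetical, standalone illustration of the effective-resource rule applied
// in mergeCapacities(); not the actual YARN API.
public class EffectiveResourceSketch {
  // effective resource = total resource available for the label * absolute capacity
  static long effective(long labelResourceMb, float absoluteCapacity) {
    return (long) (labelResourceMb * absoluteCapacity);
  }

  public static void main(String[] args) {
    long labelResourceMb = 100 * 1024;  // assumed: the label spans 100 GB of the cluster
    float absCapacity = 0.02f;          // assumed: leaf queue guaranteed 2% (absolute capacity)
    float absMaxCapacity = 1.0f;        // assumed: leaf queue may grow to the full label resource

    System.out.println("effective min MB = " + effective(labelResourceMb, absCapacity));
    System.out.println("effective max MB = " + effective(labelResourceMb, absMaxCapacity));
  }
}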
@@ -513,7 +513,7 @@ public void testEffectiveResourceAfterReducingClusterResource()
  }

  @Test
-  public void testEffectiveResourceAfterIncreasinClusterResource()
+  public void testEffectiveResourceAfterIncreasingClusterResource()
      throws Exception {
    // create conf with basic queue configuration.
    CapacitySchedulerConfiguration csConf = setupComplexQueueConfiguration(
@@ -17,17 +17,25 @@
 */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.commons.lang.math.RandomUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels
    .NullRMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.placement
    .ApplicationPlacementContext;
import org.apache.hadoop.yarn.server.resourcemanager.placement
@@ -51,13 +59,17 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
    .SchedulerEvent;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
@@ -73,13 +85,14 @@
    .capacity.CapacitySchedulerConfiguration.FAIR_APP_ORDERING_POLICY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

public class TestCapacitySchedulerAutoCreatedQueueBase {

  private static final Log LOG = LogFactory.getLog(
      TestCapacitySchedulerAutoCreatedQueueBase.class);
-  public final int GB = 1024;
+  public static final int GB = 1024;
  public final static ContainerUpdates NULL_UPDATE_REQUESTS =
      new ContainerUpdates();

@@ -107,6 +120,12 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
  public static final float C1_CAPACITY = 20f;
  public static final float C2_CAPACITY = 20f;

+  public static final int NODE_MEMORY = 16;
+
+  public static final int NODE1_VCORES = 16;
+  public static final int NODE2_VCORES = 32;
+  public static final int NODE3_VCORES = 48;
+
  public static final String USER = "user_";
  public static final String USER0 = USER + 0;
  public static final String USER1 = USER + 1;
@@ -120,6 +139,9 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
  public static final String NODEL_LABEL_SSD = "SSD";

  protected MockRM mockRM = null;
+  protected MockNM nm1 = null;
+  protected MockNM nm2 = null;
+  protected MockNM nm3 = null;
  protected CapacityScheduler cs;
  private final TestCapacityScheduler tcs = new TestCapacityScheduler();
  protected SpyDispatcher dispatcher;
@@ -163,16 +185,43 @@ public void setUp() throws Exception {

    setupQueueMappings(conf);

-    mockRM = new MockRM(conf);
-    cs = (CapacityScheduler) mockRM.getResourceScheduler();

    dispatcher = new SpyDispatcher();
    rmAppEventEventHandler = new SpyDispatcher.SpyRMAppEventHandler();
    dispatcher.register(RMAppEventType.class, rmAppEventEventHandler);

+    RMNodeLabelsManager mgr = setupNodeLabelManager(conf);
+
+    mockRM = new MockRM(conf) {
+      protected RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
+
+    cs = (CapacityScheduler) mockRM.getResourceScheduler();
    cs.updatePlacementRules();
    mockRM.start();

    cs.start();

+    setupNodes(mockRM);
  }

+  protected void setupNodes(MockRM newMockRM) throws Exception {
+    nm1 = // label = SSD
+        new MockNM("h1:1234", NODE_MEMORY * GB, NODE1_VCORES, newMockRM
+            .getResourceTrackerService());
+    nm1.registerNode();
+
+    nm2 = // label = GPU
+        new MockNM("h2:1234", NODE_MEMORY * GB, NODE2_VCORES, newMockRM
+            .getResourceTrackerService
+            ());
+    nm2.registerNode();
+
+    nm3 = // label = ""
+        new MockNM("h3:1234", NODE_MEMORY * GB, NODE3_VCORES, newMockRM
+            .getResourceTrackerService
+            ());
+    nm3.registerNode();
+  }

  public static CapacitySchedulerConfiguration setupQueueMappings(
@@ -340,21 +389,23 @@ protected List<UserGroupMappingPlacementRule.QueueMapping> setupQueueMapping(
    return queueMappings;
  }

-  protected MockRM setupSchedulerInstance() {
+  protected MockRM setupSchedulerInstance() throws Exception {
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    setupQueueConfiguration(conf);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
        ResourceScheduler.class);

    List<String> queuePlacementRules = new ArrayList<String>();
    queuePlacementRules.add(YarnConfiguration.USER_GROUP_PLACEMENT_RULE);
    conf.setQueuePlacementRules(queuePlacementRules);

    setupQueueMappings(conf);

-    MockRM newMockRM = new MockRM(conf);
+    RMNodeLabelsManager mgr = setupNodeLabelManager(conf);
+    MockRM newMockRM = new MockRM(conf) {
+      protected RMNodeLabelsManager createNodeLabelManager() {
+        return mgr;
+      }
+    };
    newMockRM.start();
    ((CapacityScheduler) newMockRM.getResourceScheduler()).start();
+    setupNodes(newMockRM);
    return newMockRM;
  }

@@ -390,6 +441,21 @@ static String getQueueMapping(String parentQueue, String leafQueue) {
    return parentQueue + DOT + leafQueue;
  }

+  protected RMNodeLabelsManager setupNodeLabelManager(
+      CapacitySchedulerConfiguration conf) throws IOException {
+    final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
+    mgr.init(conf);
+    mgr.addToCluserNodeLabelsWithDefaultExclusivity(
+        ImmutableSet.of(NODEL_LABEL_SSD, NODEL_LABEL_GPU));
+    mgr.addLabelsToNode(ImmutableMap
+        .of(NodeId.newInstance("h1", 0),
+            TestUtils.toSet(NODEL_LABEL_SSD)));
+    mgr.addLabelsToNode(ImmutableMap
+        .of(NodeId.newInstance("h2", 0),
+            TestUtils.toSet(NODEL_LABEL_GPU)));
+    return mgr;
+  }
+
  protected ApplicationAttemptId submitApp(CapacityScheduler newCS, String user,
      String queue, String parentQueue) {
    ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
@@ -460,8 +526,19 @@ protected void validateInitialQueueEntitlement(
    AutoCreatedLeafQueue leafQueue =
        (AutoCreatedLeafQueue) capacityScheduler.getQueue(leafQueueName);

+    Map<String, QueueEntitlement> expectedEntitlements = new HashMap<>();
+    QueueCapacities cap = autoCreateEnabledParentQueue.getLeafQueueTemplate()
+        .getQueueCapacities();

    for (String label : accessibleNodeLabelsOnC) {
      validateCapacitiesByLabel(autoCreateEnabledParentQueue, leafQueue, label);

+      QueueEntitlement expectedEntitlement = new QueueEntitlement(
+          cap.getCapacity(label), cap.getMaximumCapacity(label));
+
+      expectedEntitlements.put(label, expectedEntitlement);
+
+      validateEffectiveMinResource(leafQueue, label, expectedEntitlements);
    }

    assertEquals(true, policy.isActive(leafQueue));
@@ -480,6 +557,28 @@ protected void validateCapacitiesByLabel(
        leafQueue.getQueueCapacities().getMaximumCapacity(label), EPSILON);
  }

+  protected void validateEffectiveMinResource(CSQueue leafQueue,
+      String label, Map<String, QueueEntitlement> expectedQueueEntitlements) {
+    ManagedParentQueue parentQueue = (ManagedParentQueue) leafQueue.getParent();
+
+    Resource resourceByLabel = mockRM.getRMContext().getNodeLabelManager().
+        getResourceByLabel(label, cs.getClusterResource());
+    Resource effMinCapacity = Resources.multiply(resourceByLabel,
+        expectedQueueEntitlements.get(label).getCapacity() * parentQueue
+            .getQueueCapacities().getAbsoluteCapacity(label));
+    assertEquals(effMinCapacity, Resources.multiply(resourceByLabel,
+        leafQueue.getQueueCapacities().getAbsoluteCapacity(label)));
+    assertEquals(effMinCapacity, leafQueue.getEffectiveCapacity(label));
+
+    if (leafQueue.getQueueCapacities().getAbsoluteCapacity(label) > 0) {
+      assertTrue(Resources
+          .greaterThan(cs.getResourceCalculator(), cs.getClusterResource(),
+              effMinCapacity, Resources.none()));
+    } else{
+      assertTrue(Resources.equals(effMinCapacity, Resources.none()));
+    }
+  }
+
  protected void validateActivatedQueueEntitlement(CSQueue parentQueue,
      String leafQueueName, float expectedTotalChildQueueAbsCapacity,
      List<QueueManagementChange> queueManagementChanges)
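The helper above builds its expectation by composing two fractions: the leaf queue's entitlement capacity (relative to its parent) times the parent's absolute capacity gives the leaf's absolute share of the cluster, which is then applied to the label's total resource. A hypothetical worked example of that arithmetic (all values are illustrative, not taken from the test run):

// Hypothetical numbers mirroring the expectation built in
// validateEffectiveMinResource(); not part of the actual test.
public class ExpectedEffectiveMinSketch {
  public static void main(String[] args) {
    float leafTemplateCapacity = 0.5f;   // assumed: leaf template gets 50% of its parent
    float parentAbsCapacity = 0.2f;      // assumed: parent holds 20% of the cluster
    long labelResourceMb = 48 * 1024;    // assumed: 48 GB available under this label

    // leaf absolute capacity = template capacity * parent absolute capacity
    float leafAbsCapacity = leafTemplateCapacity * parentAbsCapacity;

    // expected effective min = label resource * leaf absolute capacity
    long expectedEffMinMb = (long) (labelResourceMb * leafAbsCapacity);

    System.out.println("leaf absolute capacity = " + leafAbsCapacity);      // ~0.1
    System.out.println("expected effective min MB = " + expectedEffMinMb);  // ~4915
  }
}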
@@ -552,6 +651,8 @@ private void validateQueueEntitlementChangesForLeafQueue(CSQueue leafQueue,
      QueueEntitlement expectedQueueEntitlement,
      final List<QueueManagementChange> queueEntitlementChanges) {
    boolean found = false;
+
+    Map<String, QueueEntitlement> expectedQueueEntitlements = new HashMap<>();
    for (QueueManagementChange entitlementChange : queueEntitlementChanges) {
      if (leafQueue.getQueueName().equals(
          entitlementChange.getQueue().getQueueName())) {
@@ -565,6 +666,9 @@ private void validateQueueEntitlementChangesForLeafQueue(CSQueue leafQueue,
            updatedQueueTemplate.getQueueCapacities()
                .getMaximumCapacity(label));
        assertEquals(expectedQueueEntitlement, newEntitlement);
+        expectedQueueEntitlements.put(label, expectedQueueEntitlement);
+        validateEffectiveMinResource(leafQueue, label,
+            expectedQueueEntitlements);
      }
      found = true;
      break;
@@ -24,6 +24,7 @@
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -64,6 +65,8 @@
import org.apache.hadoop.yarn.server.resourcemanager.security
    .RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
import org.junit.Test;

import java.io.IOException;
@@ -91,12 +94,19 @@ public class TestCapacitySchedulerAutoQueueCreation
  private static final Log LOG = LogFactory.getLog(
      TestCapacitySchedulerAutoQueueCreation.class);

+  private static final Resource TEMPLATE_MAX_RES = Resource.newInstance(16 *
+          GB,
+      48);
+  private static final Resource TEMPLATE_MIN_RES = Resource.newInstance(1638,
+      4);
+

  @Test(timeout = 10000)
  public void testAutoCreateLeafQueueCreation() throws Exception {

    try {
      // submit an app
-      submitApp(cs, USER0, USER0, PARENT_QUEUE);
+      submitApp(mockRM, cs.getQueue(PARENT_QUEUE), USER0, USER0, 1, 1);

      // check preconditions
      List<ApplicationAttemptId> appsInC = cs.getAppsInQueue(PARENT_QUEUE);
@@ -419,7 +429,7 @@ public void testParentQueueUpdateInQueueMappingFailsAfterAutoCreation()

  @Test
  public void testAutoCreationFailsWhenParentCapacityExceeded()
-      throws IOException, SchedulerDynamicEditException {
+      throws Exception {
    MockRM newMockRM = setupSchedulerInstance();
    CapacityScheduler newCS =
        (CapacityScheduler) newMockRM.getResourceScheduler();
@@ -468,11 +478,6 @@ public void testAutoCreationFailsWhenParentCapacityExceeded()
  public void testAutoCreatedQueueActivationDeactivation() throws Exception {

    try {
-      String host = "127.0.0.1";
-      RMNode node = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1,
-          host);
-      cs.handle(new NodeAddedSchedulerEvent(node));
-
      CSQueue parentQueue = cs.getQueue(PARENT_QUEUE);

      //submit app1 as USER1
@@ -530,6 +535,100 @@ public void testAutoCreatedQueueActivationDeactivation() throws Exception {
    }
  }

+  @Test
+  public void testClusterResourceUpdationOnAutoCreatedLeafQueues() throws
+      Exception {
+
+    MockRM newMockRM = setupSchedulerInstance();
+    try {
+      CapacityScheduler newCS =
+          (CapacityScheduler) newMockRM.getResourceScheduler();
+
+      CSQueue parentQueue = newCS.getQueue(PARENT_QUEUE);
+
+      //submit app1 as USER1
+      submitApp(newMockRM, parentQueue, USER1, USER1, 1, 1);
+      validateInitialQueueEntitlement(newCS, parentQueue, USER1, 0.1f);
+      CSQueue user1LeafQueue = newCS.getQueue(USER1);
+
+      //submit another app2 as USER2
+      submitApp(newMockRM, parentQueue, USER2, USER2, 2, 1);
+      validateInitialQueueEntitlement(newCS, parentQueue, USER2, 0.2f);
+      CSQueue user2LeafQueue = newCS.getQueue(USER2);
+
+      //validate total activated abs capacity remains the same
+      GuaranteedOrZeroCapacityOverTimePolicy autoCreatedQueueManagementPolicy =
+          (GuaranteedOrZeroCapacityOverTimePolicy) ((ManagedParentQueue)
+              parentQueue)
+              .getAutoCreatedQueueManagementPolicy();
+      assertEquals(autoCreatedQueueManagementPolicy
+          .getAbsoluteActivatedChildQueueCapacity(), 0.2f, EPSILON);
+
+      //submit user_3 app. This cant be scheduled since there is no capacity
+      submitApp(newMockRM, parentQueue, USER3, USER3, 3, 1);
+      final CSQueue user3LeafQueue = newCS.getQueue(USER3);
+      validateCapacities((AutoCreatedLeafQueue) user3LeafQueue, 0.0f, 0.0f,
+          1.0f, 1.0f);
+
+      assertEquals(autoCreatedQueueManagementPolicy
+          .getAbsoluteActivatedChildQueueCapacity(), 0.2f, EPSILON);
+
+      // add new NM.
+      newMockRM.registerNode("127.0.0.3:1234", 125 * GB, 20);
+
+      // There will be change in effective resource when nodes are added
+      // since we deal with percentages
+
+      Resource MAX_RES = Resources.addTo(TEMPLATE_MAX_RES,
+          Resources.createResource(125 * GB, 20));
+
+      Resource MIN_RES = Resources.createResource(14438, 6);
+
+      Assert.assertEquals("Effective Min resource for USER3 is not correct",
+          Resources.none(),
+          user3LeafQueue.getQueueResourceQuotas().getEffectiveMinResource());
+      Assert.assertEquals("Effective Max resource for USER3 is not correct",
+          MAX_RES,
+          user3LeafQueue.getQueueResourceQuotas().getEffectiveMaxResource());
+
+      Assert.assertEquals("Effective Min resource for USER2 is not correct",
+          MIN_RES,
+          user1LeafQueue.getQueueResourceQuotas().getEffectiveMinResource());
+      Assert.assertEquals("Effective Max resource for USER2 is not correct",
+          MAX_RES,
+          user1LeafQueue.getQueueResourceQuotas().getEffectiveMaxResource());
+
+      Assert.assertEquals("Effective Min resource for USER1 is not correct",
+          MIN_RES,
+          user2LeafQueue.getQueueResourceQuotas().getEffectiveMinResource());
+      Assert.assertEquals("Effective Max resource for USER1 is not correct",
+          MAX_RES,
+          user2LeafQueue.getQueueResourceQuotas().getEffectiveMaxResource());
+
+      // unregister one NM.
+      newMockRM.unRegisterNode(nm3);
+      Resource MIN_RES_UPDATED = Resources.createResource(12800, 2);
+      Resource MAX_RES_UPDATED = Resources.createResource(128000, 20);
+
+      // After loosing one NM, resources will reduce
+      Assert.assertEquals("Effective Min resource for USER2 is not correct",
+          MIN_RES_UPDATED,
+          user1LeafQueue.getQueueResourceQuotas().getEffectiveMinResource());
+      Assert.assertEquals("Effective Max resource for USER2 is not correct",
+          MAX_RES_UPDATED,
+          user2LeafQueue.getQueueResourceQuotas().getEffectiveMaxResource());
+
+    } finally {
+      cleanupQueue(USER1);
+      cleanupQueue(USER2);
+      cleanupQueue(USER3);
+      if (newMockRM != null) {
+        ((CapacityScheduler) newMockRM.getResourceScheduler()).stop();
+        newMockRM.stop();
+      }
+    }
+  }
+
  @Test
  public void testAutoCreatedQueueInheritsNodeLabels() throws Exception {

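The new test relies on effective resources being derived from percentages of the current cluster resource, so registering or unregistering a NodeManager changes every auto-created leaf queue's effective min and max even though its configured capacities stay fixed. A hypothetical illustration of that recomputation (numbers are made up, not taken from the test):

// Hypothetical, standalone illustration (not the YARN API): effective resources
// follow the cluster size because they are percentages, not fixed reservations.
public class ClusterResizeSketch {
  public static void main(String[] args) {
    long clusterMb = 3 * 16 * 1024;   // assumed: three 16 GB NodeManagers registered
    float leafAbsCapacity = 0.1f;     // assumed absolute capacity of one auto-created leaf

    long effMinBefore = (long) (clusterMb * leafAbsCapacity);

    clusterMb += 125 * 1024;          // a 125 GB NodeManager registers, as in the test
    long effMinAfter = (long) (clusterMb * leafAbsCapacity);

    // The configured 10% stays the same; the effective minimum grows with the cluster.
    System.out.println(effMinBefore + " MB -> " + effMinAfter + " MB");
  }
}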
@@ -559,11 +658,6 @@ public void testReinitializeQueuesWithAutoCreatedLeafQueues()
        (CapacityScheduler) newMockRM.getResourceScheduler();
    CapacitySchedulerConfiguration conf = newCS.getConfiguration();

-    String host = "127.0.0.1";
-    RMNode node = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1,
-        host);
-    newCS.handle(new NodeAddedSchedulerEvent(node));
-
    CSQueue parentQueue = newCS.getQueue(PARENT_QUEUE);

    //submit app1 as USER1