YARN-9580. Fulfilled reservation information in assignment is lost when transferring in ParentQueue#assignContainers. Contributed by Tao Yang.
Commit bd2590d71b (parent commit: 59719dc560).
@ -631,6 +631,10 @@ public CSAssignment assignContainers(Resource clusterResource,
|
||||
assignedToChild.getRequestLocalityType());
|
||||
assignment.setExcessReservation(assignedToChild.getExcessReservation());
|
||||
assignment.setContainersToKill(assignedToChild.getContainersToKill());
|
||||
assignment.setFulfilledReservation(
|
||||
assignedToChild.isFulfilledReservation());
|
||||
assignment.setFulfilledReservedContainer(
|
||||
assignedToChild.getFulfilledReservedContainer());
|
||||
|
||||
// Done if no child-queue assigned anything
|
||||
if (Resources.greaterThan(resourceCalculator, clusterResource,
|
||||
|
@ -245,4 +245,61 @@ public void testExcessReservationWillBeUnreserved() throws Exception {
|
||||
|
||||
rm1.close();
|
||||
}
|
||||
|
||||
/**
 * Verifies that a container reserved on a node is eventually allocated once
 * the node regains sufficient resources (here, after killing the app that
 * held them). Added with YARN-9580, where fulfilled-reservation information
 * was lost when transferring the assignment in ParentQueue#assignContainers.
 *
 * Scenario:
 *   1. Two 8GB nodes; two apps with 5GB AMs land on nm1 and nm2 respectively.
 *   2. app2 asks for a 6GB container; nm1 cannot fit it, so the scheduler
 *      reserves a container for app2 on nm1.
 *   3. app1 is killed, freeing nm1; the next node heartbeat must convert the
 *      reservation into a live container and clear the node's reservation.
 */
@Test(timeout=30000)
public void testAllocateForReservedContainer() throws Exception {
  CapacitySchedulerConfiguration newConf =
      new CapacitySchedulerConfiguration(conf);
  newConf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
      YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
  // Sorting interval 0 so the multi-node policy re-sorts on every heartbeat
  // instead of waiting for a timer tick (keeps the test deterministic).
  newConf.setInt(CapacitySchedulerConfiguration.MULTI_NODE_SORTING_POLICY_NAME
      + ".resource-based.sorting-interval.ms", 0);
  // Allow AMs to consume the whole queue; both 5GB AMs must start.
  newConf.setMaximumApplicationMasterResourcePerQueuePercent("root.default",
      1.0f);
  MockRM rm1 = new MockRM(newConf);

  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
  MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB);

  // launch an app to queue, AM container should be launched in nm1
  RMApp app1 = rm1.submitApp(5 * GB, "app", "user", null, "default");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

  // launch another app to queue, AM container should be launched in nm2
  RMApp app2 = rm1.submitApp(5 * GB, "app", "user", null, "default");
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);

  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  FiCaSchedulerApp schedulerApp1 =
      cs.getApplicationAttempt(am1.getApplicationAttemptId());
  FiCaSchedulerApp schedulerApp2 =
      cs.getApplicationAttempt(am2.getApplicationAttemptId());

  /*
   * Verify that reserved container will be allocated
   * after node has sufficient resource.
   */
  // Ask a container with 6GB memory size for app2,
  // nm1 will reserve a container for app2
  am2.allocate("*", 6 * GB, 1, new ArrayList<>());
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));

  // Check containers of app1 and app2.
  // Expected state: reservation held on nm1; each app has only its AM
  // container live; app2 additionally holds one reserved container.
  Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
  Assert.assertEquals(1, schedulerApp1.getLiveContainers().size());
  Assert.assertEquals(1, schedulerApp2.getLiveContainers().size());
  Assert.assertEquals(1, schedulerApp2.getReservedContainers().size());

  // Kill app1 to release resource on nm1.
  rm1.killApp(app1.getApplicationId());

  // Trigger scheduling to allocate for reserved container on nm1.
  // The reservation must be fulfilled (cleared on the node) and app2 must
  // now hold two live containers (AM + the formerly-reserved 6GB container).
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
  Assert.assertEquals(2, schedulerApp2.getLiveContainers().size());

  rm1.close();
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user