YARN-8179: Preemption does not happen due to natural_termination_factor when DRF is used. Contributed by Kyungwan Nam.

This commit is contained in:
Eric E Payne 2018-05-21 20:14:58 +00:00
parent 132a547dea
commit 0b4c44bdee
2 changed files with 61 additions and 2 deletions

View File

@ -197,8 +197,11 @@ private void calculateResToObtainByPartitionForLeafQueues(
Before:

    */
    Resource resToObtain = qT.toBePreempted;
    if (!isReservedPreemptionCandidatesSelector) {
      resToObtain = Resources.multiply(qT.toBePreempted,
          context.getNaturalTerminationFactor());
    }
    // Only add resToObtain when it >= 0

After:

    */
    Resource resToObtain = qT.toBePreempted;
    if (!isReservedPreemptionCandidatesSelector) {
      if (Resources.greaterThan(rc, clusterResource, resToObtain,
          Resource.newInstance(0, 0))) {
        resToObtain = Resources.multiplyAndNormalizeUp(rc, qT.toBePreempted,
            context.getNaturalTerminationFactor(), Resource.newInstance(1, 1));
      }
    }
    // Only add resToObtain when it >= 0

View File

@ -18,15 +18,28 @@
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.never; import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times; import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF public class TestProportionalCapacityPreemptionPolicyInterQueueWithDRF
extends ProportionalCapacityPreemptionPolicyMockFramework { extends ProportionalCapacityPreemptionPolicyMockFramework {
// Runs before each test: switches the scheduler mock to the
// DominantResourceCalculator so that the multi-resource (DRF) code paths
// of the preemption policy are exercised, then rebuilds the policy so it
// picks up the new calculator.
@Before
public void setup() {
// Base mock-framework wiring from the parent class.
super.setup();
// Use DRF instead of the framework's default resource calculator.
rc = new DominantResourceCalculator();
when(cs.getResourceCalculator()).thenReturn(rc);
// Recreate the policy against the reconfigured scheduler mock.
policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
}
@Test @Test
public void testInterQueuePreemptionWithMultipleResource() public void testInterQueuePreemptionWithMultipleResource()
throws Exception { throws Exception {
@ -65,4 +78,47 @@ public void testInterQueuePreemptionWithMultipleResource()
new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
getAppAttemptId(2)))); getAppAttemptId(2))));
} }
@Test
public void testInterQueuePreemptionWithNaturalTerminationFactor()
    throws Exception {
  /**
   * Queue structure:
   *
   * <pre>
   *     root
   *     /  \
   *    a    b
   * </pre>
   *
   * Queues a and b are each guaranteed half of the cluster
   * (total cluster resource = 100:50). Every allocated resource sits in
   * queue a while queue b is pending only a small amount (a couple of
   * containers worth). Even with a natural termination factor below 1,
   * some resource must still be preempted from the app in queue a.
   */
  conf.setFloat(
      CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
      0.2f);

  String labels = "=100:50,true;";
  // single node n1, no label
  String nodes = "n1= res=100:50";
  // per queue: guaranteed,max,used,pending
  String queues =
      "root(=[100:50 100:50 50:50 0:0]);" + // root
          "-a(=[50:25 100:50 50:50 0:0]);" + // a
          "-b(=[50:25 50:25 0:0 2:1]);"; // b
  // per app: queueName\t(priority,resource,host,expression,#repeat,reserved)
  String apps = "a\t(1,2:1,n1,,50,false);"; // app1 in a

  buildEnv(labels, nodes, queues, apps);
  policy.editSchedule();

  // Exactly one preemption request should be issued against app1 in queue a.
  verify(mDisp, times(1)).handle(argThat(
      new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
          getAppAttemptId(1))));
}
} }