YARN-6674 Add memory cgroup settings for opportunistic containers. (Miklos Szegedi via Haibo Chen)
commit c5d256c760 (parent f64cfeaf61)
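For orientation: this change gives opportunistic containers a memory cgroup soft limit of 0M and swappiness of 100, so the kernel reclaims and swaps their pages first under memory pressure, while guaranteed containers keep the values derived from NodeManager configuration. Below is a minimal sketch of that policy; the class and helper names are illustrative and not part of the patch, only the constant values 0 and 100 come from the diff.

// Illustrative sketch only, not NodeManager code.
public class OpportunisticMemoryDefaultsSketch {
  private static final int OPPORTUNISTIC_SWAPPINESS = 100; // swap these pages out first
  private static final int OPPORTUNISTIC_SOFT_LIMIT = 0;   // reclaimable down to 0M under pressure

  static String softLimitParam(boolean opportunistic, long containerSoftLimitMB) {
    return (opportunistic ? OPPORTUNISTIC_SOFT_LIMIT : containerSoftLimitMB) + "M";
  }

  static int swappinessParam(boolean opportunistic, int configuredSwappiness) {
    return opportunistic ? OPPORTUNISTIC_SWAPPINESS : configuredSwappiness;
  }

  public static void main(String[] args) {
    // An opportunistic container: soft limit "0M", swappiness 100.
    System.out.println(softLimitParam(true, 922) + " / " + swappinessParam(true, 60));
    // A guaranteed container keeps the configured soft limit and swappiness.
    System.out.println(softLimitParam(false, 922) + " / " + swappinessParam(false, 60));
  }
}

The intent is that under node memory pressure the opportunistic containers' memory is reclaimed and swapped before guaranteed containers are affected.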
CGroupsMemoryResourceHandlerImpl.java

@@ -25,7 +25,9 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 
@@ -46,6 +48,8 @@ public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler {
       CGroupsMemoryResourceHandlerImpl.class);
   private static final CGroupsHandler.CGroupController MEMORY =
       CGroupsHandler.CGroupController.MEMORY;
+  private static final int OPPORTUNISTIC_SWAPPINESS = 100;
+  private static final int OPPORTUNISTIC_SOFT_LIMIT = 0;
 
   private CGroupsHandler cGroupsHandler;
   private int swappiness = 0;
@@ -85,13 +89,15 @@ public List<PrivilegedOperation> bootstrap(Configuration conf)
               + ". Value must be between 0 and 100.");
     }
     float softLimitPerc = conf.getFloat(
-        YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE,
-        YarnConfiguration.DEFAULT_NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE);
+        YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE,
+        YarnConfiguration.
+            DEFAULT_NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE);
     softLimit = softLimitPerc / 100.0f;
     if (softLimitPerc < 0.0f || softLimitPerc > 100.0f) {
       throw new ResourceHandlerException(
           "Illegal value '" + softLimitPerc + "' "
-              + YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE
+              + YarnConfiguration.
+                  NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE
               + ". Value must be between 0 and 100.");
     }
     return null;
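The bootstrap() hunk above reads and validates the soft-limit percentage alongside the existing swappiness setting; both must lie in [0, 100]. A minimal sketch of setting those NodeManager keys, using the configuration constants visible in the diff (the values chosen here are arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Illustrative configuration example, not part of the patch.
public class CGroupsMemoryConfigExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Both keys must stay within [0, 100]; bootstrap() rejects anything else
    // with a ResourceHandlerException.
    conf.setInt(YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SWAPPINESS, 60);
    conf.setFloat(
        YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE, 90.0f);
    System.out.println(conf.getFloat(
        YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE, 0.0f));
  }
}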
@@ -122,12 +128,23 @@ public List<PrivilegedOperation> preStart(Container container)
       cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
           CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES,
           String.valueOf(containerHardLimit) + "M");
-      cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
-          CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES,
-          String.valueOf(containerSoftLimit) + "M");
-      cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
-          CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS,
-          String.valueOf(swappiness));
+      ContainerTokenIdentifier id = container.getContainerTokenIdentifier();
+      if (id != null && id.getExecutionType() ==
+          ExecutionType.OPPORTUNISTIC) {
+        cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
+            CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES,
+            String.valueOf(OPPORTUNISTIC_SOFT_LIMIT) + "M");
+        cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
+            CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS,
+            String.valueOf(OPPORTUNISTIC_SWAPPINESS));
+      } else {
+        cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
+            CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES,
+            String.valueOf(containerSoftLimit) + "M");
+        cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
+            CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS,
+            String.valueOf(swappiness));
+      }
     } catch (ResourceHandlerException re) {
       cGroupsHandler.deleteCGroup(MEMORY, cgroupId);
       LOG.warn("Could not update cgroup for container", re);
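For an opportunistic container, the updateCGroupParam calls in preStart() roughly correspond to the following writes on a cgroup v1 memory hierarchy. This is a sketch for illustration only: the mount point, the hadoop-yarn hierarchy path, and the container directory below are assumptions, not taken from the patch; the "1024M" hard limit matches the 1024 MB resource used in the new unit test.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Rough illustration of the resulting cgroup writes, not NodeManager code.
public class OpportunisticCGroupWritesSketch {
  public static void main(String[] args) throws IOException {
    // Assumed cgroup v1 mount point and YARN hierarchy; adjust for a real node.
    Path containerCGroup =
        Paths.get("/sys/fs/cgroup/memory/hadoop-yarn/container_01_01");
    // Hard limit still reflects the container's requested memory.
    Files.write(containerCGroup.resolve("memory.limit_in_bytes"),
        "1024M".getBytes(StandardCharsets.UTF_8));
    // Soft limit drops to 0M so these pages are reclaimed first under pressure.
    Files.write(containerCGroup.resolve("memory.soft_limit_in_bytes"),
        "0M".getBytes(StandardCharsets.UTF_8));
    // Swappiness 100: prefer swapping opportunistic containers' pages out.
    Files.write(containerCGroup.resolve("memory.swappiness"),
        "100".getBytes(StandardCharsets.UTF_8));
  }
}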
TestCGroupsMemoryResourceHandlerImpl.java

@@ -20,8 +20,10 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
 import org.junit.Before;
@@ -32,6 +34,9 @@
 
 import static org.mockito.Mockito.*;
 
+/**
+ * Unit test for CGroupsMemoryResourceHandlerImpl.
+ */
 public class TestCGroupsMemoryResourceHandlerImpl {
 
   private CGroupsHandler mockCGroupsHandler;
@@ -60,8 +65,7 @@ public void testBootstrap() throws Exception {
     try {
       cGroupsMemoryResourceHandler.bootstrap(conf);
       Assert.fail("Pmem check should not be allowed to run with cgroups");
-    }
-    catch(ResourceHandlerException re) {
+    } catch(ResourceHandlerException re) {
       // do nothing
     }
     conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
@@ -69,8 +73,7 @@ public void testBootstrap() throws Exception {
     try {
       cGroupsMemoryResourceHandler.bootstrap(conf);
       Assert.fail("Vmem check should not be allowed to run with cgroups");
-    }
-    catch(ResourceHandlerException re) {
+    } catch(ResourceHandlerException re) {
      // do nothing
     }
   }
@@ -84,8 +87,7 @@ public void testSwappinessValues() throws Exception {
     try {
       cGroupsMemoryResourceHandler.bootstrap(conf);
       Assert.fail("Negative values for swappiness should not be allowed.");
-    }
-    catch (ResourceHandlerException re) {
+    } catch (ResourceHandlerException re) {
       // do nothing
     }
     try {
@@ -93,8 +95,7 @@ public void testSwappinessValues() throws Exception {
       cGroupsMemoryResourceHandler.bootstrap(conf);
       Assert.fail("Values greater than 100 for swappiness"
           + " should not be allowed.");
-    }
-    catch (ResourceHandlerException re) {
+    } catch (ResourceHandlerException re) {
       // do nothing
     }
     conf.setInt(YarnConfiguration.NM_MEMORY_RESOURCE_CGROUPS_SWAPPINESS, 60);
@@ -169,4 +170,32 @@ public void testPostComplete() throws Exception {
   public void testTeardown() throws Exception {
     Assert.assertNull(cGroupsMemoryResourceHandler.teardown());
   }
+
+  @Test
+  public void testOpportunistic() throws Exception {
+    Configuration conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
+    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
+
+    cGroupsMemoryResourceHandler.bootstrap(conf);
+    ContainerTokenIdentifier tokenId = mock(ContainerTokenIdentifier.class);
+    when(tokenId.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC);
+    Container container = mock(Container.class);
+    String id = "container_01_01";
+    ContainerId mockContainerId = mock(ContainerId.class);
+    when(mockContainerId.toString()).thenReturn(id);
+    when(container.getContainerId()).thenReturn(mockContainerId);
+    when(container.getContainerTokenIdentifier()).thenReturn(tokenId);
+    when(container.getResource()).thenReturn(Resource.newInstance(1024, 2));
+    cGroupsMemoryResourceHandler.preStart(container);
+    verify(mockCGroupsHandler, times(1))
+        .updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id,
+            CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES, "0M");
+    verify(mockCGroupsHandler, times(1))
+        .updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id,
+            CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS, "100");
+    verify(mockCGroupsHandler, times(1))
+        .updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id,
+            CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, "1024M");
+  }
 }