From b8102dbdf8b4dc2e99bc7c58f4085a7313830a2d Mon Sep 17 00:00:00 2001
From: Mahadev Konar
Date: Fri, 7 Oct 2011 05:27:36 +0000
Subject: [PATCH] MAPREDUCE-2794. [MR-279] Incorrect metrics value for
 AvailableGB per queue per user. (John George via mahadev)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1179936 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-mapreduce-project/CHANGES.txt          |  3 ++
 .../scheduler/capacity/LeafQueue.java         | 23 +++++----
 .../scheduler/capacity/TestLeafQueue.java     | 50 +++++++++++++++++++
 .../scheduler/capacity/TestParentQueue.java   | 11 +++-
 4 files changed, 76 insertions(+), 11 deletions(-)

diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index cdd954d670..b7be593bdf 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1531,6 +1531,9 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-2913. Fixed TestMRJobs.testFailingMapper to assert the correct
     TaskCompletionEventStatus. (Jonathan Eagles via vinodkv)
 
+    MAPREDUCE-2794. [MR-279] Incorrect metrics value for AvailableGB per
+    queue per user. (John George via mahadev)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index db597447a8..7e1c1befc8 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -740,7 +740,7 @@ private synchronized SchedulerApp getApplication(
 
         // Book-keeping
         allocateResource(clusterResource, 
-            application.getUser(), assignedResource);
+            application, assignedResource);
 
         // Reset scheduling opportunities
         application.resetSchedulingOpportunities(priority);
@@ -810,7 +810,7 @@ private synchronized boolean assignToQueue(Resource clusterResource,
   private void setUserResourceLimit(SchedulerApp application, 
       Resource resourceLimit) {
     application.setAvailableResourceLimit(resourceLimit);
-    metrics.setAvailableResourcesToUser(application.getUser(), resourceLimit);
+    metrics.setAvailableResourcesToUser(application.getUser(), application.getHeadroom());
   }
 
   private int roundUp(int memory) {
@@ -1216,7 +1216,7 @@ public void completedContainer(Resource clusterResource,
 
       // Book-keeping
       releaseResource(clusterResource, 
-          application.getUser(), container.getResource());
+          application, container.getResource());
 
       LOG.info("completedContainer" +
           " container=" + container +
@@ -1234,32 +1234,35 @@ public void completedContainer(Resource clusterResource,
   }
 
   synchronized void allocateResource(Resource clusterResource, 
-      String userName, Resource resource) {
+      SchedulerApp application, Resource resource) {
     // Update queue metrics
     Resources.addTo(usedResources, resource);
     updateResource(clusterResource);
     ++numContainers;
 
     // Update user metrics
+    String userName = application.getUser();
     User user = getUser(userName);
     user.assignContainer(resource);
-    
+    metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
     LOG.info(getQueueName() + 
         " used=" + usedResources + " numContainers=" + numContainers + 
         " user=" + userName + " resources=" + user.getConsumedResources());
   }
 
   synchronized void releaseResource(Resource clusterResource, 
-      String userName, Resource resource) {
+      SchedulerApp application, Resource resource) {
     // Update queue metrics
     Resources.subtractFrom(usedResources, resource);
     updateResource(clusterResource);
     --numContainers;
 
     // Update user metrics
+    String userName = application.getUser();
     User user = getUser(userName);
     user.releaseContainer(resource);
-    
+    metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
+
     LOG.info(getQueueName() + 
         " used=" + usedResources + " numContainers=" + numContainers + 
         " user=" + userName + " resources=" + user.getConsumedResources());
@@ -1282,9 +1285,9 @@ private synchronized void updateResource(Resource clusterResource) {
         usedResources.getMemory() / (clusterResource.getMemory() * capacity));
 
     Resource resourceLimit = 
-      Resources.createResource((int)queueLimit);
+      Resources.createResource(roundUp((int)queueLimit));
     metrics.setAvailableResourcesToQueue(
-        Resources.subtractFrom(resourceLimit, usedResources));
+      Resources.subtractFrom(resourceLimit, usedResources));
   }
 
   @Override
@@ -1340,7 +1343,7 @@ public void recoverContainer(Resource clusterResource,
       SchedulerApp application, Container container) {
     // Careful! Locking order is important! 
     synchronized (this) {
-      allocateResource(clusterResource, application.getUser(), container.getResource());
+      allocateResource(clusterResource, application, container.getResource());
     }
     parent.recoverContainer(clusterResource, application, container);
 
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 639daf9e5a..a7dd940de0 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -158,6 +158,52 @@ public Container answer(InvocationOnMock invocation) 
     return queue;
   }
 
+
+  @Test
+  public void testSingleQueueOneUserMetrics() throws Exception {
+
+    // Manipulate queue 'a'
+    LeafQueue a = stubLeafQueue((LeafQueue)queues.get(B));
+
+    // Users
+    final String user_0 = "user_0";
+
+    // Submit applications
+    final ApplicationAttemptId appAttemptId_0 = 
+        TestUtils.getMockApplicationAttemptId(0, 0);
+    SchedulerApp app_0 = 
+        new SchedulerApp(appAttemptId_0, user_0, a, rmContext, null);
+    a.submitApplication(app_0, user_0, B);
+
+    final ApplicationAttemptId appAttemptId_1 = 
+        TestUtils.getMockApplicationAttemptId(1, 0);
+    SchedulerApp app_1 = 
+        new SchedulerApp(appAttemptId_1, user_0, a, rmContext, null);
+    a.submitApplication(app_1, user_0, B);  // same user
+
+
+    // Setup some nodes
+    String host_0 = "host_0";
+    SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
+
+    final int numNodes = 1;
+    Resource clusterResource = Resources.createResource(numNodes * (8*GB));
+    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
+
+    // Setup resource-requests
+    Priority priority = TestUtils.createMockPriority(1);
+    app_0.updateResourceRequests(Collections.singletonList(
+            TestUtils.createResourceRequest(RMNodeImpl.ANY, 1*GB, 3, priority,
+                recordFactory)));
+
+    // Start testing...
+
+    // Only 1 container
+    a.assignContainers(clusterResource, node_0);
+    assertEquals(7, a.getMetrics().getAvailableGB());
+  }
+
+
   @Test
   public void testSingleQueueWithOneUser() throws Exception {
@@ -180,6 +226,7 @@ public void testSingleQueueWithOneUser() throws Exception {
         new SchedulerApp(appAttemptId_1, user_0, a, rmContext, null);
     a.submitApplication(app_1, user_0, A);  // same user
 
+
     // Setup some nodes
     String host_0 = "host_0";
     SchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB);
@@ -207,6 +254,7 @@ public void testSingleQueueWithOneUser() throws Exception {
     assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
     assertEquals(0, a.getMetrics().getReservedGB());
     assertEquals(1, a.getMetrics().getAllocatedGB());
+    assertEquals(0, a.getMetrics().getAvailableGB());
 
     // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
     // you can get one container more than user-limit
@@ -273,6 +321,7 @@ public void testSingleQueueWithOneUser() throws Exception {
     assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
     assertEquals(0, a.getMetrics().getReservedGB());
     assertEquals(0, a.getMetrics().getAllocatedGB());
+    assertEquals(1, a.getMetrics().getAvailableGB());
   }
 
   @Test
@@ -494,6 +543,7 @@ public void testReservation() throws Exception {
     assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
     assertEquals(0, a.getMetrics().getReservedGB());
     assertEquals(1, a.getMetrics().getAllocatedGB());
+    assertEquals(0, a.getMetrics().getAvailableGB());
 
     // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
     // you can get one container more than user-limit
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
index 5a82afa1d5..791d5de1ab 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 
 import org.junit.After;
@@ -81,6 +82,13 @@ private void setupSingleLevelQueues(CapacitySchedulerConfiguration conf) {
     LOG.info("Setup top-level queues a and b");
   }
 
+  private SchedulerApp getMockApplication(int appId, String user) {
+    SchedulerApp application = mock(SchedulerApp.class);
+    doReturn(user).when(application).getUser();
+    doReturn(null).when(application).getHeadroom();
+    return application;
+  }
+
   private void stubQueueAllocation(final CSQueue queue, 
       final Resource clusterResource, final SchedulerNode node, 
       final int allocation) {
@@ -100,7 +108,8 @@ public Resource answer(InvocationOnMock invocation) throws Throwable {
           ((ParentQueue)queue).allocateResource(clusterResource, 
               allocatedResource);
         } else {
-          ((LeafQueue)queue).allocateResource(clusterResource, "", 
+          SchedulerApp app1 = getMockApplication(0, "");
+          ((LeafQueue)queue).allocateResource(clusterResource, app1, 
              allocatedResource);
         }
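
Illustrative note (not part of the patch): the sketch below shows, in simplified standalone Java, the behaviour the change above establishes. The per-user available-resource metric is refreshed from the application's headroom whenever containers are allocated or released, so AvailableGB per queue per user tracks what the user can still receive. The classes, fields, and helper names here (QueueMetricsSketch, AppSketch, limitMB, consumedMB, getHeadroomMB) are hypothetical stand-ins rather than the real YARN types; only setAvailableResourcesToUser and the headroom concept correspond to the code changed by the patch.

import java.util.HashMap;
import java.util.Map;

/** Simplified model of the per-user "available GB" bookkeeping fixed above. */
class QueueMetricsSketch {
  private final Map<String, Integer> availableMBPerUser = new HashMap<>();

  /** Mirrors the idea of QueueMetrics.setAvailableResourcesToUser(user, resource). */
  void setAvailableResourcesToUser(String user, int headroomMB) {
    availableMBPerUser.put(user, headroomMB);
  }

  int getAvailableGB(String user) {
    return availableMBPerUser.getOrDefault(user, 0) / 1024;
  }
}

/** Simplified stand-in for SchedulerApp: headroom = user limit minus current consumption. */
class AppSketch {
  final String user;
  int limitMB;     // resource limit computed for this user
  int consumedMB;  // resources currently held by this user's containers

  AppSketch(String user, int limitMB) { this.user = user; this.limitMB = limitMB; }

  int getHeadroomMB() { return Math.max(0, limitMB - consumedMB); }
}

public class AvailableGBExample {
  public static void main(String[] args) {
    QueueMetricsSketch metrics = new QueueMetricsSketch();
    AppSketch app = new AppSketch("user_0", 8 * 1024); // 8 GB user limit

    // Refresh the metric from headroom, as the patched allocateResource/releaseResource do.
    metrics.setAvailableResourcesToUser(app.user, app.getHeadroomMB());
    System.out.println(metrics.getAvailableGB(app.user)); // 8

    app.consumedMB += 1024; // allocate one 1 GB container
    metrics.setAvailableResourcesToUser(app.user, app.getHeadroomMB());
    System.out.println(metrics.getAvailableGB(app.user)); // 7, matching testSingleQueueOneUserMetrics

    app.consumedMB -= 1024; // release the container
    metrics.setAvailableResourcesToUser(app.user, app.getHeadroomMB());
    System.out.println(metrics.getAvailableGB(app.user)); // 8 again
  }
}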