diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsEntry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsEntry.java
index 702d6f0d1a..905ceb64b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsEntry.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsEntry.java
@@ -44,6 +44,11 @@ public NodeToLabelsEntry(String nodeId, ArrayList<String> labels) {
     this.labels = labels;
   }
 
+  public NodeToLabelsEntry(String nodeId, Collection<String> pLabels) {
+    this.nodeId = nodeId;
+    this.labels.addAll(pLabels);
+  }
+
   public String getNodeId() {
     return nodeId;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
index 8806cbb9ae..d3dd7bab11 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
@@ -147,6 +147,10 @@ public final class RouterMetrics {
   private MutableGaugeInt numRefreshSuperUserGroupsConfigurationFailedRetrieved;
   @Metric("# of refreshUserToGroupsMappings failed to be retrieved")
   private MutableGaugeInt numRefreshUserToGroupsMappingsFailedRetrieved;
+  @Metric("# of replaceLabelsOnNodes failed to be retrieved")
+  private MutableGaugeInt numReplaceLabelsOnNodesFailedRetrieved;
+  @Metric("# of replaceLabelsOnNode failed to be retrieved")
+  private MutableGaugeInt numReplaceLabelsOnNodeFailedRetrieved;
   @Metric("# of addToClusterNodeLabels failed to be retrieved")
   private MutableGaugeInt numAddToClusterNodeLabelsFailedRetrieved;
   @Metric("# of removeFromClusterNodeLabels failed to be retrieved")
@@ -257,6 +261,10 @@
   private MutableRate totalSucceededRefreshSuperUserGroupsConfigurationRetrieved;
   @Metric("Total number of successful Retrieved RefreshUserToGroupsMappings and latency(ms)")
   private MutableRate totalSucceededRefreshUserToGroupsMappingsRetrieved;
+  @Metric("Total number of successful Retrieved ReplaceLabelsOnNodes and latency(ms)")
+  private MutableRate totalSucceededReplaceLabelsOnNodesRetrieved;
+  @Metric("Total number of successful Retrieved ReplaceLabelsOnNode and latency(ms)")
+  private MutableRate totalSucceededReplaceLabelsOnNodeRetrieved;
   @Metric("Total number of successful Retrieved GetSchedulerInfo and latency(ms)")
   private MutableRate totalSucceededGetSchedulerInfoRetrieved;
   @Metric("Total number of successful Retrieved AddToClusterNodeLabels and latency(ms)")
@@ -320,6 +328,8 @@
   private MutableQuantiles getSchedulerInfoRetrievedLatency;
   private MutableQuantiles refreshSuperUserGroupsConfLatency;
   private MutableQuantiles refreshUserToGroupsMappingsLatency;
+  private MutableQuantiles replaceLabelsOnNodesLatency;
+  private MutableQuantiles replaceLabelsOnNodeLatency;
   private MutableQuantiles addToClusterNodeLabelsLatency;
   private MutableQuantiles removeFromClusterNodeLabelsLatency;
@@ -514,6 +524,12 @@ private RouterMetrics() {
     refreshUserToGroupsMappingsLatency = registry.newQuantiles("refreshUserToGroupsMappingsLatency",
         "latency of refresh user to groups mappings timeouts", "ops", "latency", 10);
+    replaceLabelsOnNodesLatency = registry.newQuantiles("replaceLabelsOnNodesLatency",
+        "latency of replace labels on nodes timeouts", "ops", "latency", 10);
+
+    replaceLabelsOnNodeLatency = registry.newQuantiles("replaceLabelsOnNodeLatency",
+        "latency of replace labels on node timeouts", "ops", "latency", 10);
+
     addToClusterNodeLabelsLatency = registry.newQuantiles("addToClusterNodeLabelsLatency",
         "latency of add cluster nodelabels timeouts", "ops", "latency", 10);
@@ -810,6 +826,16 @@ public long getNumSucceededRefreshSuperUserGroupsConfigurationRetrieved() {
     return totalSucceededRefreshSuperUserGroupsConfigurationRetrieved.lastStat().numSamples();
   }
 
+  @VisibleForTesting
+  public long getNumSucceededReplaceLabelsOnNodesRetrieved() {
+    return totalSucceededReplaceLabelsOnNodesRetrieved.lastStat().numSamples();
+  }
+
+  @VisibleForTesting
+  public long getNumSucceededReplaceLabelsOnNodeRetrieved() {
+    return totalSucceededReplaceLabelsOnNodeRetrieved.lastStat().numSamples();
+  }
+
   @VisibleForTesting
   public double getLatencySucceededAppsCreated() {
     return totalSucceededAppsCreated.lastStat().mean();
@@ -1080,6 +1106,16 @@ public double getLatencySucceededRefreshSuperUserGroupsConfigurationRetrieved()
     return totalSucceededRefreshSuperUserGroupsConfigurationRetrieved.lastStat().mean();
   }
 
+  @VisibleForTesting
+  public double getLatencySucceededReplaceLabelsOnNodesRetrieved() {
+    return totalSucceededReplaceLabelsOnNodesRetrieved.lastStat().mean();
+  }
+
+  @VisibleForTesting
+  public double getLatencySucceededReplaceLabelsOnNodeRetrieved() {
+    return totalSucceededReplaceLabelsOnNodeRetrieved.lastStat().mean();
+  }
+
   @VisibleForTesting
   public int getAppsFailedCreated() {
     return numAppsFailedCreated.value();
@@ -1286,6 +1322,14 @@ public int getNumRefreshUserToGroupsMappingsFailedRetrieved() {
     return numRefreshUserToGroupsMappingsFailedRetrieved.value();
   }
 
+  public int getNumReplaceLabelsOnNodesFailedRetrieved() {
+    return numReplaceLabelsOnNodesFailedRetrieved.value();
+  }
+
+  public int getNumReplaceLabelsOnNodeFailedRetrieved() {
+    return numReplaceLabelsOnNodeFailedRetrieved.value();
+  }
+
   public int getNumAddToClusterNodeLabelsFailedRetrieved() {
     return numAddToClusterNodeLabelsFailedRetrieved.value();
   }
@@ -1597,6 +1641,16 @@ public void succeededRefreshUserToGroupsMappingsRetrieved(long duration) {
     refreshUserToGroupsMappingsLatency.add(duration);
   }
 
+  public void succeededReplaceLabelsOnNodesRetrieved(long duration) {
+    totalSucceededReplaceLabelsOnNodesRetrieved.add(duration);
+    replaceLabelsOnNodesLatency.add(duration);
+  }
+
+  public void succeededReplaceLabelsOnNodeRetrieved(long duration) {
+    totalSucceededReplaceLabelsOnNodeRetrieved.add(duration);
+    replaceLabelsOnNodeLatency.add(duration);
+  }
+
   public void incrAppsFailedCreated() {
     numAppsFailedCreated.incr();
   }
@@ -1801,6 +1855,14 @@ public void incrCancelDelegationTokenFailedRetrieved() {
     numCancelDelegationTokenFailedRetrieved.incr();
   }
 
+  public void incrReplaceLabelsOnNodesFailedRetrieved() {
+    numReplaceLabelsOnNodesFailedRetrieved.incr();
+  }
+
+  public void incrReplaceLabelsOnNodeFailedRetrieved() {
+    numReplaceLabelsOnNodeFailedRetrieved.incr();
+  }
+
   public void incrDumpSchedulerLogsFailedRetrieved() {
     numDumpSchedulerLogsFailedRetrieved.incr();
   }
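For review context, each new operation gets the usual RouterMetrics trio: a MutableGaugeInt failure counter, a MutableRate success counter with latency, and a MutableQuantiles latency distribution, plus the matching incr/succeeded accessors. The condensed sketch below only illustrates how these hooks are meant to be driven; the wrapper class, the clock field, and the doReplaceLabelsOnNodes helper are illustrative stand-ins for the FederationInterceptorREST logic added later in this patch, not part of the change itself.

import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.core.Response;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
import org.apache.hadoop.yarn.server.router.RouterMetrics;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.MonotonicClock;

// Illustrative wrapper only; it is not part of the patch.
public class ReplaceLabelsMetricsSketch {

  private final RouterMetrics routerMetrics = RouterMetrics.getMetrics();
  private final Clock clock = new MonotonicClock();

  public Response timedReplaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels,
      HttpServletRequest hsr) throws Exception {
    long startTime = clock.getTime();
    try {
      // Stand-in for the per-subcluster fan-out implemented in FederationInterceptorREST.
      Response response = doReplaceLabelsOnNodes(newNodeToLabels, hsr);
      // Success path: feeds totalSucceededReplaceLabelsOnNodesRetrieved (MutableRate)
      // and replaceLabelsOnNodesLatency (MutableQuantiles).
      routerMetrics.succeededReplaceLabelsOnNodesRetrieved(clock.getTime() - startTime);
      return response;
    } catch (Exception e) {
      // Failure path: bumps numReplaceLabelsOnNodesFailedRetrieved (MutableGaugeInt).
      routerMetrics.incrReplaceLabelsOnNodesFailedRetrieved();
      throw e;
    }
  }

  private Response doReplaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels,
      HttpServletRequest hsr) {
    // Placeholder body; the real logic lives in FederationInterceptorREST below.
    return Response.ok().build();
  }
}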
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
index 1c7af64585..94b4b1ca25 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
@@ -118,6 +118,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDefinitionInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntry;
 import org.apache.hadoop.yarn.server.router.RouterMetrics;
 import org.apache.hadoop.yarn.server.router.RouterServerUtil;
 import org.apache.hadoop.yarn.server.router.clientrm.ClientMethod;
@@ -1539,16 +1540,130 @@ public LabelsToNodesInfo getLabelsToNodes(Set<String> labels)
         "getLabelsToNodes by labels = %s Failed.", StringUtils.join(labels, ","));
   }
 
+  /**
+   * This method replaces all the node labels for specific nodes, and it is
+   * reachable by using {@link RMWSConsts#REPLACE_NODE_TO_LABELS}.
+   *
+   * @see ResourceManagerAdministrationProtocol#replaceLabelsOnNode
+   * @param newNodeToLabels the list of new labels. It is a content param.
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws IOException if an exception happened
+   */
   @Override
   public Response replaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels,
       HttpServletRequest hsr) throws IOException {
-    throw new NotImplementedException("Code is not implemented");
+
+    // Step1. Check the parameters to ensure that the parameters are not empty.
+    if (newNodeToLabels == null) {
+      routerMetrics.incrReplaceLabelsOnNodesFailedRetrieved();
+      throw new IllegalArgumentException("Parameter error, newNodeToLabels must not be empty.");
+    }
+    List<NodeToLabelsEntry> nodeToLabelsEntries = newNodeToLabels.getNodeToLabels();
+    if (CollectionUtils.isEmpty(nodeToLabelsEntries)) {
+      routerMetrics.incrReplaceLabelsOnNodesFailedRetrieved();
+      throw new IllegalArgumentException("Parameter error, " +
+          "nodeToLabelsEntries must not be empty.");
+    }
+
+    try {
+
+      // Step2. We map the NodeId and NodeToLabelsEntry in the request.
+      Map<String, NodeToLabelsEntry> nodeIdToLabels = new HashMap<>();
+      newNodeToLabels.getNodeToLabels().stream().forEach(nodeIdToLabel -> {
+        String nodeId = nodeIdToLabel.getNodeId();
+        nodeIdToLabels.put(nodeId, nodeIdToLabel);
+      });
+
+      // Step3. We map SubCluster with NodeToLabelsEntryList
+      Map<SubClusterInfo, NodeToLabelsEntryList> subClusterToNodeToLabelsEntryList =
+          new HashMap<>();
+      nodeIdToLabels.forEach((nodeId, nodeToLabelsEntry) -> {
+        SubClusterInfo subClusterInfo = getNodeSubcluster(nodeId);
+        NodeToLabelsEntryList nodeToLabelsEntryList = subClusterToNodeToLabelsEntryList.
+            getOrDefault(subClusterInfo, new NodeToLabelsEntryList());
+        nodeToLabelsEntryList.getNodeToLabels().add(nodeToLabelsEntry);
+        subClusterToNodeToLabelsEntryList.put(subClusterInfo, nodeToLabelsEntryList);
+      });
+
+      // Step4. Traverse the subCluster and call the replaceLabelsOnNodes interface.
+      long startTime = clock.getTime();
+      final HttpServletRequest hsrCopy = clone(hsr);
+      StringBuilder builder = new StringBuilder();
+      subClusterToNodeToLabelsEntryList.forEach((subCluster, nodeToLabelsEntryList) -> {
+        SubClusterId subClusterId = subCluster.getSubClusterId();
+        try {
+          DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorForSubCluster(
+              subCluster.getSubClusterId(), subCluster.getRMWebServiceAddress());
+          interceptor.replaceLabelsOnNodes(nodeToLabelsEntryList, hsrCopy);
+          builder.append("subCluster-").append(subClusterId.getId()).append(":Success,");
+        } catch (Exception e) {
+          LOG.error("replaceLabelsOnNodes Failed. subClusterId = {}.", subClusterId, e);
+          builder.append("subCluster-").append(subClusterId.getId()).append(":Failed,");
+        }
+      });
+      long stopTime = clock.getTime();
+      routerMetrics.succeededReplaceLabelsOnNodesRetrieved(stopTime - startTime);
+
+      // Step5. return call result.
+      return Response.status(Status.OK).entity(builder.toString()).build();
+    } catch (NotFoundException e) {
+      routerMetrics.incrReplaceLabelsOnNodesFailedRetrieved();
+      throw e;
+    } catch (Exception e) {
+      routerMetrics.incrReplaceLabelsOnNodesFailedRetrieved();
+      throw e;
+    }
   }
 
+  /**
+   * This method replaces all the node labels for specific node, and it is
+   * reachable by using {@link RMWSConsts#NODES_NODEID_REPLACE_LABELS}.
+   *
+   * @see ResourceManagerAdministrationProtocol#replaceLabelsOnNode
+   * @param newNodeLabelsName the list of new labels. It is a QueryParam.
+   * @param hsr the servlet request
+   * @param nodeId the node we want to replace the node labels. It is a
+   *               PathParam.
+   * @return Response containing the status code
+   * @throws Exception if an exception happened
+   */
   @Override
   public Response replaceLabelsOnNode(Set<String> newNodeLabelsName,
       HttpServletRequest hsr, String nodeId) throws Exception {
-    throw new NotImplementedException("Code is not implemented");
+
+    // Step1. Check the parameters to ensure that the parameters are not empty.
+    if (StringUtils.isBlank(nodeId)) {
+      routerMetrics.incrReplaceLabelsOnNodeFailedRetrieved();
+      throw new IllegalArgumentException("Parameter error, nodeId must not be null or empty.");
+    }
+    if (CollectionUtils.isEmpty(newNodeLabelsName)) {
+      routerMetrics.incrReplaceLabelsOnNodeFailedRetrieved();
+      throw new IllegalArgumentException("Parameter error, newNodeLabelsName must not be empty.");
+    }
+
+    try {
+      // Step2. We find the subCluster according to the nodeId,
+      // and then call the replaceLabelsOnNode of the subCluster.
+      long startTime = clock.getTime();
+      SubClusterInfo subClusterInfo = getNodeSubcluster(nodeId);
+      DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorForSubCluster(
+          subClusterInfo.getSubClusterId(), subClusterInfo.getRMWebServiceAddress());
+      final HttpServletRequest hsrCopy = clone(hsr);
+      interceptor.replaceLabelsOnNode(newNodeLabelsName, hsrCopy, nodeId);
+
+      // Step3. Return the response result.
+      long stopTime = clock.getTime();
+      routerMetrics.succeededReplaceLabelsOnNodeRetrieved(stopTime - startTime);
+      String msg = "subCluster#" + subClusterInfo.getSubClusterId().getId() + ":Success;";
+      return Response.status(Status.OK).entity(msg).build();
+    } catch (NotFoundException e) {
+      routerMetrics.incrReplaceLabelsOnNodeFailedRetrieved();
+      throw e;
+    } catch (Exception e) {
+      routerMetrics.incrReplaceLabelsOnNodeFailedRetrieved();
+      throw e;
+    }
   }
 
   @Override
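For reference, the payload these two new Router methods consume can be built with the existing webapp DAO classes. The helper below is only a sketch: the interceptor instance, node IDs, and label names are made up, but the entity formats in the comments match the strings assembled above.

import java.util.Collections;
import javax.ws.rs.core.Response;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntry;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
import org.apache.hadoop.yarn.server.router.webapp.FederationInterceptorREST;

// Illustrative helper only; not part of the patch.
public final class ReplaceLabelsOnNodesExample {

  private ReplaceLabelsOnNodesExample() {
  }

  public static String replaceLabels(FederationInterceptorREST interceptor) throws Exception {
    // Build the content param for replaceLabelsOnNodes: one entry per node,
    // using the Collection-based NodeToLabelsEntry constructor added by this patch.
    NodeToLabelsEntryList payload = new NodeToLabelsEntryList();
    payload.getNodeToLabels().add(
        new NodeToLabelsEntry("node1:1", Collections.singletonList("gpu")));
    payload.getNodeToLabels().add(
        new NodeToLabelsEntry("node2:2", Collections.singletonList("fpga")));

    // The response entity is the per-subcluster status string,
    // e.g. "subCluster-1:Success,subCluster-2:Success,".
    Response response = interceptor.replaceLabelsOnNodes(payload, null);

    // A single node goes through the nodeId variant instead, which returns
    // "subCluster#<id>:Success;" for the owning subcluster.
    interceptor.replaceLabelsOnNode(Collections.singleton("gpu"), null, "node1:1");

    return String.valueOf(response.getEntity());
  }
}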
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java
index 3e45162796..db0b683760 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java
@@ -534,6 +534,16 @@ public void getRenewDelegationTokenFailed() {
       metrics.incrRenewDelegationTokenFailedRetrieved();
     }
 
+    public void getReplaceLabelsOnNodesFailed() {
+      LOG.info("Mocked: failed replaceLabelsOnNodes call");
+      metrics.incrReplaceLabelsOnNodesFailedRetrieved();
+    }
+
+    public void getReplaceLabelsOnNodeFailed() {
+      LOG.info("Mocked: failed ReplaceLabelOnNode call");
+      metrics.incrReplaceLabelsOnNodeFailedRetrieved();
+    }
+
     public void getDumpSchedulerLogsFailed() {
       LOG.info("Mocked: failed DumpSchedulerLogs call");
       metrics.incrDumpSchedulerLogsFailedRetrieved();
@@ -779,6 +789,16 @@ public void getRenewDelegationTokenRetrieved(long duration) {
       metrics.succeededRenewDelegationTokenRetrieved(duration);
     }
 
+    public void getNumSucceededReplaceLabelsOnNodesRetrieved(long duration) {
+      LOG.info("Mocked: successful ReplaceLabelsOnNodes call with duration {}", duration);
+      metrics.succeededReplaceLabelsOnNodesRetrieved(duration);
+    }
+
+    public void getNumSucceededReplaceLabelsOnNodeRetrieved(long duration) {
+      LOG.info("Mocked: successful ReplaceLabelOnNode call with duration {}", duration);
+      metrics.succeededReplaceLabelsOnNodeRetrieved(duration);
+    }
+
     public void getDumpSchedulerLogsRetrieved(long duration) {
       LOG.info("Mocked: successful DumpSchedulerLogs call with duration {}", duration);
       metrics.succeededDumpSchedulerLogsRetrieved(duration);
@@ -1633,6 +1653,52 @@ public void testRenewDelegationTokenRetrievedFailed() {
         metrics.getRenewDelegationTokenFailedRetrieved());
   }
 
+  @Test
+  public void testReplaceLabelsOnNodesRetrieved() {
+    long totalGoodBefore = metrics.getNumSucceededReplaceLabelsOnNodesRetrieved();
+    goodSubCluster.getNumSucceededReplaceLabelsOnNodesRetrieved(150);
+    Assert.assertEquals(totalGoodBefore + 1,
+        metrics.getNumSucceededReplaceLabelsOnNodesRetrieved());
+    Assert.assertEquals(150,
+        metrics.getLatencySucceededReplaceLabelsOnNodesRetrieved(), ASSERT_DOUBLE_DELTA);
+    goodSubCluster.getNumSucceededReplaceLabelsOnNodesRetrieved(300);
+    Assert.assertEquals(totalGoodBefore + 2,
+        metrics.getNumSucceededReplaceLabelsOnNodesRetrieved());
+    Assert.assertEquals(225,
+        metrics.getLatencySucceededReplaceLabelsOnNodesRetrieved(), ASSERT_DOUBLE_DELTA);
+  }
+
+  @Test
+  public void testReplaceLabelsOnNodesRetrievedFailed() {
+    long totalBadBefore = metrics.getNumReplaceLabelsOnNodesFailedRetrieved();
+    badSubCluster.getReplaceLabelsOnNodesFailed();
+    Assert.assertEquals(totalBadBefore + 1,
+        metrics.getNumReplaceLabelsOnNodesFailedRetrieved());
+  }
+
+  @Test
+  public void testReplaceLabelsOnNodeRetrieved() {
+    long totalGoodBefore = metrics.getNumSucceededReplaceLabelsOnNodeRetrieved();
+    goodSubCluster.getNumSucceededReplaceLabelsOnNodeRetrieved(150);
+    Assert.assertEquals(totalGoodBefore + 1,
+        metrics.getNumSucceededReplaceLabelsOnNodeRetrieved());
+    Assert.assertEquals(150,
+        metrics.getLatencySucceededReplaceLabelsOnNodeRetrieved(), ASSERT_DOUBLE_DELTA);
+    goodSubCluster.getNumSucceededReplaceLabelsOnNodeRetrieved(300);
+    Assert.assertEquals(totalGoodBefore + 2,
+        metrics.getNumSucceededReplaceLabelsOnNodeRetrieved());
+    Assert.assertEquals(225,
+        metrics.getLatencySucceededReplaceLabelsOnNodeRetrieved(), ASSERT_DOUBLE_DELTA);
+  }
+
+  @Test
+  public void testReplaceLabelOnNodeRetrievedFailed() {
+    long totalBadBefore = metrics.getNumReplaceLabelsOnNodeFailedRetrieved();
+    badSubCluster.getReplaceLabelsOnNodeFailed();
+    Assert.assertEquals(totalBadBefore + 1,
+        metrics.getNumReplaceLabelsOnNodeFailedRetrieved());
+  }
+
   @Test
   public void testDumpSchedulerLogsRetrieved() {
     long totalGoodBefore = metrics.getNumSucceededDumpSchedulerLogsRetrieved();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java
index 9d3223f909..653224a7d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java
@@ -137,6 +137,7 @@
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationRequestsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateResponseInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteResponseInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.BulkActivitiesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
@@ -305,9 +306,14 @@ public NodeInfo getNode(String nodeId) {
     if (!isRunning) {
       throw new RuntimeException("RM is stopped");
     }
-    NodeInfo node = new NodeInfo();
-    node.setId(nodeId);
-    node.setLastHealthUpdate(Integer.valueOf(getSubClusterId().getId()));
+    NodeInfo node = null;
+    SubClusterId subCluster = getSubClusterId();
+    String subClusterId = subCluster.getId();
+    if (nodeId.contains(subClusterId) || nodeId.contains("test")) {
+      node = new NodeInfo();
+      node.setId(nodeId);
+      node.setLastHealthUpdate(Integer.valueOf(getSubClusterId().getId()));
+    }
     return node;
   }
 
@@ -1236,7 +1242,17 @@ public String dumpSchedulerLogs(String time, HttpServletRequest hsr) throws IOEx
     return webSvc.dumpSchedulerLogs(time, hsr);
   }
 
+  public Response replaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels,
+      HttpServletRequest hsr) throws IOException {
+    return super.replaceLabelsOnNodes(newNodeToLabels, hsr);
+  }
+
+  @Override
+  public Response replaceLabelsOnNode(Set<String> newNodeLabelsName,
+      HttpServletRequest hsr, String nodeId) throws Exception {
+    return super.replaceLabelsOnNode(newNodeLabelsName, hsr, nodeId);
+  }
+
   public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, String groupBy) {
     if (!EnumUtils.isValidEnum(RMWSConsts.ActivitiesGroupBy.class, groupBy.toUpperCase())) {
       String errMessage = "Got invalid groupBy: " + groupBy + ", valid groupBy types: "
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java
index 5ec53a63e2..a2831657dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationInterceptorREST.java
@@ -99,6 +99,8 @@
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.RMQueueAclInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntry;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.NodeIDsInfo;
 import org.apache.hadoop.yarn.server.router.clientrm.RouterClientRMService;
 import org.apache.hadoop.yarn.server.router.clientrm.RouterClientRMService.RequestInterceptorChainWrapper;
@@ -1786,6 +1788,102 @@ public void testCancelDelegationToken() throws Exception {
     Assert.assertEquals(response.getStatus(), Status.OK.getStatusCode());
   }
 
+  @Test
+  public void testReplaceLabelsOnNodes() throws Exception {
+    // subCluster0 -> node0:0 -> label:NodeLabel0
+    // subCluster1 -> node1:1 -> label:NodeLabel1
+    // subCluster2 -> node2:2 -> label:NodeLabel2
+    // subCluster3 -> node3:3 -> label:NodeLabel3
+    NodeToLabelsEntryList nodeToLabelsEntryList = new NodeToLabelsEntryList();
+    for (int i = 0; i < NUM_SUBCLUSTER; i++) {
+      // labels
+      List<String> labels = new ArrayList<>();
+      labels.add("NodeLabel" + i);
+      // nodes
+      String nodeId = "node" + i + ":" + i;
+      NodeToLabelsEntry nodeToLabelsEntry = new NodeToLabelsEntry(nodeId, labels);
+      List<NodeToLabelsEntry> nodeToLabelsEntries = nodeToLabelsEntryList.getNodeToLabels();
+      nodeToLabelsEntries.add(nodeToLabelsEntry);
+    }
+
+    // one of the results:
+    // subCluster-0:Success,subCluster-1:Success,subCluster-3:Success,subCluster-2:Success,
+    // We can't confirm the complete return order.
+    Response response = interceptor.replaceLabelsOnNodes(nodeToLabelsEntryList, null);
+    Assert.assertNotNull(response);
+    Assert.assertEquals(200, response.getStatus());
+
+    Object entityObject = response.getEntity();
+    Assert.assertNotNull(entityObject);
+
+    String entityValue = String.valueOf(entityObject);
+    String[] entities = entityValue.split(",");
+    Assert.assertNotNull(entities);
+    Assert.assertEquals(4, entities.length);
+    String expectValue =
+        "subCluster-0:Success,subCluster-1:Success,subCluster-2:Success,subCluster-3:Success,";
+    for (String entity : entities) {
+      Assert.assertTrue(expectValue.contains(entity));
+    }
+  }
+
+  @Test
+  public void testReplaceLabelsOnNodesError() throws Exception {
+    // newNodeToLabels is null
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "Parameter error, newNodeToLabels must not be empty.",
+        () -> interceptor.replaceLabelsOnNodes(null, null));
+
+    // nodeToLabelsEntryList is Empty
+    NodeToLabelsEntryList nodeToLabelsEntryList = new NodeToLabelsEntryList();
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "Parameter error, nodeToLabelsEntries must not be empty.",
+        () -> interceptor.replaceLabelsOnNodes(nodeToLabelsEntryList, null));
+  }
+
+  @Test
+  public void testReplaceLabelsOnNode() throws Exception {
+    // subCluster3 -> node3:3 -> label:NodeLabel3
+    String nodeId = "node3:3";
+    Set<String> labels = Collections.singleton("NodeLabel3");
+
+    // We expect the following result: subCluster#3:Success;
+    String expectValue = "subCluster#3:Success;";
+    Response response = interceptor.replaceLabelsOnNode(labels, null, nodeId);
+    Assert.assertNotNull(response);
+    Assert.assertEquals(200, response.getStatus());
+
+    Object entityObject = response.getEntity();
+    Assert.assertNotNull(entityObject);
+
+    String entityValue = String.valueOf(entityObject);
+    Assert.assertNotNull(entityValue);
+    Assert.assertEquals(expectValue, entityValue);
+  }
+
+  @Test
+  public void testReplaceLabelsOnNodeError() throws Exception {
+    // newNodeToLabels is null
+    String nodeId = "node3:3";
+    Set<String> labels = Collections.singleton("NodeLabel3");
+    Set<String> labelsEmpty = new HashSet<>();
+
+    // nodeId is null
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "Parameter error, nodeId must not be null or empty.",
+        () -> interceptor.replaceLabelsOnNode(labels, null, null));
+
+    // labels is null
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "Parameter error, newNodeLabelsName must not be empty.",
+        () -> interceptor.replaceLabelsOnNode(null, null, nodeId));
+
+    // labels is empty
+    LambdaTestUtils.intercept(IllegalArgumentException.class,
+        "Parameter error, newNodeLabelsName must not be empty.",
+        () -> interceptor.replaceLabelsOnNode(labelsEmpty, null, nodeId));
+  }
+
   @Test
   public void testDumpSchedulerLogs() throws Exception {
     HttpServletRequest mockHsr = mockHttpServletRequestByUserName("admin");