diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
index 8db090062e..4d93b11a04 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
@@ -91,13 +91,19 @@
${project.artifactId}-${project.version}.jar
${project.artifactId}-${project.version}-tests.jar
- ${project.artifactId}-${project.version}-sources.jar
- ${project.artifactId}-${project.version}-test-sources.jar
hadoop-tools-dist-*.jar
+
+ ${project.build.directory}
+ /share/hadoop/${hadoop.component}/sources
+
+ ${project.artifactId}-${project.version}-sources.jar
+ ${project.artifactId}-${project.version}-test-sources.jar
+
+
${basedir}/dev-support/jdiff
/share/hadoop/${hadoop.component}/jdiff
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 87cc2496c4..bea151a7c5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -146,8 +146,9 @@ Trunk (Unreleased)
HADOOP-9162. Add utility to check native library availability.
(Binglin Chang via suresh)
- HADOOP-8924. Add maven plugin alternative to shell script to save
- package-info.java. (Chris Nauroth via suresh)
+ HADOOP-9277. Improve javadoc for FileContext. (Andrew Wang via suresh)
+
+ HADOOP-9218 Document the Rpc-wrappers used internally (sanjay Radia)
BUG FIXES
@@ -319,24 +320,60 @@ Trunk (Unreleased)
HADOOP-9202. test-patch.sh fails during mvn eclipse:eclipse if patch adds
a new module to the build (Chris Nauroth via bobby)
- HADOOP-9245. mvn clean without running mvn install before fails.
- (Karthik Kambatla via suresh)
-
HADOOP-9249. hadoop-maven-plugins version-info goal causes build failure
when running with Clover. (Chris Nauroth via suresh)
+ HADOOP-9264. Port change to use Java untar API on Windows from
+ branch-1-win to trunk. (Chris Nauroth via suresh)
+
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
HADOOP-8589 ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
+ HADOOP-9190. packaging docs is broken. (Andy Isaacson via atm)
+
+Release 2.0.4-beta - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ HADOOP-9283. Add support for running the Hadoop client on AIX. (atm)
+
+ IMPROVEMENTS
+
+ HADOOP-9253. Capture ulimit info in the logs at service start time.
+ (Arpit Gupta via suresh)
+
+ HADOOP-8924. Add maven plugin alternative to shell script to save
+ package-info.java. (Chris Nauroth via suresh)
+
+ HADOOP-9117. replace protoc ant plugin exec with a maven plugin. (tucu)
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
+ HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh)
+
+ HADOOP-9305. Add support for running the Hadoop client on 64-bit AIX. (atm)
+
+ HADOOP-9245. mvn clean without running mvn install before fails.
+ (Karthik Kambatla via suresh)
+
HADOOP-9246 Execution phase for hadoop-maven-plugin should be
process-resources (Karthik Kambatla and Chris Nauroth via jlowe)
- HADOOP-9190. packaging docs is broken. (Andy Isaacson via atm)
+ HADOOP-9297. remove old record IO generation and tests. (tucu)
-Release 2.0.3-alpha - Unreleased
+ HADOOP-9154. SortedMapWritable#putAll() doesn't add key/value classes to
+ the map. (Karthik Kambatla via tomwhite)
+
+ HADOOP-9304. remove addition of avro genreated-sources dirs to build. (tucu)
+
+Release 2.0.3-alpha - 2013-02-06
INCOMPATIBLE CHANGES
@@ -464,6 +501,9 @@ Release 2.0.3-alpha - Unreleased
HADOOP-9231. Parametrize staging URL for the uniformity of
distributionManagement. (Konstantin Boudnik via suresh)
+ HADOOP-9276. Allow BoundedByteArrayOutputStream to be resettable.
+ (Arun Murthy via hitesh)
+
OPTIMIZATIONS
HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@@ -588,6 +628,24 @@ Release 2.0.3-alpha - Unreleased
HADOOP-9221. Convert remaining xdocs to APT. (Andy Isaacson via atm)
+ HADOOP-8981. TestMetricsSystemImpl fails on Windows. (Xuan Gong via suresh)
+
+ HADOOP-9124. SortedMapWritable violates contract of Map interface for
+ equals() and hashCode(). (Surenkumar Nihalani via tomwhite)
+
+ HADOOP-9252. In StringUtils, humanReadableInt(..) has a race condition and
+ the synchronization of limitDecimalTo2(double) can be avoided. (szetszwo)
+
+ HADOOP-9260. Hadoop version may be not correct when starting name node or
+ data node. (Chris Nauroth via jlowe)
+
+ HADOOP-9278. Fix the file handle leak in HarMetaData.parseMetaData() in
+ HarFileSystem. (Chris Nauroth via szetszwo)
+
+ HADOOP-9289. FsShell rm -f fails for non-matching globs. (Daryn Sharp via
+ suresh)
+
+
Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES
@@ -1289,10 +1347,19 @@ Release 0.23.7 - UNRELEASED
HADOOP-8849. FileUtil#fullyDelete should grant the target directories +rwx
permissions (Ivan A. Veselovsky via bobby)
+ HADOOP-9067. provide test for LocalFileSystem.reportChecksumFailure
+ (Ivan A. Veselovsky via bobby)
+
OPTIMIZATIONS
BUG FIXES
+ HADOOP-9302. HDFS docs not linked from top level (Andy Isaacson via
+ tgraves)
+
+ HADOOP-9303. command manual dfsadmin missing entry for restoreFailedStorage
+ option (Andy Isaacson via tgraves)
+
Release 0.23.6 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index e5f52ba303..a11383cd03 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -286,6 +286,10 @@
+
+
+
+
@@ -244,4 +249,11 @@
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index f5eb5a2cb3..27a2327b1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -35,66 +35,33 @@
- maven-antrun-plugin
+ org.apache.hadoop
+ hadoop-maven-plugins
- create-protobuf-generated-sources-directory
- initialize
-
-
-
-
-
-
- run
-
-
-
-
-
-
- org.codehaus.mojo
- exec-maven-plugin
-
-
- generate-sources
- generate-sources
-
- protoc
-
- -I../../../hadoop-common-project/hadoop-common/src/main/proto/
- -Isrc/main/proto/
- --java_out=target/generated-sources/proto
- src/main/proto/yarn_protos.proto
- src/main/proto/yarn_service_protos.proto
- src/main/proto/AM_RM_protocol.proto
- src/main/proto/client_RM_protocol.proto
- src/main/proto/container_manager.proto
- src/main/proto/yarn_server_resourcemanager_service_protos.proto
- src/main/proto/RMAdminProtocol.proto
-
-
-
- exec
-
-
-
-
-
-
- org.codehaus.mojo
- build-helper-maven-plugin
-
-
- add-source
+ compile-protoc
generate-sources
- add-source
+ protoc
-
- target/generated-sources/proto
-
+
+ ${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto
+ ${basedir}/src/main/proto
+
+
+ ${basedir}/src/main/proto
+
+ yarn_protos.proto
+ yarn_service_protos.proto
+ AM_RM_protocol.proto
+ client_RM_protocol.proto
+ container_manager.proto
+ yarn_server_resourcemanager_service_protos.proto
+ RMAdminProtocol.proto
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
index f3834a4db6..ba064c73f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
@@ -166,6 +166,7 @@ public class ResourceRequestPBImpl extends ResourceRequest {
@Override
public String toString() {
return "{Priority: " + getPriority() + ", Capability: " + getCapability()
- + "}";
+ + ", # Containers: " + getNumContainers()
+ + ", Location: " + getHostName() + "}";
}
}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/dev-support/findbugs-exclude.xml
deleted file mode 100644
index 0e037a2ad0..0000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/dev-support/findbugs-exclude.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-
-
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index 97ff1f7b5b..b2a59b9346 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -24,6 +24,11 @@
3.0.0-SNAPSHOT
hadoop-yarn-client
+
+
+ ${project.parent.basedir}
+
+
org.apache.hadoop
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/AMRMClientImpl.java
index 15c250e8d5..42b5adbbbf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/AMRMClientImpl.java
@@ -147,7 +147,9 @@ public class AMRMClientImpl extends AbstractService implements AMRMClient {
@Override
public synchronized void stop() {
- RPC.stopProxy(this.rmClient);
+ if (this.rmClient != null) {
+ RPC.stopProxy(this.rmClient);
+ }
super.stop();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/YarnClientImpl.java
index 14994f97a7..eb84b31c79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/YarnClientImpl.java
@@ -25,13 +25,11 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -47,8 +45,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -199,30 +195,6 @@ public class YarnClientImpl extends AbstractService implements YarnClient {
}
- // Not part of YarnClient itself. Placed in YarnClientImpl while renew/cancel
- // are part of ClientRMProtocol.
- @Private
- public long renewRMDelegationToken(DelegationToken rmToken)
- throws YarnRemoteException {
- RenewDelegationTokenRequest request = Records
- .newRecord(RenewDelegationTokenRequest.class);
- request.setDelegationToken(rmToken);
- RenewDelegationTokenResponse response = rmClient
- .renewDelegationToken(request);
- return response.getNextExpirationTime();
- }
-
- // Not part of YarnClient itself. Placed in YarnClientImpl while renew/cancel
- // are part of ClietnRMProtocol
- @Private
- public void cancelRMDelegationToken(DelegationToken rmToken)
- throws YarnRemoteException {
- CancelDelegationTokenRequest request = Records
- .newRecord(CancelDelegationTokenRequest.class);
- request.setDelegationToken(rmToken);
- rmClient.cancelDelegationToken(request);
- }
-
private GetQueueInfoRequest
getQueueInfoRequest(String queueName, boolean includeApplications,
boolean includeChildQueues, boolean recursive) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/security/RMDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/security/RMDelegationTokenRenewer.java
deleted file mode 100644
index 3f1caeec18..0000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/security/RMDelegationTokenRenewer.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.yarn.security;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenRenewer;
-import org.apache.hadoop.yarn.api.records.DelegationToken;
-import org.apache.hadoop.yarn.client.YarnClientImpl;
-import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
-import org.apache.hadoop.yarn.util.BuilderUtils;
-
-public class RMDelegationTokenRenewer extends TokenRenewer {
-
- @Override
- public boolean handleKind(Text kind) {
- return RMDelegationTokenIdentifier.KIND_NAME.equals(kind);
- }
-
- @Override
- public boolean isManaged(Token<?> token) throws IOException {
- return true;
- }
-
- @Override
- public long renew(Token<?> token, Configuration conf) throws IOException,
- InterruptedException {
- YarnClientImpl yarnClient = getYarnClient(conf,
- SecurityUtil.getTokenServiceAddr(token));
- try {
- DelegationToken dToken = BuilderUtils.newDelegationToken(
- token.getIdentifier(), token.getKind().toString(),
- token.getPassword(), token.getService().toString());
- return yarnClient.renewRMDelegationToken(dToken);
- } finally {
- yarnClient.stop();
- }
- }
-
- @Override
- public void cancel(Token<?> token, Configuration conf) throws IOException,
- InterruptedException {
- YarnClientImpl yarnClient = getYarnClient(conf,
- SecurityUtil.getTokenServiceAddr(token));
- try {
- DelegationToken dToken = BuilderUtils.newDelegationToken(
- token.getIdentifier(), token.getKind().toString(),
- token.getPassword(), token.getService().toString());
- yarnClient.cancelRMDelegationToken(dToken);
- return;
- } finally {
- yarnClient.stop();
- }
- }
-
- private YarnClientImpl getYarnClient(Configuration conf,
- InetSocketAddress rmAddress) {
- YarnClientImpl yarnClient = new YarnClientImpl(rmAddress);
- yarnClient.init(conf);
- yarnClient.start();
- return yarnClient;
- }
-}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
deleted file mode 100644
index 9e78b1187e..0000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-org.apache.hadoop.yarn.security.RMDelegationTokenRenewer;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 07b7ab8428..c5732013fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -107,6 +107,27 @@
+
+ compile-protoc
+ generate-sources
+
+ protoc
+
+
+
+ ${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto
+ ${basedir}/../hadoop-yarn-api/src/main/proto
+ ${basedir}/src/main/proto
+
+
+ ${basedir}/src/main/proto
+
+ yarnprototunnelrpc.proto
+
+
+
+
+
@@ -124,18 +145,6 @@
maven-antrun-plugin
-
- create-protobuf-generated-sources-directory
- initialize
-
-
-
-
-
-
- run
-
-
pre-site
@@ -151,49 +160,6 @@
-
- org.codehaus.mojo
- exec-maven-plugin
-
-
- generate-sources
- generate-sources
-
- protoc
-
- -I../../../hadoop-common-project/hadoop-common/src/main/proto/
- -I../hadoop-yarn-api/src/main/proto/
- -Isrc/main/proto/
- --java_out=target/generated-sources/proto
- src/main/proto/yarnprototunnelrpc.proto
-
-
-
- exec
-
-
-
-
-
-
- org.codehaus.mojo
- build-helper-maven-plugin
-
-
- add-source
- generate-sources
-
- add-source
-
-
-
- target/generated-sources/proto
- target/generated-sources/version
-
-
-
-
-
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 81c1fe933c..5c22a7d2a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -379,6 +379,15 @@ public class YarnConfiguration extends Configuration {
+ "log-aggregation.retain-seconds";
public static final long DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS = -1;
+ /**
+ * How long to wait between aggregated log retention checks. If set to
+ * a value <= 0 then the value is computed as one-tenth of the log retention
+ * setting. Be careful: setting this too small will spam the name node.
+ */
+ public static final String LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS =
+ YARN_PREFIX + "log-aggregation.retain-check-interval-seconds";
+ public static final long DEFAULT_LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS = -1;
+
/**
* Number of seconds to retain logs on the NodeManager. Only applicable if Log
* aggregation is disabled
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
index 9fbcae9989..c8603ab7c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java
@@ -140,9 +140,16 @@ public class AggregatedLogDeletionService extends AbstractService {
" too small (" + retentionSecs + ")");
return;
}
+ long checkIntervalMsecs = 1000 * conf.getLong(
+ YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,
+ YarnConfiguration.DEFAULT_LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS);
+ if (checkIntervalMsecs <= 0) {
+ // when unspecified compute check interval as 1/10th of retention
+ checkIntervalMsecs = (retentionSecs * 1000) / 10;
+ }
TimerTask task = new LogDeletionTask(conf, retentionSecs);
timer = new Timer();
- timer.scheduleAtFixedRate(task, 0, retentionSecs * 1000);
+ timer.scheduleAtFixedRate(task, 0, checkIntervalMsecs);
super.start();
}
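
For context on the interval fallback introduced above, here is a self-contained sketch of the same arithmetic, assuming the stock 10-day retention and the new check-interval property left unset; the class name and literal values are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class CheckIntervalSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, 864000L); // 10 days
    long retentionSecs = conf.getLong(
        YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, -1);
    long checkIntervalMsecs = 1000 * conf.getLong(
        YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,
        YarnConfiguration.DEFAULT_LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS);
    if (checkIntervalMsecs <= 0) {
      // unset or non-positive: fall back to one-tenth of the retention period
      checkIntervalMsecs = (retentionSecs * 1000) / 10;
    }
    System.out.println(checkIntervalMsecs + " ms"); // 86400000 ms, i.e. one check per day
  }
}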
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier.java
index fd3dbf06b6..73bdce4e99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier.java
@@ -19,10 +19,28 @@
package org.apache.hadoop.yarn.security.client;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
/**
* Delegation Token Identifier that identifies the delegation tokens from the
@@ -51,4 +69,100 @@ public class RMDelegationTokenIdentifier extends AbstractDelegationTokenIdentifier
public Text getKind() {
return KIND_NAME;
}
+
+ public static class Renewer extends TokenRenewer {
+
+ @Override
+ public boolean handleKind(Text kind) {
+ return KIND_NAME.equals(kind);
+ }
+
+ @Override
+ public boolean isManaged(Token<?> token) throws IOException {
+ return true;
+ }
+
+ private static
+ AbstractDelegationTokenSecretManager<RMDelegationTokenIdentifier> localSecretManager;
+ private static InetSocketAddress localServiceAddress;
+
+ @Private
+ public static void setSecretManager(
+ AbstractDelegationTokenSecretManager<RMDelegationTokenIdentifier> secretManager,
+ InetSocketAddress serviceAddress) {
+ localSecretManager = secretManager;
+ localServiceAddress = serviceAddress;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public long renew(Token<?> token, Configuration conf) throws IOException,
+ InterruptedException {
+ final ClientRMProtocol rmClient = getRmClient(token, conf);
+ if (rmClient != null) {
+ try {
+ RenewDelegationTokenRequest request =
+ Records.newRecord(RenewDelegationTokenRequest.class);
+ request.setDelegationToken(convertToProtoToken(token));
+ return rmClient.renewDelegationToken(request).getNextExpirationTime();
+ } finally {
+ RPC.stopProxy(rmClient);
+ }
+ } else {
+ return localSecretManager.renewToken(
+ (Token<RMDelegationTokenIdentifier>)token, getRenewer(token));
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void cancel(Token<?> token, Configuration conf) throws IOException,
+ InterruptedException {
+ final ClientRMProtocol rmClient = getRmClient(token, conf);
+ if (rmClient != null) {
+ try {
+ CancelDelegationTokenRequest request =
+ Records.newRecord(CancelDelegationTokenRequest.class);
+ request.setDelegationToken(convertToProtoToken(token));
+ rmClient.cancelDelegationToken(request);
+ } finally {
+ RPC.stopProxy(rmClient);
+ }
+ } else {
+ localSecretManager.cancelToken(
+ (Token<RMDelegationTokenIdentifier>)token, getRenewer(token));
+ }
+ }
+
+ private static ClientRMProtocol getRmClient(Token<?> token,
+ Configuration conf) {
+ InetSocketAddress addr = SecurityUtil.getTokenServiceAddr(token);
+ if (localSecretManager != null) {
+ // return null if it's our token
+ if (localServiceAddress.getAddress().isAnyLocalAddress()) {
+ if (NetUtils.isLocalAddress(addr.getAddress()) &&
+ addr.getPort() == localServiceAddress.getPort()) {
+ return null;
+ }
+ } else if (addr.equals(localServiceAddress)) {
+ return null;
+ }
+ }
+ final YarnRPC rpc = YarnRPC.create(conf);
+ return (ClientRMProtocol)rpc.getProxy(ClientRMProtocol.class, addr, conf);
+ }
+
+ // get renewer so we can always renew our own tokens
+ @SuppressWarnings("unchecked")
+ private static String getRenewer(Token<?> token) throws IOException {
+ return ((Token<RMDelegationTokenIdentifier>)token).decodeIdentifier()
+ .getRenewer().toString();
+ }
+
+ private static DelegationToken convertToProtoToken(Token<?> token) {
+ return BuilderUtils.newDelegationToken(
+ token.getIdentifier(), token.getKind().toString(),
+ token.getPassword(), token.getService().toString());
+ }
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
index 84c3b650e2..d38545fa37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
@@ -34,19 +34,21 @@ public class ResponseInfo implements Iterable<ResponseInfo.Item> {
public final String key;
public final String url;
public final Object value;
+ public final boolean isRaw;
- Item(String key, String url, Object value) {
+ Item(String key, String url, Object value, boolean isRaw) {
this.key = key;
this.url = url;
this.value = value;
+ this.isRaw = isRaw;
}
- public static Item of(String key, Object value) {
- return new Item(key, null, value);
+ public static Item of(String key, Object value, boolean isRaw) {
+ return new Item(key, null, value, isRaw);
}
public static Item of(String key, String url, Object value) {
- return new Item(key, url, value);
+ return new Item(key, url, value, false);
}
}
@@ -71,7 +73,7 @@ public class ResponseInfo implements Iterable<ResponseInfo.Item> {
}
public ResponseInfo _(String key, Object value) {
- items.add(Item.of(key, value));
+ items.add(Item.of(key, value, false));
return this;
}
@@ -80,6 +82,12 @@ public class ResponseInfo implements Iterable<ResponseInfo.Item> {
return this;
}
+ //Value is raw HTML and shouldn't be escaped
+ public ResponseInfo _r(String key, Object value) {
+ items.add(Item.of(key, value, true));
+ return this;
+ }
+
public void clear() {
items.clear();
}
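
A short usage sketch of the new _r() variant: it assumes the caller sits inside an HtmlBlock-style view where info(...) hands back a ResponseInfo; the keys, URL, and markup below are invented.

// Fragment from a hypothetical block's render path.
// _() values are escaped when InfoBlock renders them; _r() values are emitted verbatim.
ResponseInfo about = info("Application Overview");
about._("State", "RUNNING");
about._r("Tracking UI", "<a href='/proxy/application_1360183373897_0001'>ApplicationMaster</a>");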
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
index 7cefe1d249..88b7297c13 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
@@ -46,7 +46,11 @@ public class InfoBlock extends HtmlBlock {
th(item.key);
String value = String.valueOf(item.value);
if (item.url == null) {
- tr.td(value);
+ if (!item.isRaw) {
+ tr.td(value);
+ } else {
+ tr.td()._r(value)._();
+ }
} else {
tr.
td().
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
index 3380cb8b01..0e87a7c2d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
@@ -13,3 +13,4 @@
#
org.apache.hadoop.yarn.security.ApplicationTokenIdentifier$Renewer
org.apache.hadoop.yarn.security.ContainerTokenIdentifier$Renewer
+org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier$Renewer
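
This service entry is how hadoop-common finds the renewer at runtime. Below is a simplified, self-contained sketch of that lookup, modeled on (but not copied from) the ServiceLoader scan in org.apache.hadoop.security.token.Token.

import java.util.ServiceLoader;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.TokenRenewer;

public class RenewerLookupSketch {
  // Returns the first registered renewer that claims the given token kind, or null.
  static TokenRenewer find(Text kind) {
    for (TokenRenewer candidate : ServiceLoader.load(TokenRenewer.class)) {
      if (candidate.handleKind(kind)) {
        return candidate; // RMDelegationTokenIdentifier$Renewer claims the RM delegation token kind
      }
    }
    return null;
  }
}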
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js
index 3f42c7cc2f..d0bde290ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js
@@ -74,19 +74,19 @@ jQuery.fn.dataTableExt.oApi.fnSetFilteringDelay = function ( oSettings, iDelay )
}
function renderHadoopDate(data, type, full) {
- if (type === 'display') {
+ if (type === 'display' || type === 'filter') {
if(data === '0') {
return "N/A";
}
return new Date(parseInt(data)).toUTCString();
}
- // 'filter', 'sort', 'type' and undefined all just use the number
+ // 'sort', 'type' and undefined all just use the number
// If date is 0, then for purposes of sorting it should be consider max_int
return data === '0' ? '9007199254740992' : data;
}
function renderHadoopElapsedTime(data, type, full) {
- if (type === 'display') {
+ if (type === 'display' || type === 'filter') {
var timeDiff = parseInt(data);
if(timeDiff < 0)
return "N/A";
@@ -110,24 +110,37 @@ function renderHadoopElapsedTime(data, type, full) {
toReturn += "sec";
return toReturn;
}
- // 'filter', 'sort', 'type' and undefined all just use the number
+ // 'sort', 'type' and undefined all just use the number
return data;
}
function parseHadoopID(data, type, full) {
- if (type === 'display' || type === 'filter') {
+ if (type === 'display') {
return data;
}
+ //Return the visible string rather than the entire HTML tag
+ if (type === 'filter') {
+ return data.split('>')[1].split('<')[0];
+ }
//Parse the ID for 'sort', 'type' and undefined
//The number after the last '_' and before the end tag '<'
var splits = data.split('_');
return splits[parseInt(splits.length-1)].split('<')[0];
}
+//JSON array element is "20000 attempt_1360183373897_0001_m_000002_0"
+function parseHadoopAttemptID(data, type, full) {
+ if (type === 'display' || type === 'filter') {
+ return data.split(' ')[1];
+ }
+ //For sorting use the order as defined in the JSON element
+ return data.split(' ')[0];
+}
+
function parseHadoopProgress(data, type, full) {
if (type === 'display') {
return data;
}
//Return the title attribute for 'sort', 'filter', 'type' and undefined
return data.split("'")[1];
-}
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e28ac43e85..588bb1bccf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -410,6 +410,15 @@
-1
+
+ How long to wait between aggregated log retention checks.
+ If set to 0 or a negative value then the value is computed as one-tenth
+ of the aggregated log retention time. Be careful: setting this too small
+ will spam the name node.
+ yarn.log-aggregation.retain-check-interval-seconds
+ -1
+
+
Time in seconds to retain user logs. Only applicable if
log aggregation is disabled
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/InlineDispatcher.java
similarity index 92%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/InlineDispatcher.java
index d771a61d86..eb1aa9dc99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/InlineDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/InlineDispatcher.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.yarn.server.resourcemanager.resourcetracker;
+package org.apache.hadoop.yarn.event;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -24,6 +24,7 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
+@SuppressWarnings({"unchecked", "rawtypes"})
public class InlineDispatcher extends AsyncDispatcher {
private static final Log LOG = LogFactory.getLog(InlineDispatcher.class);
@@ -48,7 +49,7 @@ public class InlineDispatcher extends AsyncDispatcher {
return new TestEventHandler();
}
- static class EmptyEventHandler implements EventHandler<Event> {
+ public static class EmptyEventHandler implements EventHandler<Event> {
@Override
public void handle(Event event) {
//do nothing
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
index c1cf9af360..035cd9515c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java
@@ -28,12 +28,19 @@ import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Before;
import org.junit.Test;
import static org.mockito.Mockito.*;
public class TestAggregatedLogDeletionService {
+ @Before
+ public void closeFilesystems() throws IOException {
+ // prevent the same mockfs instance from being reused due to FS cache
+ FileSystem.closeAll();
+ }
+
@Test
public void testDeletion() throws Exception {
long now = System.currentTimeMillis();
@@ -121,6 +128,70 @@ public class TestAggregatedLogDeletionService {
verify(mockFs).delete(app4Dir, true);
}
+ @Test
+ public void testCheckInterval() throws Exception {
+ long RETENTION_SECS = 10 * 24 * 3600;
+ long now = System.currentTimeMillis();
+ long toDeleteTime = now - RETENTION_SECS*1000;
+
+ String root = "mockfs://foo/";
+ String remoteRootLogDir = root+"tmp/logs";
+ String suffix = "logs";
+ Configuration conf = new Configuration();
+ conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
+ conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED, "true");
+ conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS, "864000");
+ conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS, "1");
+ conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteRootLogDir);
+ conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, suffix);
+
+ // prevent us from picking up the same mockfs instance from another test
+ FileSystem.closeAll();
+ Path rootPath = new Path(root);
+ FileSystem rootFs = rootPath.getFileSystem(conf);
+ FileSystem mockFs = ((FilterFileSystem)rootFs).getRawFileSystem();
+
+ Path remoteRootLogPath = new Path(remoteRootLogDir);
+
+ Path userDir = new Path(remoteRootLogPath, "me");
+ FileStatus userDirStatus = new FileStatus(0, true, 0, 0, now, userDir);
+
+ when(mockFs.listStatus(remoteRootLogPath)).thenReturn(
+ new FileStatus[]{userDirStatus});
+
+ Path userLogDir = new Path(userDir, suffix);
+ Path app1Dir = new Path(userLogDir, "application_1_1");
+ FileStatus app1DirStatus = new FileStatus(0, true, 0, 0, now, app1Dir);
+
+ when(mockFs.listStatus(userLogDir)).thenReturn(
+ new FileStatus[]{app1DirStatus});
+
+ Path app1Log1 = new Path(app1Dir, "host1");
+ FileStatus app1Log1Status = new FileStatus(10, false, 1, 1, now, app1Log1);
+
+ when(mockFs.listStatus(app1Dir)).thenReturn(
+ new FileStatus[]{app1Log1Status});
+
+ AggregatedLogDeletionService deletionSvc =
+ new AggregatedLogDeletionService();
+ deletionSvc.init(conf);
+ deletionSvc.start();
+
+ verify(mockFs, timeout(10000).atLeast(4)).listStatus(any(Path.class));
+ verify(mockFs, never()).delete(app1Dir, true);
+
+ // modify the timestamp of the logs and verify it's picked up quickly
+ app1DirStatus = new FileStatus(0, true, 0, 0, toDeleteTime, app1Dir);
+ app1Log1Status = new FileStatus(10, false, 1, 1, toDeleteTime, app1Log1);
+ when(mockFs.listStatus(userLogDir)).thenReturn(
+ new FileStatus[]{app1DirStatus});
+ when(mockFs.listStatus(app1Dir)).thenReturn(
+ new FileStatus[]{app1Log1Status});
+
+ verify(mockFs, timeout(10000)).delete(app1Dir, true);
+
+ deletionSvc.stop();
+ }
static class MockFileSystem extends FilterFileSystem {
MockFileSystem() {
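
The @Before hook and the closeAll() call inside testCheckInterval guard against the FileSystem cache: get()/getFileSystem() reuse one instance per scheme, authority, and UGI (cache keying described from memory, so treat the details as an assumption). A minimal fragment of the reuse being avoided:

// Without FileSystem.closeAll(), the second lookup returns the cached MockFileSystem from
// the first one, Mockito stubbings and all, which is exactly what leaked between tests.
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
FileSystem first = new Path("mockfs://foo/").getFileSystem(conf);
FileSystem second = new Path("mockfs://foo/").getFileSystem(conf);
// first == second here; FileSystem.closeAll() clears the cache so each test starts clean.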
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 428e64633e..569ac197c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -48,18 +48,6 @@
maven-antrun-plugin
-
- create-protobuf-generated-sources-directory
- initialize
-
-
-
-
-
-
- run
-
-
pre-site
@@ -75,45 +63,31 @@
- org.codehaus.mojo
- exec-maven-plugin
+ org.apache.hadoop
+ hadoop-maven-plugins
- generate-sources
- generate-sources
-
- protoc
-
- -I../../../../hadoop-common-project/hadoop-common/src/main/proto/
- -I../../hadoop-yarn-api/src/main/proto/
- -Isrc/main/proto/
- --java_out=target/generated-sources/proto
- src/main/proto/yarn_server_common_protos.proto
- src/main/proto/yarn_server_common_service_protos.proto
- src/main/proto/ResourceTracker.proto
-
-
-
- exec
-
-
-
-
-
-
- org.codehaus.mojo
- build-helper-maven-plugin
-
-
- add-source
+ compile-protoc
generate-sources
- add-source
+ protoc
-
- target/generated-sources/proto
-
+
+ ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto
+ ${basedir}/../../hadoop-yarn-api/src/main/proto
+ ${basedir}/src/main/proto
+
+
+ ${basedir}/src/main/proto
+
+ yarn_server_common_protos.proto
+ yarn_server_common_service_protos.proto
+ ResourceTracker.proto
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 19e906cb92..a4e074ff91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -151,69 +151,29 @@
- maven-antrun-plugin
+ org.apache.hadoop
+ hadoop-maven-plugins
- create-protobuf-generated-sources-directory
- initialize
-
-
-
-
-
-
- run
-
-
-
- compile
+ compile-protoc
generate-sources
- run
-
-
-
-
-
-
- org.codehaus.mojo
- exec-maven-plugin
-
-
- generate-sources
- generate-sources
-
- protoc
-
- -I../../../../hadoop-common-project/hadoop-common/src/main/proto/
- -I../../hadoop-yarn-api/src/main/proto/
- -Isrc/main/proto/
- --java_out=target/generated-sources/proto
- src/main/proto/yarn_server_nodemanager_service_protos.proto
- src/main/proto/LocalizationProtocol.proto
-
-
-
- exec
-
-
-
-
-
-
- org.codehaus.mojo
- build-helper-maven-plugin
-
-
- add-source
- generate-sources
-
- add-source
+ protoc
-
- target/generated-sources/proto
-
+
+ ${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto
+ ${basedir}/../../hadoop-yarn-api/src/main/proto
+ ${basedir}/src/main/proto
+
+
+ ${basedir}/src/main/proto
+
+ yarn_server_nodemanager_service_protos.proto
+ LocalizationProtocol.proto
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 62d6afc557..d73f52c588 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -28,7 +28,7 @@ import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -125,10 +125,10 @@ public class ContainersMonitorImpl extends AbstractService implements
this.maxPmemAllottedForContainers >
totalPhysicalMemoryOnNM * 0.80f) {
LOG.warn("NodeManager configured with " +
- StringUtils.humanReadableInt(maxPmemAllottedForContainers) +
+ TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers, "", 1) +
" physical memory allocated to containers, which is more than " +
"80% of the total physical memory available (" +
- StringUtils.humanReadableInt(totalPhysicalMemoryOnNM) +
+ TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "", 1) +
"). Thrashing might happen.");
}
@@ -493,12 +493,12 @@ public class ContainersMonitorImpl extends AbstractService implements
private String formatUsageString(long currentVmemUsage, long vmemLimit,
long currentPmemUsage, long pmemLimit) {
- return String.format("%sb of %sb physical memory used; " +
- "%sb of %sb virtual memory used",
- StringUtils.humanReadableInt(currentPmemUsage),
- StringUtils.humanReadableInt(pmemLimit),
- StringUtils.humanReadableInt(currentVmemUsage),
- StringUtils.humanReadableInt(vmemLimit));
+ return String.format("%sB of %sB physical memory used; " +
+ "%sB of %sB virtual memory used",
+ TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
+ TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
+ TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
+ TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
}
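
The monitor now formats memory with TraditionalBinaryPrefix.long2String instead of StringUtils.humanReadableInt, matching the HADOOP-9252 entry earlier in CHANGES.txt about humanReadableInt's race condition. A tiny sketch of the replacement call; the printed form should be roughly "2 GB", though the exact spacing and decimals are from memory.

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

public class MemoryFormatSketch {
  public static void main(String[] args) {
    long pmemLimit = 2L * 1024 * 1024 * 1024;
    // long2String(value, unit, decimalPlaces) picks the largest fitting binary prefix.
    System.out.println(TraditionalBinaryPrefix.long2String(pmemLimit, "B", 1));
  }
}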
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index 53a01ebdde..5bcd34f951 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -80,17 +80,17 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
this.cgroupMountPath = conf.get(YarnConfiguration.
NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null);
-
+
// remove extra /'s at end or start of cgroupPrefix
if (cgroupPrefix.charAt(0) == '/') {
- cgroupPrefix = cgroupPrefix.substring(1);
+ cgroupPrefix = cgroupPrefix.substring(1);
}
int len = cgroupPrefix.length();
if (cgroupPrefix.charAt(len - 1) == '/') {
- cgroupPrefix = cgroupPrefix.substring(0, len - 1);
+ cgroupPrefix = cgroupPrefix.substring(0, len - 1);
}
-
+
// mount cgroups if requested
if (cgroupMount && cgroupMountPath != null) {
ArrayList<String> cgroupKVs = new ArrayList<String>();
@@ -98,14 +98,14 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
CONTROLLER_CPU);
lce.mountCgroups(cgroupKVs, cgroupPrefix);
}
-
+
initializeControllerPaths();
}
boolean isCpuWeightEnabled() {
return this.cpuWeightEnabled;
- }
+ }
/*
* Next four functions are for an individual cgroup.
@@ -155,7 +155,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
}
}
}
- }
+ }
private void deleteCgroup(String controller, String groupName) {
String path = pathForCgroup(controller, groupName);
@@ -165,7 +165,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
if (! new File(path).delete()) {
LOG.warn("Unable to delete cgroup at: " + path);
}
- }
+ }
/*
* Next three functions operate on all the resources we are enforcing.
@@ -178,7 +178,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
private void setupLimits(ContainerId containerId,
Resource containerResource) throws IOException {
String containerName = containerId.toString();
-
+
if (isCpuWeightEnabled()) {
createCgroup(CONTROLLER_CPU, containerName);
updateCgroup(CONTROLLER_CPU, containerName, "shares",
@@ -202,7 +202,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
if (isCpuWeightEnabled()) {
deleteCgroup(CONTROLLER_CPU, containerName);
- }
+ }
}
/*
@@ -222,7 +222,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
String containerName = containerId.toString();
StringBuilder sb = new StringBuilder("cgroups=");
-
+
if (isCpuWeightEnabled()) {
sb.append(pathForCgroup(CONTROLLER_CPU, containerName) + "/cgroup.procs");
sb.append(",");
@@ -231,7 +231,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
if (sb.charAt(sb.length() - 1) == ',') {
sb.deleteCharAt(sb.length() - 1);
}
-
+
return sb.toString();
}
@@ -255,8 +255,8 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
BufferedReader in = null;
try {
- in = new BufferedReader(new FileReader(new File(MTAB_FILE)));
-
+ in = new BufferedReader(new FileReader(new File(MTAB_FILE)));
+
for (String str = in.readLine(); str != null;
str = in.readLine()) {
Matcher m = MTAB_FILE_FORMAT.matcher(str);
@@ -316,6 +316,6 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
} else {
throw new IOException("Not able to enforce cpu weights; cannot find "
+ "cgroup for cpu controller in " + MTAB_FILE);
- }
+ }
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java
index fcb166ffbf..9fb87074e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java
@@ -33,7 +33,7 @@ public class DefaultLCEResourcesHandler implements LCEResourcesHandler {
private Configuration conf;
public DefaultLCEResourcesHandler() {
- }
+ }
public void setConf(Configuration conf) {
this.conf = conf;
@@ -42,7 +42,7 @@ public class DefaultLCEResourcesHandler implements LCEResourcesHandler {
@Override
public Configuration getConf() {
return conf;
- }
+ }
public void init(LinuxContainerExecutor lce) {
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
index 16db4a7fd9..41456fde9b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
@@ -267,8 +267,8 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
String expectedMsgPattern =
"Container \\[pid=" + pid + ",containerID=" + cId
+ "\\] is running beyond virtual memory limits. Current usage: "
- + "[0-9.]+m?b of [0-9.]+m?b physical memory used; "
- + "[0-9.]+m?b of [0-9.]+m?b virtual memory used. "
+ + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "
+ + "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. "
+ "Killing container.\nDump of the process-tree for "
+ cId + " :\n";
Pattern pat = Pattern.compile(expectedMsgPattern);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index a464b3ae00..e0522a33fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -157,6 +157,10 @@ public class ClientRMService extends AbstractService implements
this.server.start();
clientBindAddress = conf.updateConnectAddr(YarnConfiguration.RM_ADDRESS,
server.getListenerAddress());
+ // enable RM to short-circuit token operations directly to itself
+ RMDelegationTokenIdentifier.Renewer.setSecretManager(
+ rmDTSecretManager, clientBindAddress);
+
super.start();
}
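
Registering the secret manager and client bind address lets RMDelegationTokenIdentifier.Renewer.getRmClient (earlier in this patch) renew or cancel the RM's own tokens without an RPC round trip. A worked sketch of that address comparison, assuming a wildcard bind on the default client port 8032:

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class ShortCircuitSketch {
  public static void main(String[] args) {
    InetSocketAddress local = new InetSocketAddress("0.0.0.0", 8032);       // RM bound to wildcard
    InetSocketAddress tokenService = new InetSocketAddress("127.0.0.1", 8032);
    boolean shortCircuit = local.getAddress().isAnyLocalAddress()
        ? NetUtils.isLocalAddress(tokenService.getAddress())
            && tokenService.getPort() == local.getPort()
        : tokenService.equals(local);
    System.out.println(shortCircuit); // true: handled by the local secret manager, no proxy built
  }
}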
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 52b4d2892a..70fd2576ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -228,7 +228,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
}
@SuppressWarnings("unchecked")
- protected synchronized void submitApplication(
+ protected void submitApplication(
ApplicationSubmissionContext submissionContext, long submitTime) {
ApplicationId applicationId = submissionContext.getApplicationId();
RMApp application = null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index e1bb437763..f651566d65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -108,7 +108,8 @@ public class SchedulerUtils {
Resource normalized =
Resources.normalize(
resourceCalculator, ask.getCapability(), minimumResource);
- ask.setCapability(normalized);
+ ask.getCapability().setMemory(normalized.getMemory());
+ ask.getCapability().setVirtualCores(normalized.getVirtualCores());
}
}
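
The normalization result is now copied field-by-field into the existing capability instead of swapping the Resource reference. A plausible reading, offered as an assumption rather than something the patch states, is that other holders of the same Resource record should also observe the normalized values; the fragment below only illustrates that reference-versus-in-place distinction.

// Hypothetical fragment: 'shared' stands for a Resource record also referenced elsewhere.
Resource shared = ask.getCapability();
// Old style: ask.setCapability(normalized) re-points the request but leaves 'shared' stale.
// New style: mutate the record itself so every reference sees the normalized values.
shared.setMemory(normalized.getMemory());
shared.setVirtualCores(normalized.getVirtualCores());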
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 7656ace5b0..719cf1e578 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -571,6 +571,19 @@ public class LeafQueue implements CSQueue {
return user;
}
+ /**
+ * @return an ArrayList of UserInfo objects for the users active in this queue
+ */
+ public synchronized ArrayList<UserInfo> getUsers() {
+ ArrayList<UserInfo> usersToReturn = new ArrayList<UserInfo>();
+ for (Map.Entry<String, User> entry: users.entrySet()) {
+ usersToReturn.add(new UserInfo(entry.getKey(), Resources.clone(
+ entry.getValue().consumed), entry.getValue().getActiveApplications(),
+ entry.getValue().getPendingApplications()));
+ }
+ return usersToReturn;
+ }
+
@Override
public synchronized void reinitialize(
CSQueue newlyParsedQueue, Resource clusterResource)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java
new file mode 100644
index 0000000000..65c911bbb9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class UserInfo {
+ protected String username;
+ protected ResourceInfo resourcesUsed;
+ protected int numPendingApplications;
+ protected int numActiveApplications;
+
+ UserInfo() {}
+
+ UserInfo(String username, Resource resUsed, int activeApps, int pendingApps) {
+ this.username = username;
+ this.resourcesUsed = new ResourceInfo(resUsed);
+ this.numActiveApplications = activeApps;
+ this.numPendingApplications = pendingApps;
+ }
+
+ public String getUsername() {
+ return username;
+ }
+
+ public ResourceInfo getResourcesUsed() {
+ return resourcesUsed;
+ }
+
+ public int getNumPendingApplications() {
+ return numPendingApplications;
+ }
+
+ public int getNumActiveApplications() {
+ return numActiveApplications;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index 9232190ba3..066a0a5b96 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -261,7 +261,7 @@ public class DelegationTokenRenewer extends AbstractService {
* done else false.
* @throws IOException
*/
- public synchronized void addApplication(
+ public void addApplication(
ApplicationId applicationId, Credentials ts, boolean shouldCancelAtEnd)
throws IOException {
if (ts == null) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 110bf8d17a..52a1bc77e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -22,12 +22,15 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
import java.util.ArrayList;
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerLeafQueueInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@@ -63,8 +66,42 @@ class CapacitySchedulerPage extends RmView {
lqinfo = (CapacitySchedulerLeafQueueInfo) info.qinfo;
}
+ //Return a string describing one resource as a percentage of another
+ private String getPercentage(ResourceInfo numerator, ResourceInfo denominator) {
+ StringBuilder percentString = new StringBuilder("Memory: ");
+ if (numerator != null) {
+ percentString.append(numerator.getMemory());
+ }
+ if (denominator.getMemory() != 0) {
+ percentString.append(" (")
+ .append(StringUtils.format("%.2f", numerator.getMemory() * 100.0 /
+ denominator.getMemory()) + "%)");
+ }
+ percentString.append(", vCores: ");
+ if (numerator != null) {
+ percentString.append(numerator.getvCores());
+ }
+ if (denominator.getvCores() != 0) {
+ percentString.append(" (")
+ .append(StringUtils.format("%.2f", numerator.getvCores() * 100.0 /
+ denominator.getvCores()) + "%)");
+ }
+ return percentString.toString();
+ }
+
@Override
protected void render(Block html) {
+ StringBuilder activeUserList = new StringBuilder("");
+ ResourceInfo usedResources = lqinfo.getResourcesUsed();
+ ArrayList<UserInfo> users = lqinfo.getUsers().getUsersList();
+ for (UserInfo entry: users) {
+ activeUserList.append(entry.getUsername()).append(" <")
+ .append(getPercentage(entry.getResourcesUsed(), usedResources))
+ .append(", Active Apps: " + entry.getNumActiveApplications())
+ .append(", Pending Apps: " + entry.getNumPendingApplications())
+ .append("><br>"); //Force line break
+ }
+
ResponseInfo ri = info("\'" + lqinfo.getQueuePath().substring(5) + "\' Queue Status").
_("Queue State:", lqinfo.getQueueState()).
_("Used Capacity:", percent(lqinfo.getUsedCapacity() / 100)).
@@ -81,7 +118,8 @@ class CapacitySchedulerPage extends RmView {
_("Configured Capacity:", percent(lqinfo.getCapacity() / 100)).
_("Configured Max Capacity:", percent(lqinfo.getMaxCapacity() / 100)).
_("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%").
- _("Configured User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor()));
+ _("Configured User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor())).
+ _r("Active users: ", activeUserList.toString());
html._(InfoBlock.class);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
index 12e77a7c49..5f50ed18f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
@@ -30,6 +30,7 @@ import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
@@ -42,9 +43,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsIn
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UsersInfo;
import org.apache.hadoop.yarn.webapp.RemoteExceptionData;
@Singleton
@@ -61,7 +64,8 @@ public class JAXBContextResolver implements ContextResolver<JAXBContext> {
SchedulerTypeInfo.class, NodeInfo.class, UserMetricsInfo.class,
CapacitySchedulerInfo.class, ClusterMetricsInfo.class,
SchedulerInfo.class, AppsInfo.class, NodesInfo.class,
- RemoteExceptionData.class, CapacitySchedulerQueueInfoList.class};
+ RemoteExceptionData.class, CapacitySchedulerQueueInfoList.class,
+ ResourceInfo.class, UsersInfo.class, UserInfo.class};
public JAXBContextResolver() throws Exception {
this.types = new HashSet<Class>(Arrays.asList(cTypes));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
index 5b2624ee98..d90e9631b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
@@ -35,6 +35,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
protected int maxActiveApplications;
protected int maxActiveApplicationsPerUser;
protected int userLimit;
+ protected UsersInfo users; // To add another level in the XML
protected float userLimitFactor;
CapacitySchedulerLeafQueueInfo() {
@@ -50,6 +51,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
maxActiveApplications = q.getMaximumActiveApplications();
maxActiveApplicationsPerUser = q.getMaximumActiveApplicationsPerUser();
userLimit = q.getUserLimit();
+ users = new UsersInfo(q.getUsers());
userLimitFactor = q.getUserLimitFactor();
}
@@ -85,6 +87,11 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
return userLimit;
}
+ //Placing here because of JERSEY-1199
+ public UsersInfo getUsers() {
+ return users;
+ }
+
public float getUserLimitFactor() {
return userLimitFactor;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java
index 6757227c63..2cfa660a35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java
@@ -48,6 +48,7 @@ public class CapacitySchedulerQueueInfo {
protected String queueName;
protected QueueState state;
protected CapacitySchedulerQueueInfoList queues;
+ protected ResourceInfo resourcesUsed;
CapacitySchedulerQueueInfo() {
};
@@ -69,6 +70,7 @@ public class CapacitySchedulerQueueInfo {
usedResources = q.getUsedResources().toString();
queueName = q.getQueueName();
state = q.getState();
+ resourcesUsed = new ResourceInfo(q.getUsedResources());
}
public float getCapacity() {
@@ -119,6 +121,10 @@ public class CapacitySchedulerQueueInfo {
return this.queues;
}
+ public ResourceInfo getResourcesUsed() {
+ return resourcesUsed;
+ }
+
/**
* Limit a value to a specified range.
* @param val the value to be capped
diff --git a/hadoop-common-project/hadoop-common/src/test/ddl/int.jr b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
similarity index 56%
rename from hadoop-common-project/hadoop-common/src/test/ddl/int.jr
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
index 8068bf3269..84276bf105 100644
--- a/hadoop-common-project/hadoop-common/src/test/ddl/int.jr
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
@@ -15,9 +15,34 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-module org.apache.hadoop.record {
- class RecInt {
- int data;
- }
-}
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class ResourceInfo {
+ int memory;
+ int vCores;
+
+ public ResourceInfo() {
+ }
+
+ public ResourceInfo(Resource res) {
+ memory = res.getMemory();
+ vCores = res.getVirtualCores();
+ }
+
+ public int getMemory() {
+ return memory;
+ }
+
+ public int getvCores() {
+ return vCores;
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/ddl/string.jr b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UsersInfo.java
similarity index 52%
rename from hadoop-common-project/hadoop-common/src/test/ddl/string.jr
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UsersInfo.java
index 94abdf5854..3ee7edfca8 100644
--- a/hadoop-common-project/hadoop-common/src/test/ddl/string.jr
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UsersInfo.java
@@ -15,9 +15,32 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-module org.apache.hadoop.record {
- class RecString {
- ustring data;
- }
-}
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.ArrayList;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class UsersInfo {
+ @XmlElement(name="user")
+ protected ArrayList<UserInfo> usersList = new ArrayList<UserInfo>();
+
+ public UsersInfo() {
+ }
+
+ public UsersInfo(ArrayList<UserInfo> usersList) {
+ this.usersList = usersList;
+ }
+
+ public ArrayList<UserInfo> getUsersList() {
+ return usersList;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 0bc3211a81..12391c6097 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -18,16 +18,19 @@
package org.apache.hadoop.yarn.server.resourcemanager;
+import java.security.PrivilegedAction;
import java.util.Map;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -37,6 +40,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
@@ -118,21 +122,27 @@ public class MockRM extends ResourceManager {
}
public RMApp submitApp(int masterMemory) throws Exception {
- return submitApp(masterMemory, "", "");
+ return submitApp(masterMemory, "", UserGroupInformation.getCurrentUser()
+ .getShortUserName());
}
// client
public RMApp submitApp(int masterMemory, String name, String user) throws Exception {
- return submitApp(masterMemory, name, user, null, false);
+ return submitApp(masterMemory, name, user, null, false, null);
}
public RMApp submitApp(int masterMemory, String name, String user,
Map<ApplicationAccessType, String> acls) throws Exception {
- return submitApp(masterMemory, name, user, acls, false);
+ return submitApp(masterMemory, name, user, acls, false, null);
}
public RMApp submitApp(int masterMemory, String name, String user,
- Map<ApplicationAccessType, String> acls, boolean unmanaged) throws Exception {
+ Map<ApplicationAccessType, String> acls, String queue) throws Exception {
+ return submitApp(masterMemory, name, user, acls, false, queue);
+ }
+
+ public RMApp submitApp(int masterMemory, String name, String user,
+ Map<ApplicationAccessType, String> acls, boolean unmanaged, String queue) throws Exception {
ClientRMProtocol client = getClientRMService();
GetNewApplicationResponse resp = client.getNewApplication(Records
.newRecord(GetNewApplicationRequest.class));
@@ -148,6 +158,9 @@ public class MockRM extends ResourceManager {
if(unmanaged) {
sub.setUnmanagedAM(true);
}
+ if (queue != null) {
+ sub.setQueue(queue);
+ }
ContainerLaunchContext clc = Records
.newRecord(ContainerLaunchContext.class);
Resource capability = Records.newRecord(Resource.class);
@@ -157,7 +170,29 @@ public class MockRM extends ResourceManager {
sub.setAMContainerSpec(clc);
req.setApplicationSubmissionContext(sub);
- client.submitApplication(req);
+ UserGroupInformation fakeUser =
+ UserGroupInformation.createUserForTesting(user, new String[] {"someGroup"});
+ PrivilegedAction<SubmitApplicationResponse> action =
+ new PrivilegedAction<SubmitApplicationResponse>() {
+ ClientRMProtocol client;
+ SubmitApplicationRequest req;
+ @Override
+ public SubmitApplicationResponse run() {
+ try {
+ return client.submitApplication(req);
+ } catch (YarnRemoteException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+ PrivilegedAction<SubmitApplicationResponse> setClientReq(
+ ClientRMProtocol client, SubmitApplicationRequest req) {
+ this.client = client;
+ this.req = req;
+ return this;
+ }
+ }.setClientReq(client, req);
+ fakeUser.doAs(action);
// make sure app is immediately available after submit
waitForState(appId, RMAppState.ACCEPTED);
return getRMContext().getRMApps().get(appId);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 8479c2c87a..871755c8f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -27,7 +27,9 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.List;
+import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CyclicBarrier;
import junit.framework.Assert;
@@ -37,6 +39,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
@@ -44,28 +47,36 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.Event;
+import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.RMDelegationTokenSecretManager;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
+import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;
-import org.junit.Test;
import org.junit.AfterClass;
import org.junit.BeforeClass;
-
+import org.junit.Test;
public class TestClientRMService {
@@ -235,6 +246,88 @@ public class TestClientRMService {
rmService.renewDelegationToken(request);
}
+ @Test(timeout=4000)
+ public void testConcurrentAppSubmit()
+ throws IOException, InterruptedException, BrokenBarrierException {
+ YarnScheduler yarnScheduler = mock(YarnScheduler.class);
+ RMContext rmContext = mock(RMContext.class);
+ mockRMContext(yarnScheduler, rmContext);
+ RMStateStore stateStore = mock(RMStateStore.class);
+ when(rmContext.getStateStore()).thenReturn(stateStore);
+ RMAppManager appManager = new RMAppManager(rmContext, yarnScheduler,
+ null, mock(ApplicationACLsManager.class), new Configuration());
+
+ final ApplicationId appId1 = getApplicationId(100);
+ final ApplicationId appId2 = getApplicationId(101);
+ final SubmitApplicationRequest submitRequest1 = mockSubmitAppRequest(appId1);
+ final SubmitApplicationRequest submitRequest2 = mockSubmitAppRequest(appId2);
+
+ final CyclicBarrier startBarrier = new CyclicBarrier(2);
+ final CyclicBarrier endBarrier = new CyclicBarrier(2);
+
+ @SuppressWarnings("rawtypes")
+ EventHandler eventHandler = new EventHandler() {
+ @Override
+ public void handle(Event rawEvent) {
+ if (rawEvent instanceof RMAppEvent) {
+ RMAppEvent event = (RMAppEvent) rawEvent;
+ if (event.getApplicationId().equals(appId1)) {
+ try {
+ startBarrier.await();
+ endBarrier.await();
+ } catch (BrokenBarrierException e) {
+ LOG.warn("Broken Barrier", e);
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted while awaiting barriers", e);
+ }
+ }
+ }
+ }
+ };
+
+ when(rmContext.getDispatcher().getEventHandler()).thenReturn(eventHandler);
+
+ final ClientRMService rmService =
+ new ClientRMService(rmContext, yarnScheduler, appManager, null, null);
+
+ // submit an app and wait for it to block while in app submission
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ rmService.submitApplication(submitRequest1);
+ } catch (YarnRemoteException e) {}
+ }
+ };
+ t.start();
+
+ // submit another app, so go through while the first app is blocked
+ startBarrier.await();
+ rmService.submitApplication(submitRequest2);
+ endBarrier.await();
+ t.join();
+ }
+
+ private SubmitApplicationRequest mockSubmitAppRequest(ApplicationId appId) {
+ String user = MockApps.newUserName();
+ String queue = MockApps.newQueue();
+
+ ContainerLaunchContext amContainerSpec = mock(ContainerLaunchContext.class);
+ Resource resource = mock(Resource.class);
+ when(amContainerSpec.getResource()).thenReturn(resource);
+
+ ApplicationSubmissionContext submissionContext = mock(ApplicationSubmissionContext.class);
+ when(submissionContext.getUser()).thenReturn(user);
+ when(submissionContext.getQueue()).thenReturn(queue);
+ when(submissionContext.getAMContainerSpec()).thenReturn(amContainerSpec);
+ when(submissionContext.getApplicationId()).thenReturn(appId);
+
+ SubmitApplicationRequest submitRequest =
+ recordFactory.newRecordInstance(SubmitApplicationRequest.class);
+ submitRequest.setApplicationSubmissionContext(submissionContext);
+ return submitRequest;
+ }
+
private void mockRMContext(YarnScheduler yarnScheduler, RMContext rmContext)
throws IOException {
Dispatcher dispatcher = mock(Dispatcher.class);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
index 3f78696474..5ee851b843 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
@@ -17,13 +17,12 @@
package org.apache.hadoop.yarn.server.resourcemanager;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
@@ -34,9 +33,15 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
@@ -46,12 +51,14 @@ import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.RMDelegationTokenSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.ProtoUtils;
import org.apache.hadoop.yarn.util.Records;
+import org.junit.Before;
import org.junit.Test;
@@ -59,6 +66,10 @@ public class TestClientRMTokens {
private static final Log LOG = LogFactory.getLog(TestClientRMTokens.class);
+ @Before
+ public void resetSecretManager() {
+ RMDelegationTokenIdentifier.Renewer.setSecretManager(null, null);
+ }
@Test
public void testDelegationToken() throws IOException, InterruptedException {
@@ -200,7 +211,122 @@ public class TestClientRMTokens {
RPC.stopProxy(clientRMWithDT);
}
}
+ }
+
+ @Test
+ public void testShortCircuitRenewCancel()
+ throws IOException, InterruptedException {
+ InetSocketAddress addr =
+ new InetSocketAddress(InetAddress.getLocalHost(), 123);
+ checkShortCircuitRenewCancel(addr, addr, true);
+ }
+
+ @Test
+ public void testShortCircuitRenewCancelWildcardAddress()
+ throws IOException, InterruptedException {
+ InetSocketAddress rmAddr = new InetSocketAddress(123);
+ checkShortCircuitRenewCancel(
+ rmAddr,
+ new InetSocketAddress(InetAddress.getLocalHost(), rmAddr.getPort()),
+ true);
+ }
+
+ @Test
+ public void testShortCircuitRenewCancelSameHostDifferentPort()
+ throws IOException, InterruptedException {
+ InetSocketAddress rmAddr =
+ new InetSocketAddress(InetAddress.getLocalHost(), 123);
+ checkShortCircuitRenewCancel(
+ rmAddr,
+ new InetSocketAddress(rmAddr.getAddress(), rmAddr.getPort()+1),
+ false);
+ }
+
+ @Test
+ public void testShortCircuitRenewCancelDifferentHostSamePort()
+ throws IOException, InterruptedException {
+ InetSocketAddress rmAddr =
+ new InetSocketAddress(InetAddress.getLocalHost(), 123);
+ checkShortCircuitRenewCancel(
+ rmAddr,
+ new InetSocketAddress("1.1.1.1", rmAddr.getPort()),
+ false);
+ }
+
+ @Test
+ public void testShortCircuitRenewCancelDifferentHostDifferentPort()
+ throws IOException, InterruptedException {
+ InetSocketAddress rmAddr =
+ new InetSocketAddress(InetAddress.getLocalHost(), 123);
+ checkShortCircuitRenewCancel(
+ rmAddr,
+ new InetSocketAddress("1.1.1.1", rmAddr.getPort()+1),
+ false);
+ }
+
+ @SuppressWarnings("unchecked")
+ private void checkShortCircuitRenewCancel(InetSocketAddress rmAddr,
+ InetSocketAddress serviceAddr,
+ boolean shouldShortCircuit
+ ) throws IOException, InterruptedException {
+ Configuration conf = new Configuration();
+ conf.setClass(YarnConfiguration.IPC_RPC_IMPL,
+ YarnBadRPC.class, YarnRPC.class);
+ RMDelegationTokenSecretManager secretManager =
+ mock(RMDelegationTokenSecretManager.class);
+ RMDelegationTokenIdentifier.Renewer.setSecretManager(secretManager, rmAddr);
+
+ RMDelegationTokenIdentifier ident = new RMDelegationTokenIdentifier(
+ new Text("owner"), new Text("renewer"), null);
+ Token<RMDelegationTokenIdentifier> token =
+ new Token<RMDelegationTokenIdentifier>(ident, secretManager);
+
+ SecurityUtil.setTokenService(token, serviceAddr);
+ if (shouldShortCircuit) {
+ token.renew(conf);
+ verify(secretManager).renewToken(eq(token), eq("renewer"));
+ reset(secretManager);
+ token.cancel(conf);
+ verify(secretManager).cancelToken(eq(token), eq("renewer"));
+ } else {
+ try {
+ token.renew(conf);
+ fail();
+ } catch (RuntimeException e) {
+ assertEquals("getProxy", e.getMessage());
+ }
+ verify(secretManager, never()).renewToken(any(Token.class), anyString());
+ try {
+ token.cancel(conf);
+ fail();
+ } catch (RuntimeException e) {
+ assertEquals("getProxy", e.getMessage());
+ }
+ verify(secretManager, never()).cancelToken(any(Token.class), anyString());
+ }
+ }
+
+ @SuppressWarnings("rawtypes")
+ public static class YarnBadRPC extends YarnRPC {
+ @Override
+ public Object getProxy(Class protocol, InetSocketAddress addr,
+ Configuration conf) {
+ throw new RuntimeException("getProxy");
+ }
+
+ @Override
+ public void stopProxy(Object proxy, Configuration conf) {
+ throw new RuntimeException("stopProxy");
+ }
+
+ @Override
+ public Server getServer(Class protocol, Object instance,
+ InetSocketAddress addr, Configuration conf,
+ SecretManager<? extends TokenIdentifier> secretManager,
+ int numHandlers, String portRangeConfig) {
+ throw new RuntimeException("getServer");
+ }
}
// Get the delegation token directly as it is a little difficult to setup
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index 9ae8bf0290..6c14008626 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index d4f97380c3..babe79a2de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -152,7 +152,7 @@ public class TestRMRestart {
.getApplicationId());
// create unmanaged app
- RMApp appUnmanaged = rm1.submitApp(200, "", "", null, true);
+ RMApp appUnmanaged = rm1.submitApp(200, "someApp", "someUser", null, true, null);
ApplicationAttemptId unmanagedAttemptId =
appUnmanaged.getCurrentAppAttempt().getAppAttemptId();
// assert appUnmanaged info is saved
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java
index 1c4b6f9e0c..6ec3f5403b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
index a7a52c29ab..7d7f99d054 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 25a4b968fd..c907df389b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
@@ -54,7 +55,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
-import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index 681e8d0169..bc806f60a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -42,23 +43,35 @@ public class TestSchedulerUtils {
// case negative memory
ask.setCapability(Resources.createResource(-1024));
+ Resource before = ask.getCapability();
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource);
+ Resource after = ask.getCapability();
assertEquals(minMemory, ask.getCapability().getMemory());
+ assertTrue(before == after);
// case zero memory
ask.setCapability(Resources.createResource(0));
+ before = ask.getCapability();
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource);
+ after = ask.getCapability();
assertEquals(minMemory, ask.getCapability().getMemory());
+ assertTrue(before == after);
// case memory is a multiple of minMemory
ask.setCapability(Resources.createResource(2 * minMemory));
+ before = ask.getCapability();
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource);
+ after = ask.getCapability();
assertEquals(2 * minMemory, ask.getCapability().getMemory());
+ assertTrue(before == after);
// case memory is not a multiple of minMemory
ask.setCapability(Resources.createResource(minMemory + 10));
+ before = ask.getCapability();
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource);
+ after = ask.getCapability();
assertEquals(2 * minMemory, ask.getCapability().getMemory());
+ assertTrue(before == after);
}
@@ -73,24 +86,33 @@ public class TestSchedulerUtils {
// case negative memory/vcores
ask.setCapability(Resources.createResource(-1024, -1));
+ Resource before = ask.getCapability();
SchedulerUtils.normalizeRequest(
ask, resourceCalculator, clusterResource, minResource);
+ Resource after = ask.getCapability();
assertEquals(minResource, ask.getCapability());
+ assertTrue(before == after);
// case zero memory/vcores
ask.setCapability(Resources.createResource(0, 0));
+ before = ask.getCapability();
SchedulerUtils.normalizeRequest(
ask, resourceCalculator, clusterResource, minResource);
+ after = ask.getCapability();
assertEquals(minResource, ask.getCapability());
assertEquals(1, ask.getCapability().getVirtualCores());
assertEquals(1024, ask.getCapability().getMemory());
+ assertTrue(before == after);
// case non-zero memory & zero cores
ask.setCapability(Resources.createResource(1536, 0));
+ before = ask.getCapability();
SchedulerUtils.normalizeRequest(
ask, resourceCalculator, clusterResource, minResource);
+ after = ask.getCapability();
assertEquals(Resources.createResource(2048, 1), ask.getCapability());
assertEquals(1, ask.getCapability().getVirtualCores());
assertEquals(2048, ask.getCapability().getMemory());
+ assertTrue(before == after);
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 97310093cb..c4dbe876c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -33,13 +33,13 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.Application;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.Task;
import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
-import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index ad127a9264..c59625361c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -21,11 +21,17 @@ package org.apache.hadoop.yarn.server.resourcemanager.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collections;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -50,6 +56,8 @@ import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
/**
* unit test -
@@ -541,4 +549,54 @@ public class TestDelegationTokenRenewer {
fail("Renewal of cancelled token should have failed");
} catch (InvalidToken ite) {}
}
+
+ @Test(timeout=2000)
+ public void testConncurrentAddApplication()
+ throws IOException, InterruptedException, BrokenBarrierException {
+ final CyclicBarrier startBarrier = new CyclicBarrier(2);
+ final CyclicBarrier endBarrier = new CyclicBarrier(2);
+
+ // this token uses barriers to block during renew
+ final Credentials creds1 = new Credentials();
+ final Token<?> token1 = mock(Token.class);
+ creds1.addToken(new Text("token"), token1);
+ doReturn(true).when(token1).isManaged();
+ doAnswer(new Answer<Long>() {
+ public Long answer(InvocationOnMock invocation)
+ throws InterruptedException, BrokenBarrierException {
+ startBarrier.await();
+ endBarrier.await();
+ return Long.MAX_VALUE;
+ }}).when(token1).renew(any(Configuration.class));
+
+ // this dummy token fakes renewing
+ final Credentials creds2 = new Credentials();
+ final Token<?> token2 = mock(Token.class);
+ creds2.addToken(new Text("token"), token2);
+ doReturn(true).when(token2).isManaged();
+ doReturn(Long.MAX_VALUE).when(token2).renew(any(Configuration.class));
+
+ // fire up the renewer
+ final DelegationTokenRenewer dtr = new DelegationTokenRenewer();
+ dtr.init(conf);
+ dtr.start();
+
+ // submit a job that blocks during renewal
+ Thread submitThread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ dtr.addApplication(mock(ApplicationId.class), creds1, false);
+ } catch (IOException e) {}
+ }
+ };
+ submitThread.start();
+
+ // wait till 1st submit blocks, then submit another
+ startBarrier.await();
+ dtr.addApplication(mock(ApplicationId.class), creds2, false);
+ // signal 1st to complete
+ endBarrier.await();
+ submitThread.join();
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index 00bdafbe8a..3fc0883189 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -27,10 +27,12 @@ import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
+import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
@@ -44,6 +46,7 @@ import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
+import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
@@ -355,10 +358,10 @@ public class TestRMWebServicesCapacitySched extends JerseyTest {
private void verifySubQueue(JSONObject info, String q,
float parentAbsCapacity, float parentAbsMaxCapacity)
throws JSONException, Exception {
- int numExpectedElements = 11;
+ int numExpectedElements = 12;
boolean isParentQueue = true;
if (!info.has("queues")) {
- numExpectedElements = 20;
+ numExpectedElements = 22;
isParentQueue = false;
}
assertEquals("incorrect number of elements", numExpectedElements, info.length());
@@ -397,6 +400,8 @@ public class TestRMWebServicesCapacitySched extends JerseyTest {
lqi.userLimit = info.getInt("userLimit");
lqi.userLimitFactor = (float) info.getDouble("userLimitFactor");
verifyLeafQueueGeneric(q, lqi);
+ // resourcesUsed and users (per-user resources used) are checked in
+ // testPerUserResource()
}
}
@@ -464,4 +469,143 @@ public class TestRMWebServicesCapacitySched extends JerseyTest {
assertEquals("userLimitFactor doesn't match",
csConf.getUserLimitFactor(q), info.userLimitFactor, 1e-3f);
}
+
+ //Return a child Node of node with the tagname or null if none exists
+ private Node getChildNodeByName(Node node, String tagname) {
+ NodeList nodeList = node.getChildNodes();
+ for (int i=0; i < nodeList.getLength(); ++i) {
+ if (nodeList.item(i).getNodeName().equals(tagname)) {
+ return nodeList.item(i);
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Test per user resources and resourcesUsed elements in the web services XML
+ * @throws Exception
+ */
+ @Test
+ public void testPerUserResourcesXML() throws Exception {
+ //Start RM so that it accepts app submissions
+ rm.start();
+ try {
+ rm.submitApp(10, "app1", "user1", null, "b1");
+ rm.submitApp(20, "app2", "user2", null, "b1");
+
+ //Get the XML from ws/v1/cluster/scheduler
+ WebResource r = resource();
+ ClientResponse response = r.path("ws/v1/cluster/scheduler")
+ .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+ assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+ String xml = response.getEntity(String.class);
+ DocumentBuilder db = DocumentBuilderFactory.newInstance()
+ .newDocumentBuilder();
+ InputSource is = new InputSource();
+ is.setCharacterStream(new StringReader(xml));
+ //Parse the XML we got
+ Document dom = db.parse(is);
+
+ //Get all users elements (1 for each leaf queue)
+ NodeList allUsers = dom.getElementsByTagName("users");
+ for (int i=0; i", res.toString());
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
index f55f925bed..d48312a936 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
@@ -378,6 +378,8 @@ ResourceManager REST API's.
*---------------+--------------+-------------------------------+
| queues | array of queues(JSON)/zero or more queue objects(XML) | A collection of sub-queue information|
*---------------+--------------+-------------------------------+
+| resourcesUsed | A single resource object | The total amount of resources used by this queue |
+*---------------+--------------+-------------------------------+
** Elements of the queues object for a Leaf queue - contains all elements in parent plus the following:
@@ -404,6 +406,32 @@ ResourceManager REST API's.
*---------------+--------------+-------------------------------+
| userLimitFactor | float | The user limit factor set in the configuration |
*---------------+--------------+-------------------------------+
+| users | array of users(JSON)/zero or more user objects(XML) | A collection of user objects containing resources used |
+*---------------+--------------+-------------------------------+
+
+** Elements of the user object for users:
+
+*---------------+--------------+-------------------------------+
+|| Item || Data Type || Description |
+*---------------+--------------+-------------------------------+
+| username | String | The username of the user using the resources |
+*---------------+--------------+-------------------------------+
+| resourcesUsed | A single resource object | The amount of resources used by the user in this queue |
+*---------------+--------------+-------------------------------+
+| numActiveApplications | int | The number of active applications for this user in this queue |
+*---------------+--------------+-------------------------------+
+| numPendingApplications | int | The number of pending applications for this user in this queue |
+*---------------+--------------+-------------------------------+
+
+** Elements of the resource object for resourcesUsed in user and queues:
+
+*---------------+--------------+-------------------------------+
+|| Item || Data Type || Description |
+*---------------+--------------+-------------------------------+
+| memory | int | The amount of memory used (in MB) |
+*---------------+--------------+-------------------------------+
+| vCores | int | The number of virtual cores |
+*---------------+--------------+-------------------------------+
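+
+  For illustration, given the per-user elements described above, the <<<users>>>
+  object of a leaf queue could be rendered in JSON as shown below (the user name
+  and resource values are hypothetical):
+
++---+
+"users" : {
+   "user" : [
+      {
+         "username" : "user1",
+         "resourcesUsed" : {
+            "memory" : 1024,
+            "vCores" : 1
+         },
+         "numActiveApplications" : 1,
+         "numPendingApplications" : 0
+      }
+   ]
+}
++---+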
*** Response Examples
@@ -428,199 +456,262 @@ ResourceManager REST API's.
+---+
{
- "scheduler" : {
- "schedulerInfo" : {
- "queueName" : "root",
- "maxCapacity" : 100,
- "type" : "capacityScheduler",
- "queues" : {
- "queue" : [
- {
- "numPendingApplications" : 0,
- "queueName" : "default",
- "userLimitFactor" : 1,
- "maxApplications" : 1,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 90,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 90,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 70,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 70,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 1
- },
- {
- "queueName" : "test",
- "absoluteCapacity" : 20,
- "usedCapacity" : 0,
- "capacity" : 20,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 100,
- "queues" : {
- "queue" : [
- {
- "queueName" : "a1",
- "absoluteCapacity" : 12,
- "usedCapacity" : 0,
- "capacity" : 60.000004,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 100,
- "queues" : {
- "queue" : [
- {
- "numPendingApplications" : 0,
- "queueName" : "a11",
- "userLimitFactor" : 1,
- "maxApplications" : 0,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 100,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 10.200001,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 85,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 0
- },
- {
- "numPendingApplications" : 0,
- "queueName" : "a12",
- "userLimitFactor" : 1,
- "maxApplications" : 0,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 100,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 1.8000001,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 15.000001,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 0
- }
- ]
- },
- "absoluteUsedCapacity" : 0
- },
- {
- "numPendingApplications" : 0,
- "queueName" : "a2",
- "userLimitFactor" : 1,
- "maxApplications" : 0,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 100,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 8.000001,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 40,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 0
- }
- ]
- },
- "absoluteUsedCapacity" : 0
- },
- {
- "queueName" : "test2",
- "absoluteCapacity" : 10,
- "usedCapacity" : 0,
- "capacity" : 10,
- "state" : "RUNNING",
- "maxCapacity" : 15.000001,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 15.000001,
- "queues" : {
- "queue" : [
- {
- "numPendingApplications" : 0,
- "queueName" : "a3",
- "userLimitFactor" : 1,
- "maxApplications" : 0,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 15.000001,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 9,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 90,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 0
- },
- {
- "numPendingApplications" : 0,
- "queueName" : "a4",
- "userLimitFactor" : 1,
- "maxApplications" : 0,
- "usedCapacity" : 0,
- "numContainers" : 0,
- "state" : "RUNNING",
- "maxCapacity" : 100,
- "numApplications" : 0,
- "usedResources" : "memory: 0",
- "absoluteMaxCapacity" : 15.000001,
- "maxActiveApplications" : 1,
- "numActiveApplications" : 0,
- "absoluteUsedCapacity" : 0,
- "userLimit" : 100,
- "absoluteCapacity" : 1.0000001,
- "maxActiveApplicationsPerUser" : 1,
- "capacity" : 10,
- "type" : "capacitySchedulerLeafQueueInfo",
- "maxApplicationsPerUser" : 0
- }
- ]
- },
- "absoluteUsedCapacity" : 0
- }
- ]
- },
- "usedCapacity" : 0,
- "capacity" : 100
- }
- }
+ "scheduler": {
+ "schedulerInfo": {
+ "capacity": 100.0,
+ "maxCapacity": 100.0,
+ "queueName": "root",
+ "queues": {
+ "queue": [
+ {
+ "absoluteCapacity": 10.5,
+ "absoluteMaxCapacity": 50.0,
+ "absoluteUsedCapacity": 0.0,
+ "capacity": 10.5,
+ "maxCapacity": 50.0,
+ "numApplications": 0,
+ "queueName": "a",
+ "queues": {
+ "queue": [
+ {
+ "absoluteCapacity": 3.15,
+ "absoluteMaxCapacity": 25.0,
+ "absoluteUsedCapacity": 0.0,
+ "capacity": 30.000002,
+ "maxCapacity": 50.0,
+ "numApplications": 0,
+ "queueName": "a1",
+ "queues": {
+ "queue": [
+ {
+ "absoluteCapacity": 2.6775,
+ "absoluteMaxCapacity": 25.0,
+ "absoluteUsedCapacity": 0.0,
+ "capacity": 85.0,
+ "maxActiveApplications": 1,
+ "maxActiveApplicationsPerUser": 1,
+ "maxApplications": 267,
+ "maxApplicationsPerUser": 267,
+ "maxCapacity": 100.0,
+ "numActiveApplications": 0,
+ "numApplications": 0,
+ "numContainers": 0,
+ "numPendingApplications": 0,
+ "queueName": "a1a",
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "state": "RUNNING",
+ "type": "capacitySchedulerLeafQueueInfo",
+ "usedCapacity": 0.0,
+ "usedResources": "",
+ "userLimit": 100,
+ "userLimitFactor": 1.0,
+ "users": null
+ },
+ {
+ "absoluteCapacity": 0.47250003,
+ "absoluteMaxCapacity": 25.0,
+ "absoluteUsedCapacity": 0.0,
+ "capacity": 15.000001,
+ "maxActiveApplications": 1,
+ "maxActiveApplicationsPerUser": 1,
+ "maxApplications": 47,
+ "maxApplicationsPerUser": 47,
+ "maxCapacity": 100.0,
+ "numActiveApplications": 0,
+ "numApplications": 0,
+ "numContainers": 0,
+ "numPendingApplications": 0,
+ "queueName": "a1b",
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "state": "RUNNING",
+ "type": "capacitySchedulerLeafQueueInfo",
+ "usedCapacity": 0.0,
+ "usedResources": "",
+ "userLimit": 100,
+ "userLimitFactor": 1.0,
+ "users": null
+ }
+ ]
+ },
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "state": "RUNNING",
+ "usedCapacity": 0.0,
+ "usedResources": ""
+ },
+ {
+ "absoluteCapacity": 7.35,
+ "absoluteMaxCapacity": 50.0,
+ "absoluteUsedCapacity": 0.0,
+ "capacity": 70.0,
+ "maxActiveApplications": 1,
+ "maxActiveApplicationsPerUser": 100,
+ "maxApplications": 735,
+ "maxApplicationsPerUser": 73500,
+ "maxCapacity": 100.0,
+ "numActiveApplications": 0,
+ "numApplications": 0,
+ "numContainers": 0,
+ "numPendingApplications": 0,
+ "queueName": "a2",
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "state": "RUNNING",
+ "type": "capacitySchedulerLeafQueueInfo",
+ "usedCapacity": 0.0,
+ "usedResources": "",
+ "userLimit": 100,
+ "userLimitFactor": 100.0,
+ "users": null
+ }
+ ]
+ },
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "state": "RUNNING",
+ "usedCapacity": 0.0,
+ "usedResources": ""
+ },
+ {
+ "absoluteCapacity": 89.5,
+ "absoluteMaxCapacity": 100.0,
+ "absoluteUsedCapacity": 0.0,
+ "capacity": 89.5,
+ "maxCapacity": 100.0,
+ "numApplications": 2,
+ "queueName": "b",
+ "queues": {
+ "queue": [
+ {
+ "absoluteCapacity": 53.7,
+ "absoluteMaxCapacity": 100.0,
+ "absoluteUsedCapacity": 0.0,
+ "capacity": 60.000004,
+ "maxActiveApplications": 1,
+ "maxActiveApplicationsPerUser": 100,
+ "maxApplications": 5370,
+ "maxApplicationsPerUser": 537000,
+ "maxCapacity": 100.0,
+ "numActiveApplications": 1,
+ "numApplications": 2,
+ "numContainers": 0,
+ "numPendingApplications": 1,
+ "queueName": "b1",
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "state": "RUNNING",
+ "type": "capacitySchedulerLeafQueueInfo",
+ "usedCapacity": 0.0,
+ "usedResources": "",
+ "userLimit": 100,
+ "userLimitFactor": 100.0,
+ "users": {
+ "user": [
+ {
+ "numActiveApplications": 0,
+ "numPendingApplications": 1,
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "username": "user2"
+ },
+ {
+ "numActiveApplications": 1,
+ "numPendingApplications": 0,
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "username": "user1"
+ }
+ ]
+ }
+ },
+ {
+ "absoluteCapacity": 35.3525,
+ "absoluteMaxCapacity": 100.0,
+ "absoluteUsedCapacity": 0.0,
+ "capacity": 39.5,
+ "maxActiveApplications": 1,
+ "maxActiveApplicationsPerUser": 100,
+ "maxApplications": 3535,
+ "maxApplicationsPerUser": 353500,
+ "maxCapacity": 100.0,
+ "numActiveApplications": 0,
+ "numApplications": 0,
+ "numContainers": 0,
+ "numPendingApplications": 0,
+ "queueName": "b2",
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "state": "RUNNING",
+ "type": "capacitySchedulerLeafQueueInfo",
+ "usedCapacity": 0.0,
+ "usedResources": "",
+ "userLimit": 100,
+ "userLimitFactor": 100.0,
+ "users": null
+ },
+ {
+ "absoluteCapacity": 0.4475,
+ "absoluteMaxCapacity": 100.0,
+ "absoluteUsedCapacity": 0.0,
+ "capacity": 0.5,
+ "maxActiveApplications": 1,
+ "maxActiveApplicationsPerUser": 100,
+ "maxApplications": 44,
+ "maxApplicationsPerUser": 4400,
+ "maxCapacity": 100.0,
+ "numActiveApplications": 0,
+ "numApplications": 0,
+ "numContainers": 0,
+ "numPendingApplications": 0,
+ "queueName": "b3",
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "state": "RUNNING",
+ "type": "capacitySchedulerLeafQueueInfo",
+ "usedCapacity": 0.0,
+ "usedResources": "",
+ "userLimit": 100,
+ "userLimitFactor": 100.0,
+ "users": null
+ }
+ ]
+ },
+ "resourcesUsed": {
+ "memory": 0,
+ "vCores": 0
+ },
+ "state": "RUNNING",
+ "usedCapacity": 0.0,
+ "usedResources": ""
+ }
+ ]
+ },
+ "type": "capacityScheduler",
+ "usedCapacity": 0.0
+ }
+ }
}
+---+
@@ -653,48 +744,27 @@ ResourceManager REST API's.
  [XML response example, first part: the root queue context (maxCapacity 100.0, queueName root)
  is unchanged; the old leaf queue "default" is dropped, the parent queue "test" becomes queue "a"
  (capacity 10.5, maxCapacity 50.0) with child queue "a1" (capacity 30.000002, absoluteCapacity 3.15,
  absoluteMaxCapacity 25.0), and usedResources is now reported as "<memory:0, vCores:0>".]
@@ -702,124 +772,206 @@ ResourceManager REST API's.
  [XML response example, continued: leaf queues "a11" and "a12" become "a1a" and "a1b" and, like "a2",
  gain a resourcesUsed element (memory, vCores) and recalculated maxApplications limits; queue "test2"
  is replaced by queue "b" (capacity 89.5, 2 applications) whose leaf queue "b1" lists a users
  collection with "user1" and "user2", each carrying resourcesUsed, numActiveApplications and
  numPendingApplications; leaf queues "b2" and "b3" follow the same layout with no users.]
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index a0eed4838c..4366b387e1 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -33,7 +33,6 @@
     <test.logs>true</test.logs>
     <test.timeout>600000</test.timeout>
     <fork.mode>once</fork.mode>
-    <mr.basedir>${basedir}</mr.basedir>
     <hadoop.component>yarn</hadoop.component>
     <is.hadoop.component>true</is.hadoop.component>
@@ -200,16 +199,6 @@
-        <plugin>
-          <groupId>org.codehaus.mojo</groupId>
-          <artifactId>findbugs-maven-plugin</artifactId>
-          <configuration>
-            <findbugsXmlOutput>true</findbugsXmlOutput>
-            <xmlOutput>true</xmlOutput>
-            <excludeFilterFile>${mr.basedir}/dev-support/findbugs-exclude.xml</excludeFilterFile>
-            <effort>Max</effort>
-          </configuration>
-        </plugin>
        <plugin>
          <groupId>org.apache.rat</groupId>
          <artifactId>apache-rat-plugin</artifactId>