diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index d6f2c2510d..45546f2aec 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -2208,7 +2208,7 @@ public void testDelegationTokensUpdatedInUGI() throws Exception { "hadoop.kms.authentication.delegation-token.renew-interval.sec", "5"); writeConf(confDir, conf); - // Running as a service (e.g. Yarn in practice). + // Running as a service (e.g. YARN in practice). runServer(null, null, confDir, new KMSCallable<Void>() { @Override public Void call() throws Exception { @@ -2223,7 +2223,7 @@ public Void call() throws Exception { final InetSocketAddress kmsAddr = new InetSocketAddress(getKMSUrl().getHost(), getKMSUrl().getPort()); - // Job 1 (e.g. Yarn log aggregation job), with user DT. + // Job 1 (e.g. YARN log aggregation job), with user DT. final Collection<Token<?>> job1Token = new HashSet<>(); doAs("client", new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { @@ -2268,7 +2268,7 @@ public Void run() throws Exception { }); Assert.assertFalse(job1Token.isEmpty()); - // job 2 (e.g. Another Yarn log aggregation job, with user DT. + // job 2 (e.g. Another YARN log aggregation job), with user DT. doAs("client", new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { diff --git a/hadoop-tools/hadoop-archive-logs/src/site/markdown/HadoopArchiveLogs.md b/hadoop-tools/hadoop-archive-logs/src/site/markdown/HadoopArchiveLogs.md index ce9cebaea2..3dc1349e90 100644 --- a/hadoop-tools/hadoop-archive-logs/src/site/markdown/HadoopArchiveLogs.md +++ b/hadoop-tools/hadoop-archive-logs/src/site/markdown/HadoopArchiveLogs.md @@ -21,7 +21,7 @@ Hadoop Archive Logs Guide Overview -------- -For clusters with a lot of Yarn aggregated logs, it can be helpful to combine +For clusters with a lot of YARN aggregated logs, it can be helpful to combine them into hadoop archives in order to reduce the number of small files, and hence the stress on the NameNode. This tool provides an easy way to do this. Aggregated logs in hadoop archives can still be read by the Job History Server @@ -50,7 +50,7 @@ How to Archive Logs to be eligible (default: 20) -noProxy When specified, all processing will be done as the user running this command (or - the Yarn user if DefaultContainerExecutor + the YARN user if DefaultContainerExecutor is in use). When not specified, all processing will be done as the user who owns that application; if the user @@ -86,7 +86,7 @@ The tool works by performing the following procedure: its aggregated log files with the resulting archive. The ``-noProxy`` option makes the tool process everything as the user who is -currently running it, or the Yarn user if DefaultContainerExecutor is in use. +currently running it, or the YARN user if DefaultContainerExecutor is in use. When not specified, all processing will be done by the user who owns that application; if the user running this command is not allowed to impersonate that user, it will fail.
This is useful if you want an admin user to handle all diff --git a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md index d1848e8975..d3f91f4af1 100644 --- a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md +++ b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md @@ -12,10 +12,10 @@ limitations under the License. See accompanying LICENSE file. --> -Yarn Scheduler Load Simulator (SLS) +YARN Scheduler Load Simulator (SLS) =================================== -* [Yarn Scheduler Load Simulator (SLS)](#Yarn_Scheduler_Load_Simulator_SLS) +* [YARN Scheduler Load Simulator (SLS)](#Yarn_Scheduler_Load_Simulator_SLS) * [Overview](#Overview) * [Overview](#Overview) * [Goals](#Goals) @@ -39,11 +39,11 @@ Overview ### Overview -The Yarn scheduler is a fertile area of interest with different implementations, e.g., Fifo, Capacity and Fair schedulers. Meanwhile, several optimizations are also made to improve scheduler performance for different scenarios and workload. Each scheduler algorithm has its own set of features, and drives scheduling decisions by many factors, such as fairness, capacity guarantee, resource availability, etc. It is very important to evaluate a scheduler algorithm very well before we deploy in a production cluster. Unfortunately, currently it is non-trivial to evaluate a scheduler algorithm. Evaluating in a real cluster is always time and cost consuming, and it is also very hard to find a large-enough cluster. Hence, a simulator which can predict how well a scheduler algorithm for some specific workload would be quite useful. +The YARN scheduler is a fertile area of interest with different implementations, e.g., Fifo, Capacity and Fair schedulers. Meanwhile, several optimizations are also made to improve scheduler performance for different scenarios and workloads. Each scheduler algorithm has its own set of features, and drives scheduling decisions by many factors, such as fairness, capacity guarantee, resource availability, etc. It is very important to evaluate a scheduler algorithm very well before we deploy it in a production cluster. Unfortunately, currently it is non-trivial to evaluate a scheduler algorithm. Evaluating in a real cluster is always time and cost consuming, and it is also very hard to find a large-enough cluster. Hence, a simulator which can predict how well a scheduler algorithm works for some specific workload would be quite useful. -The Yarn Scheduler Load Simulator (SLS) is such a tool, which can simulate large-scale Yarn clusters and application loads in a single machine.This simulator would be invaluable in furthering Yarn by providing a tool for researchers and developers to prototype new scheduler features and predict their behavior and performance with reasonable amount of confidence, thereby aiding rapid innovation. +The YARN Scheduler Load Simulator (SLS) is such a tool, which can simulate large-scale YARN clusters and application loads in a single machine. This simulator would be invaluable in furthering YARN by providing a tool for researchers and developers to prototype new scheduler features and predict their behavior and performance with a reasonable amount of confidence, thereby aiding rapid innovation. -The simulator will exercise the real Yarn `ResourceManager` removing the network factor by simulating `NodeManagers` and `ApplicationMasters` via handling and dispatching `NM`/`AMs` heartbeat events from within the same JVM.
To keep tracking of scheduler behavior and performance, a scheduler wrapper will wrap the real scheduler. +The simulator will exercise the real YARN `ResourceManager` removing the network factor by simulating `NodeManagers` and `ApplicationMasters` via handling and dispatching `NM`/`AMs` heartbeat events from within the same JVM. To keep track of scheduler behavior and performance, a scheduler wrapper will wrap the real scheduler. The size of the cluster and the application load can be loaded from configuration files, which are generated from job history files directly by adopting [Apache Rumen](../hadoop-rumen/Rumen.html). @@ -74,7 +74,7 @@ The following figure illustrates the implementation architecture of the simulato ![The architecture of the simulator](images/sls_arch.png) -The simulator takes input of workload traces, or synthetic load distributions and generaters the cluster and applications information. For each NM and AM, the simulator builds a simulator to simulate their running. All NM/AM simulators run in a thread pool. The simulator reuses Yarn Resource Manager, and builds a wrapper out of the scheduler. The Scheduler Wrapper can track the scheduler behaviors and generates several logs, which are the outputs of the simulator and can be further analyzed. +The simulator takes input of workload traces, or synthetic load distributions and generates the cluster and applications information. For each NM and AM, the simulator builds a simulator to simulate their running. All NM/AM simulators run in a thread pool. The simulator reuses the YARN Resource Manager, and builds a wrapper out of the scheduler. The Scheduler Wrapper can track the scheduler behaviors and generates several logs, which are the outputs of the simulator and can be further analyzed. ### Usecases @@ -110,9 +110,9 @@ The following sections will describe how to use the simulator step by step. Befo ### Step 1: Configure Hadoop and the simulator -Before we start, make sure Hadoop and the simulator are configured well. All configuration files for Hadoop and the simulator should be placed in directory `$HADOOP_ROOT/etc/hadoop`, where the `ResourceManager` and Yarn scheduler load their configurations. Directory `$HADOOP_ROOT/share/hadoop/tools/sls/sample-conf/` provides several example configurations, that can be used to start a demo. +Before we start, make sure Hadoop and the simulator are configured well. All configuration files for Hadoop and the simulator should be placed in directory `$HADOOP_ROOT/etc/hadoop`, where the `ResourceManager` and YARN scheduler load their configurations. Directory `$HADOOP_ROOT/share/hadoop/tools/sls/sample-conf/` provides several example configurations that can be used to start a demo. -For configuration of Hadoop and Yarn scheduler, users can refer to Yarn’s website (). +For configuration of Hadoop and the YARN scheduler, users can refer to YARN’s website (). For the simulator, it loads configuration information from file `$HADOOP_ROOT/etc/hadoop/sls-runner.xml`. @@ -244,7 +244,7 @@ The simulator supports two types of input files: the rumen traces and its own in Metrics ------- -The Yarn Scheduler Load Simulator has integrated [Metrics](http://metrics.codahale.com/) to measure the behaviors of critical components and operations, including running applications and containers, cluster available resources, scheduler operation timecost, et al. If the switch `yarn.sls.runner.metrics.switch` is set `ON`, `Metrics` will run and output it logs in `--output-dir` directory specified by users.
Users can track these information during simulator running, and can also analyze these logs after running to evaluate the scheduler performance. +The YARN Scheduler Load Simulator has integrated [Metrics](http://metrics.codahale.com/) to measure the behaviors of critical components and operations, including running applications and containers, cluster available resources, scheduler operation timecost, etc. If the switch `yarn.sls.runner.metrics.switch` is set `ON`, `Metrics` will run and output its logs in the `--output-dir` directory specified by users. Users can track this information while the simulator is running, and can also analyze these logs after the run to evaluate the scheduler performance. ### Real-time Tracking @@ -320,7 +320,7 @@ Appendix ### Resources -[YARN-1021](https://issues.apache.org/jira/browse/YARN-1021) is the main JIRA that introduces Yarn Scheduler Load Simulator to Hadoop Yarn project. +[YARN-1021](https://issues.apache.org/jira/browse/YARN-1021) is the main JIRA that introduces the YARN Scheduler Load Simulator to the Hadoop YARN project. [YARN-6363](https://issues.apache.org/jira/browse/YARN-6363) is the main JIRA that introduces the Synthetic Load Generator to SLS. ### SLS JSON input file format diff --git a/hadoop-yarn-project/hadoop-yarn/README b/hadoop-yarn-project/hadoop-yarn/README index 4e6aaa523e..13835fee22 100644 --- a/hadoop-yarn-project/hadoop-yarn/README +++ b/hadoop-yarn-project/hadoop-yarn/README @@ -55,7 +55,7 @@ Modules ------- YARN consists of multiple modules. The modules are listed below as per the directory structure: -hadoop-yarn-api - Yarn's cross platform external interface +hadoop-yarn-api - YARN's cross-platform external interface hadoop-yarn-common - Utilities which can be used by yarn clients and server diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java index 9e9ec3cd2c..962bbba407 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java @@ -367,7 +367,7 @@ public static ApplicationReport newInstance(ApplicationId applicationId, * Get the AMRM token of the application. *

* The AMRM token is required for AM to RM scheduling operations. For - * managed Application Masters Yarn takes care of injecting it. For unmanaged + * managed Application Masters YARN takes care of injecting it. For unmanaged * Applications Masters, the token must be obtained via this method and set * in the {@link org.apache.hadoop.security.UserGroupInformation} of the * current user. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java index a6bbca7c56..38db60cd9e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java @@ -53,7 +53,7 @@ *

  • * maxAppAttempts. The maximum number of application attempts. * It should be no larger than the global number of max attempts in the - * Yarn configuration. + * YARN configuration. *
  • *
  • * attemptFailuresValidityInterval. The default value is -1. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRuntimeException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRuntimeException.java index 6f8c6ed490..e28dcec2fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRuntimeException.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRuntimeException.java @@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Unstable; -/** Base Yarn Exception. +/** Base YARN Exception. * * NOTE: All derivatives of this exception, which may be thrown by a remote * service, must include a String only constructor for the exception to be diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java index 0c349cc429..c2c262069f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java @@ -35,7 +35,7 @@ * NMTokenCache manages NMTokens required for an Application Master * communicating with individual NodeManagers. *

    - * By default Yarn client libraries {@link AMRMClient} and {@link NMClient} use + * By default, YARN client libraries {@link AMRMClient} and {@link NMClient} use * {@link #getSingleton()} instance of the cache. *

      *
    • diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java index 60e7813e42..26c99e31aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java @@ -229,7 +229,7 @@ public abstract ApplicationReport getApplicationReport(ApplicationId appId) * Get the AMRM token of the application. *

      * The AMRM token is required for AM to RM scheduling operations. For - * managed Application Masters Yarn takes care of injecting it. For unmanaged + * managed Application Masters YARN takes care of injecting it. For unmanaged * Applications Masters, the token must be obtained via this method and set * in the {@link org.apache.hadoop.security.UserGroupInformation} of the * current user. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnUncaughtExceptionHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnUncaughtExceptionHandler.java index 542fb22880..7b4b77408d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnUncaughtExceptionHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnUncaughtExceptionHandler.java @@ -31,7 +31,7 @@ * This class is intended to be installed by calling * {@link Thread#setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler)} * In the main entry point. It is intended to try and cleanly shut down - * programs using the Yarn Event framework. + * programs using the YARN Event framework. * * Note: Right now it only will shut down the program if a Error is caught, but * not any other exception. Anything else is just logged. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java index 9235e7dcaa..27191bed84 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java @@ -38,7 +38,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; /** - * Yarn internal application-related utilities + * YARN internal application-related utilities */ @Private public class Apps { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/TrackingUriPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/TrackingUriPlugin.java index c3def07fd7..d29e52fee8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/TrackingUriPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/TrackingUriPlugin.java @@ -27,7 +27,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; /** - * Plugin to derive a tracking URL from a Yarn Application ID + * Plugin to derive a tracking URL from a YARN Application ID * */ @InterfaceAudience.LimitedPrivate({"MapReduce"}) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java index 9daee330e9..e515321aa4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java @@ -38,8 +38,8 @@ protected YarnVersionInfo() { super("yarn"); } /** - * Get the Yarn version. - * @return the Yarn version string, eg. 
"0.6.3-dev" + * Get the YARN version. + * @return the YARN version string, eg. "0.6.3-dev" */ public static String getVersion() { return YARN_VERSION_INFO._getVersion(); @@ -62,7 +62,7 @@ public static String getBranch() { } /** - * The date that Yarn was compiled. + * The date that YARN was compiled. * @return the compilation date in unix date format */ public static String getDate() { @@ -78,14 +78,14 @@ public static String getUser() { } /** - * Get the subversion URL for the root Yarn directory. + * Get the subversion URL for the root YARN directory. */ public static String getUrl() { return YARN_VERSION_INFO._getUrl(); } /** - * Get the checksum of the source files from which Yarn was + * Get the checksum of the source files from which YARN was * built. **/ public static String getSrcChecksum() { @@ -102,7 +102,7 @@ public static String getBuildVersion(){ public static void main(String[] args) { LOG.debug("version: "+ getVersion()); - System.out.println("Yarn " + getVersion()); + System.out.println("YARN " + getVersion()); System.out.println("Subversion " + getUrl() + " -r " + getRevision()); System.out.println("Compiled by " + getUser() + " on " + getDate()); System.out.println("From source with checksum " + getSrcChecksum()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java index fe2a0a89f2..4bca195cdf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java @@ -21,7 +21,7 @@ *

        *
      • * The {@link org.apache.hadoop.registry.server.services.RegistryAdminService} - * extends the shared Yarn Registry client with registry setup and + * extends the shared YARN Registry client with registry setup and * (potentially asynchronous) administrative actions. *
      • *
      • diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java index 97e484846b..7716a6f717 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java @@ -80,7 +80,7 @@ public static FederationPolicyManager instantiatePolicyManager(String newType) * and configuration as fallback. * * @param queue the queue of the application - * @param conf the Yarn configuration + * @param conf the YARN configuration * @param federationFacade state store facade * @return SubClusterPolicyConfiguration recreated */ @@ -139,7 +139,7 @@ public static SubClusterPolicyConfiguration loadPolicyConfiguration( * * @param queue the queue of the application * @param oldPolicy the previous policy instance (can be null) - * @param conf the Yarn configuration + * @param conf the YARN configuration * @param federationFacade state store facade * @param homeSubClusterId home sub-cluster id * @return FederationAMRMProxyPolicy recreated diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java index 769296b74b..628c7819dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java @@ -187,7 +187,7 @@ public class MockResourceManagerFacade implements ApplicationClientProtocol, final private AtomicInteger applicationCounter = new AtomicInteger(0); // True if the Mock RM is running, false otherwise. - // This property allows us to write tests for specific scenario as Yarn RM + // This property allows us to write tests for specific scenario as YARN RM // down e.g. network issue, failover. private boolean isRunning; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index d12892e325..38eb636f74 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -1716,7 +1716,7 @@ public RestartContainerResponse restartContainer(ContainerId containerId) * @param containerId Container Id. 
* @param autoCommit Auto Commit flag. * @param reInitLaunchContext Target Launch Context. - * @throws YarnException Yarn Exception. + * @throws YarnException YARN Exception. */ public void reInitializeContainer(ContainerId containerId, ContainerLaunchContext reInitLaunchContext, boolean autoCommit) @@ -1743,7 +1743,7 @@ public void reInitializeContainer(ContainerId containerId, * Rollback the last reInitialization, if possible. * @param containerId Container ID. * @return Rollback Response. - * @throws YarnException Yarn Exception. + * @throws YarnException YARN Exception. */ @Override public RollbackResponse rollbackLastReInitialization(ContainerId containerId) @@ -1764,7 +1764,7 @@ public RollbackResponse rollbackLastReInitialization(ContainerId containerId) * Commit last reInitialization after which no rollback will be possible. * @param containerId Container ID. * @return Commit Response. - * @throws YarnException Yarn Exception. + * @throws YarnException YARN Exception. */ @Override public CommitResponse commitLastReInitialization(ContainerId containerId) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java index 1c6385dd04..619a65bcef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java @@ -370,7 +370,7 @@ public void initializeCGroupController(CGroupController controller) throws } // We are working with a pre-mounted contoller - // Make sure that Yarn cgroup hierarchy path exists + // Make sure that YARN cgroup hierarchy path exists initializePreMountedCGroupController(controller); } @@ -378,9 +378,9 @@ public void initializeCGroupController(CGroupController controller) throws * This function is called when the administrator opted * to use a pre-mounted cgroup controller. * There are two options. - * 1. Yarn hierarchy already exists. We verify, whether we have write access + * 1. YARN hierarchy already exists. We verify, whether we have write access * in this case. - * 2. Yarn hierarchy does not exist, yet. We create it in this case. + * 2. YARN hierarchy does not exist, yet. We create it in this case. 
* @param controller the controller being initialized * @throws ResourceHandlerException yarn hierarchy cannot be created or * accessed for any reason diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java index 996fff0de5..0d7c097896 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java @@ -118,7 +118,7 @@ private YarnConfiguration createMountConfiguration() { /** * Create configuration where the cgroups are premounted. - * @param myHierarchy Yarn cgroup + * @param myHierarchy YARN cgroup * @return configuration object */ private Configuration createNoMountConfiguration(String myHierarchy) { @@ -396,7 +396,7 @@ private void testPreMountedControllerInitialization(String myHierarchy) File mtab = createPremountedCgroups(parentDir, false); File mountPoint = new File(parentDir, "cpu"); - // Initialize Yarn classes + // Initialize YARN classes Configuration confNoMount = createNoMountConfiguration(myHierarchy); CGroupsHandlerImpl cGroupsHandler = new CGroupsHandlerImpl(confNoMount, privilegedOperationExecutorMock, mtab.getAbsolutePath()); @@ -555,7 +555,7 @@ public void testRemount() assertTrue("Could not create dirs", new File(newMountPoint, "cpu").mkdirs()); - // Initialize Yarn classes + // Initialize YARN classes Configuration confMount = createMountConfiguration(); confMount.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, parentDir.getAbsolutePath() + Path.SEPARATOR + newMountPointDir); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index 90fa3e4ebc..05dbf1e51a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -129,7 +129,7 @@ public String getHttpAddress() { * Typically this is the 'hostname' reported by the node, but it could be * configured to be 'hostname:port' reported by the node via the * {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} constant. - * The main usecase of this is Yarn minicluster to be able to differentiate + * The main usecase of this is YARN minicluster to be able to differentiate * node manager instances by their port number. * @return Name of the node for scheduling matching decisions. 
*/ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/test/YarnTestDriver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/test/YarnTestDriver.java index 8874ed822d..a28d46e3fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/test/YarnTestDriver.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/test/YarnTestDriver.java @@ -22,7 +22,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.TestZKRMStateStorePerf; /** - * Driver for Yarn tests. + * Driver for YARN tests. * */ public class YarnTestDriver { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java index b8f8a9fd84..07eaf97fd7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java @@ -212,7 +212,7 @@ private SubClusterId getRandomActiveSubCluster( } /** - * Yarn Router forwards every getNewApplication requests to any RM. During + * YARN Router forwards every getNewApplication requests to any RM. During * this operation there will be no communication with the State Store. The * Router will forward the requests to any SubCluster. The Router will retry * to submit the request on #numSubmitRetries different SubClusters. The @@ -431,7 +431,7 @@ public SubmitApplicationResponse submitApplication( } /** - * The Yarn Router will forward to the respective Yarn RM in which the AM is + * The YARN Router will forward to the respective YARN RM in which the AM is * running. * * Possible failures and behaviors: @@ -496,7 +496,7 @@ public KillApplicationResponse forceKillApplication( } /** - * The Yarn Router will forward to the respective Yarn RM in which the AM is + * The YARN Router will forward to the respective YARN RM in which the AM is * running. * * Possible failure: diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java index bfd35c5f76..5adcc62604 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java @@ -228,7 +228,7 @@ protected DefaultRequestInterceptorREST getOrCreateInterceptorForSubCluster( } /** - * Yarn Router forwards every getNewApplication requests to any RM. During + * YARN Router forwards every getNewApplication requests to any RM. 
During * this operation there will be no communication with the State Store. The * Router will forward the requests to any SubCluster. The Router will retry * to submit the request on #numSubmitRetries different SubClusters. The @@ -497,7 +497,7 @@ public Response submitApplication(ApplicationSubmissionContextInfo newApp, } /** - * The Yarn Router will forward to the respective Yarn RM in which the AM is + * The YARN Router will forward to the respective YARN RM in which the AM is * running. *

        * Possible failure: @@ -552,7 +552,7 @@ public AppInfo getApp(HttpServletRequest hsr, String appId, } /** - * The Yarn Router will forward to the respective Yarn RM in which the AM is + * The YARN Router will forward to the respective YARN RM in which the AM is * running. *

        * Possible failures and behaviors: @@ -606,7 +606,7 @@ public Response updateAppState(AppState targetState, HttpServletRequest hsr, } /** - * The Yarn Router will forward the request to all the Yarn RMs in parallel, + * The YARN Router will forward the request to all the YARN RMs in parallel, * after that it will group all the ApplicationReports by the ApplicationId. *

        * Possible failure: @@ -615,8 +615,8 @@ public Response updateAppState(AppState targetState, HttpServletRequest hsr, *

        * Router: the Client will timeout and resubmit the request. *

        - * ResourceManager: the Router calls each Yarn RM in parallel by using one - * thread for each Yarn RM. In case a Yarn RM fails, a single call will + * ResourceManager: the Router calls each YARN RM in parallel by using one + * thread for each YARN RM. In case a YARN RM fails, a single call will * timeout. However the Router will merge the ApplicationReports it got, and * provides a partial list to the client. *

        @@ -692,14 +692,14 @@ public AppsInfo call() { return null; } - // Merge all the application reports got from all the available Yarn RMs + // Merge all the application reports got from all the available YARN RMs return RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), returnPartialReport); } /** - * The Yarn Router will forward to the request to all the SubClusters to find + * The YARN Router will forward to the request to all the SubClusters to find * where the node is running. *

        * Possible failure: @@ -779,7 +779,7 @@ public NodeInfo call() { } /** - * The Yarn Router will forward the request to all the Yarn RMs in parallel, + * The YARN Router will forward the request to all the YARN RMs in parallel, * after that it will remove all the duplicated NodeInfo by using the NodeId. *

        * Possible failure: @@ -788,8 +788,8 @@ public NodeInfo call() { *

        * Router: the Client will timeout and resubmit the request. *

        - * ResourceManager: the Router calls each Yarn RM in parallel by using one - * thread for each Yarn RM. In case a Yarn RM fails, a single call will + * ResourceManager: the Router calls each YARN RM in parallel by using one + * thread for each YARN RM. In case a YARN RM fails, a single call will * timeout. However the Router will use the NodesInfo it got, and provides a * partial list to the client. *

        @@ -850,7 +850,7 @@ public NodesInfo call() { } // Delete duplicate from all the node reports got from all the available - // Yarn RMs. Nodes can be moved from one subclusters to another. In this + // YARN RMs. Nodes can be moved from one subclusters to another. In this // operation they result LOST/RUNNING in the previous SubCluster and // NEW/RUNNING in the new one. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java index 6afecae7c5..9f54582650 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java @@ -57,7 +57,7 @@ public class MockDefaultRequestInterceptorREST LoggerFactory.getLogger(MockDefaultRequestInterceptorREST.class); final private AtomicInteger applicationCounter = new AtomicInteger(0); // True if the Mock RM is running, false otherwise. - // This property allows us to write tests for specific scenario as Yarn RM + // This property allows us to write tests for specific scenario as YARN RM // down e.g. network issue, failover. private boolean isRunning = true; private HashSet applicationMap = new HashSet<>(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java index 3b1247a8c8..27aa916bd3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java @@ -105,11 +105,11 @@ /** *

        - * Embedded Yarn minicluster for testcases that need to interact with a cluster. + * Embedded YARN minicluster for testcases that need to interact with a cluster. *

        *

        * In a real cluster, resource request matching is done using the hostname, and - * by default Yarn minicluster works in the exact same way as a real cluster. + * by default the YARN minicluster works in the exact same way as a real cluster. *

        *

        * If a testcase needs to use multiple nodes and exercise resource request diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java index b0d85274e6..c115b18086 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java @@ -263,7 +263,7 @@ public static String convertApplicationIdToString(ApplicationId appId) { } /** - * @param conf Yarn configuration. Used to see if there is an explicit config + * @param conf YARN configuration. Used to see if there is an explicit config * pointing to the HBase config file to read. It should not be null * or a NullPointerException will be thrown. * @return a configuration with the HBase configuration from the classpath, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java index e1588c115a..f21ff2c37d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java @@ -613,7 +613,7 @@ private boolean handleRedirect(String id, HttpServletRequest req, *

        * Do not remove *

        - * Yarn isn't currently serializing this class, but findbugs + * YARN isn't currently serializing this class, but findbugs * complains in its absence. * * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md index f67f3517fc..e4135ec275 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md @@ -15,9 +15,9 @@ limitations under the License. --> -# Yarn UI +# YARN UI -The Yarn UI is an Ember based web-app that provides visualization of the applications running on the Apache Hadoop YARN framework. +The YARN UI is an Ember based web-app that provides visualization of the applications running on the Apache Hadoop YARN framework. ## Configurations @@ -49,7 +49,7 @@ You will need the following things properly installed on your computer. **Warning: Do not edit the _package.json_ or _bower.json_ files manually. This could make them out-of-sync with the respective lock or shrinkwrap files.** -Yarn UI has replaced NPM with Yarn package manager. And hence Yarn would be used to manage dependencies defined in package.json. +YARN UI has replaced NPM with Yarn package manager. And hence Yarn would be used to manage dependencies defined in package.json. * Please use the Yarn and Bower command-line tools to add new dependencies. And the tool version must be same as those defined in Prerequisites section. * Once any dependency is added: diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/index.html b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/index.html index f727454b25..9fcf0eed40 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/index.html +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/index.html @@ -21,7 +21,7 @@ - YarnUi + YARN diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js index 8ab7ce13a3..acde40c8ea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js @@ -16,7 +16,7 @@ * limitations under the License. */ -module.exports = { // Yarn UI App configurations +module.exports = { // YARN UI App configurations hosts: { localBaseAddress: "", timelineWebAddress: "localhost:8188",