From 03fc6b1bb0f5c0844cd5477ffba43de8a14d4d60 Mon Sep 17 00:00:00 2001
From: Wangda Tan
Date: Fri, 10 Jun 2016 09:51:09 -0700
Subject: [PATCH] YARN-3426. Add jdiff support to YARN. (vinodkv via wangda)
---
.../tools/RootDocProcessor.java | 4 +
.../jdiff/Apache_Hadoop_YARN_API_2.6.0.xml | 13076 +++++++++++++++
.../jdiff/Apache_Hadoop_YARN_API_2.7.2.xml | 13692 ++++++++++++++++
.../jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml | 2427 +++
.../jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml | 2581 +++
.../jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml | 2870 ++++
.../jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml | 3323 ++++
...Apache_Hadoop_YARN_Server_Common_2.6.0.xml | 2059 +++
...Apache_Hadoop_YARN_Server_Common_2.7.2.xml | 1801 ++
.../hadoop-yarn/dev-support/jdiff/Null.java | 20 +
.../hadoop-yarn/hadoop-yarn-api/pom.xml | 2 +
.../hadoop-yarn/hadoop-yarn-client/pom.xml | 2 +
.../hadoop-yarn/hadoop-yarn-common/pom.xml | 2 +
.../hadoop-yarn-server-common/pom.xml | 2 +
hadoop-yarn-project/hadoop-yarn/pom.xml | 129 +
15 files changed, 41990 insertions(+)
create mode 100644 hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.6.0.xml
create mode 100644 hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
create mode 100644 hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml
create mode 100644 hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
create mode 100644 hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml
create mode 100644 hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
create mode 100644 hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.6.0.xml
create mode 100644 hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
create mode 100644 hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Null.java
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
index 8042f17b8d..60c2a6f6e9 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
@@ -127,6 +127,10 @@ public Object invoke(Object proxy, Method method, Object[] args)
return filter(((ClassDoc) target).constructors(true),
ConstructorDoc.class);
}
+ } else {
+ if (methodName.equals("methods")) {
+ return filter(((ClassDoc) target).methods(true), MethodDoc.class);
+ }
}
} else if (target instanceof PackageDoc) {
if (methodName.equals("allClasses")) {
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.6.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.6.0.xml
new file mode 100644
index 0000000000..5d58600a0c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.6.0.xml
@@ -0,0 +1,13076 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The interface used by clients to obtain a new {@link ApplicationId} for
+ submitting new applications.
+
+ The ResourceManager responds with a new, monotonically
+ increasing, {@link ApplicationId} which is used by the client to submit
+ a new application.
+
+ The ResourceManager also responds with details such
+ as maximum resource capabilities in the cluster as specified in
+ {@link GetNewApplicationResponse}.
+
+ @param request request to get a new ApplicationId
+ @return response containing the new ApplicationId to be used
+ to submit an application
+ @throws YarnException
+ @throws IOException
+ @see #submitApplication(SubmitApplicationRequest)]]>
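Editorial aside: a minimal client-side sketch of obtaining a new ApplicationId, using the YarnClient wrapper from hadoop-yarn-client (which drives ApplicationClientProtocol#getNewApplication underneath) rather than the raw protocol; the class name and printed output are illustrative only.

// Sketch: obtaining a new ApplicationId through the YarnClient library.
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NewApplicationIdExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    try {
      // Asks the ResourceManager for a new, monotonically increasing ApplicationId.
      YarnClientApplication app = yarnClient.createApplication();
      ApplicationId appId = app.getNewApplicationResponse().getApplicationId();
      // The same response also carries the maximum resource capability of the cluster.
      System.out.println("New application id: " + appId);
      System.out.println("Max capability: "
          + app.getNewApplicationResponse().getMaximumResourceCapability());
    } finally {
      yarnClient.stop();
    }
  }
}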
+
+
+
+
+
+
+
+ The interface used by clients to submit a new application to the
+ ResourceManager.
+
+ The client is required to provide details such as queue,
+ {@link Resource} required to run the ApplicationMaster,
+ the equivalent of {@link ContainerLaunchContext} for launching
+ the ApplicationMaster etc. via the
+ {@link SubmitApplicationRequest}.
+
+ Currently the ResourceManager sends an immediate (empty)
+ {@link SubmitApplicationResponse} on accepting the submission and throws
+ an exception if it rejects the submission. However, this call needs to be
+ followed by {@link #getApplicationReport(GetApplicationReportRequest)}
+ to make sure that the application gets properly submitted - obtaining a
+ {@link SubmitApplicationResponse} from the ResourceManager doesn't guarantee
+ that the RM 'remembers' this application beyond failover or restart. If an RM
+ failover or RM restart happens before the ResourceManager saves the
+ application's state successfully, the subsequent
+ {@link #getApplicationReport(GetApplicationReportRequest)} will throw
+ an {@link ApplicationNotFoundException}. Clients need to re-submit
+ the application with the same {@link ApplicationSubmissionContext} when
+ they encounter an {@link ApplicationNotFoundException} on the
+ {@link #getApplicationReport(GetApplicationReportRequest)} call.
+
+ During the submission process, the ResourceManager checks whether the
+ application already exists. If so, it simply returns the
+ SubmitApplicationResponse.
+
+ In secure mode, the ResourceManager verifies access to
+ queues etc. before accepting the application submission.
+
+ @param request request to submit a new application
+ @return (empty) response on accepting the submission
+ @throws YarnException
+ @throws IOException
+ @throws InvalidResourceRequestException
+ The exception is thrown when a {@link ResourceRequest} is out of
+ the range of the configured lower and upper resource boundaries.
+ @see #getNewApplication(GetNewApplicationRequest)]]>
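Editorial aside: a sketch of the submission flow described above, again via YarnClient; the application name, queue, AM resource size and shell command are placeholder values, and passing nulls to ContainerLaunchContext.newInstance is a simplification for illustration.

// Sketch: submitting an application and then calling getApplicationReport,
// as the javadoc recommends, to confirm the RM durably accepted it.
import java.util.Collections;
import org.apache.hadoop.yarn.api.records.*;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class SubmitApplicationExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    YarnClientApplication app = yarnClient.createApplication();
    ApplicationSubmissionContext ctx = app.getApplicationSubmissionContext();
    ctx.setApplicationName("sleep-example");
    ctx.setQueue("default");
    ctx.setResource(Resource.newInstance(1024, 1));            // AM container size
    ctx.setAMContainerSpec(ContainerLaunchContext.newInstance( // command that launches the AM
        null, null, Collections.singletonList("sleep 60"), null, null, null));
    ApplicationId appId = yarnClient.submitApplication(ctx);
    // Follow up with getApplicationReport to confirm the RM persisted the submission.
    YarnApplicationState state = yarnClient.getApplicationReport(appId).getYarnApplicationState();
    System.out.println(appId + " is " + state);
    yarnClient.stop();
  }
}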
+
+
+
+
+
+
+
+ The interface used by clients to request the
+ ResourceManager to abort a submitted application.
+
+ The client, via {@link KillApplicationRequest}, provides the
+ {@link ApplicationId} of the application to be aborted.
+
+ In secure mode, the ResourceManager verifies access to the
+ application, queue etc. before terminating the application.
+
+ Currently, the ResourceManager returns an empty response
+ on success and throws an exception on rejecting the request.
+
+ @param request request to abort a submitted application
+ @return ResourceManager returns an empty response
+ on success and throws an exception on rejecting the request
+ @throws YarnException
+ @throws IOException
+ @see #getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
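Editorial aside: a short sketch of aborting an application from a client; the cluster timestamp and id used to build the ApplicationId are placeholders.

// Sketch: asking the ResourceManager to abort a submitted application.
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class KillApplicationExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    ApplicationId appId = ApplicationId.newInstance(1465500000000L, 1); // placeholder id
    yarnClient.killApplication(appId); // empty response on success, exception on rejection
    yarnClient.stop();
  }
}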
+
+
+
+
+
+
+
+ The interface used by clients to get a report of an Application from
+ the ResourceManager.
+
+ The client, via {@link GetApplicationReportRequest}, provides the
+ {@link ApplicationId} of the application.
+
+ In secure mode, the ResourceManager verifies access to the
+ application, queue etc. before accepting the request.
+
+ The ResourceManager responds with a
+ {@link GetApplicationReportResponse} which includes the
+ {@link ApplicationReport} for the application.
+
+ If the user does not have VIEW_APP access then the
+ following fields in the report will be set to stubbed values:
+
+ - host - set to "N/A"
+ - RPC port - set to -1
+ - client token - set to "N/A"
+ - diagnostics - set to "N/A"
+ - tracking URL - set to "N/A"
+ - original tracking URL - set to "N/A"
+ - resource usage report - all values are -1
+
+
+ @param request request for an application report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
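Editorial aside: a sketch that fetches an ApplicationReport and prints the fields that, per the list above, are stubbed when the caller lacks VIEW_APP access; the ApplicationId is a placeholder.

// Sketch: reading an ApplicationReport through YarnClient.
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ApplicationReportExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    ApplicationId appId = ApplicationId.newInstance(1465500000000L, 1); // placeholder id
    ApplicationReport report = yarnClient.getApplicationReport(appId);
    System.out.println("host=" + report.getHost()            // "N/A" without VIEW_APP
        + " rpcPort=" + report.getRpcPort()                   // -1 without VIEW_APP
        + " trackingUrl=" + report.getTrackingUrl()
        + " diagnostics=" + report.getDiagnostics());
    yarnClient.stop();
  }
}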
+
+
+
+
+
+
+
+ The interface used by clients to get metrics about the cluster from
+ the ResourceManager.
+
+ The ResourceManager responds with a
+ {@link GetClusterMetricsResponse} which includes the
+ {@link YarnClusterMetrics} with details such as number of current
+ nodes in the cluster.
+
+ @param request request for cluster metrics
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
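Editorial aside: a minimal sketch of reading cluster metrics through YarnClient, which wraps this call.

// Sketch: fetching YarnClusterMetrics from the ResourceManager.
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ClusterMetricsExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    YarnClusterMetrics metrics = yarnClient.getYarnClusterMetrics();
    System.out.println("NodeManagers in cluster: " + metrics.getNumNodeManagers());
    yarnClient.stop();
  }
}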
+
+
+
+
+
+
+
+ The interface used by clients to get a report of Applications
+ matching the filters defined by {@link GetApplicationsRequest}
+ in the cluster from the ResourceManager.
+
+ The ResourceManager responds with a
+ {@link GetApplicationsResponse} which includes the
+ {@link ApplicationReport} for the applications.
+
+ If the user does not have VIEW_APP access for an
+ application then the corresponding report will be filtered as
+ described in {@link #getApplicationReport(GetApplicationReportRequest)}.
+
+
+ @param request request for report on applications
+ @return report on applications matching the given application types
+ defined in the request
+ @throws YarnException
+ @throws IOException
+ @see GetApplicationsRequest]]>
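Editorial aside: a sketch of listing applications with filters via YarnClient; the application type "MAPREDUCE" and the RUNNING state are illustrative filter values.

// Sketch: listing applications filtered by type and state.
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ListApplicationsExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    List<ApplicationReport> apps = yarnClient.getApplications(
        Collections.singleton("MAPREDUCE"),                  // application types
        EnumSet.of(YarnApplicationState.RUNNING));           // application states
    for (ApplicationReport app : apps) {
      System.out.println(app.getApplicationId() + " " + app.getQueue());
    }
    yarnClient.stop();
  }
}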
+
+
+
+
+
+
+
+ The interface used by clients to get a report of all nodes
+ in the cluster from the ResourceManager.
+
+ The ResourceManager responds with a
+ {@link GetClusterNodesResponse} which includes the
+ {@link NodeReport} for all the nodes in the cluster.
+
+ @param request request for report on all nodes
+ @return report on all nodes
+ @throws YarnException
+ @throws IOException]]>
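Editorial aside: a sketch of fetching NodeReports for nodes in a particular state, using the YarnClient convenience method over this protocol call.

// Sketch: listing running nodes and their capabilities.
import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ClusterNodesExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
    for (NodeReport node : nodes) {
      System.out.println(node.getNodeId() + " capability=" + node.getCapability());
    }
    yarnClient.stop();
  }
}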
+
+
+
+
+
+
+
+ The interface used by clients to get information about queues
+ from the ResourceManager.
+
+ The client, via {@link GetQueueInfoRequest}, can ask for details such
+ as used/total resources, child queues, running applications etc.
+
+ In secure mode, the ResourceManager verifies access before
+ providing the information.
+
+ @param request request to get queue information
+ @return queue information
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ The interface used by clients to get information about queue
+ acls for the current user from the ResourceManager.
+
+
+ The ResourceManager responds with queue acls for all
+ existing queues.
+
+ @param request request to get queue acls for current user
+ @return queue acls for current user
+ @throws YarnException
+ @throws IOException]]>
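Editorial aside: a sketch combining the two previous calls, queue information and the current user's queue ACLs, via YarnClient; the queue name "default" is illustrative.

// Sketch: querying queue information and queue ACLs for the current user.
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class QueueInfoExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    QueueInfo queue = yarnClient.getQueueInfo("default");       // queue name is illustrative
    System.out.println(queue.getQueueName() + " capacity=" + queue.getCapacity()
        + " current=" + queue.getCurrentCapacity());
    for (QueueUserACLInfo acl : yarnClient.getQueueAclsInfo()) {
      System.out.println(acl.getQueueName() + " -> " + acl.getUserAcls());
    }
    yarnClient.stop();
  }
}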
+
+
+
+
+
+
+
+ The interface used by clients to get a delegation token, enabling the
+ containers to talk to the service using those tokens.
+
+ The ResourceManager responds with the delegation
+ {@link Token} that can be used by the client to speak to this
+ service.
+ @param request request to get a delegation token for the client.
+ @return delegation token that can be used to talk to this service
+ @throws YarnException
+ @throws IOException]]>
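Editorial aside: a sketch of fetching an RM delegation token through YarnClient; it assumes a secure (Kerberos-enabled) cluster, and the renewer name is a placeholder.

// Sketch: obtaining an RM delegation token for a given renewer.
import org.apache.hadoop.io.Text;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class DelegationTokenExample {
  public static void main(String[] args) throws Exception {
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new YarnConfiguration());
    yarnClient.start();
    Token token = yarnClient.getRMDelegationToken(new Text("renewer-user")); // renewer is illustrative
    System.out.println("Token kind: " + token.getKind());
    yarnClient.stop();
  }
}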
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of an Application Attempt
+ from the ResourceManager
+
+
+
+ The client, via {@link GetApplicationAttemptReportRequest} provides the
+ {@link ApplicationAttemptId} of the application attempt.
+
+
+
+ In secure mode, the ResourceManager verifies access to
+ the method before accepting the request.
+
+
+
+ The ResourceManager
responds with a
+ {@link GetApplicationAttemptReportResponse} which includes the
+ {@link ApplicationAttemptReport} for the application attempt.
+
+
+
+ If the user does not have VIEW_APP
access then the following
+ fields in the report will be set to stubbed values:
+
+ - host
+ - RPC port
+ - client token
+ - diagnostics - set to "N/A"
+ - tracking URL
+
+
+
+ @param request
+ request for an application attempt report
+ @return application attempt report
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of all Application attempts
+ in the cluster from the ResourceManager
+
+
+
+ The ResourceManager responds with a
+ {@link GetApplicationAttemptsResponse} which includes the
+ {@link ApplicationAttemptReport} for all the application attempts of a
+ specified application.
+
+
+
+ If the user does not have VIEW_APP
access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
+
+
+ @param request
+ request for reports on all application attempts of an application
+ @return reports on all application attempts of an application
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of a Container from the
+ ResourceManager
+
+
+
+ The client, via {@link GetContainerReportRequest} provides the
+ {@link ContainerId} of the container.
+
+
+
+ In secure mode, the ResourceManager verifies access to the
+ method before accepting the request.
+
+
+
+ The ResourceManager
responds with a
+ {@link GetContainerReportResponse} which includes the
+ {@link ContainerReport} for the container.
+
+
+ @param request
+ request for a container report
+ @return container report
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of Containers for an
+ application attempt from the ResourceManager
+
+
+
+ The client, via {@link GetContainersRequest} provides the
+ {@link ApplicationAttemptId} of the application attempt.
+
+
+
+ In secure mode, the ResourceManager verifies access to the
+ method before accepting the request.
+
+
+
+ The ResourceManager
responds with a
+ {@link GetContainersResponse} which includes a list of
+ {@link ContainerReport} for all the containers of a specific application
+ attempt.
+
+
+ @param request
+ request for a list of container reports of an application attempt.
+ @return reports on all containers of an application attempt
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+
+
+
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and concurrency needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+
+
+
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies that
+ the user requests can be fulfilled, and that they respect a sharing policy
+ (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined
+ that the ReservationSubmissionRequest is satisfiable, the
+ {@code ResourceManager} answers with a
+ {@link ReservationSubmissionResponse} that includes a non-null
+ {@link ReservationId}. Upon failure to find a valid allocation, the response
+ is an exception with the reason.
+
+ On application submission the client can use this {@link ReservationId} to
+ obtain access to the reserved resources.
+
+
+
+ The system guarantees that during the time-range specified by the user, the
+ reservationID will correspond to a valid reservation. The amount of
+ capacity dedicated to such a queue can vary over time, depending on the
+ allocation that has been determined, but it is guaranteed to satisfy all
+ the constraints expressed by the user in the
+ {@link ReservationSubmissionRequest}.
+
+
+ @param request the request to submit a new Reservation
+ @return response the {@link ReservationId} on accepting the submission
+ @throws YarnException if the request is invalid or reservation cannot be
+ created successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user that has
+ previously submitted a Reservation renegotiates its allocation.
+
+
+
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationUpdateRequest}. Upon success the previous allocation is
+ substituted by the new one, and on failure (i.e., if the system cannot find
+ a valid allocation for the updated request), the previous allocation
+ remains valid.
+
+ The {@link ReservationId} is not changed, and applications currently
+ running within this reservation will automatically receive the resources
+ based on the new allocation.
+
+
+ @param request to update an existing Reservation (the ReservationRequest
+ should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+ updated successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to remove an existing Reservation.
+
+ Upon deletion of a reservation, applications running with this reservation
+ are automatically downgraded to normal jobs running without any dedicated
+ reservation.
+
+
+ @param request to remove an existing Reservation (the ReservationRequest
+ should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+ deleted successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get node-to-labels mappings in the existing cluster
+
+
+ @param request
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by client to get node labels in the cluster
+
+
+ @param request to get node labels collection of this cluster
+ @return node labels collection of this cluster
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+ The protocol between clients and the ResourceManager
+ to submit/abort jobs and to get information on applications, cluster metrics,
+ nodes, queues and ACLs.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Note: Use $$() method for cross-platform practice i.e. submit an
+ application from a Windows client to a Linux/Unix server or vice versa.
+ ]]>
+
+
+
+
+
+
+
+
+ final
+ i.e. they cannot be modified by the applications.]]>
+
+
+
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of an Application from the
+ ResourceManager
.
+
+
+
+ The client, via {@link GetApplicationReportRequest} provides the
+ {@link ApplicationId} of the application.
+
+
+
+ In secure mode, the ApplicationHistoryServer verifies access to
+ the application, queue etc. before accepting the request.
+
+
+
+ The ApplicationHistoryServer
responds with a
+ {@link GetApplicationReportResponse} which includes the
+ {@link ApplicationReport} for the application.
+
+
+
+ If the user does not have VIEW_APP
access then the following
+ fields in the report will be set to stubbed values:
+
+ - host - set to "N/A"
+ - RPC port - set to -1
+ - client token - set to "N/A"
+ - diagnostics - set to "N/A"
+ - tracking URL - set to "N/A"
+ - original tracking URL - set to "N/A"
+ - resource usage report - all values are -1
+
+
+
+ @param request
+ request for an application report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of all Applications in the
+ cluster from the ApplicationHistoryServer
.
+
+
+
+ The ApplicationHistoryServer
responds with a
+ {@link GetApplicationsResponse} which includes a list of
+ {@link ApplicationReport} for all the applications.
+
+
+
+ If the user does not have VIEW_APP
access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(GetApplicationReportRequest)}.
+
+
+ @param request
+ request for reports on all the applications
+ @return report on applications matching the given application types defined
+ in the request
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of an Application Attempt
+ from the ApplicationHistoryServer
.
+
+
+
+ The client, via {@link GetApplicationAttemptReportRequest} provides the
+ {@link ApplicationAttemptId} of the application attempt.
+
+
+
+ In secure mode, the ApplicationHistoryServer verifies access to
+ the method before accepting the request.
+
+
+
+ The ApplicationHistoryServer
responds with a
+ {@link GetApplicationAttemptReportResponse} which includes the
+ {@link ApplicationAttemptReport} for the application attempt.
+
+
+
+ If the user does not have VIEW_APP
access then the following
+ fields in the report will be set to stubbed values:
+
+ - host
+ - RPC port
+ - client token
+ - diagnostics - set to "N/A"
+ - tracking URL
+
+
+
+ @param request
+ request for an application attempt report
+ @return application attempt report
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of all Application attempts
+ in the cluster from the ApplicationHistoryServer
.
+
+
+
+ The ApplicationHistoryServer
responds with a
+ {@link GetApplicationAttemptsRequest} which includes the
+ {@link ApplicationAttemptReport} for all the applications attempts of a
+ specified application attempt.
+
+
+
+ If the user does not have VIEW_APP
access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationAttemptReport(GetApplicationAttemptReportRequest)}.
+
+
+ @param request
+ request for reports on all application attempts of an application
+ @return reports on all application attempts of an application
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of an Container from the
+ ApplicationHistoryServer
.
+
+
+
+ The client, via {@link GetContainerReportRequest} provides the
+ {@link ContainerId} of the container.
+
+
+
+ In secure mode, the ApplicationHistoryServer verifies access to
+ the method before accepting the request.
+
+
+
+ The ApplicationHistoryServer
responds with a
+ {@link GetContainerReportResponse} which includes the
+ {@link ContainerReport} for the container.
+
+
+ @param request
+ request for a container report
+ @return container report
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get a report of Containers for an
+ application attempt from the ApplicationHistoryServer.
+
+
+
+ The client, via {@link GetContainersRequest} provides the
+ {@link ApplicationAttemptId} of the application attempt.
+
+
+
+ In secure mode, the ApplicationHistoryServer verifies access to
+ the method before accepting the request.
+
+
+
+ The ApplicationHistoryServer
responds with a
+ {@link GetContainersResponse} which includes a list of
+ {@link ContainerReport} for all the containers of a specific application
+ attempt.
+
+
+ @param request
+ request for a list of container reports of an application attempt.
+ @return reports on all containers of an application attempt
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get delegation token, enabling the
+ containers to be able to talk to the service using those tokens.
+
+
+
+ The ApplicationHistoryServer
responds with the delegation
+ token {@link Token} that can be used by the client to speak to this
+ service.
+
+
+ @param request
+ request to get a delegation token for the client.
+ @return delegation token that can be used to talk to this service
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+ The protocol between clients and the ApplicationHistoryServer to
+ get the information of completed applications etc.
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+ The interface used by a new ApplicationMaster to register with
+ the ResourceManager.
+
+
+
+ The ApplicationMaster needs to provide details such as RPC
+ Port, HTTP tracking url etc. as specified in
+ {@link RegisterApplicationMasterRequest}.
+
+
+
+ The ResourceManager responds with critical details such as
+ maximum resource capabilities in the cluster as specified in
+ {@link RegisterApplicationMasterResponse}.
+
+
+ @param request
+ registration request
+ @return registration response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException
+ The exception is thrown when an ApplicationMaster tries to
+ register more than once.
+ @see RegisterApplicationMasterRequest
+ @see RegisterApplicationMasterResponse]]>
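Editorial aside: a sketch of the registration call from inside an ApplicationMaster, using the AMRMClient helper library (which sends RegisterApplicationMasterRequest underneath); the host, port and empty tracking URL are placeholder values.

// Sketch: an ApplicationMaster registering with the ResourceManager.
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RegisterAmExample {
  public static void main(String[] args) throws Exception {
    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(new YarnConfiguration());
    rmClient.start();
    RegisterApplicationMasterResponse response =
        rmClient.registerApplicationMaster("am-host.example.com", 0, "");
    // The response carries the cluster's maximum resource capability, among other things.
    System.out.println("Max capability: " + response.getMaximumResourceCapability());
  }
}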
+
+
+
+
+
+
+
+ The interface used by an ApplicationMaster to notify the
+ ResourceManager about its completion (success or failure).
+
+ The ApplicationMaster has to provide details such as
+ final state, diagnostics (in case of failures) etc. as specified in
+ {@link FinishApplicationMasterRequest}.
+
+ The ResourceManager
responds with
+ {@link FinishApplicationMasterResponse}.
+
+ @param request completion request
+ @return completion response
+ @throws YarnException
+ @throws IOException
+ @see FinishApplicationMasterRequest
+ @see FinishApplicationMasterResponse]]>
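Editorial aside: a sketch of the completion notification via AMRMClient, which builds the FinishApplicationMasterRequest for you; the diagnostics message is a placeholder and the tracking URL is omitted.

// Sketch: an ApplicationMaster unregistering on completion.
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

public class FinishAmExample {
  static void finish(AMRMClient<ContainerRequest> rmClient) throws Exception {
    // Final status, diagnostics and tracking URL, as described above.
    rmClient.unregisterApplicationMaster(
        FinalApplicationStatus.SUCCEEDED, "work complete", null);
    rmClient.stop();
  }
}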
+
+
+
+
+
+
+
+
+ The main interface between an ApplicationMaster and the
+ ResourceManager.
+
+
+
+ The ApplicationMaster uses this interface to provide a list of
+ {@link ResourceRequest} and return unused {@link Container}s allocated to
+ it via {@link AllocateRequest}. Optionally, the
+ ApplicationMaster can also blacklist resources which
+ it doesn't want to use.
+
+
+
+ This also doubles up as a heartbeat to let the
+ ResourceManager
know that the ApplicationMaster
+ is alive. Thus, applications should periodically make this call to be kept
+ alive. The frequency depends on
+ {@link YarnConfiguration#RM_AM_EXPIRY_INTERVAL_MS} which defaults to
+ {@link YarnConfiguration#DEFAULT_RM_AM_EXPIRY_INTERVAL_MS}.
+
+
+
+ The ResourceManager responds with a list of allocated
+ {@link Container}, status of completed containers and headroom information
+ for the application.
+
+
+
+ The ApplicationMaster can use the available headroom
+ (resources) to decide how to utilize allocated resources and make informed
+ decisions about future resource requests.
+
+
+ @param request
+ allocation request
+ @return allocation response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException
+ This exception is thrown when an ApplicationMaster calls allocate
+ without registering first.
+ @throws InvalidResourceBlacklistRequestException
+ This exception is thrown when an application provides an invalid
+ specification for blacklist of resources.
+ @throws InvalidResourceRequestException
+ This exception is thrown when a {@link ResourceRequest} is out of
+ the range of the configured lower and upper limits on the
+ resources.
+ @see AllocateRequest
+ @see AllocateResponse]]>
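Editorial aside: a sketch of the allocate/heartbeat loop using AMRMClient; the container size, priority, progress value and sleep interval are illustrative, and a real AM would also hand the allocated containers to an NMClient.

// Sketch: requesting a container and heartbeating via allocate().
import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

public class AllocateLoopExample {
  static void requestAndAllocate(AMRMClient<ContainerRequest> rmClient) throws Exception {
    // Ask for one 1 GB / 1 vcore container anywhere in the cluster.
    rmClient.addContainerRequest(new ContainerRequest(
        Resource.newInstance(1024, 1), null, null, Priority.newInstance(0)));
    List<Container> allocated;
    do {
      // allocate() doubles as the AM heartbeat; call it periodically even when idle.
      AllocateResponse response = rmClient.allocate(0.1f);
      allocated = response.getAllocatedContainers();
      System.out.println("headroom=" + response.getAvailableResources()
          + " completed=" + response.getCompletedContainersStatuses().size());
      Thread.sleep(1000);
    } while (allocated.isEmpty());
  }
}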
+
+
+
+ The protocol between a live instance of ApplicationMaster
+ and the ResourceManager.
+
+ This is used by the ApplicationMaster to register/unregister
+ and to request and obtain resources in the cluster from the
+ ResourceManager.]]>
+
+
+
+
+
+
+
+
+
+
+
+ The ApplicationMaster provides a list of
+ {@link StartContainerRequest}s to a NodeManager to
+ start {@link Container}s allocated to it using this interface.
+
+
+
+ The ApplicationMaster has to provide details such as allocated
+ resource capability, security tokens (if enabled), command to be executed
+ to start the container, environment for the process, necessary
+ binaries/jar/shared-objects etc. via the {@link ContainerLaunchContext} in
+ the {@link StartContainerRequest}.
+
+
+
+ The NodeManager sends a response via
+ {@link StartContainersResponse} which includes a list of
+ successfully launched {@link Container}s, a
+ containerId-to-exception map for each failed {@link StartContainerRequest} in
+ which the exception indicates per-container errors, and an
+ allServicesMetaData map between the names of auxiliary services and their
+ corresponding meta-data. Note: non-container-specific exceptions will
+ still be thrown by the API method itself.
+
+
+ The ApplicationMaster
can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the to-be-launched or launched containers.
+
+
+ @param request
+ request to start a list of containers
+ @return response including containerIds of all successfully launched
+ containers, a containerId-to-exception map for failed requests and
+ an allServicesMetaData map.
+ @throws YarnException
+ @throws IOException
+ @throws NMNotYetReadyException
+ This exception is thrown when NM starts from scratch but has not
+ yet connected with RM.]]>
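Editorial aside: a sketch of launching an allocated container through NMClient, which wraps startContainers for one container at a time; the shell command is a placeholder and the null arguments to ContainerLaunchContext.newInstance are a simplification.

// Sketch: starting a container on its NodeManager and reading the
// auxiliary-service meta-data returned by the launch.
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class StartContainerExample {
  static void launch(Container container) throws Exception {
    NMClient nmClient = NMClient.createNMClient();
    nmClient.init(new YarnConfiguration());
    nmClient.start();
    ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
        null, null, Collections.singletonList("sleep 30"), null, null, null);
    // Returns the meta-data published by auxiliary services on that NodeManager.
    Map<String, ByteBuffer> serviceMeta = nmClient.startContainer(container, ctx);
    System.out.println("Auxiliary services: " + serviceMeta.keySet());
  }
}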
+
+
+
+
+
+
+
+
+ The ApplicationMaster requests a NodeManager to
+ stop a list of {@link Container}s allocated to it using this
+ interface.
+
+
+
+ The ApplicationMaster
sends a {@link StopContainersRequest}
+ which includes the {@link ContainerId}s of the containers to be stopped.
+
+
+
+ The NodeManager sends a response via
+ {@link StopContainersResponse} which includes a list of {@link ContainerId}s
+ of successfully stopped containers and a containerId-to-exception map for
+ each failed request in which the exception indicates per-container
+ errors. Note: non-container-specific exceptions will still be thrown by
+ the API method itself. The ApplicationMaster can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the containers.
+
+
+ @param request
+ request to stop a list of containers
+ @return response which includes a list of containerIds of successfully
+ stopped containers, a containerId-to-exception map for failed
+ requests.
+ @throws YarnException
+ @throws IOException]]>
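Editorial aside: a short sketch of stopping a previously started container through NMClient, which issues the stop request to the container's NodeManager.

// Sketch: stopping a container on its NodeManager.
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.client.api.NMClient;

public class StopContainerExample {
  static void stop(NMClient nmClient, Container container) throws Exception {
    // The NodeManager reports per-container failures via exceptions.
    nmClient.stopContainer(container.getId(), container.getNodeId());
  }
}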
+
+
+
+
+
+
+
+
+ The API used by the ApplicationMaster to request the current
+ statuses of Containers from the NodeManager.
+
+
+
+ The ApplicationMaster
sends a
+ {@link GetContainerStatusesRequest} which includes the {@link ContainerId}s
+ of all containers whose statuses are needed.
+
+
+
+ The NodeManager responds with
+ {@link GetContainerStatusesResponse} which includes a list of
+ {@link ContainerStatus} of the successfully queried containers and a
+ containerId-to-exception map for each failed request in which the exception
+ indicates per-container errors. Note: non-container-specific
+ exceptions will still be thrown by the API method itself.
+
+
+ @param request
+ request to get ContainerStatus
es of containers with
+ the specified ContainerId
s
+ @return response containing the list of ContainerStatus
of the
+ successfully queried containers and a containerId-to-exception map
+ for failed requests.
+
+ @throws YarnException
+ @throws IOException]]>
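Editorial aside: a sketch of polling one container's status through NMClient, which wraps getContainerStatuses for a single container.

// Sketch: reading a container's status from its NodeManager.
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.client.api.NMClient;

public class ContainerStatusExample {
  static void report(NMClient nmClient, Container container) throws Exception {
    ContainerStatus status =
        nmClient.getContainerStatus(container.getId(), container.getNodeId());
    System.out.println(status.getContainerId() + " state=" + status.getState()
        + " exit=" + status.getExitStatus());
  }
}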
+
+
+
+ The protocol between an ApplicationMaster and a
+ NodeManager to start/stop containers and to get status
+ of running containers.
+
+ If security is enabled the NodeManager verifies that the
+ ApplicationMaster has truly been allocated the container
+ by the ResourceManager and also verifies all interactions such
+ as stopping the container or obtaining status information for the container.
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ response id used to track duplicate responses.
+ @return response id]]>
+
+
+
+
+
+ response id used to track duplicate responses.
+ @param id response id]]>
+
+
+
+
+ current progress of application.
+ @return current progress of application]]>
+
+
+
+
+
+ current progress of application
+ @param progress current progress of application]]>
+
+
+
+
+ ResourceRequest to update the
+ ResourceManager
about the application's resource requirements.
+ @return the list of ResourceRequest
+ @see ResourceRequest]]>
+
+
+
+
+
+ ResourceRequest to update the
+ ResourceManager
about the application's resource requirements.
+ @param resourceRequests list of ResourceRequest
to update the
+ ResourceManager
about the application's
+ resource requirements
+ @see ResourceRequest]]>
+
+
+
+
+ ContainerId of containers being
+ released by the ApplicationMaster
.
+ @return list of ContainerId
of containers being
+ released by the ApplicationMaster
]]>
+
+
+
+
+
+ ContainerId of containers being
+ released by the ApplicationMaster
+ @param releaseContainers list of ContainerId
of
+ containers being released by the
+ ApplicationMaster
]]>
+
+
+
+
+ ResourceBlacklistRequest being sent by the
+ ApplicationMaster
.
+ @return the ResourceBlacklistRequest
being sent by the
+ ApplicationMaster
+ @see ResourceBlacklistRequest]]>
+
+
+
+
+
+ ResourceBlacklistRequest to inform the
+ ResourceManager
about the blacklist additions and removals
+ per the ApplicationMaster
.
+
+ @param resourceBlacklistRequest the ResourceBlacklistRequest
+ to inform the ResourceManager
about
+ the blacklist additions and removals
+ per the ApplicationMaster
+ @see ResourceBlacklistRequest]]>
+
+
+
+
+ ContainerResourceIncreaseRequest being sent by the
+ ApplicationMaster
]]>
+
+
+
+
+
+ ContainerResourceIncreaseRequest to inform the
+ ResourceManager
about some container's resources need to be
+ increased]]>
+
+
+
+ The core request sent by the ApplicationMaster
to the
+ ResourceManager
to obtain resources in the cluster.
+
+ The request includes:
+
+ - A response id to track duplicate responses.
+ - Progress information.
+ -
+ A list of {@link ResourceRequest} to inform the
+
ResourceManager
about the application's
+ resource requirements.
+
+ -
+ A list of unused {@link Container} which are being returned.
+
+
+
+
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ResourceManager needs the
+ ApplicationMaster
to take some action then it will send an
+ AMCommand to the ApplicationMaster
. See AMCommand
+ for details on commands and actions for them.
+ @return AMCommand
if the ApplicationMaster
should
+ take action, null
otherwise
+ @see AMCommand]]>
+
+
+
+
+ last response id.
+ @return last response id]]>
+
+
+
+
+ newly allocated Container
by the
+ ResourceManager
.
+ @return list of newly allocated Container
]]>
+
+
+
+
+ available headroom for resources in the cluster for the
+ application.
+ @return limit of available headroom for resources in the cluster for the
+ application]]>
+
+
+
+
+ completed containers' statuses.
+ @return the list of completed containers' statuses]]>
+
+
+
+
+ updated NodeReport
s. Updates could
+ be changes in health, availability etc of the nodes.
+ @return The delta of updated nodes since the last response]]>
+
+
+
+
+
+
+
+
+
+ Get the description of containers owned by the AM, but requested back by
+ the cluster. Note that the RM may have an inconsistent view of the
+ resources owned by the AM. These messages are advisory, and the AM may
+ elect to ignore them.
+
+
The message is a snapshot of the resources the RM wants back from the AM.
+ While demand persists, the RM will repeat its request; applications should
+ not interpret each message as a request for additional
+ resources on top of previous messages. Resources requested consistently
+ over some duration may be forcibly killed by the RM.
+
+ @return A specification of the resources to reclaim from this AM.]]>
+
+
+
+
+ Get the list of NMTokens required for communicating with NM. New NMTokens
+ issued only if
+
1) AM is receiving first container on underlying NodeManager.
+ OR
+ 2) NMToken master key rolled over in ResourceManager and AM is getting new
+ container on the same underlying NodeManager.
+
AM will receive one NMToken per NM irrespective of the number of containers
+ issued on same NM. AM is expected to store these tokens until issued a
+ new token for the same NM.
]]>
+
+
+
+
+ ResourceManager]]>
+
+
+
+
+ NodeManager]]>
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager to the
+ ApplicationMaster during resource negotiation.
+
+ The response, includes:
+
+ - Response ID to track duplicate responses.
+ -
+ An AMCommand sent by ResourceManager to let the
ApplicationMaster
+ take some actions (resync, shutdown etc.).
+ - A list of newly allocated {@link Container}.
+ - A list of completed {@link Container}s' statuses.
+ -
+ The available headroom for resources in the cluster for the
+ application.
+
+ - A list of nodes whose status has been updated.
+ - The number of available nodes in a cluster.
+ - A description of resources requested back by the cluster
+ - AMRMToken, if AMRMToken has been rolled over
+
+
+
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ final state of the ApplicationMaster
.
+ @return final state of the ApplicationMaster
]]>
+
+
+
+
+
+ final state of the ApplicationMaster
+ @param finalState final state of the ApplicationMaster
]]>
+
+
+
+
+ diagnostic information on application failure.
+ @return diagnostic information on application failure]]>
+
+
+
+
+
+ diagnostic information on application failure.
+ @param diagnostics diagnostic information on application failure]]>
+
+
+
+
+ tracking URL for the ApplicationMaster.
+ If this url contains a scheme then that will be used by the resource manager
+ web application proxy, otherwise it will default to http.
+ @return tracking URL for the ApplicationMaster]]>
+
+
+
+
+
+ final tracking URL for the ApplicationMaster.
+ This is the web-URL to which ResourceManager or web-application proxy will
+ redirect client/users once the application is finished and the
+ ApplicationMaster
is gone.
+
+ If the passed url has a scheme then that will be used by the
+ ResourceManager and web-application proxy, otherwise the scheme will
+ default to http.
+
+
+ Empty, null, "N/A" strings are all valid besides a real URL. In case an url
+ isn't explicitly passed, it defaults to "N/A" on the ResourceManager.
+
+
+ @param url
+ tracking URL for the ApplicationMaster]]>
+
+
+
+ The finalization request sent by the ApplicationMaster to
+ inform the ResourceManager about its completion.
+
+ The final request includes details such:
+
+ - Final state of the
ApplicationMaster
+ -
+ Diagnostic information in case of failure of the
+
ApplicationMaster
+
+ - Tracking URL
+
+
+
+ @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager to an
+ ApplicationMaster on its completion.
+
+
+
+ The response, includes:
+
+ - A flag which indicates that the application has successfully unregistered
+ with the RM and the application can safely stop.
+
+
+ Note: The flag indicates whether the application has successfully
+ unregistered and is safe to stop. The application may stop after the flag is
+ true. If the application stops before the flag is true then the RM may retry
+ the application.
+
+ @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptId of an application attempt.
+
+ @return ApplicationAttemptId
of an application attempt]]>
+
+
+
+
+
+ ApplicationAttemptId of an application attempt
+
+ @param applicationAttemptId
+ ApplicationAttemptId
of an application attempt]]>
+
+
+
+
+ The request sent by a client to the ResourceManager
to get an
+ {@link ApplicationAttemptReport} for an application attempt.
+
+
+
+ The request should include the {@link ApplicationAttemptId} of the
+ application attempt.
+
+
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptReport for the application attempt.
+
+ @return ApplicationAttemptReport
for the application attempt]]>
+
+
+
+
+
+ ApplicationAttemptReport for the application attempt.
+
+ @param applicationAttemptReport
+ ApplicationAttemptReport
for the application attempt]]>
+
+
+
+
+ The response sent by the ResourceManager
to a client requesting
+ an application attempt report.
+
+
+
+ The response includes an {@link ApplicationAttemptReport} which has the
+ details about the particular application attempt
+
+
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of an application
+
+ @return ApplicationId
of an application]]>
+
+
+
+
+
+ ApplicationId of an application
+
+ @param applicationId
+ ApplicationId
of an application]]>
+
+
+
+
+ The request from clients to get a list of application attempt reports of an
+ application from the ResourceManager
.
+
+
+ @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationReport of an application.
+
+ @return a list of ApplicationReport
of an application]]>
+
+
+
+
+
+ ApplicationReport of an application.
+
+ @param applicationAttempts
+ a list of ApplicationReport
of an application]]>
+
+
+
+
+ The response sent by the ResourceManager
to a client requesting
+ a list of {@link ApplicationAttemptReport} for application attempts.
+
+
+
+ The ApplicationAttemptReport
for each application includes the
+ details of an application attempt.
+
+
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the application.
+ @return ApplicationId
of the application]]>
+
+
+
+
+
+ ApplicationId of the application
+ @param applicationId ApplicationId
of the application]]>
+
+
+
+ The request sent by a client to the ResourceManager
to
+ get an {@link ApplicationReport} for an application.
+
+ The request should include the {@link ApplicationId} of the
+ application.
+
+ @see ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)
+ @see ApplicationReport]]>
+
+
+
+
+
+
+
+
+
+ ApplicationReport for the application.
+ @return ApplicationReport
for the application]]>
+
+
+
+ The response sent by the ResourceManager
to a client
+ requesting an application report.
+
+ The response includes an {@link ApplicationReport} which has details such
+ as user, queue, name, host on which the ApplicationMaster
is
+ running, RPC port, tracking URL, diagnostics, start time etc.
+
+ @see ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application types in the cluster from the
+ ResourceManager.
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
+
+ Setting any of the parameters to null, would just disable that
+ filter
+
+ @param scope {@link ApplicationsRequestScope} to filter by
+ @param users list of users to filter by
+ @param queues list of scheduler queues to filter by
+ @param applicationTypes types of applications
+ @param applicationTags application tags to filter by
+ @param applicationStates application states to filter by
+ @param startRange range of application start times to filter by
+ @param finishRange range of application finish times to filter by
+ @param limit number of applications to limit to
+ @return {@link GetApplicationsRequest} to be used with
+ {@link ApplicationClientProtocol#getApplications(GetApplicationsRequest)}]]>
+
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application types in the cluster from the
+ ResourceManager.
+
+
+ @param scope {@link ApplicationsRequestScope} to filter by
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application types in the cluster from the
+ ResourceManager.
+
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application states in the cluster from the
+ ResourceManager.
+
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application types and application states in the cluster from the
+ ResourceManager.
+
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request from clients to get a report of Applications
+ in the cluster from the ResourceManager
.
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+
+
+
+ ApplicationReport for applications.
+ @return ApplicationReport
for applications]]>
+
+
+
+ The response sent by the ResourceManager
to a client
+ requesting an {@link ApplicationReport} for applications.
+
+ The ApplicationReport
for each application includes details
+ such as user, queue, name, host on which the ApplicationMaster
+ is running, RPC port, tracking URL, diagnostics, start time etc.
+
+ @see ApplicationReport
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+ The request sent by clients to get cluster metrics from the
+ ResourceManager
.
+
+ Currently, this is empty.
+
+ @see ApplicationClientProtocol#getClusterMetrics(GetClusterMetricsRequest)]]>
+
+
+
+
+
+
+
+
+
+ YarnClusterMetrics for the cluster.
+ @return YarnClusterMetrics
for the cluster]]>
+
+
+
+ The response sent by the ResourceManager
to a client
+ requesting cluster metrics.
+
+ @see YarnClusterMetrics
+ @see ApplicationClientProtocol#getClusterMetrics(GetClusterMetricsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request from clients to get a report of all nodes
+ in the cluster from the ResourceManager
.
+
+ The request will ask for all nodes in the given {@link NodeState}s.
+
+ @see ApplicationClientProtocol#getClusterNodes(GetClusterNodesRequest)]]>
+
+
+
+
+
+
+
+
+
+ NodeReport for all nodes in the cluster.
+ @return NodeReport
for all nodes in the cluster]]>
+
+
+
+ The response sent by the ResourceManager
to a client
+ requesting a {@link NodeReport} for all nodes.
+
+ The NodeReport
contains per-node information such as
+ available resources, number of containers, tracking url, rack name, health
+ status etc.
+
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(GetClusterNodesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerId of the Container.
+
+ @return ContainerId
of the Container]]>
+
+
+
+
+
+ ContainerId of the container
+
+ @param containerId
+ ContainerId
of the container]]>
+
+
+
+
+ The request sent by a client to the ResourceManager
to get an
+ {@link ContainerReport} for a container.
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerReport for the container.
+
+ @return ContainerReport
for the container]]>
+
+
+
+
+
+
+
+ The response sent by the ResourceManager
to a client requesting
+ a container report.
+
+
+
+ The response includes a {@link ContainerReport} which has details of a
+ container.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptId of an application attempt.
+
+ @return ApplicationAttemptId
of an application attempt]]>
+
+
+
+
+
+ ApplicationAttemptId of an application attempt
+
+ @param applicationAttemptId
+ ApplicationAttemptId
of an application attempt]]>
+
+
+
+
+ The request from clients to get a list of container reports, which belong to
+ an application attempt from the ResourceManager
.
+
+
+ @see ApplicationHistoryProtocol#getContainers(GetContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerReport for all the containers of an
+ application attempt.
+
+ @return a list of ContainerReport
for all the containers of an
+ application attempt]]>
+
+
+
+
+
+ ContainerReport for all the containers of an
+ application attempt.
+
+ @param containers
+ a list of ContainerReport
for all the containers of
+ an application attempt]]>
+
+
+
+
+ The response sent by the ResourceManager
to a client requesting
+ a list of {@link ContainerReport} for containers.
+
+
+
+ The ContainerReport
for each container includes the container
+ details.
+
+
+ @see ContainerReport
+ @see ApplicationHistoryProtocol#getContainers(GetContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerIds of containers for which to obtain
+ the ContainerStatus
.
+
+ @return the list of ContainerId
s of containers for which to
+ obtain the ContainerStatus
.]]>
+
+
+
+
+
+ ContainerIds of containers for which to obtain
+ the ContainerStatus
+
+ @param containerIds
+ a list of ContainerId
s of containers for which to
+ obtain the ContainerStatus
]]>
+
+
+
+
+ The request sent by the ApplicationMaster
to the
+ NodeManager
to get {@link ContainerStatus} of requested
+ containers.
+
+
+ @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)]]>
+
+
+
+
+
+
+
+
+
+ ContainerStatuses of the requested containers.
+
+ @return ContainerStatus
es of the requested containers.]]>
+
+
+
+
+
+
+
+
+
+ The response sent by the NodeManager
to the
+ ApplicationMaster
when asked to obtain the
+ ContainerStatus
of requested containers.
+
+
+ @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request sent by clients to get a new {@link ApplicationId} for
+ submitting an application.
+
+ Currently, this is empty.
+
+ @see ApplicationClientProtocol#getNewApplication(GetNewApplicationRequest)]]>
+
+
+
+
+
+
+
+
+
+ new ApplicationId
allocated by the
+ ResourceManager
.
+ @return new ApplicationId
allocated by the
+ ResourceManager
]]>
+
+
+
+
+ ResourceManager in the cluster.
+ @return maximum capability of allocated resources in the cluster]]>
+
+
+
+ The response sent by the ResourceManager
to the client for
+ a request to get a new {@link ApplicationId} for submitting applications.
+
+ Clients can submit an application with the returned
+ {@link ApplicationId}.
+
+ @see ApplicationClientProtocol#getNewApplication(GetNewApplicationRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ queue name for which to get queue information.
+ @return queue name for which to get queue information]]>
+
+
+
+
+
+ queue name for which to get queue information
+ @param queueName queue name for which to get queue information]]>
+
+
+
+
+ active applications required?
+ @return true
if applications' information is to be included,
+ else false
]]>
+
+
+
+
+
+ active applications?
+ @param includeApplications fetch information about active
+ applications?]]>
+
+
+
+
+ child queues required?
+ @return true
if information about child queues is required,
+ else false
]]>
+
+
+
+
+
+ child queues?
+ @param includeChildQueues fetch information about child queues?]]>
+
+
+
+
+ child queue hierarchy required?
+ @return true
if information about entire hierarchy is
+ required, false
otherwise]]>
+
+
+
+
+
+ child queue hierarchy?
+ @param recursive fetch information on the entire child queue
+ hierarchy?]]>
+
+
+
+ The request sent by clients to get queue information
+ from the ResourceManager
.
+
+ @see ApplicationClientProtocol#getQueueInfo(GetQueueInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+ QueueInfo for the specified queue.
+ @return QueueInfo
for the specified queue]]>
+
+
+
+ The response sent by the ResourceManager
to a client
+ requesting information about queues in the system.
+
+ The response includes a {@link QueueInfo} which has details such as
+ queue name, used/total capacities, running applications, child queues etc
+ .
+
+ @see QueueInfo
+ @see ApplicationClientProtocol#getQueueInfo(GetQueueInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+
+ The request sent by clients to the ResourceManager
to
+ get queue acls for the current user.
+
+ Currently, this is empty.
+
+ @see ApplicationClientProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+ QueueUserACLInfo per queue for the user.
+ @return QueueUserACLInfo
per queue for the user]]>
+
+
+
+ The response sent by the ResourceManager
to clients
+ seeking queue acls for the user.
+
+ The response contains a list of {@link QueueUserACLInfo} which
+ provides information about {@link QueueACL} per queue.
+
+ @see QueueACL
+ @see QueueUserACLInfo
+ @see ApplicationClientProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the application to be aborted.
+ @return ApplicationId
of the application to be aborted]]>
+
+
+
+
+
+
+ The request sent by the client to the ResourceManager
+ to abort a submitted application.
+
+ The request includes the {@link ApplicationId} of the application to be
+ aborted.
+
+ @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager
to the client aborting
+ a submitted application.
+
+
+ The response, includes:
+
+ - A flag which indicates that the process of killing the application is
+ completed or not.
+
+ Note: user is recommended to wait until this flag becomes true, otherwise if
+ the ResourceManager
crashes before the process of killing the
+ application is completed, the ResourceManager
may retry this
+ application on recovery.
+
+
+ @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the application to be moved.
+ @return ApplicationId
of the application to be moved]]>
+
+
+
+
+
+ ApplicationId of the application to be moved.
+ @param appId ApplicationId
of the application to be moved]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request sent by the client to the ResourceManager
+ to move a submitted application to a different queue.
+
+ The request includes the {@link ApplicationId} of the application to be
+ moved and the queue to place it in.
+
+ @see ApplicationClientProtocol#moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest)]]>
+
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager
to the client moving
+ a submitted application to a different queue.
+
+
+ A response without exception means that the move has completed successfully.
+
+
+ @see ApplicationClientProtocol#moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ RegisterApplicationMasterRequest.
+ If port, trackingUrl is not used, use the following default value:
+
+ - port: -1
+ - trackingUrl: null
+
+ The port is allowed to be any integer larger than or equal to -1.
+ @return the new instance of RegisterApplicationMasterRequest
]]>
+
+
+
+
+ host on which the ApplicationMaster
is
+ running.
+ @return host on which the ApplicationMaster
is running]]>
+
+
+
+
+
+ host on which the ApplicationMaster
is
+ running.
+ @param host host on which the ApplicationMaster
+ is running]]>
+
+
+
+
+ RPC port on which the ApplicationMaster
+ is responding.
+ @return the RPC port on which the ApplicationMaster
is
+ responding]]>
+
+
+
+
+
+ RPC port on which the ApplicationMaster
is
+ responding.
+ @param port RPC port on which the ApplicationMaster
is
+ responding]]>
+
+
+
+
+ tracking URL for the ApplicationMaster.
+ If this url contains a scheme then that will be used by the resource manager
+ web application proxy, otherwise it will default to http.
+ @return tracking URL for the ApplicationMaster]]>
+
+
+
+
+
+ tracking URL for the ApplicationMaster while
+ it is running. This is the web-URL to which ResourceManager or
+ web-application proxy will redirect client/users while the application and
+ the ApplicationMaster
are still running.
+
+ If the passed url has a scheme then that will be used by the
+ ResourceManager and web-application proxy, otherwise the scheme will
+ default to http.
+
+
+ Empty, null, "N/A" strings are all valid besides a real URL. In case an url
+ isn't explicitly passed, it defaults to "N/A" on the ResourceManager.
+
+
+ @param trackingUrl
+ tracking URL for the ApplicationMaster]]>
+
+
+
+ The request sent by the ApplicationMaster
to
+ ResourceManager
on registration.
+
+ The registration includes details such as:
+
+ - Hostname on which the AM is running.
+ - RPC Port
+ - Tracking URL
+
+
+
+ @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)]]>
+
+
+
+
+
+
+
+
+
+ ResourceManager in the cluster.
+ @return maximum capability of allocated resources in the cluster]]>
+
+
+
+
+ ApplicationACLs for the application.
+ @return all the ApplicationACL
s]]>
+
+
+
+
+ Get ClientToAMToken master key.
+ The ClientToAMToken master key is sent to ApplicationMaster
+ by ResourceManager
via {@link RegisterApplicationMasterResponse}
+ , used to verify corresponding ClientToAMToken.
]]>
+
+
+
+
+
+
+
+
+
+
+ Get the queue that the application was placed in.]]>
+
+
+
+
+
+ Set the queue that the application was placed in.]]>
+
+
+
+
+
+ Get the list of running containers as viewed by
+ ResourceManager
from previous application attempts.
+
+
+ @return the list of running containers as viewed by
+ ResourceManager
from previous application attempts
+ @see RegisterApplicationMasterResponse#getNMTokensFromPreviousAttempts()]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager
to a new
+ ApplicationMaster
on registration.
+
+ The response contains critical details such as:
+
+ - Maximum capability for allocated resources in the cluster.
+ ApplicationACL
s for the application.
+ - ClientToAMToken master key.
+
+
+
+ @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)]]>
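+
+ As a hedged sketch, the fields described above can be read back from the
+ response obtained during registration (the response variable is assumed to
+ come from a registerApplicationMaster call):
+
+   Resource maxCapability = response.getMaximumResourceCapability();
+   Map<ApplicationAccessType, String> acls = response.getApplicationACLs();
+   ByteBuffer clientToAMMasterKey = response.getClientToAMTokenMasterKey();
+   // Only populated when containers are kept across application attempts.
+   List<Container> previousContainers =
+       response.getContainersFromPreviousAttempts();
+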
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerLaunchContext for the container to be started
+ by the NodeManager
.
+
+ @return ContainerLaunchContext
for the container to be started
+ by the NodeManager
]]>
+
+
+
+
+
+ ContainerLaunchContext for the container to be started
+ by the NodeManager
+ @param context ContainerLaunchContext
for the container to be
+ started by the NodeManager
]]>
+
+
+
+
+ Get the container token to be used for authorization during starting
+ container.
+ Note: {@link NMToken} will be used for authenticating communication with
+ NodeManager.
+ @return the container token to be used for authorization during starting
+ container.
+ @see NMToken
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+
+
+
+
+
+
+ The request sent by the ApplicationMaster
to the
+ NodeManager
to start a container.
+
+ The ApplicationMaster
has to provide details such as
+ allocated resource capability, security tokens (if enabled), command
+ to be executed to start the container, environment for the process,
+ necessary binaries/jar/shared-objects etc. via the
+ {@link ContainerLaunchContext}.
+
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request which contains a list of {@link StartContainerRequest} sent by
+ the ApplicationMaster
to the NodeManager
to
+ start containers.
+
+
+
+ In each {@link StartContainerRequest}, the ApplicationMaster
has
+ to provide details such as allocated resource capability, security tokens (if
+ enabled), command to be executed to start the container, environment for the
+ process, necessary binaries/jar/shared-objects etc. via the
+ {@link ContainerLaunchContext}.
+
+
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
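+
+ For illustration, a sketch of assembling such a batched request (assuming
+ launchContext and containerToken were obtained as described above, and that
+ nmClient is a ContainerManagementProtocol proxy):
+
+   // One StartContainerRequest per container to be launched on this NodeManager.
+   StartContainerRequest start =
+       StartContainerRequest.newInstance(launchContext, containerToken);
+   StartContainersRequest batch =
+       StartContainersRequest.newInstance(Collections.singletonList(start));
+   StartContainersResponse reply = nmClient.startContainers(batch);
+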
+
+
+
+
+
+
+
+
+
+ ContainerId s of the containers that are
+ started successfully.
+
+ @return the list of ContainerId
s of the containers that are
+ started successfully.
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+ Get the meta-data from all auxiliary services running on the
+ NodeManager
.
+
+
+ The meta-data is returned as a Map between the auxiliary service names and
+ their corresponding per service meta-data as an opaque blob
+ ByteBuffer
+
+
+
+ To be able to interpret the per-service meta-data, you should consult the
+ documentation for the Auxiliary-service configured on the NodeManager
+
+
+ @return a Map between the names of auxiliary services and their
+ corresponding meta-data]]>
+
+
+
+
+ The response sent by the NodeManager
to the
+ ApplicationMaster
when asked to start an allocated
+ container.
+
+
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerIds of the containers to be stopped.
+ @return ContainerId
s of containers to be stopped]]>
+
+
+
+
+
+ ContainerIds of the containers to be stopped.
+ @param containerIds ContainerId
s of the containers to be stopped]]>
+
+
+
+ The request sent by the ApplicationMaster
to the
+ NodeManager
to stop containers.
+
+ @see ContainerManagementProtocol#stopContainers(StopContainersRequest)]]>
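+
+ A minimal sketch, assuming a known containerId and a
+ ContainerManagementProtocol proxy named nmClient:
+
+   StopContainersRequest stop =
+       StopContainersRequest.newInstance(Collections.singletonList(containerId));
+   StopContainersResponse reply = nmClient.stopContainers(stop);
+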
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The response sent by the NodeManager
to the
+ ApplicationMaster
when asked to stop allocated
+ containers.
+
+
+ @see ContainerManagementProtocol#stopContainers(StopContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationSubmissionContext for the application.
+ @return ApplicationSubmissionContext
for the application]]>
+
+
+
+
+
+ ApplicationSubmissionContext for the application.
+ @param context ApplicationSubmissionContext
for the
+ application]]>
+
+
+
+ The request sent by a client to submit an application to the
+ ResourceManager
.
+
+ The request, via {@link ApplicationSubmissionContext}, contains
+ details such as queue, {@link Resource} required to run the
+ ApplicationMaster
, the equivalent of
+ {@link ContainerLaunchContext} for launching the
+ ApplicationMaster
etc.
+
+ @see ApplicationClientProtocol#submitApplication(SubmitApplicationRequest)]]>
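+
+ As a sketch (assuming rmClient is an ApplicationClientProtocol proxy and
+ appContext is a fully populated ApplicationSubmissionContext):
+
+   SubmitApplicationRequest submit =
+       SubmitApplicationRequest.newInstance(appContext);
+   rmClient.submitApplication(submit);
+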
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager
to a client on
+ application submission.
+
+ Currently, this is empty.
+
+ @see ApplicationClientProtocol#submitApplication(SubmitApplicationRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the ApplicationAttempId
.
+ @return ApplicationId
of the ApplicationAttempId
]]>
+
+
+
+
+ attempt id of the Application
.
+ @return attempt id
of the Application
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptId
denotes the particular attempt
+ of an ApplicationMaster
for a given {@link ApplicationId}.
+
+ Multiple attempts might be needed to run an application to completion due
+ to temporary failures of the ApplicationMaster, such as hardware
+ failures, connectivity issues etc. on the node on which it was scheduled.
]]>
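+
+ For illustration, an attempt identifier is simply the pair of an
+ {@link ApplicationId} and an attempt number (values below are made up):
+
+   ApplicationId appId = ApplicationId.newInstance(1465574400000L, 7);
+   ApplicationAttemptId firstAttempt = ApplicationAttemptId.newInstance(appId, 1);
+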
+
+
+
+
+
+
+
+
+
+ YarnApplicationAttemptState of the application attempt.
+
+ @return YarnApplicationAttemptState of the application attempt]]>
+
+
+
+
+ RPC port of this attempt ApplicationMaster
.
+
+ @return RPC port of this attempt ApplicationMaster
]]>
+
+
+
+
+ host on which this attempt of
+ ApplicationMaster
is running.
+
+ @return host on which this attempt of
+ ApplicationMaster
is running]]>
+
+
+
+
+ diagnostic information of the application attempt in case
+ of errors.
+
+ @return diagnostic information of the application attempt in case
+ of errors]]>
+
+
+
+
+ tracking url for the application attempt.
+
+ @return tracking url for the application attempt]]>
+
+
+
+
+ original tracking url for the application attempt.
+
+ @return original tracking url for the application attempt]]>
+
+
+
+
+ ApplicationAttemptId of this attempt of the
+ application
+
+ @return ApplicationAttemptId
of the attempt]]>
+
+
+
+
+ ContainerId of AMContainer for this attempt
+
+ @return ContainerId
of the attempt]]>
+
+
+
+
+ ApplicationAttemptReport
is a report of an application attempt.
+
+
+
+ It includes details such as:
+
+ - {@link ApplicationAttemptId} of the application.
+ - Host on which the
ApplicationMaster
of this attempt is
+ running.
+ - RPC port of the
ApplicationMaster
of this attempt.
+ - Tracking URL.
+ - Diagnostic information in case of errors.
+ - {@link YarnApplicationAttemptState} of the application attempt.
+ - {@link ContainerId} of the master Container.
+
+ ]]>
+
+
+
+
+
+
+
+
+
+
+ ApplicationId
+ which is unique for all applications started by a particular instance
+ of the ResourceManager
.
+ @return short integer identifier of the ApplicationId
]]>
+
+
+
+
+ start time of the ResourceManager
which is
+ used to generate globally unique ApplicationId
.
+ @return start time of the ResourceManager
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId
represents the globally unique
+ identifier for an application.
+
+ The globally unique nature of the identifier is achieved by using the
+ cluster timestamp i.e. start-time of the
+ ResourceManager
along with a monotonically increasing counter
+ for the application.
]]>
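+
+ A hedged sketch of how such an identifier is composed (the timestamp below is
+ illustrative; in a real cluster it is the ResourceManager start time):
+
+   ApplicationId appId = ApplicationId.newInstance(1465574400000L, 1);
+   // Typically rendered as "application_1465574400000_0001".
+   String printable = appId.toString();
+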
+
+
+
+
+
+
+
+
+
+ ApplicationId of the application.
+ @return ApplicationId
of the application]]>
+
+
+
+
+ ApplicationAttemptId of the current
+ attempt of the application
+ @return ApplicationAttemptId
of the attempt]]>
+
+
+
+
+ user who submitted the application.
+ @return user who submitted the application]]>
+
+
+
+
+ queue to which the application was submitted.
+ @return queue to which the application was submitted]]>
+
+
+
+
+ name of the application.
+ @return name of the application]]>
+
+
+
+
+ host on which the ApplicationMaster
+ is running.
+ @return host on which the ApplicationMaster
+ is running]]>
+
+
+
+
+ RPC port of the ApplicationMaster
.
+ @return RPC port of the ApplicationMaster
]]>
+
+
+
+
+ client token for communicating with the
+ ApplicationMaster
.
+
+ ClientToAMToken is the security token used by the AMs to verify
+ authenticity of any client
.
+
+
+
+ The ResourceManager
, provides a secure token (via
+ {@link ApplicationReport#getClientToAMToken()}) which is verified by the
+ ApplicationMaster when the client directly talks to an AM.
+
+ @return client token for communicating with the
+ ApplicationMaster
]]>
+
+
+
+
+ YarnApplicationState of the application.
+ @return YarnApplicationState
of the application]]>
+
+
+
+
+ diagnostic information of the application in case of
+ errors.
+ @return diagnostic information of the application in case
+ of errors]]>
+
+
+
+
+ tracking url for the application.
+ @return tracking url for the application]]>
+
+
+
+
+ start time of the application.
+ @return start time of the application]]>
+
+
+
+
+ finish time of the application.
+ @return finish time of the application]]>
+
+
+
+
+ final finish status of the application.
+ @return final finish status of the application]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The AMRM token is required for AM to RM scheduling operations. For
+ managed ApplicationMasters, YARN takes care of injecting it. For unmanaged
+ ApplicationMasters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+
+ The AMRM token will be returned only if all the following conditions are
+ met:
+
+ the requester is the owner of the ApplicationMaster
+ the application master is an unmanaged ApplicationMaster
+ the application master is in ACCEPTED state
+
+ Otherwise this method returns null.
+
+ @return the AM to RM token if available.]]>
+
+
+
+ ApplicationReport
is a report of an application.
+
+ It includes details such as:
+
+ - {@link ApplicationId} of the application.
+ - Applications user.
+ - Application queue.
+ - Application name.
+ - Host on which the
ApplicationMaster
is running.
+ - RPC port of the
ApplicationMaster
.
+ - Tracking URL.
+ - {@link YarnApplicationState} of the application.
+ - Diagnostic information in case of errors.
+ - Start time of the application.
+ - Client {@link Token} of the application (if security is enabled).
+
+
+
+ @see ApplicationClientProtocol#getApplicationReport(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Resource. -1 for invalid/inaccessible reports.
+ @return the used Resource
]]>
+
+
+
+
+ Resource. -1 for invalid/inaccessible reports.
+ @return the reserved Resource
]]>
+
+
+
+
+ Resource. -1 for invalid/inaccessible reports.
+ @return the needed Resource
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the submitted application.
+ @return ApplicationId
of the submitted application]]>
+
+
+
+
+
+ ApplicationId of the submitted application.
+ @param applicationId ApplicationId
of the submitted
+ application]]>
+
+
+
+
+ name.
+ @return application name]]>
+
+
+
+
+
+ name.
+ @param applicationName application name]]>
+
+
+
+
+ queue to which the application is being submitted.
+ @return queue to which the application is being submitted]]>
+
+
+
+
+
+ queue to which the application is being submitted
+ @param queue queue to which the application is being submitted]]>
+
+
+
+
+ Priority of the application.
+ @return Priority
of the application]]>
+
+
+
+
+ ContainerLaunchContext to describe the
+ Container
with which the ApplicationMaster
is
+ launched.
+ @return ContainerLaunchContext
for the
+ ApplicationMaster
container]]>
+
+
+
+
+
+ ContainerLaunchContext to describe the
+ Container
with which the ApplicationMaster
is
+ launched.
+ @param amContainer ContainerLaunchContext
for the
+ ApplicationMaster
container]]>
+
+
+
+
+ YarnApplicationState.
+ Such apps will not be retried by the RM on app attempt failure.
+ The default value is false.
+ @return true if the AM is not managed by the RM]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationMaster for this
+ application. Please note this will be DEPRECATED, use getResource
+ in getAMContainerResourceRequest instead.
+
+ @return the resource required by the ApplicationMaster
for
+ this application.]]>
+
+
+
+
+
+ ApplicationMaster for this
+ application.
+
+ @param resource the resource required by the ApplicationMaster
+ for this application.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ If the flag is true, running containers will not be killed when application
+ attempt fails and these containers will be retrieved by the new application
+ attempt on registration via
+ {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)}.
+
+
+ @param keepContainers
+ the flag which indicates whether to keep containers across
+ application attempts.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ getResource and getPriority of
+ ApplicationSubmissionContext.
+
+ The number of containers and the Priority will be ignored.
+
+ @return ResourceRequest of AM container]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ LogAggregationContext of the application
+
+ @return LogAggregationContext
of the application]]>
+
+
+
+
+
+ LogAggregationContext for the application
+
+ @param logAggregationContext
+ for the application]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationSubmissionContext
represents all of the
+ information needed by the ResourceManager
to launch
+ the ApplicationMaster
for an application.
+
+ It includes details such as:
+
+ - {@link ApplicationId} of the application.
+ - Application user.
+ - Application name.
+ - {@link Priority} of the application.
+ -
+ {@link ContainerLaunchContext} of the container in which the
+
ApplicationMaster
is executed.
+
+ - maxAppAttempts. The maximum number of application attempts.
+ It should be no larger than the global number of max attempts in the
+ Yarn configuration.
+ - attemptFailuresValidityInterval. The default value is -1.
+ When attemptFailuresValidityInterval (in milliseconds) is set to a value > 0,
+ failures that happen outside the validity interval are not counted
+ towards the failure count. If the failure count reaches
+ maxAppAttempts, the application fails.
+
+ - Optional, application-specific {@link LogAggregationContext}
+
+
+
+ @see ContainerLaunchContext
+ @see ApplicationClientProtocol#submitApplication(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest)]]>
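+
+ A minimal sketch of populating such a context (assuming the record factory in
+ org.apache.hadoop.yarn.util.Records is used and amLaunchContext is the
+ ContainerLaunchContext for the ApplicationMaster):
+
+   ApplicationSubmissionContext appContext =
+       Records.newRecord(ApplicationSubmissionContext.class);
+   appContext.setApplicationId(appId);
+   appContext.setApplicationName("example-app");
+   appContext.setQueue("default");
+   appContext.setPriority(Priority.newInstance(0));
+   appContext.setAMContainerSpec(amLaunchContext);
+   appContext.setResource(Resource.newInstance(1024, 1)); // for the AM container
+   appContext.setMaxAppAttempts(2);
+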
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Resource allocated to the container.
+ @return Resource
allocated to the container]]>
+
+
+
+
+ Priority at which the Container
was
+ allocated.
+ @return Priority
at which the Container
was
+ allocated]]>
+
+
+
+
+ ContainerToken for the container.
+ ContainerToken
is the security token used by the framework
+ to verify authenticity of any Container
.
+
+ The ResourceManager
, on container allocation provides a
+ secure token which is verified by the NodeManager
on
+ container launch.
+
+ Applications do not need to care about ContainerToken
, they
+ are transparently handled by the framework - the allocated
+ Container
includes the ContainerToken
.
+
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
+
+ @return ContainerToken
for the container]]>
+
+
+
+ Container
represents an allocated resource in the cluster.
+
+
+ The ResourceManager
is the sole authority to allocate any
+ Container
to applications. The allocated Container
+ is always on a single node and has a unique {@link ContainerId}. It has
+ a specific amount of {@link Resource} allocated.
+
+ It includes details such as:
+
+ - {@link ContainerId} for the container, which is globally unique.
+ -
+ {@link NodeId} of the node on which it is allocated.
+
+ - HTTP uri of the node.
+ - {@link Resource} allocated to the container.
+ - {@link Priority} at which the container was allocated.
+ -
+ Container {@link Token} of the container, used to securely verify
+ authenticity of the allocation.
+
+
+
+
+ Typically, an ApplicationMaster
receives the
+ Container
from the ResourceManager
during
+ resource-negotiation and then talks to the NodeManager
to
+ start/stop containers.
+
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
+ @see ContainerManagementProtocol#stopContainers(org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptId of the application to which the
+ Container
was assigned.
+
+ Note: If containers are kept alive across application attempts via
+ {@link ApplicationSubmissionContext#setKeepContainersAcrossApplicationAttempts(boolean)}
+ the ContainerId does not necessarily contain the currently
+ running application attempt's ApplicationAttemptId. This
+ container may have been allocated by a previously exited application attempt
+ and be managed by the currently running attempt, and thus carry the previous
+ application attempt's ApplicationAttemptId.
+
+
+ @return ApplicationAttemptId
of the application to which the
+ Container
was assigned]]>
+
+
+
+
+ ContainerId,
+ which doesn't include epoch. Note that this method will be marked as
+ deprecated, so please use getContainerId
instead.
+ @return lower 32 bits of identifier of the ContainerId
]]>
+
+
+
+
+ ContainerId. Upper 24 bits are
+ reserved as epoch of cluster, and lower 40 bits are reserved as
+ sequential number of containers.
+ @return identifier of the ContainerId
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerId
represents a globally unique identifier
+ for a {@link Container} in the cluster.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ LocalResource required by the container.
+ @return all LocalResource
required by the container]]>
+
+
+
+
+
+ LocalResource required by the container. All pre-existing
+ Map entries are cleared before adding the new Map
+ @param localResources LocalResource
required by the container]]>
+
+
+
+
+
+ Get application-specific binary service data. This is a map keyed
+ by the name of each {@link AuxiliaryService} that is configured on a
+ NodeManager and each value corresponds to the application-specific data targeted
+ for the keyed {@link AuxiliaryService}.
+
+
+
+ This will be used to initialize this application on the specific
+ {@link AuxiliaryService} running on the NodeManager by calling
+ {@link AuxiliaryService#initializeApplication(ApplicationInitializationContext)}
+
+
+ @return application-specific binary service data]]>
+
+
+
+
+
+
+ Set application-specific binary service data. This is a map keyed
+ by the name of each {@link AuxiliaryService} that is configured on a
+ NodeManager and each value corresponds to the application-specific data targeted
+ for the keyed {@link AuxiliaryService}. All pre-existing Map entries are
+ preserved.
+
+
+ @param serviceData
+ application-specific binary service data]]>
+
+
+
+
+ environment variables for the container.
+ @return environment variables for the container]]>
+
+
+
+
+
+ environment variables for the container. All pre-existing Map
+ entries are cleared before adding the new Map
+ @param environment environment variables for the container]]>
+
+
+
+
+ commands for launching the container.
+ @return the list of commands for launching the container]]>
+
+
+
+
+
+ commands for launching the container. All
+ pre-existing List entries are cleared before adding the new List
+ @param commands the list of commands for launching the container]]>
+
+
+
+
+ ApplicationACLs for the application.
+ @return all the ApplicationACL
s]]>
+
+
+
+
+
+ ApplicationACLs for the application. All pre-existing
+ Map entries are cleared before adding the new Map
+ @param acls ApplicationACL
s for the application]]>
+
+
+
+ ContainerLaunchContext
represents all of the information
+ needed by the NodeManager
to launch a container.
+
+ It includes details such as:
+
+ - {@link ContainerId} of the container.
+ - {@link Resource} allocated to the container.
+ - User to whom the container is allocated.
+ - Security tokens (if security is enabled).
+ -
+ {@link LocalResource} necessary for running the container such
+ as binaries, jar, shared-objects, side-files etc.
+
+ - Optional, application-specific binary service data.
+ - Environment variables for the launched process.
+ - Command to launch the container.
+
+
+
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
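+
+ For illustration, a bare-bones launch context (local resources and environment
+ left empty, security tokens and ACLs omitted) could be built as follows:
+
+   Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
+   Map<String, String> environment = new HashMap<String, String>();
+   List<String> commands =
+       Collections.singletonList("./launch.sh 1>stdout 2>stderr");
+   ContainerLaunchContext launchContext = ContainerLaunchContext.newInstance(
+       localResources, environment, commands,
+       null /* serviceData */, null /* tokens */, null /* application ACLs */);
+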
+
+
+
+
+
+
+
+
+
+ ContainerId of the container.
+
+ @return ContainerId
of the container.]]>
+
+
+
+
+
+
+
+ Resource of the container.
+
+ @return allocated Resource
of the container.]]>
+
+
+
+
+
+
+
+ NodeId where container is running.
+
+ @return allocated NodeId
where container is running.]]>
+
+
+
+
+
+
+
+ Priority of the container.
+
+ @return allocated Priority
of the container.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerState of the container.
+
+ @return final ContainerState
of the container.]]>
+
+
+
+
+
+
+
+ exit status of the container.
+
+ @return final exit status
of the container.]]>
+
+
+
+
+
+
+
+ ContainerReport
is a report of a container.
+
+
+
+ It includes details such as:
+
+ - {@link ContainerId} of the container.
+ - Allocated Resources to the container.
+ - Assigned Node id.
+ - Assigned Priority.
+ - Creation Time.
+ - Finish Time.
+ - Container Exit Status.
+ - {@link ContainerState} of the container.
+ - Diagnostic information in case of errors.
+ - Log URL.
+
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ State of a Container
.]]>
+
+
+
+
+
+
+
+
+
+ ContainerId of the container.
+ @return ContainerId
of the container]]>
+
+
+
+
+ ContainerState of the container.
+ @return ContainerState
of the container]]>
+
+
+
+
+ Get the exit status for the container.
+
+ Note: This is valid only for completed containers i.e. containers
+ with state {@link ContainerState#COMPLETE}.
+ Otherwise, it returns ContainerExitStatus.INVALID.
+
+
+ Containers killed by the framework, either due to being released by
+ the application or being 'lost' due to node failures etc. have a special
+ exit code of ContainerExitStatus.ABORTED.
+
+ When a threshold number of the nodemanager-local-directories or a
+ threshold number of the nodemanager-log-directories becomes bad, the
+ container is not launched and exits with ContainerExitStatus.DISKS_FAILED.
+
+
+ @return exit status for the container]]>
+
+
+
+
+ diagnostic messages for failed containers.
+ @return diagnostic messages for failed containers]]>
+
+
+
+ ContainerStatus
represents the current status of a
+ Container
.
+
+ It provides details such as:
+
+ ContainerId
of the container.
+ ContainerState
of the container.
+ - Exit status of a completed container.
+ - Diagnostic message for a failed container.
+
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+ Application.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ location of the resource to be localized.
+ @return location of the resource to be localized]]>
+
+
+
+
+
+ location of the resource to be localized.
+ @param resource location of the resource to be localized]]>
+
+
+
+
+ size of the resource to be localized.
+ @return size of the resource to be localized]]>
+
+
+
+
+
+ size of the resource to be localized.
+ @param size size of the resource to be localized]]>
+
+
+
+
+ timestamp of the resource to be localized, used
+ for verification.
+ @return timestamp of the resource to be localized]]>
+
+
+
+
+
+ timestamp of the resource to be localized, used
+ for verification.
+ @param timestamp timestamp of the resource to be localized]]>
+
+
+
+
+ LocalResourceType of the resource to be localized.
+ @return LocalResourceType
of the resource to be localized]]>
+
+
+
+
+
+ LocalResourceType of the resource to be localized.
+ @param type LocalResourceType
of the resource to be localized]]>
+
+
+
+
+ LocalResourceVisibility of the resource to be
+ localized.
+ @return LocalResourceVisibility
of the resource to be
+ localized]]>
+
+
+
+
+
+ LocalResourceVisibility of the resource to be
+ localized.
+ @param visibility LocalResourceVisibility
of the resource to be
+ localized]]>
+
+
+
+
+ pattern that should be used to extract entries from the
+ archive (only used when type is PATTERN
).
+ @return pattern that should be used to extract entries from the
+ archive.]]>
+
+
+
+
+
+ pattern that should be used to extract entries from the
+ archive (only used when type is PATTERN
).
+ @param pattern pattern that should be used to extract entries
+ from the archive.]]>
+
+
+
+ LocalResource
represents a local resource required to
+ run a container.
+
+ The NodeManager
is responsible for localizing the resource
+ prior to launching the container.
+
+ Applications can specify {@link LocalResourceType} and
+ {@link LocalResourceVisibility}.
+
+ @see LocalResourceType
+ @see LocalResourceVisibility
+ @see ContainerLaunchContext
+ @see ApplicationSubmissionContext
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
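+
+ A hedged sketch of describing one local resource (jarPath and jarStatus are
+ assumed to be a Path on a shared filesystem and its FileStatus):
+
+   LocalResource appJar = LocalResource.newInstance(
+       ConverterUtils.getYarnUrlFromPath(jarPath),
+       LocalResourceType.FILE,
+       LocalResourceVisibility.APPLICATION,
+       jarStatus.getLen(),                  // size, for verification
+       jarStatus.getModificationTime());    // timestamp, for verification
+   localResources.put("app.jar", appJar);
+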
+
+
+
+
+
+
+
+
+
+
+
+ LocalResourceType
specifies the type
+ of a resource localized by the NodeManager
.
+
+ The type can be one of:
+
+ -
+ {@link #FILE} - Regular file i.e. uninterpreted bytes.
+
+ -
+ {@link #ARCHIVE} - Archive, which is automatically unarchived by the
+
NodeManager
.
+
+ -
+ {@link #PATTERN} - A hybrid between {@link #ARCHIVE} and {@link #FILE}.
+
+
+
+ @see LocalResource
+ @see ContainerLaunchContext
+ @see ApplicationSubmissionContext
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+ LocalResourceVisibility
specifies the visibility
+ of a resource localized by the NodeManager
.
+
+ The visibility can be one of:
+
+ - {@link #PUBLIC} - Shared by all users on the node.
+ -
+ {@link #PRIVATE} - Shared among all applications of the
+ same user on the node.
+
+ -
+ {@link #APPLICATION} - Shared only among containers of the
+ same application on the node.
+
+
+
+
+ @see LocalResource
+ @see ContainerLaunchContext
+ @see ApplicationSubmissionContext
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ LogAggregationContext
represents all of the
+ information needed by the NodeManager
to handle
+ the logs for an application.
+
+ It includes details such as:
+
+ - includePattern. It uses Java Regex to filter the log files
+ which match the defined include pattern and those log files
+ will be uploaded.
+ - excludePattern. It uses Java Regex to filter the log files
+ which match the defined exclude pattern and those log files
+ will not be uploaded. If a log file name matches both the
+ include and the exclude pattern, the file is eventually excluded.
+
+
+
+ @see ApplicationSubmissionContext]]>
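+
+ For illustration, a context that uploads only files ending in ".log" and skips
+ anything containing "tmp" (patterns are plain Java regular expressions):
+
+   LogAggregationContext logContext =
+       LogAggregationContext.newInstance(".*\\.log", ".*tmp.*");
+   appContext.setLogAggregationContext(logContext); // appContext as assumed above
+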
+
+
+
+
+
+
+
+
+
+ NodeManager for which the NMToken
+ is used to authenticate.
+ @return the {@link NodeId} of the NodeManager
for which the
+ NMToken is used to authenticate.]]>
+
+
+
+
+
+
+
+ NodeManager
+ @return the {@link Token} used for authenticating with NodeManager
]]>
+
+
+
+
+
+
+
+
+
+
+
+ The NMToken is used for authenticating communication with
+ NodeManager
+ It is issued by ResourceMananger
when ApplicationMaster
+ negotiates resource with ResourceManager
and
+ validated on NodeManager
side.
+ @see AllocateResponse#getNMTokens()]]>
+
+
+
+
+
+
+
+
+
+
+ hostname of the node.
+ @return hostname of the node]]>
+
+
+
+
+ port for communicating with the node.
+ @return port for communicating with the node]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ NodeId
is the unique identifier for a node.
+
+ It includes the hostname and port to uniquely
+ identify the node. Thus, it is unique across restarts of any
+ NodeManager
.
]]>
+
+
+
+
+
+
+
+
+
+ NodeId of the node.
+ @return NodeId
of the node]]>
+
+
+
+
+ NodeState of the node.
+ @return NodeState
of the node]]>
+
+
+
+
+ http address of the node.
+ @return http address of the node]]>
+
+
+
+
+ rack name for the node.
+ @return rack name for the node]]>
+
+
+
+
+ used Resource
on the node.
+ @return used Resource
on the node]]>
+
+
+
+
+ total Resource
on the node.
+ @return total Resource
on the node]]>
+
+
+
+
+ diagnostic health report of the node.
+ @return diagnostic health report of the node]]>
+
+
+
+
+ last timestamp at which the health report was received.
+ @return last timestamp at which the health report was received]]>
+
+
+
+
+
+
+
+
+ NodeReport
is a summary of runtime information of a
+ node in the cluster.
+
+ It includes details such as:
+
+ - {@link NodeId} of the node.
+ - HTTP Tracking URL of the node.
+ - Rack name for the node.
+ - Used {@link Resource} on the node.
+ - Total available {@link Resource} of the node.
+ - Number of running containers on the node.
+
+
+
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ State of a Node
.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ResourceManager.
+ @see PreemptionContract
+ @see StrictPreemptionContract]]>
+
+
+
+
+
+
+
+
+
+ ApplicationMaster about resources requested back by the
+ ResourceManager
.
+ @see AllocateRequest#setAskList(List)]]>
+
+
+
+
+ ApplicationMaster that may be reclaimed by the
+ ResourceManager
. If the AM prefers a different set of
+ containers, then it may checkpoint or kill containers matching the
+ description in {@link #getResourceRequest}.
+ @return Set of containers at risk if the contract is not met.]]>
+
+
+
+ ResourceManager.
+ The ApplicationMaster
(AM) can satisfy this request according
+ to its own priorities to prevent containers from being forcibly killed by
+ the platform.
+ @see PreemptionMessage]]>
+
+
+
+
+
+
+
+
+
+ ResourceManager]]>
+
+
+
+
+
+
+
+
+ A {@link PreemptionMessage} is part of the RM-AM protocol, and it is used by
+ the RM to specify resources that the RM wants to reclaim from this
+ ApplicationMaster
(AM). The AM receives a {@link
+ StrictPreemptionContract} message encoding which containers the platform may
+ forcibly kill, granting it an opportunity to checkpoint state or adjust its
+ execution plan. The message may also include a {@link PreemptionContract}
+ granting the AM more latitude in selecting which resources to return to the
+ cluster.
+
+
The AM should decode both parts of the message. The {@link
+ StrictPreemptionContract} specifies particular allocations that the RM
+ requires back. The AM can checkpoint containers' state, adjust its execution
+ plan to move the computation, or take no action and hope that conditions that
+ caused the RM to ask for the container will change.
+
+
In contrast, the {@link PreemptionContract} also includes a description of
+ resources with a set of containers. If the AM releases containers matching
+ that profile, then the containers enumerated in {@link
+ PreemptionContract#getContainers()} may not be killed.
+
+
Each preemption message reflects the RM's current understanding of the
+ cluster state, so a request to return N containers may not
+ reflect containers the AM is releasing, recently exited containers the RM has
+ yet to learn about, or new containers allocated before the message was
+ generated. Conversely, an RM may request a different profile of containers in
+ subsequent requests.
+
+
The policy enforced by the RM is part of the scheduler. Generally, only
+ containers that have been requested consistently should be killed, but the
+ details are not specified.
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ QueueACL
enumerates the various ACLs for queues.
+
+
+
+ The ACL is one of:
+
+ - {@link #SUBMIT_APPLICATIONS} - ACL to submit applications to the
+ queue.
+ - {@link #ADMINISTER_QUEUE} - ACL to administer the queue.
+
+
+
+ @see QueueInfo
+ @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+ name of the queue.
+ @return name of the queue]]>
+
+
+
+
+ configured capacity of the queue.
+ @return configured capacity of the queue]]>
+
+
+
+
+ maximum capacity of the queue.
+ @return maximum capacity of the queue]]>
+
+
+
+
+ current capacity of the queue.
+ @return current capacity of the queue]]>
+
+
+
+
+ child queues of the queue.
+ @return child queues of the queue]]>
+
+
+
+
+ running applications of the queue.
+ @return running applications of the queue]]>
+
+
+
+
+ QueueState of the queue.
+ @return QueueState
of the queue]]>
+
+
+
+
+ accessible node labels of the queue.
+ @return accessible node labels
of the queue]]>
+
+
+
+
+ default node label expression of the queue; this takes
+ effect only when the ApplicationSubmissionContext
and
+ ResourceRequest
don't specify their
+ NodeLabelExpression
.
+
+ @return default node label expression
of the queue]]>
+
+
+
+
+
+
+ QueueInfo is a report of the runtime information of the queue.
+
+ It includes information such as:
+
+ - Queue name.
+ - Capacity of the queue.
+ - Maximum capacity of the queue.
+ - Current capacity of the queue.
+ - Child queues.
+ - Running applications.
+ - {@link QueueState} of the queue.
+
+
+
+ @see QueueState
+ @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+ State of a Queue.
+
+ A queue is in one of:
+
+ - {@link #RUNNING} - normal state.
+ - {@link #STOPPED} - not accepting new application submissions.
+
+
+
+ @see QueueInfo
+ @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+ queue name of the queue.
+ @return queue name of the queue]]>
+
+
+
+
+ QueueACL for the given user.
+ @return list of QueueACL
for the given user]]>
+
+
+
+ QueueUserACLInfo
provides information {@link QueueACL} for
+ the given user.
+
+ @see QueueACL
+ @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ start time of the {@code ResourceManager} which is used to
+ generate globally unique {@link ReservationId}.
+
+ @return start time of the {@code ResourceManager}]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {@link ReservationId} represents the globally unique identifier for
+ a reservation.
+
+
+
+ The globally unique nature of the identifier is achieved by using the
+ cluster timestamp i.e. start-time of the {@code ResourceManager}
+ along with a monotonically increasing counter for the reservation.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {@link ReservationRequest} represents the request made by an application to
+ the {@code ResourceManager} to reserve {@link Resource}s.
+
+
+
+ It includes:
+
+ - {@link Resource} required for each request.
+ -
+ Number of containers, of above specifications, which are required by the
+ application.
+ -
+ Concurrency that indicates the gang size of the request.
+
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ memory of the resource.
+ @return memory of the resource]]>
+
+
+
+
+
+ memory of the resource.
+ @param memory memory of the resource]]>
+
+
+
+
+ number of virtual cpu cores of the resource.
+
+ Virtual cores are a unit for expressing CPU parallelism. A node's capacity
+ should be configured with virtual cores equal to its number of physical cores.
+ A container should be requested with the number of cores it can saturate, i.e.
+ the average number of threads it expects to have runnable at a time.
+
+ @return num of virtual cpu cores of the resource]]>
+
+
+
+
+
+ number of virtual cpu cores of the resource.
+
+ Virtual cores are a unit for expressing CPU parallelism. A node's capacity
+ should be configured with virtual cores equal to its number of physical cores.
+ A container should be requested with the number of cores it can saturate, i.e.
+ the average number of threads it expects to have runnable at a time.
+
+ @param vCores number of virtual cpu cores of the resource]]>
+
+
+
+
+
+
+
+
+
+
+ Resource
models a set of computer resources in the
+ cluster.
+
+ Currently it models both memory and CPU.
+
+ The unit for memory is megabytes. CPU is modeled with virtual cores
+ (vcores), a unit for expressing parallelism. A node's capacity should
+ be configured with virtual cores equal to its number of physical cores. A
+ container should be requested with the number of cores it can saturate, i.e.
+ the average number of threads it expects to have runnable at a time.
+
+ Virtual cores take integer values and thus currently CPU-scheduling is
+ very coarse. A complementary axis for CPU requests that represents processing
+ power will likely be added in the future to enable finer-grained resource
+ configuration.
+
+ Typically, applications request Resource
of suitable
+ capability to run their component tasks.
+
+ @see ResourceRequest
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
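+
+ For example, a capability of 2 GB of memory (expressed in megabytes) and
+ 4 virtual cores:
+
+   Resource capability = Resource.newInstance(2048, 4);
+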
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ blacklist of resources
+ for the application.
+
+ @see ResourceRequest
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ host/rack string represents an arbitrary
+ host name.
+
+ @param hostName host/rack on which the allocation is desired
+ @return whether the given host/rack string represents an arbitrary
+ host name]]>
+
+
+
+
+ Priority of the request.
+ @return Priority
of the request]]>
+
+
+
+
+
+ Priority of the request
+ @param priority Priority
of the request]]>
+
+
+
+
+ host/rack) on which the allocation
+ is desired.
+
+ A special value of * signifies that any resource
+ (host/rack) is acceptable.
+
+ @return resource (e.g. host/rack) on which the allocation
+ is desired]]>
+
+
+
+
+
+ host/rack) on which the allocation
+ is desired.
+
+ A special value of * signifies that any resource name
+ (e.g. host/rack) is acceptable.
+
+ @param resourceName (e.g. host/rack) on which the
+ allocation is desired]]>
+
+
+
+
+ Resource capability of the request.
+ @return Resource
capability of the request]]>
+
+
+
+
+
+ Resource capability of the request
+ @param capability Resource
capability of the request]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ResourceRequest. Defaults to true.
+
+ @return whether locality relaxation is enabled with this
+ ResourceRequest
.]]>
+
+
+
+
+
+ For a request at a network hierarchy level, set whether locality can be relaxed
+ to that level and beyond.
+
+
If the flag is off on a rack-level ResourceRequest
,
+ containers at that request's priority will not be assigned to nodes on that
+ request's rack unless requests specifically for those nodes have also been
+ submitted.
+
+
If the flag is off on an {@link ResourceRequest#ANY}-level
+ ResourceRequest
, containers at that request's priority will
+ only be assigned on racks for which specific requests have also been
+ submitted.
+
+
For example, to request a container strictly on a specific node, the
+ corresponding rack-level and any-level requests should have locality
+ relaxation set to false. Similarly, to request a container strictly on a
+ specific rack, the corresponding any-level request should have locality
+ relaxation set to false.
+
+ @param relaxLocality whether locality relaxation is enabled with this
+ ResourceRequest
.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ResourceRequest
represents the request made by an
+ application to the ResourceManager
to obtain various
+ Container
allocations.
+
+ It includes:
+
+ - {@link Priority} of the request.
+ -
+ The name of the machine or rack on which the allocation is
+ desired. A special value of * signifies that
+ any host/rack is acceptable to the application.
+
+ - {@link Resource} required for each request.
+ -
+ Number of containers, of above specifications, which are required
+ by the application.
+
+ -
+ A boolean relaxLocality flag, defaulting to
true
,
+ which tells the ResourceManager
if the application wants
+ locality to be loose (i.e. allows fall-through to rack or any)
+ or strict (i.e. specify hard constraint on resource allocation).
+
+
+
+
+ @see Resource
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
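+
+ A hedged sketch of a request for ten 1 GB / 1 vcore containers anywhere in the
+ cluster, with locality relaxation left at its default of true:
+
+   ResourceRequest anyNode = ResourceRequest.newInstance(
+       Priority.newInstance(0),
+       ResourceRequest.ANY,             // "*": any host or rack is acceptable
+       Resource.newInstance(1024, 1),   // capability of each container
+       10,                              // number of containers
+       true);                           // relaxLocality
+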
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationMaster that may be reclaimed by the
+ ResourceManager
.
+ @return the set of {@link ContainerId} to be preempted.]]>
+
+
+
+ ApplicationMaster (AM)
+ may attempt to checkpoint work or adjust its execution plan to accommodate
+ it. In contrast to {@link PreemptionContract}, the AM has no flexibility in
+ selecting which resources to return to the cluster.
+ @see PreemptionMessage]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Token
is the security entity used by the framework
+ to verify authenticity of any resource.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ URL
represents a serializable {@link java.net.URL}.]]>
+
+
+
+
+
+
+
+
+
+
+
+ RMAppAttempt.]]>
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationMaster.]]>
+
+
+
+
+
+
+
+
+
+ NodeManagers in the cluster.
+ @return number of NodeManager
s in the cluster]]>
+
+
+
+ YarnClusterMetrics
represents cluster metrics.
+
+ Currently only number of NodeManager
s is provided.
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ This class contains the information about a timeline domain, which is used
+ by a user to host a number of timeline entities, isolating them from others.
+ The user can also define the reader and writer users/groups for the
+ domain, which are used to control access to its entities.
+
+
+
+ The reader and writer users/groups pattern that the user can supply is the
+ same as what AccessControlList
takes.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The class that contains the meta information of some conceptual entity
+ and its related events. The entity can be an application, an application
+ attempt, a container or any other user-defined object.
+
+
+
+ Primary filters will be used to index the entities in
+ TimelineStore
, so users should carefully choose the
+ information they want to store as the primary filters. The rest can be
+ stored as other information.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ name property as a
+ InetSocketAddress
. On a HA cluster,
+ this fetches the address corresponding to the RM identified by
+ {@link #RM_HA_ID}.
+ @param name property name.
+ @param defaultAddress the default value
+ @param defaultPort the default port
+ @return InetSocketAddress]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Default platform-specific CLASSPATH for YARN applications. A
+ comma-separated list of CLASSPATH entries constructed based on the client
+ OS environment expansion syntax.
+
+
+ Note: Use {@link DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH} for
+ cross-platform use, i.e. submitting an application from a Windows client to
+ a Linux/Unix server or vice versa.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The information is passed along to applications via
+ {@link StartContainersResponse#getAllServicesMetaData()} that is returned by
+ {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
+
+
+ @return meta-data for this service that should be made available to
+ applications.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
new file mode 100644
index 0000000000..ff01b262d2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_2.7.2.xml
@@ -0,0 +1,13692 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The interface used by clients to obtain a new {@link ApplicationId} for
+ submitting new applications.
+
+ The ResourceManager
responds with a new, monotonically
+ increasing, {@link ApplicationId} which is used by the client to submit
+ a new application.
+
+ The ResourceManager
also responds with details such
+ as maximum resource capabilities in the cluster as specified in
+ {@link GetNewApplicationResponse}.
+
+ @param request request to get a new ApplicationId
+ @return response containing the new ApplicationId
to be used
+ to submit an application
+ @throws YarnException
+ @throws IOException
+ @see #submitApplication(SubmitApplicationRequest)]]>
+
+
+
+
+
+
+
+ The interface used by clients to submit a new application to the
+ ResourceManager.
+
+ The client is required to provide details such as queue,
+ {@link Resource} required to run the ApplicationMaster
,
+ the equivalent of {@link ContainerLaunchContext} for launching
+ the ApplicationMaster
etc. via the
+ {@link SubmitApplicationRequest}.
+
+ Currently the ResourceManager
sends an immediate (empty)
+ {@link SubmitApplicationResponse} on accepting the submission and throws
+ an exception if it rejects the submission. However, this call needs to be
+ followed by {@link #getApplicationReport(GetApplicationReportRequest)}
+ to make sure that the application gets properly submitted - obtaining a
+ {@link SubmitApplicationResponse} from ResourceManager doesn't guarantee
+ that RM 'remembers' this application beyond failover or restart. If RM
+ failover or RM restart happens before ResourceManager saves the
+ application's state successfully, the subsequent
+ {@link #getApplicationReport(GetApplicationReportRequest)} will throw
+ an {@link ApplicationNotFoundException}. Clients need to re-submit
+ the application with the same {@link ApplicationSubmissionContext} when
+ it encounters the {@link ApplicationNotFoundException} on the
+ {@link #getApplicationReport(GetApplicationReportRequest)} call.
+
+ During the submission process, it checks whether the application
+ already exists. If the application exists, it will simply return
+ SubmitApplicationResponse
+
+ In secure mode, the ResourceManager
verifies access to
+ queues etc. before accepting the application submission.
+
+ @param request request to submit a new application
+ @return (empty) response on accepting the submission
+ @throws YarnException
+ @throws IOException
+ @see #getNewApplication(GetNewApplicationRequest)]]>
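+
+ As a sketch of the recommended pattern (assuming rmClient is an
+ ApplicationClientProtocol proxy and appContext is the submission context kept
+ by the client for possible re-submission):
+
+   rmClient.submitApplication(SubmitApplicationRequest.newInstance(appContext));
+   GetApplicationReportRequest reportRequest =
+       GetApplicationReportRequest.newInstance(appContext.getApplicationId());
+   try {
+     ApplicationReport report =
+         rmClient.getApplicationReport(reportRequest).getApplicationReport();
+   } catch (ApplicationNotFoundException e) {
+     // RM lost the submission across failover/restart: re-submit with the
+     // same ApplicationSubmissionContext, as described above.
+   }
+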
+
+
+
+
+
+
+
+ The interface used by clients to request the
+ ResourceManager
to abort submitted application.
+
+ The client, via {@link KillApplicationRequest} provides the
+ {@link ApplicationId} of the application to be aborted.
+
+ In secure mode, the ResourceManager
verifies access to the
+ application, queue etc. before terminating the application.
+
+ Currently, the ResourceManager
returns an empty response
+ on success and throws an exception on rejecting the request.
+
+ @param request request to abort a submitted application
+ @return ResourceManager
returns an empty response
+ on success and throws an exception on rejecting the request
+ @throws YarnException
+ @throws IOException
+ @see #getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+
+
+
+
+
+
+
+ The interface used by clients to get metrics about the cluster from
+ the ResourceManager
.
+
+ The ResourceManager
responds with a
+ {@link GetClusterMetricsResponse} which includes the
+ {@link YarnClusterMetrics} with details such as number of current
+ nodes in the cluster.
+
+ @param request request for cluster metrics
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ The interface used by clients to get a report of all nodes
+ in the cluster from the ResourceManager
.
+
+ The ResourceManager
responds with a
+ {@link GetClusterNodesResponse} which includes the
+ {@link NodeReport} for all the nodes in the cluster.
+
+ @param request request for report on all nodes
+ @return report on all nodes
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ The interface used by clients to get information about queues
+ from the ResourceManager
.
+
+ The client, via {@link GetQueueInfoRequest}, can ask for details such
+ as used/total resources, child queues, running applications etc.
+
+ In secure mode, the ResourceManager
verifies access before
+ providing the information.
+
+ @param request request to get queue information
+ @return queue information
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ The interface used by clients to get information about queue
+ acls for current user from the ResourceManager
.
+
+
+ The ResourceManager
responds with queue acls for all
+ existing queues.
+
+ @param request request to get queue acls for current user
+ @return queue acls for current user
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+
+
+
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and concurrency needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+
+
+
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies that
+ the user's requests can be fulfilled, and that they respect a sharing policy
+ (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined
+ that the ReservationSubmissionRequest is satisfiable, the
+ {@code ResourceManager} answers with a
+ {@link ReservationSubmissionResponse} that includes a non-null
+ {@link ReservationId}. Upon failure to find a valid allocation, the response
+ is an exception with the reason.
+
+ On application submission the client can use this {@link ReservationId} to
+ obtain access to the reserved resources.
+
+
+
+ The system guarantees that during the time-range specified by the user, the
+ reservationId will correspond to a valid reservation. The amount of
+ capacity dedicated to such a queue can vary over time, depending on the
+ allocation that has been determined, but it is guaranteed to satisfy all
+ the constraints expressed by the user in the
+ {@link ReservationSubmissionRequest}.
+
+
+ @param request the request to submit a new Reservation
+ @return response the {@link ReservationId} on accepting the submission
+ @throws YarnException if the request is invalid or reservation cannot be
+ created successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user that has
+ previously submitted a Reservation requests to update it.
+
+
+
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationUpdateRequest}. Upon success the previous allocation is
+ substituted by the new one, and on failure (i.e., if the system cannot find
+ a valid allocation for the updated request), the previous allocation
+ remains valid.
+
+ The {@link ReservationId} is not changed, and applications currently
+ running within this reservation will automatically receive the resources
+ based on the new allocation.
+
+
+ @param request to update an existing Reservation (the ReservationRequest
+ should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+ updated successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to remove an existing Reservation.
+
+ Upon deletion of a reservation, applications running with this reservation
+ are automatically downgraded to normal jobs running without any dedicated
+ reservation.
+
+
+ @param request to remove an existing Reservation (the ReservationRequest
+ should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+ deleted successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get node-to-labels mappings in the existing cluster.
+
+
+ @param request
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get labels-to-nodes mappings
+ in the existing cluster.
+
+
+ @param request
+ @return labels to nodes mappings
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get node labels in the cluster.
+
+
+ @param request to get node labels collection of this cluster
+ @return node labels collection of this cluster
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+ The protocol between clients and the ResourceManager
+ to submit/abort jobs and to get information on applications, cluster metrics,
+ nodes, queues and ACLs.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Note: Use the $$() method for cross-platform compatibility, i.e. to submit an
+ application from a Windows client to a Linux/Unix server or vice versa.
+ ]]>
+
+
+
+
+
+
+
+
+ final
+ i.e. they cannot be modified by the applications.]]>
+
+
+
+
+
+
+
+
+ The protocol between clients and the ApplicationHistoryServer to
+ get the information of completed applications etc.
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+ The interface used by a new ApplicationMaster to register with
+ the ResourceManager.
+
+
+
+ The ApplicationMaster needs to provide details such as RPC
+ Port, HTTP tracking url etc. as specified in
+ {@link RegisterApplicationMasterRequest}.
+
+
+
+ The ResourceManager responds with critical details such as
+ maximum resource capabilities in the cluster as specified in
+ {@link RegisterApplicationMasterResponse}.
+
+
+ @param request
+ registration request
+ @return registration response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException
+ The exception is thrown when an ApplicationMaster tries to
+ register more than once.
+ @see RegisterApplicationMasterRequest
+ @see RegisterApplicationMasterResponse]]>
+
+
+
+
+
+
+
+ The interface used by an ApplicationMaster to notify the
+ ResourceManager about its completion (success or failure).
+
+ The ApplicationMaster has to provide details such as
+ final state, diagnostics (in case of failures) etc. as specified in
+ {@link FinishApplicationMasterRequest}.
+
+ The ResourceManager responds with
+ {@link FinishApplicationMasterResponse}.
+
+ @param request completion request
+ @return completion response
+ @throws YarnException
+ @throws IOException
+ @see FinishApplicationMasterRequest
+ @see FinishApplicationMasterResponse]]>
+
+
+
+
+
+
+
+
+ The main interface between an ApplicationMaster and the
+ ResourceManager.
+
+
+
+ The ApplicationMaster uses this interface to provide a list of
+ {@link ResourceRequest} and returns unused {@link Container} allocated to
+ it via {@link AllocateRequest}. Optionally, the
+ ApplicationMaster can also blacklist resources which
+ it doesn't want to use.
+
+
+
+ This also doubles up as a heartbeat to let the
+ ResourceManager know that the ApplicationMaster
+ is alive. Thus, applications should periodically make this call to be kept
+ alive. The frequency depends on
+ {@link YarnConfiguration#RM_AM_EXPIRY_INTERVAL_MS} which defaults to
+ {@link YarnConfiguration#DEFAULT_RM_AM_EXPIRY_INTERVAL_MS}.
+
+
+
+ The ResourceManager responds with a list of allocated
+ {@link Container}, the status of completed containers and headroom information
+ for the application.
+
+
+
+ The ApplicationMaster can use the available headroom
+ (resources) to decide how to utilize allocated resources and make informed
+ decisions about future resource requests.
+
+
+ @param request
+ allocation request
+ @return allocation response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException
+ This exception is thrown when an ApplicationMaster calls allocate
+ without registering first.
+ @throws InvalidResourceBlacklistRequestException
+ This exception is thrown when an application provides an invalid
+ specification for blacklist of resources.
+ @throws InvalidResourceRequestException
+ This exception is thrown when a {@link ResourceRequest} is out of
+ the range of the configured lower and upper limits on the
+ resources.
+ @see AllocateRequest
+ @see AllocateResponse]]>
+
+
+
+ The protocol between a live instance of ApplicationMaster
+ and the ResourceManager.
+
+ This is used by the ApplicationMaster to register/unregister
+ and to request and obtain resources in the cluster from the
+ ResourceManager.
+ ]]>
+
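+ For illustration, a minimal sketch of the register/allocate/finish cycle using the
+ AMRMClient convenience library rather than the raw protocol; the host name, tracking URL,
+ resource sizes and progress values below are hypothetical placeholders.
+
+   import org.apache.hadoop.conf.Configuration;
+   import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+   import org.apache.hadoop.yarn.api.records.*;
+   import org.apache.hadoop.yarn.client.api.AMRMClient;
+   import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+   import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+   Configuration conf = new YarnConfiguration();
+   AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
+   rmClient.init(conf);
+   rmClient.start();
+
+   // Corresponds to registerApplicationMaster(RegisterApplicationMasterRequest).
+   rmClient.registerApplicationMaster("am-host.example.com", -1, "");
+
+   // Ask for one 1 GB / 1 vcore container anywhere in the cluster.
+   Priority priority = Priority.newInstance(0);
+   Resource capability = Resource.newInstance(1024, 1);
+   rmClient.addContainerRequest(new ContainerRequest(capability, null, null, priority));
+
+   // Each allocate() call doubles as the AM heartbeat; poll until containers arrive.
+   AllocateResponse response = rmClient.allocate(0.1f);
+   for (Container allocated : response.getAllocatedContainers()) {
+     // launch the container via ContainerManagementProtocol / NMClient ...
+   }
+
+   // Corresponds to finishApplicationMaster(FinishApplicationMasterRequest).
+   rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");
+   rmClient.stop();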
+
+
+
+
+
+
+
+
+
+
+ The interface used by clients to claim a resource with the
+ SharedCacheManager. The client uses a checksum to identify the
+ resource and an {@link ApplicationId} to identify which application will be
+ using the resource.
+
+
+
+ The SharedCacheManager responds with whether or not the
+ resource exists in the cache. If the resource exists, a Path
+ to the resource in the shared cache is returned. If the resource does not
+ exist, the response is empty.
+
+
+ @param request request to claim a resource in the shared cache
+ @return response indicating if the resource is already in the cache
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to release a resource with the
+ SharedCacheManager. This method is called once an application
+ is no longer using a claimed resource in the shared cache. The client uses
+ a checksum to identify the resource and an {@link ApplicationId} to
+ identify which application is releasing the resource.
+
+
+
+ Note: This method is an optimization and the client is not required to call
+ it for correctness.
+
+
+
+ Currently the SharedCacheManager sends an empty response.
+
+
+ @param request request to release a resource in the shared cache
+ @return (empty) response on releasing the resource
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+ The protocol between clients and the SharedCacheManager to claim
+ and release resources in the shared cache.
+ ]]>
+
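+ For illustration, a minimal sketch of claiming and releasing a shared-cache resource with
+ the SharedCacheClient convenience library, which wraps this protocol; "appId" and the local
+ jar path below are hypothetical.
+
+   import org.apache.hadoop.fs.Path;
+   import org.apache.hadoop.yarn.api.records.ApplicationId;
+   import org.apache.hadoop.yarn.client.api.SharedCacheClient;
+   import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+   SharedCacheClient scClient = SharedCacheClient.createSharedCacheClient();
+   scClient.init(new YarnConfiguration());
+   scClient.start();
+
+   // The checksum of the local file is the key under which the resource is cached.
+   String checksum = scClient.getFileChecksum(new Path("/tmp/my-job.jar"));
+
+   // "use" claims the resource; a null Path indicates it is not in the cache.
+   Path cached = scClient.use(appId, checksum);
+
+   // Optional optimization once the application no longer needs the resource.
+   scClient.release(appId, checksum);
+   scClient.stop();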
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The ApplicationMaster provides a list of
+ {@link StartContainerRequest}s to a NodeManager to
+ start {@link Container}s allocated to it using this interface.
+
+
+
+ The ApplicationMaster has to provide details such as allocated
+ resource capability, security tokens (if enabled), command to be executed
+ to start the container, environment for the process, necessary
+ binaries/jar/shared-objects etc. via the {@link ContainerLaunchContext} in
+ the {@link StartContainerRequest}.
+
+
+
+ The NodeManager sends a response via
+ {@link StartContainersResponse} which includes a list of
+ successfully launched {@link Container}s, a
+ containerId-to-exception map for each failed {@link StartContainerRequest} in
+ which the exception indicates errors for that container, and an
+ allServicesMetaData map between the names of auxiliary services and their
+ corresponding meta-data. Note: non-container-specific exceptions will
+ still be thrown by the API method itself.
+
+
+ The ApplicationMaster can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the to-be-launched or launched containers.
+
+
+ @param request
+ request to start a list of containers
+ @return response including containerIds of all successfully launched
+ containers, a containerId-to-exception map for failed requests and
+ an allServicesMetaData map.
+ @throws YarnException
+ @throws IOException
+ @throws NMNotYetReadyException
+ This exception is thrown when the NM starts from scratch but has not
+ yet connected with the RM.]]>
+
+
+
+
+
+
+
+
+ The ApplicationMaster requests a NodeManager to
+ stop a list of {@link Container}s allocated to it using this
+ interface.
+
+
+
+ The ApplicationMaster sends a {@link StopContainersRequest}
+ which includes the {@link ContainerId}s of the containers to be stopped.
+
+
+
+ The NodeManager sends a response via
+ {@link StopContainersResponse} which includes a list of {@link ContainerId}s
+ of successfully stopped containers and a containerId-to-exception map for
+ each failed request in which the exception indicates errors for that
+ container. Note: non-container-specific exceptions will still be thrown by
+ the API method itself. The ApplicationMaster can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the containers.
+
+
+ @param request
+ request to stop a list of containers
+ @return response which includes a list of containerIds of successfully
+ stopped containers, a containerId-to-exception map for failed
+ requests.
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The API used by the ApplicationMaster to request the current
+ statuses of Containers from the NodeManager.
+
+
+
+ The ApplicationMaster sends a
+ {@link GetContainerStatusesRequest} which includes the {@link ContainerId}s
+ of all containers whose statuses are needed.
+
+
+
+ The NodeManager responds with
+ {@link GetContainerStatusesResponse} which includes a list of
+ {@link ContainerStatus} of the successfully queried containers and a
+ containerId-to-exception map for each failed request in which the exception
+ indicates errors for that container. Note: non-container-specific
+ exceptions will still be thrown by the API method itself.
+
+
+ @param request
+ request to get ContainerStatuses of containers with
+ the specified ContainerIds
+ @return response containing the list of ContainerStatus of the
+ successfully queried containers and a containerId-to-exception map
+ for failed requests.
+
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+ The protocol between an ApplicationMaster and a
+ NodeManager to start/stop containers and to get the status
+ of running containers.
+
+ If security is enabled the NodeManager verifies that the
+ ApplicationMaster has truly been allocated the container
+ by the ResourceManager and also verifies all interactions such
+ as stopping the container or obtaining status information for the container.
+
+ ]]>
+
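+ For illustration, a minimal sketch of starting, querying and stopping a container with the
+ NMClient convenience library, which wraps this protocol; "allocated" is assumed to be a
+ Container previously obtained from an allocate() call and "clc" a prepared
+ ContainerLaunchContext.
+
+   import org.apache.hadoop.yarn.api.records.Container;
+   import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+   import org.apache.hadoop.yarn.api.records.ContainerStatus;
+   import org.apache.hadoop.yarn.client.api.NMClient;
+   import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.init(new YarnConfiguration());
+   nmClient.start();
+
+   // Corresponds to startContainers(StartContainersRequest) for a single container.
+   nmClient.startContainer(allocated, clc);
+
+   // Corresponds to getContainerStatuses(GetContainerStatusesRequest).
+   ContainerStatus status =
+       nmClient.getContainerStatus(allocated.getId(), allocated.getNodeId());
+
+   // Corresponds to stopContainers(StopContainersRequest).
+   nmClient.stopContainer(allocated.getId(), allocated.getNodeId());
+   nmClient.stop();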
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ response id used to track duplicate responses.
+ @return response id]]>
+
+
+
+
+
+ response id used to track duplicate responses.
+ @param id response id]]>
+
+
+
+
+ current progress of application.
+ @return current progress of application]]>
+
+
+
+
+
+ current progress of application
+ @param progress current progress of application]]>
+
+
+
+
+ ResourceRequest to update the
+ ResourceManager
about the application's resource requirements.
+ @return the list of ResourceRequest
+ @see ResourceRequest]]>
+
+
+
+
+
+ ResourceRequest to update the
+ ResourceManager
about the application's resource requirements.
+ @param resourceRequests list of ResourceRequest
to update the
+ ResourceManager
about the application's
+ resource requirements
+ @see ResourceRequest]]>
+
+
+
+
+ ContainerId of containers being
+ released by the ApplicationMaster
.
+ @return list of ContainerId
of containers being
+ released by the ApplicationMaster
]]>
+
+
+
+
+
+ ContainerId of containers being
+ released by the ApplicationMaster
+ @param releaseContainers list of ContainerId
of
+ containers being released by the
+ ApplicationMaster
]]>
+
+
+
+
+ ResourceBlacklistRequest being sent by the
+ ApplicationMaster
.
+ @return the ResourceBlacklistRequest
being sent by the
+ ApplicationMaster
+ @see ResourceBlacklistRequest]]>
+
+
+
+
+
+ ResourceBlacklistRequest to inform the
+ ResourceManager
about the blacklist additions and removals
+ per the ApplicationMaster
.
+
+ @param resourceBlacklistRequest the ResourceBlacklistRequest
+ to inform the ResourceManager
about
+ the blacklist additions and removals
+ per the ApplicationMaster
+ @see ResourceBlacklistRequest]]>
+
+
+
+
+ ContainerResourceIncreaseRequest being sent by the
+ ApplicationMaster
]]>
+
+
+
+
+
+ ContainerResourceIncreaseRequest to inform the
+ ResourceManager
about some container's resources need to be
+ increased]]>
+
+
+
+ The core request sent by the ApplicationMaster to the
+ ResourceManager to obtain resources in the cluster.
+
+ The request includes:
+
+ - A response id to track duplicate responses.
+ - Progress information.
+ -
+ A list of {@link ResourceRequest} to inform the
+ ResourceManager about the application's
+ resource requirements.
+
+ -
+ A list of unused {@link Container} which are being returned.
+
+
+
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+
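+ For illustration, a rough sketch of assembling such a request directly from the protocol
+ records (most applications use AMRMClient instead); "lastResponseId", the progress value and
+ the resource sizes are hypothetical.
+
+   import java.util.Collections;
+   import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+   import org.apache.hadoop.yarn.api.records.*;
+
+   // Ask for two 2 GB / 1 vcore containers anywhere ("*") in the cluster.
+   ResourceRequest ask = ResourceRequest.newInstance(
+       Priority.newInstance(0), ResourceRequest.ANY,
+       Resource.newInstance(2048, 1), 2);
+
+   AllocateRequest request = AllocateRequest.newInstance(
+       lastResponseId + 1,                     // response id to track duplicates
+       0.25f,                                  // current progress
+       Collections.singletonList(ask),         // resource requirements
+       Collections.<ContainerId>emptyList(),   // unused containers being returned
+       null);                                  // no blacklist changes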
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ If the ResourceManager needs the
+ ApplicationMaster to take some action then it will send an
+ AMCommand to the ApplicationMaster. See AMCommand
+ for details on commands and actions for them.
+ @return AMCommand if the ApplicationMaster should
+ take action, null otherwise
+ @see AMCommand]]>
+
+
+
+
+ last response id.
+ @return last response id]]>
+
+
+
+
+ newly allocated Container
by the
+ ResourceManager
.
+ @return list of newly allocated Container
]]>
+
+
+
+
+ available headroom for resources in the cluster for the
+ application.
+ @return limit of available headroom for resources in the cluster for the
+ application]]>
+
+
+
+
+ completed containers' statuses.
+ @return the list of completed containers' statuses]]>
+
+
+
+
+ updated NodeReport
s. Updates could
+ be changes in health, availability etc of the nodes.
+ @return The delta of updated nodes since the last response]]>
+
+
+
+
+
+
+
+
+
+
+ The message is a snapshot of the resources the RM wants back from the AM.
+ While demand persists, the RM will repeat its request; applications should
+ not interpret each message as a request for additional
+ resources on top of previous messages. Resources requested consistently
+ over some duration may be forcibly killed by the RM.
+
+ @return A specification of the resources to reclaim from this AM.]]>
+
+
+
+
+
+ 1) AM is receiving first container on underlying NodeManager.
+ OR
+ 2) NMToken master key rolled over in ResourceManager and AM is getting new
+ container on the same underlying NodeManager.
+
+ AM will receive one NMToken per NM irrespective of the number of containers
+ issued on same NM. AM is expected to store these tokens until issued a
+ new token for the same NM.]]>
+
+
+
+
+ ResourceManager]]>
+
+
+
+
+ NodeManager]]>
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager to the
+ ApplicationMaster during resource negotiation.
+
+ The response includes:
+
+ - Response ID to track duplicate responses.
+ -
+ An AMCommand sent by ResourceManager to let the
+ {@code ApplicationMaster} take some actions (resync, shutdown etc.).
+
+ - A list of newly allocated {@link Container}.
+ - A list of completed {@link Container}s' statuses.
+ -
+ The available headroom for resources in the cluster for the
+ application.
+
+ - A list of nodes whose status has been updated.
+ - The number of available nodes in a cluster.
+ - A description of resources requested back by the cluster
+ - AMRMToken, if AMRMToken has been rolled over
+
+
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ final state of the ApplicationMaster
.
+ @return final state of the ApplicationMaster
]]>
+
+
+
+
+
+ final state of the ApplicationMaster
+ @param finalState final state of the ApplicationMaster
]]>
+
+
+
+
+ diagnostic information on application failure.
+ @return diagnostic information on application failure]]>
+
+
+
+
+
+ diagnostic information on application failure.
+ @param diagnostics diagnostic information on application failure]]>
+
+
+
+
+ tracking URL for the ApplicationMaster.
+ If this URL contains a scheme then it will be used by the resource manager
+ web application proxy, otherwise it will default to http.
+ @return tracking URL for the ApplicationMaster]]>
+
+
+
+
+
+ final tracking URL for the ApplicationMaster.
+ This is the web-URL to which the ResourceManager or web-application proxy will
+ redirect clients/users once the application is finished and the
+ ApplicationMaster is gone.
+
+ If the passed URL has a scheme then that will be used by the
+ ResourceManager and web-application proxy, otherwise the scheme will
+ default to http.
+
+
+ Empty, null, and "N/A" strings are all valid besides a real URL. In case a URL
+ isn't explicitly passed, it defaults to "N/A" on the ResourceManager.
+
+
+ @param url
+ tracking URL for the ApplicationMaster]]>
+
+
+
+
+ The final request includes details such:
+
+ - Final state of the {@code ApplicationMaster}
+ -
+ Diagnostic information in case of failure of the
+ {@code ApplicationMaster}
+
+ - Tracking URL
+
+
+ @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager to an
+ ApplicationMaster on its completion.
+
+ The response includes:
+
+ - A flag which indicates that the application has successfully unregistered
+ with the RM and the application can safely stop.
+
+
+ Note: The flag indicates whether the application has successfully
+ unregistered and is safe to stop. The application may stop after the flag is
+ true. If the application stops before the flag is true then the RM may retry
+ the application.
+
+ @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptId of an application attempt.
+
+ @return ApplicationAttemptId
of an application attempt]]>
+
+
+
+
+
+ ApplicationAttemptId of an application attempt
+
+ @param applicationAttemptId
+ ApplicationAttemptId
of an application attempt]]>
+
+
+
+
+ The request sent by a client to the ResourceManager
to get an
+ {@link ApplicationAttemptReport} for an application attempt.
+
+
+
+ The request should include the {@link ApplicationAttemptId} of the
+ application attempt.
+
+
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptReport for the application attempt.
+
+ @return ApplicationAttemptReport
for the application attempt]]>
+
+
+
+
+
+ ApplicationAttemptReport for the application attempt.
+
+ @param applicationAttemptReport
+ ApplicationAttemptReport
for the application attempt]]>
+
+
+
+
+ The response sent by the ResourceManager
to a client requesting
+ an application attempt report.
+
+
+
+ The response includes an {@link ApplicationAttemptReport} which has the
+ details about the particular application attempt
+
+
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of an application
+
+ @return ApplicationId
of an application]]>
+
+
+
+
+
+ ApplicationId of an application
+
+ @param applicationId
+ ApplicationId
of an application]]>
+
+
+
+
+ The request from clients to get a list of application attempt reports of an
+ application from the ResourceManager
.
+
+
+ @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationReport of an application.
+
+ @return a list of ApplicationReport
of an application]]>
+
+
+
+
+
+ ApplicationReport of an application.
+
+ @param applicationAttempts
+ a list of ApplicationReport
of an application]]>
+
+
+
+
+ The response sent by the ResourceManager
to a client requesting
+ a list of {@link ApplicationAttemptReport} for application attempts.
+
+
+
+ The ApplicationAttemptReport
for each application includes the
+ details of an application attempt.
+
+
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the application.
+ @return ApplicationId
of the application]]>
+
+
+
+
+
+ ApplicationId of the application
+ @param applicationId ApplicationId
of the application]]>
+
+
+
+ The request sent by a client to the ResourceManager
to
+ get an {@link ApplicationReport} for an application.
+
+ The request should include the {@link ApplicationId} of the
+ application.
+
+ @see ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)
+ @see ApplicationReport]]>
+
+
+
+
+
+
+
+
+
+ ApplicationReport for the application.
+ @return ApplicationReport
for the application]]>
+
+
+
+ The response sent by the ResourceManager
to a client
+ requesting an application report.
+
+ The response includes an {@link ApplicationReport} which has details such
+ as user, queue, name, host on which the ApplicationMaster
is
+ running, RPC port, tracking URL, diagnostics, start time etc.
+
+ @see ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application types in the cluster from the
+ ResourceManager.
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
+
+ Setting any of the parameters to null would just disable that
+ filter.
+
+ @param scope {@link ApplicationsRequestScope} to filter by
+ @param users list of users to filter by
+ @param queues list of scheduler queues to filter by
+ @param applicationTypes types of applications
+ @param applicationTags application tags to filter by
+ @param applicationStates application states to filter by
+ @param startRange range of application start times to filter by
+ @param finishRange range of application finish times to filter by
+ @param limit number of applications to limit to
+ @return {@link GetApplicationsRequest} to be used with
+ {@link ApplicationClientProtocol#getApplications(GetApplicationsRequest)}]]>
+
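+ For illustration, a rough sketch of building a filtered request (or using the equivalent
+ YarnClient call); the queue and application-type names below are hypothetical.
+
+   import java.util.Collections;
+   import java.util.EnumSet;
+   import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+   import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+
+   GetApplicationsRequest request = GetApplicationsRequest.newInstance();
+   request.setQueues(Collections.singleton("analytics"));
+   request.setApplicationTypes(Collections.singleton("MAPREDUCE"));
+   request.setApplicationStates(EnumSet.of(YarnApplicationState.RUNNING));
+   request.setLimit(100);
+
+   // Equivalent convenience call through YarnClient:
+   // List<ApplicationReport> reports =
+   //     yarnClient.getApplications(Collections.singleton("MAPREDUCE"),
+   //                                EnumSet.of(YarnApplicationState.RUNNING));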
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application types in the cluster from the
+ ResourceManager.
+
+
+ @param scope {@link ApplicationsRequestScope} to filter by
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application types in the cluster from the
+ ResourceManager.
+
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application states in the cluster from the
+ ResourceManager.
+
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+
+ The request from clients to get a report of Applications matching the
+ given application types and application states in the cluster from the
+ ResourceManager.
+
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request from clients to get a report of Applications
+ in the cluster from the ResourceManager
.
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+
+
+
+ ApplicationReport for applications.
+ @return ApplicationReport
for applications]]>
+
+
+
+ The response sent by the ResourceManager
to a client
+ requesting an {@link ApplicationReport} for applications.
+
+ The ApplicationReport
for each application includes details
+ such as user, queue, name, host on which the ApplicationMaster
+ is running, RPC port, tracking URL, diagnostics, start time etc.
+
+ @see ApplicationReport
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+ The request sent by clients to get cluster metrics from the
+ ResourceManager
.
+
+ Currently, this is empty.
+
+ @see ApplicationClientProtocol#getClusterMetrics(GetClusterMetricsRequest)]]>
+
+
+
+
+
+
+
+
+
+ YarnClusterMetrics for the cluster.
+ @return YarnClusterMetrics
for the cluster]]>
+
+
+
+ ResourceManager to a client
+ requesting cluster metrics.
+
+ @see YarnClusterMetrics
+ @see ApplicationClientProtocol#getClusterMetrics(GetClusterMetricsRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request from clients to get a report of all nodes
+ in the cluster from the ResourceManager
.
+
+ The request will ask for all nodes in the given {@link NodeState}s.
+
+ @see ApplicationClientProtocol#getClusterNodes(GetClusterNodesRequest)]]>
+
+
+
+
+
+
+
+
+
+ NodeReport for all nodes in the cluster.
+ @return NodeReport
for all nodes in the cluster]]>
+
+
+
+ The response sent by the ResourceManager
to a client
+ requesting a {@link NodeReport} for all nodes.
+
+ The NodeReport
contains per-node information such as
+ available resources, number of containers, tracking url, rack name, health
+ status etc.
+
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(GetClusterNodesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerId of the Container.
+
+ @return ContainerId
of the Container]]>
+
+
+
+
+
+ ContainerId of the container
+
+ @param containerId
+ ContainerId
of the container]]>
+
+
+
+
+ The request sent by a client to the ResourceManager
to get an
+ {@link ContainerReport} for a container.
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerReport for the container.
+
+ @return ContainerReport
for the container]]>
+
+
+
+
+
+
+
+ The response sent by the ResourceManager
to a client requesting
+ a container report.
+
+
+
+ The response includes a {@link ContainerReport} which has details of a
+ container.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptId of an application attempt.
+
+ @return ApplicationAttemptId
of an application attempt]]>
+
+
+
+
+
+ ApplicationAttemptId of an application attempt
+
+ @param applicationAttemptId
+ ApplicationAttemptId
of an application attempt]]>
+
+
+
+
+ The request from clients to get a list of container reports, which belong to
+ an application attempt from the ResourceManager
.
+
+
+ @see ApplicationHistoryProtocol#getContainers(GetContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerReport for all the containers of an
+ application attempt.
+
+ @return a list of ContainerReport
for all the containers of an
+ application attempt]]>
+
+
+
+
+
+ ContainerReport for all the containers of an
+ application attempt.
+
+ @param containers
+ a list of ContainerReport
for all the containers of
+ an application attempt]]>
+
+
+
+
+ The response sent by the ResourceManager
to a client requesting
+ a list of {@link ContainerReport} for containers.
+
+
+
+ The ContainerReport
for each container includes the container
+ details.
+
+
+ @see ContainerReport
+ @see ApplicationHistoryProtocol#getContainers(GetContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerIds of containers for which to obtain
+ the ContainerStatus
.
+
+ @return the list of ContainerId
s of containers for which to
+ obtain the ContainerStatus
.]]>
+
+
+
+
+
+ ContainerIds of containers for which to obtain
+ the ContainerStatus
+
+ @param containerIds
+ a list of ContainerId
s of containers for which to
+ obtain the ContainerStatus
]]>
+
+
+
+ ApplicationMaster to the
+ NodeManager
to get {@link ContainerStatus} of requested
+ containers.
+
+ @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)]]>
+
+
+
+
+
+
+
+
+
+ ContainerStatuses of the requested containers.
+
+ @return ContainerStatus
es of the requested containers.]]>
+
+
+
+
+
+
+
+
+ NodeManager to the
+ ApplicationMaster
when asked to obtain the
+ ContainerStatus
of requested containers.
+
+ @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request sent by clients to get a new {@link ApplicationId} for
+ submitting an application.
+
+ Currently, this is empty.
+
+ @see ApplicationClientProtocol#getNewApplication(GetNewApplicationRequest)]]>
+
+
+
+
+
+
+
+
+
+ new ApplicationId
allocated by the
+ ResourceManager
.
+ @return new ApplicationId
allocated by the
+ ResourceManager
]]>
+
+
+
+
+ ResourceManager in the cluster.
+ @return maximum capability of allocated resources in the cluster]]>
+
+
+
+ The response sent by the ResourceManager
to the client for
+ a request to get a new {@link ApplicationId} for submitting applications.
+
+ Clients can submit an application with the returned
+ {@link ApplicationId}.
+
+ @see ApplicationClientProtocol#getNewApplication(GetNewApplicationRequest)]]>
+
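+ For illustration, a rough sketch of the new-application / submit flow through YarnClient,
+ which wraps GetNewApplicationRequest/Response and SubmitApplicationRequest; the application
+ name, queue and resource sizes are hypothetical, and "amContainerSpec" is assumed to be a
+ prepared ContainerLaunchContext.
+
+   import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
+   import org.apache.hadoop.yarn.api.records.*;
+   import org.apache.hadoop.yarn.client.api.YarnClient;
+   import org.apache.hadoop.yarn.client.api.YarnClientApplication;
+   import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+   YarnClient yarnClient = YarnClient.createYarnClient();
+   yarnClient.init(new YarnConfiguration());
+   yarnClient.start();
+
+   // getNewApplication: obtain the ApplicationId and the cluster's maximum capability.
+   YarnClientApplication app = yarnClient.createApplication();
+   GetNewApplicationResponse newApp = app.getNewApplicationResponse();
+   ApplicationId appId = newApp.getApplicationId();
+   Resource maxCapability = newApp.getMaximumResourceCapability();
+
+   // submitApplication: fill in the submission context and hand it to the RM.
+   ApplicationSubmissionContext context = app.getApplicationSubmissionContext();
+   context.setApplicationName("my-app");
+   context.setQueue("default");
+   context.setResource(Resource.newInstance(1024, 1));
+   context.setAMContainerSpec(amContainerSpec);
+   yarnClient.submitApplication(context);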
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ queue name for which to get queue information.
+ @return queue name for which to get queue information]]>
+
+
+
+
+
+ queue name for which to get queue information
+ @param queueName queue name for which to get queue information]]>
+
+
+
+
+ active applications required?
+ @return true
if applications' information is to be included,
+ else false
]]>
+
+
+
+
+
+ active applications?
+ @param includeApplications fetch information about active
+ applications?]]>
+
+
+
+
+ child queues required?
+ @return true
if information about child queues is required,
+ else false
]]>
+
+
+
+
+
+ child queues?
+ @param includeChildQueues fetch information about child queues?]]>
+
+
+
+
+ child queue hierarchy required?
+ @return true
if information about entire hierarchy is
+ required, false
otherwise]]>
+
+
+
+
+
+ child queue hierarchy?
+ @param recursive fetch information on the entire child queue
+ hierarchy?]]>
+
+
+
+ The request sent by clients to get queue information
+ from the ResourceManager
.
+
+ @see ApplicationClientProtocol#getQueueInfo(GetQueueInfoRequest)]]>
+
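+ For illustration, a short sketch of the equivalent YarnClient call; the queue name "default"
+ is just an example.
+
+   import org.apache.hadoop.yarn.api.records.QueueInfo;
+
+   // Fetch information for a single queue, including its capacities and child queues.
+   QueueInfo queueInfo = yarnClient.getQueueInfo("default");
+   System.out.println(queueInfo.getQueueName()
+       + " capacity=" + queueInfo.getCapacity()
+       + " children=" + queueInfo.getChildQueues().size());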
+
+
+
+
+
+
+
+
+ QueueInfo for the specified queue.
+ @return QueueInfo
for the specified queue]]>
+
+
+
+
+ The response includes a {@link QueueInfo} which has details such as
+ queue name, used/total capacities, running applications, child queues etc.
+
+ @see QueueInfo
+ @see ApplicationClientProtocol#getQueueInfo(GetQueueInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+
+ The request sent by clients to the ResourceManager
to
+ get queue acls for the current user.
+
+ Currently, this is empty.
+
+ @see ApplicationClientProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+ QueueUserACLInfo per queue for the user.
+ @return QueueUserACLInfo
per queue for the user]]>
+
+
+
+ The response sent by the ResourceManager
to clients
+ seeking queue acls for the user.
+
+ The response contains a list of {@link QueueUserACLInfo} which
+ provides information about {@link QueueACL} per queue.
+
+ @see QueueACL
+ @see QueueUserACLInfo
+ @see ApplicationClientProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the application to be aborted.
+ @return ApplicationId
of the application to be aborted]]>
+
+
+
+
+
+
+ The request sent by the client to the ResourceManager
+ to abort a submitted application.
+
+ The request includes the {@link ApplicationId} of the application to be
+ aborted.
+
+ @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager to the client aborting
+ a submitted application.
+
+ The response includes:
+
+ -
+ A flag which indicates whether the process of killing the application is
+ completed or not.
+
+
+ Note: the user is recommended to wait until this flag becomes true; otherwise, if
+ the ResourceManager crashes before the process of killing the
+ application is completed, the ResourceManager may retry this
+ application on recovery.
+
+ @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the application to be moved.
+ @return ApplicationId
of the application to be moved]]>
+
+
+
+
+
+ ApplicationId of the application to be moved.
+ @param appId ApplicationId
of the application to be moved]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request sent by the client to the ResourceManager
+ to move a submitted application to a different queue.
+
+ The request includes the {@link ApplicationId} of the application to be
+ moved and the queue to place it in.
+
+ @see ApplicationClientProtocol#moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest)]]>
+
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager
to the client moving
+ a submitted application to a different queue.
+
+
+ A response without exception means that the move has completed successfully.
+
+
+ @see ApplicationClientProtocol#moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ RegisterApplicationMasterRequest.
+ If port or trackingUrl is not used, use the following default values:
+
+ - port: -1
+ - trackingUrl: null
+
+ The port is allowed to be any integer larger than or equal to -1.
+ @return the new instance of RegisterApplicationMasterRequest]]>
+
+
+
+
+ host on which the ApplicationMaster
is
+ running.
+ @return host on which the ApplicationMaster
is running]]>
+
+
+
+
+
+ host on which the ApplicationMaster
is
+ running.
+ @param host host on which the ApplicationMaster
+ is running]]>
+
+
+
+
+ RPC port on which the {@code ApplicationMaster} is
+ responding.
+ @return the RPC port on which the {@code ApplicationMaster}
+ is responding]]>
+
+
+
+
+
+ RPC port on which the {@code ApplicationMaster} is
+ responding.
+ @param port RPC port on which the {@code ApplicationMaster}
+ is responding]]>
+
+
+
+
+ tracking URL for the ApplicationMaster.
+ If this URL contains a scheme then it will be used by the resource manager
+ web application proxy, otherwise it will default to http.
+ @return tracking URL for the ApplicationMaster]]>
+
+
+
+
+
+ tracking URL for the ApplicationMaster while
+ it is running. This is the web-URL to which the ResourceManager or
+ web-application proxy will redirect clients/users while the application and
+ the ApplicationMaster are still running.
+
+ If the passed URL has a scheme then that will be used by the
+ ResourceManager and web-application proxy, otherwise the scheme will
+ default to http.
+
+
+ Empty, null, and "N/A" strings are all valid besides a real URL. In case a URL
+ isn't explicitly passed, it defaults to "N/A" on the ResourceManager.
+
+
+ @param trackingUrl
+ tracking URL for the ApplicationMaster]]>
+
+
+
+
+ The registration includes details such as:
+
+ - Hostname on which the AM is running.
+ - RPC Port
+ - Tracking URL
+
+
+ @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)]]>
+
+
+
+
+
+
+
+
+
+ ResourceManager in the cluster.
+ @return maximum capability of allocated resources in the cluster]]>
+
+
+
+
+ ApplicationACLs for the application.
+ @return all the ApplicationACL
s]]>
+
+
+
+
+ Get the ClientToAMToken master key.
+ The ClientToAMToken master key is sent to the ApplicationMaster
+ by the ResourceManager via {@link RegisterApplicationMasterResponse},
+ and is used to verify the corresponding ClientToAMToken.]]>
+
+
+
+
+
+
+
+
+
+
+ Get the queue that the application was placed in.]]>
+
+
+
+
+
+ Set the queue that the application was placed in.]]>
+
+
+
+
+
+ Get the list of running containers as viewed by
+ ResourceManager
from previous application attempts.
+
+
+ @return the list of running containers as viewed by
+ ResourceManager
from previous application attempts
+ @see RegisterApplicationMasterResponse#getNMTokensFromPreviousAttempts()]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The response contains critical details such as:
+
+ - Maximum capability for allocated resources in the cluster.
+ - {@code ApplicationACL}s for the application.
+ - ClientToAMToken master key.
+
+
+ @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)]]>
+
+
+
+
+
+
+
+
+
+ ApplicationId of the resource to be released.
+
+ @return ApplicationId
]]>
+
+
+
+
+
+ ApplicationId of the resource to be released.
+
+ @param id ApplicationId
]]>
+
+
+
+
+ key of the resource to be released.
+
+ @return key
]]>
+
+
+
+
+
+ key of the resource to be released.
+
+ @param key unique identifier for the resource]]>
+
+
+
+ The request from clients to release a resource in the shared cache.]]>
+
+
+
+
+
+
+
+
+
+ The response to clients from the SharedCacheManager
when
+ releasing a resource in the shared cache.
+
+
+
+ Currently, this is empty.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerLaunchContext for the container to be started
+ by the NodeManager
.
+
+ @return ContainerLaunchContext
for the container to be started
+ by the NodeManager
]]>
+
+
+
+
+
+ ContainerLaunchContext for the container to be started
+ by the NodeManager
+ @param context ContainerLaunchContext
for the container to be
+ started by the NodeManager
]]>
+
+
+
+
+
+ Note: {@link NMToken} will be used for authenticating communication with
+ {@code NodeManager}.
+ @return the container token to be used for authorization during starting
+ container.
+ @see NMToken
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+
+
+
+
+
+
+ The request sent by the ApplicationMaster to the
+ NodeManager to start a container.
+
+ The ApplicationMaster has to provide details such as
+ allocated resource capability, security tokens (if enabled), command
+ to be executed to start the container, environment for the process,
+ necessary binaries/jar/shared-objects etc. via the
+ {@link ContainerLaunchContext}.
+
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The request which contains a list of {@link StartContainerRequest} sent by
+ the ApplicationMaster to the NodeManager to
+ start containers.
+
+
+
+ In each {@link StartContainerRequest}, the ApplicationMaster has
+ to provide details such as allocated resource capability, security tokens (if
+ enabled), command to be executed to start the container, environment for the
+ process, necessary binaries/jar/shared-objects etc. via the
+ {@link ContainerLaunchContext}.
+
+
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+
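+ For illustration, a rough sketch of batching several launches into one request from the
+ protocol records; "allocatedContainers" is assumed to be a list of Containers obtained from
+ allocate() and "clc" a prepared ContainerLaunchContext shared by all of them.
+
+   import java.util.ArrayList;
+   import java.util.List;
+   import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+   import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+   import org.apache.hadoop.yarn.api.records.Container;
+
+   List<StartContainerRequest> launches = new ArrayList<StartContainerRequest>();
+   for (Container container : allocatedContainers) {
+     // Each entry pairs a launch context with the container token issued by the RM.
+     launches.add(StartContainerRequest.newInstance(clc, container.getContainerToken()));
+   }
+   StartContainersRequest batch = StartContainersRequest.newInstance(launches);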
+
+
+
+
+
+
+
+
+ ContainerId s of the containers that are
+ started successfully.
+
+ @return the list of ContainerId
s of the containers that are
+ started successfully.
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+ Get the meta-data from all auxiliary services running on the
+ NodeManager
.
+
+
+ The meta-data is returned as a Map between the auxiliary service names and
+ their corresponding per service meta-data as an opaque blob
+ ByteBuffer
+
+
+
+ To be able to interpret the per-service meta-data, you should consult the
+ documentation for the Auxiliary-service configured on the NodeManager
+
+
+ @return a Map between the names of auxiliary services and their
+ corresponding meta-data]]>
+
+
+
+
+ The response sent by the NodeManager
to the
+ ApplicationMaster
when asked to start an allocated
+ container.
+
+
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerIds of the containers to be stopped.
+ @return ContainerId
s of containers to be stopped]]>
+
+
+
+
+
+ ContainerIds of the containers to be stopped.
+ @param containerIds ContainerId
s of the containers to be stopped]]>
+
+
+
+ The request sent by the ApplicationMaster
to the
+ NodeManager
to stop containers.
+
+ @see ContainerManagementProtocol#stopContainers(StopContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The response sent by the NodeManager
to the
+ ApplicationMaster
when asked to stop allocated
+ containers.
+
+
+ @see ContainerManagementProtocol#stopContainers(StopContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationSubmissionContext for the application.
+ @return ApplicationSubmissionContext
for the application]]>
+
+
+
+
+
+ ApplicationSubmissionContext for the application.
+ @param context ApplicationSubmissionContext
for the
+ application]]>
+
+
+
+ The request sent by a client to submit an application to the
+ ResourceManager
.
+
+ The request, via {@link ApplicationSubmissionContext}, contains
+ details such as queue, {@link Resource} required to run the
+ ApplicationMaster
, the equivalent of
+ {@link ContainerLaunchContext} for launching the
+ ApplicationMaster
etc.
+
+ @see ApplicationClientProtocol#submitApplication(SubmitApplicationRequest)]]>
+
+
+
+
+
+
+
+
+ The response sent by the ResourceManager
to a client on
+ application submission.
+
+ Currently, this is empty.
+
+ @see ApplicationClientProtocol#submitApplication(SubmitApplicationRequest)]]>
+
+
+
+
+
+
+
+
+
+ ApplicationId of the resource to be used.
+
+ @return ApplicationId
]]>
+
+
+
+
+
+ ApplicationId of the resource to be used.
+
+ @param id ApplicationId
]]>
+
+
+
+
+ key of the resource to be used.
+
+ @return key
]]>
+
+
+
+
+
+ key of the resource to be used.
+
+ @param key unique identifier for the resource]]>
+
+
+
+
+ The request from clients to the SharedCacheManager
that claims a
+ resource in the shared cache.
+ ]]>
+
+
+
+
+
+
+
+
+
+ Path corresponding to the requested resource in the
+ shared cache.
+
+ @return String A Path
if the resource exists in the shared
+ cache, null
otherwise]]>
+
+
+
+
+
+ Path corresponding to a resource in the shared cache.
+
+ @param p A Path
corresponding to a resource in the shared
+ cache]]>
+
+
+
+
+ The response from the SharedCacheManager to the client that indicates whether
+ a requested resource exists in the cache.
+ ]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the ApplicationAttemptId.
+ @return ApplicationId of the ApplicationAttemptId]]>
+
+
+
+
+ attempt id of the Application
.
+ @return attempt id
of the Application
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptId
denotes the particular attempt
+ of an ApplicationMaster
for a given {@link ApplicationId}.
+
+ Multiple attempts might be needed to run an application to completion due
+ to temporal failures of the ApplicationMaster
such as hardware
+ failures, connectivity issues etc. on the node on which it was scheduled.
]]>
+
+
+
+
+
+
+
+
+
+ YarnApplicationAttemptState of the application attempt.
+
+ @return YarnApplicationAttemptState of the application attempt]]>
+
+
+
+
+ RPC port of this attempt ApplicationMaster
.
+
+ @return RPC port of this attempt ApplicationMaster
]]>
+
+
+
+
+ host on which this attempt of
+ ApplicationMaster
is running.
+
+ @return host on which this attempt of
+ ApplicationMaster
is running]]>
+
+
+
+
+ diagnostic information of the application attempt in case
+ of errors.
+
+ @return diagnostic information of the application attempt in case
+ of errors]]>
+
+
+
+
+ tracking url for the application attempt.
+
+ @return tracking url for the application attempt]]>
+
+
+
+
+ original tracking url for the application attempt.
+
+ @return original tracking url for the application attempt]]>
+
+
+
+
+ ApplicationAttemptId of this attempt of the
+ application
+
+ @return ApplicationAttemptId
of the attempt]]>
+
+
+
+
+ ContainerId of AMContainer for this attempt
+
+ @return ContainerId
of the attempt]]>
+
+
+
+
+ It includes details such as:
+
+ - {@link ApplicationAttemptId} of the application.
+ - Host on which the
ApplicationMaster
of this attempt is
+ running.
+ - RPC port of the
ApplicationMaster
of this attempt.
+ - Tracking URL.
+ - Diagnostic information in case of errors.
+ - {@link YarnApplicationAttemptState} of the application attempt.
+ - {@link ContainerId} of the master Container.
+
]]>
+
+
+
+
+
+
+
+
+
+
+ ApplicationId
+ which is unique for all applications started by a particular instance
+ of the ResourceManager
.
+ @return short integer identifier of the ApplicationId
]]>
+
+
+
+
+ start time of the ResourceManager
which is
+ used to generate globally unique ApplicationId
.
+ @return start time of the ResourceManager
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId
represents the globally unique
+ identifier for an application.
+
+ The globally unique nature of the identifier is achieved by using the
+ cluster timestamp i.e. start-time of the
+ ResourceManager
along with a monotonically increasing counter
+ for the application.
]]>
+
+
+
+
+
+
+
+
+
+ ApplicationId of the application.
+ @return ApplicationId
of the application]]>
+
+
+
+
+ ApplicationAttemptId of the current
+ attempt of the application
+ @return ApplicationAttemptId
of the attempt]]>
+
+
+
+
+ user who submitted the application.
+ @return user who submitted the application]]>
+
+
+
+
+ queue to which the application was submitted.
+ @return queue to which the application was submitted]]>
+
+
+
+
+ name of the application.
+ @return name of the application]]>
+
+
+
+
+ host on which the ApplicationMaster
+ is running.
+ @return host on which the ApplicationMaster
+ is running]]>
+
+
+
+
+ RPC port of the ApplicationMaster
.
+ @return RPC port of the ApplicationMaster
]]>
+
+
+
+
+ client token for communicating with the
+ ApplicationMaster.
+
+ ClientToAMToken is the security token used by the AMs to verify the
+ authenticity of any client.
+
+
+
+ The ResourceManager provides a secure token (via
+ {@link ApplicationReport#getClientToAMToken()}) which is verified by the
+ ApplicationMaster when the client directly talks to an AM.
+
+ @return client token for communicating with the
+ ApplicationMaster]]>
+
+
+
+
+ YarnApplicationState of the application.
+ @return YarnApplicationState
of the application]]>
+
+
+
+
+ diagnostic information of the application in case of
+ errors.
+ @return diagnostic information of the application in case
+ of errors]]>
+
+
+
+
+ tracking url for the application.
+ @return tracking url for the application]]>
+
+
+
+
+ start time of the application.
+ @return start time of the application]]>
+
+
+
+
+ finish time of the application.
+ @return finish time of the application]]>
+
+
+
+
+ final finish status of the application.
+ @return final finish status of the application]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The AMRM token is required for AM to RM scheduling operations. For
+ managed Application Masters Yarn takes care of injecting it. For unmanaged
+ Applications Masters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+
+ The AMRM token will be returned only if all the following conditions are
+ met:
+
+ - the requester is the owner of the ApplicationMaster
+ - the application master is an unmanaged ApplicationMaster
+ - the application master is in ACCEPTED state
+
+ Else this method returns NULL.
+
+ @return the AM to RM token if available.]]>
+
+
+
+
+ It includes details such as:
+
+ - {@link ApplicationId} of the application.
+ - Applications user.
+ - Application queue.
+ - Application name.
+ - Host on which the
ApplicationMaster
is running.
+ - RPC port of the
ApplicationMaster
.
+ - Tracking URL.
+ - {@link YarnApplicationState} of the application.
+ - Diagnostic information in case of errors.
+ - Start time of the application.
+ - Client {@link Token} of the application (if security is enabled).
+
+
+ @see ApplicationClientProtocol#getApplicationReport(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Resource. -1 for invalid/inaccessible reports.
+ @return the used Resource
]]>
+
+
+
+
+ Resource. -1 for invalid/inaccessible reports.
+ @return the reserved Resource
]]>
+
+
+
+
+ Resource. -1 for invalid/inaccessible reports.
+ @return the needed Resource
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationId of the submitted application.
+ @return ApplicationId
of the submitted application]]>
+
+
+
+
+
+ ApplicationId of the submitted application.
+ @param applicationId ApplicationId
of the submitted
+ application]]>
+
+
+
+
+ name.
+ @return application name]]>
+
+
+
+
+
+ name.
+ @param applicationName application name]]>
+
+
+
+
+ queue to which the application is being submitted.
+ @return queue to which the application is being submitted]]>
+
+
+
+
+
+ queue to which the application is being submitted
+ @param queue queue to which the application is being submitted]]>
+
+
+
+
+ Priority of the application.
+ @return Priority
of the application]]>
+
+
+
+
+ ContainerLaunchContext to describe the
+ Container
with which the ApplicationMaster
is
+ launched.
+ @return ContainerLaunchContext
for the
+ ApplicationMaster
container]]>
+
+
+
+
+
+ ContainerLaunchContext to describe the
+ Container
with which the ApplicationMaster
is
+ launched.
+ @param amContainer ContainerLaunchContext
for the
+ ApplicationMaster
container]]>
+
+
+
+
+ YarnApplicationState.
+ Such apps will not be retried by the RM on app attempt failure.
+ The default value is false.
+ @return true if the AM is not managed by the RM]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationMaster for this
+ application. Please note this will be DEPRECATED, use getResource
+ in getAMContainerResourceRequest instead.
+
+ @return the resource required by the ApplicationMaster
for
+ this application.]]>
+
+
+
+
+
+ ApplicationMaster for this
+ application.
+
+ @param resource the resource required by the ApplicationMaster
+ for this application.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ If the flag is true, running containers will not be killed when application
+ attempt fails and these containers will be retrieved by the new application
+ attempt on registration via
+ {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)}.
+
+
+ @param keepContainers
+ the flag which indicates whether to keep containers across
+ application attempts.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ getResource and getPriority of
+ ApplicationSubmissionContext.
+
+ Number of containers and Priority will be ignored.
+
+ @return ResourceRequest of AM container]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ LogAggregationContext of the application
+
+ @return LogAggregationContext
of the application]]>
+
+
+
+
+
+ LogAggregationContext for the application
+
+ @param logAggregationContext
+ for the application]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ It includes details such as:
+
+ - {@link ApplicationId} of the application.
+ - Application user.
+ - Application name.
+ - {@link Priority} of the application.
+ -
+ {@link ContainerLaunchContext} of the container in which the
+
ApplicationMaster
is executed.
+
+ -
+ maxAppAttempts. The maximum number of application attempts.
+ It should be no larger than the global number of max attempts in the
+ Yarn configuration.
+
+ -
+ attemptFailuresValidityInterval. The default value is -1.
+ When attemptFailuresValidityInterval in milliseconds is set to
+ {@literal >} 0, failures which happen outside of the validityInterval will
+ not be taken into the failure count. If the failure count
+ reaches maxAppAttempts, the application will be failed.
+
+ - Optional, application-specific {@link LogAggregationContext}
+
+
+ @see ContainerLaunchContext
+ @see ApplicationClientProtocol#submitApplication(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest)]]>
+
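+ For illustration, a rough sketch of filling in the fields listed above; the values are
+ hypothetical and "amContainerSpec" is assumed to be a prepared ContainerLaunchContext.
+
+   import org.apache.hadoop.yarn.api.records.*;
+
+   ApplicationSubmissionContext context =
+       app.getApplicationSubmissionContext();   // from YarnClient.createApplication()
+   context.setApplicationName("nightly-etl");
+   context.setQueue("default");
+   context.setPriority(Priority.newInstance(0));
+   context.setAMContainerSpec(amContainerSpec);
+   context.setResource(Resource.newInstance(2048, 1));
+   context.setMaxAppAttempts(2);
+   // Count only failures within the last 10 minutes towards maxAppAttempts.
+   context.setAttemptFailuresValidityInterval(10 * 60 * 1000L);
+   context.setKeepContainersAcrossApplicationAttempts(true);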
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Resource allocated to the container.
+ @return Resource
allocated to the container]]>
+
+
+
+
+ Priority at which the Container
was
+ allocated.
+ @return Priority
at which the Container
was
+ allocated]]>
+
+
+
+
+ ContainerToken for the container.
+ ContainerToken
is the security token used by the framework
+ to verify authenticity of any Container
.
+
+ The ResourceManager
, on container allocation provides a
+ secure token which is verified by the NodeManager
on
+ container launch.
+
+ Applications do not need to care about ContainerToken
, they
+ are transparently handled by the framework - the allocated
+ Container
includes the ContainerToken
.
+
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
+
+ @return ContainerToken
for the container]]>
+
+
+
+
+ The {@code ResourceManager} is the sole authority to allocate any
+ {@code Container} to applications. The allocated {@code Container}
+ is always on a single node and has a unique {@link ContainerId}. It has
+ a specific amount of {@link Resource} allocated.
+
+ It includes details such as:
+
+ - {@link ContainerId} for the container, which is globally unique.
+ -
+ {@link NodeId} of the node on which it is allocated.
+
+ - HTTP uri of the node.
+ - {@link Resource} allocated to the container.
+ - {@link Priority} at which the container was allocated.
+ -
+ Container {@link Token} of the container, used to securely verify
+ authenticity of the allocation.
+
+
+
+ Typically, an {@code ApplicationMaster} receives the {@code Container}
+ from the {@code ResourceManager} during resource-negotiation and then
+ talks to the {@code NodeManager} to start/stop containers.
+
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
+ @see ContainerManagementProtocol#stopContainers(org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationAttemptId of the application to which the
+ Container was assigned.
+
+ Note: If containers are kept alive across application attempts via
+ {@link ApplicationSubmissionContext#setKeepContainersAcrossApplicationAttempts(boolean)}
+ the ContainerId does not necessarily contain the current
+ running application attempt's ApplicationAttemptId. This
+ container can be allocated by a previously exited application attempt and
+ managed by the current running attempt, and thus have the previous application
+ attempt's ApplicationAttemptId.
+
+
+ @return ApplicationAttemptId of the application to which the
+ Container was assigned]]>
+
+
+
+
+ ContainerId,
+ which doesn't include the epoch. Note that this method will be marked as
+ deprecated, so please use getContainerId instead.
+ @return lower 32 bits of identifier of the ContainerId]]>
+
+
+
+
+ ContainerId. Upper 24 bits are
+ reserved as the epoch of the cluster, and lower 40 bits are reserved as the
+ sequential number of containers.
+ @return identifier of the ContainerId]]>
+
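+ For illustration, the epoch and the sequential container number can be separated from the
+ 64-bit identifier with simple bit operations, following the 24/40-bit split described above;
+ this decoding is only illustrative, not an API provided by ContainerId.
+
+   import org.apache.hadoop.yarn.api.records.ContainerId;
+
+   long id = containerId.getContainerId();
+   long epoch = id >>> 40;                   // upper 24 bits: cluster epoch
+   long sequence = id & ((1L << 40) - 1);    // lower 40 bits: sequential container number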
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerId
represents a globally unique identifier
+ for a {@link Container} in the cluster.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ LocalResource required by the container.
+ @return all LocalResource
required by the container]]>
+
+
+
+
+
+ LocalResource required by the container. All pre-existing
+ Map entries are cleared before adding the new Map
+ @param localResources LocalResource
required by the container]]>
+
+
+
+
+
+ Get application-specific binary service data. This is a map keyed
+ by the name of each {@link AuxiliaryService} that is configured on a
+ NodeManager, and each value corresponds to the application-specific data targeted
+ for the keyed {@link AuxiliaryService}.
+
+
+
+ This will be used to initialize this application on the specific
+ {@link AuxiliaryService} running on the NodeManager by calling
+ {@link AuxiliaryService#initializeApplication(ApplicationInitializationContext)}
+
+
+ @return application-specific binary service data]]>
+
+
+
+
+
+
+ Set application-specific binary service data. This is a map keyed
+ by the name of each {@link AuxiliaryService} that is configured on a
+ NodeManager, and the value corresponds to the application-specific data
+ targeted for the keyed {@link AuxiliaryService}. All pre-existing Map
+ entries are preserved.
+
+
+ @param serviceData
+ application-specific binary service data]]>
+
+
+
+
+ environment variables for the container.
+ @return environment variables for the container]]>
+
+
+
+
+
+ environment variables for the container. All pre-existing Map
+ entries are cleared before adding the new Map
+ @param environment environment variables for the container]]>
+
+
+
+
+ commands for launching the container.
+ @return the list of commands for launching the container]]>
+
+
+
+
+
+ commands for launching the container. All
+ pre-existing List entries are cleared before adding the new List
+ @param commands the list of commands for launching the container]]>
+
+
+
+
+ ApplicationACLs for the application.
+ @return all the ApplicationACL
s]]>
+
+
+
+
+
+ ApplicationACLs for the application. All pre-existing
+ Map entries are cleared before adding the new Map
+ @param acls ApplicationACL
s for the application]]>
+
+
+
+
+ It includes details such as:
+
+ - {@link ContainerId} of the container.
+ - {@link Resource} allocated to the container.
+ - User to whom the container is allocated.
+ - Security tokens (if security is enabled).
+ -
+ {@link LocalResource} necessary for running the container such
+ as binaries, jar, shared-objects, side-files etc.
+
+ - Optional, application-specific binary service data.
+ - Environment variables for the launched process.
+ - Command to launch the container.
+
+
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+ ContainerId of the container.
+
+ @return ContainerId
of the container.]]>
+
+
+
+
+
+
+
+ Resource of the container.
+
+ @return allocated Resource
of the container.]]>
+
+
+
+
+
+
+
+ NodeId where container is running.
+
+ @return allocated NodeId
where container is running.]]>
+
+
+
+
+
+
+
+ Priority of the container.
+
+ @return allocated Priority
of the container.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerState of the container.
+
+ @return final ContainerState
of the container.]]>
+
+
+
+
+
+
+
+ exit status of the container.
+
+ @return final exit status
of the container.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ It includes details such as:
+
+ - {@link ContainerId} of the container.
+ - Allocated Resources to the container.
+ - Assigned Node id.
+ - Assigned Priority.
+ - Creation Time.
+ - Finish Time.
+ - Container Exit Status.
+ - {@link ContainerState} of the container.
+ - Diagnostic information in case of errors.
+ - Log URL.
+ - nodeHttpAddress
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ State of a Container
.]]>
+
+
+
+
+
+
+
+
+
+ ContainerId of the container.
+ @return ContainerId
of the container]]>
+
+
+
+
+ ContainerState of the container.
+ @return ContainerState
of the container]]>
+
+
+
+
+ Get the exit status for the container.
+
+ Note: This is valid only for completed containers, i.e. containers
+ with state {@link ContainerState#COMPLETE}.
+ Otherwise, it returns ContainerExitStatus.INVALID.
+
+
+ Containers killed by the framework, either due to being released by
+ the application or being 'lost' due to node failures etc., have a special
+ exit code of ContainerExitStatus.ABORTED.
+
+ When a threshold number of the NodeManager's local directories or a
+ threshold number of its log directories become bad, the container is not
+ launched and exits with ContainerExitStatus.DISKS_FAILED.
+
+
+ @return exit status for the container]]>
+
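+ A minimal sketch of how an ApplicationMaster might interpret these exit codes
+ when processing completed containers from an allocate response; the
+ allocateResponse variable is hypothetical and the usual
+ org.apache.hadoop.yarn.api.records imports are assumed:
+
+   for (ContainerStatus status : allocateResponse.getCompletedContainersStatuses()) {
+     int exitStatus = status.getExitStatus();
+     if (exitStatus == ContainerExitStatus.ABORTED) {
+       // Released by the application or lost to a node failure; usually safe to re-request.
+     } else if (exitStatus == ContainerExitStatus.DISKS_FAILED) {
+       // NodeManager local/log directories went bad; retry the work elsewhere.
+     } else if (exitStatus != ContainerExitStatus.SUCCESS) {
+       // Genuine failure; consult status.getDiagnostics() for details.
+     }
+   }
+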
+
+
+
+ diagnostic messages for failed containers.
+ @return diagnostic messages for failed containers]]>
+
+
+
+
+ It provides details such as:
+
+ - {@code ContainerId} of the container.
+ - {@code ContainerState} of the container.
+ - Exit status of a completed container.
+ - Diagnostic message for a failed container.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+ Application.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ location of the resource to be localized.
+ @return location of the resource to be localized]]>
+
+
+
+
+
+ location of the resource to be localized.
+ @param resource location of the resource to be localized]]>
+
+
+
+
+ size of the resource to be localized.
+ @return size of the resource to be localized]]>
+
+
+
+
+
+ size of the resource to be localized.
+ @param size size of the resource to be localized]]>
+
+
+
+
+ timestamp of the resource to be localized, used
+ for verification.
+ @return timestamp of the resource to be localized]]>
+
+
+
+
+
+ timestamp of the resource to be localized, used
+ for verification.
+ @param timestamp timestamp of the resource to be localized]]>
+
+
+
+
+ LocalResourceType of the resource to be localized.
+ @return LocalResourceType
of the resource to be localized]]>
+
+
+
+
+
+ LocalResourceType of the resource to be localized.
+ @param type LocalResourceType
of the resource to be localized]]>
+
+
+
+
+ LocalResourceVisibility of the resource to be
+ localized.
+ @return LocalResourceVisibility
of the resource to be
+ localized]]>
+
+
+
+
+
+ LocalResourceVisibility of the resource to be
+ localized.
+ @param visibility LocalResourceVisibility
of the resource to be
+ localized]]>
+
+
+
+
+ pattern that should be used to extract entries from the
+ archive (only used when type is PATTERN
).
+ @return pattern that should be used to extract entries from the
+ archive.]]>
+
+
+
+
+
+ pattern that should be used to extract entries from the
+ archive (only used when type is PATTERN
).
+ @param pattern pattern that should be used to extract entries
+ from the archive.]]>
+
+
+
+
+
+
+
+
+
+
+ shouldBeUploadedToSharedCache
+ of this request]]>
+
+
+
+ LocalResource
represents a local resource required to
+ run a container.
+
+ The NodeManager
is responsible for localizing the resource
+ prior to launching the container.
+
+ Applications can specify {@link LocalResourceType} and
+ {@link LocalResourceVisibility}.
+
+ @see LocalResourceType
+ @see LocalResourceVisibility
+ @see ContainerLaunchContext
+ @see ApplicationSubmissionContext
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+
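+ A sketch of registering a jar that is already on HDFS as a LocalResource;
+ the path, the conf object and the containerLaunchContext variable are
+ hypothetical, and the usual FileSystem/ConverterUtils imports are assumed:
+
+   Path jarPath = new Path("hdfs:///apps/myapp/myapp.jar");   // hypothetical location
+   FileStatus jarStat = FileSystem.get(conf).getFileStatus(jarPath);
+   LocalResource appJar = LocalResource.newInstance(
+       ConverterUtils.getYarnUrlFromPath(jarPath),
+       LocalResourceType.FILE,
+       LocalResourceVisibility.APPLICATION,
+       jarStat.getLen(),
+       jarStat.getModificationTime());
+
+   Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
+   localResources.put("myapp.jar", appJar);
+   containerLaunchContext.setLocalResources(localResources);
+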
+
+
+
+
+
+
+
+
+
+
+ type
+ of a resource localized by the {@code NodeManager}.
+
+ The type can be one of:
+
+ -
+ {@link #FILE} - Regular file i.e. uninterpreted bytes.
+
+ -
+ {@link #ARCHIVE} - Archive, which is automatically unarchived by the
+
NodeManager
.
+
+ -
+ {@link #PATTERN} - A hybrid between {@link #ARCHIVE} and {@link #FILE}.
+
+
+
+ @see LocalResource
+ @see ContainerLaunchContext
+ @see ApplicationSubmissionContext
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+ visibility
+ of a resource localized by the {@code NodeManager}.
+
+ The visibility can be one of:
+
+ - {@link #PUBLIC} - Shared by all users on the node.
+ -
+ {@link #PRIVATE} - Shared among all applications of the
+ same user on the node.
+
+ -
+ {@link #APPLICATION} - Shared only among containers of the
+ same application on the node.
+
+
+
+ @see LocalResource
+ @see ContainerLaunchContext
+ @see ApplicationSubmissionContext
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ It includes details such as:
+
+ -
+ includePattern. It uses Java Regex to filter the log files
+ which match the defined include pattern and those log files
+ will be uploaded when the application finishes.
+
+ -
+ excludePattern. It uses Java Regex to filter the log files
+ which match the defined exclude pattern and those log files
+ will not be uploaded when application finishes. If the log file
+ name matches both the include and the exclude pattern, this file
+ will be excluded eventually.
+
+ -
+ rolledLogsIncludePattern. It uses Java Regex to filter the log files
+ which match the defined include pattern and those log files
+ will be aggregated in a rolling fashion.
+
+ -
+ rolledLogsExcludePattern. It uses Java Regex to filter the log files
+ which match the defined exclude pattern and those log files
+ will not be aggregated in a rolling fashion. If the log file
+ name matches both the include and the exclude pattern, this file
+ will be excluded eventually.
+
+
+
+ @see ApplicationSubmissionContext]]>
+
+
+
+
+
+
+
+
+
+ NodeManager for which the NMToken
+ is used to authenticate.
+ @return the {@link NodeId} of the NodeManager
for which the
+ NMToken is used to authenticate.]]>
+
+
+
+
+
+
+
+ NodeManager
+ @return the {@link Token} used for authenticating with NodeManager
]]>
+
+
+
+
+
+
+
+
+
+
+
+ The NMToken is used for authenticating communication with
+ NodeManager
+ It is issued by ResourceMananger
when ApplicationMaster
+ negotiates resource with ResourceManager
and
+ validated on NodeManager
side.
+ @see AllocateResponse#getNMTokens()]]>
+
+
+
+
+
+
+
+
+
+
+ hostname of the node.
+ @return hostname of the node]]>
+
+
+
+
+ port for communicating with the node.
+ @return port for communicating with the node]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ NodeId
is the unique identifier for a node.
+
+ It includes the hostname and port to uniquely
+ identify the node. Thus, it is unique across restarts of any
+ NodeManager
.
]]>
+
+
+
+
+
+
+
+
+
+ NodeId of the node.
+ @return NodeId
of the node]]>
+
+
+
+
+ NodeState of the node.
+ @return NodeState
of the node]]>
+
+
+
+
+ http address of the node.
+ @return http address of the node]]>
+
+
+
+
+ rack name for the node.
+ @return rack name for the node]]>
+
+
+
+
+ used Resource
on the node.
+ @return used Resource
on the node]]>
+
+
+
+
+ total Resource
on the node.
+ @return total Resource
on the node]]>
+
+
+
+
+ diagnostic health report of the node.
+ @return diagnostic health report of the node]]>
+
+
+
+
+ last timestamp at which the health report was received.
+ @return last timestamp at which the health report was received]]>
+
+
+
+
+
+
+
+
+
+ It includes details such as:
+
+ - {@link NodeId} of the node.
+ - HTTP Tracking URL of the node.
+ - Rack name for the node.
+ - Used {@link Resource} on the node.
+ - Total available {@link Resource} of the node.
+ - Number of running containers on the node.
+
+
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ State of a Node
.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ResourceManager.
+ @see PreemptionContract
+ @see StrictPreemptionContract]]>
+
+
+
+
+
+
+
+
+
+ ApplicationMaster about resources requested back by the
+ ResourceManager
.
+ @see AllocateRequest#setAskList(List)]]>
+
+
+
+
+ ApplicationMaster that may be reclaimed by the
+ ResourceManager
. If the AM prefers a different set of
+ containers, then it may checkpoint or kill containers matching the
+ description in {@link #getResourceRequest}.
+ @return Set of containers at risk if the contract is not met.]]>
+
+
+
+ ResourceManager.
+ The ApplicationMaster
(AM) can satisfy this request according
+ to its own priorities to prevent containers from being forcibly killed by
+ the platform.
+ @see PreemptionMessage]]>
+
+
+
+
+
+
+
+
+
+ ResourceManager]]>
+
+
+
+
+
+
+
+
+
+ The AM should decode both parts of the message. The {@link
+ StrictPreemptionContract} specifies particular allocations that the RM
+ requires back. The AM can checkpoint containers' state, adjust its execution
+ plan to move the computation, or take no action and hope that conditions that
+ caused the RM to ask for the container will change.
+
+ In contrast, the {@link PreemptionContract} also includes a description of
+ resources with a set of containers. If the AM releases containers matching
+ that profile, then the containers enumerated in {@link
+ PreemptionContract#getContainers()} may not be killed.
+
+ Each preemption message reflects the RM's current understanding of the
+ cluster state, so a request to return N containers may not
+ reflect containers the AM is releasing, recently exited containers the RM has
+ yet to learn about, or new containers allocated before the message was
+ generated. Conversely, an RM may request a different profile of containers in
+ subsequent requests.
+
+ The policy enforced by the RM is part of the scheduler. Generally, only
+ containers that have been requested consistently should be killed, but the
+ details are not specified.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The ACL is one of:
+
+ -
+ {@link #SUBMIT_APPLICATIONS} - ACL to submit applications to the queue.
+
+ - {@link #ADMINISTER_QUEUE} - ACL to administer the queue.
+
+
+ @see QueueInfo
+ @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+ name of the queue.
+ @return name of the queue]]>
+
+
+
+
+ configured capacity of the queue.
+ @return configured capacity of the queue]]>
+
+
+
+
+ maximum capacity of the queue.
+ @return maximum capacity of the queue]]>
+
+
+
+
+ current capacity of the queue.
+ @return current capacity of the queue]]>
+
+
+
+
+ child queues of the queue.
+ @return child queues of the queue]]>
+
+
+
+
+ running applications of the queue.
+ @return running applications of the queue]]>
+
+
+
+
+ QueueState of the queue.
+ @return QueueState
of the queue]]>
+
+
+
+
+ accessible node labels of the queue.
+ @return accessible node labels
of the queue]]>
+
+
+
+
+ default node label expression of the queue; this takes
+ effect only when the ApplicationSubmissionContext and
+ ResourceRequest don't specify their
+ NodeLabelExpression.
+
+ @return default node label expression of the queue]]>
+
+
+
+
+
+
+
+ It includes information such as:
+
+ - Queue name.
+ - Capacity of the queue.
+ - Maximum capacity of the queue.
+ - Current capacity of the queue.
+ - Child queues.
+ - Running applications.
+ - {@link QueueState} of the queue.
+
+
+ @see QueueState
+ @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ A queue is in one of:
+
+ - {@link #RUNNING} - normal state.
+ - {@link #STOPPED} - not accepting new application submissions.
+
+
+ @see QueueInfo
+ @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+ queue name of the queue.
+ @return queue name of the queue]]>
+
+
+
+
+ QueueACL for the given user.
+ @return list of QueueACL
for the given user]]>
+
+
+
+ QueueUserACLInfo
provides information {@link QueueACL} for
+ the given user.
+
+ @see QueueACL
+ @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ start time of the {@code ResourceManager} which is used to
+ generate globally unique {@link ReservationId}.
+
+ @return start time of the {@code ResourceManager}]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {@link ReservationId} represents the globally unique identifier for
+ a reservation.
+
+
+
+ The globally unique nature of the identifier is achieved by using the
+ cluster timestamp i.e. start-time of the {@code ResourceManager}
+ along with a monotonically increasing counter for the reservation.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ It includes:
+
+ - {@link Resource} required for each request.
+ -
+ Number of containers, of above specifications, which are required by the
+ application.
+
+ - Concurrency that indicates the gang size of the request.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ memory of the resource.
+ @return memory of the resource]]>
+
+
+
+
+
+ memory of the resource.
+ @param memory memory of the resource]]>
+
+
+
+
+ number of virtual cpu cores of the resource.
+
+ Virtual cores are a unit for expressing CPU parallelism. A node's capacity
+ should be configured with virtual cores equal to its number of physical cores.
+ A container should be requested with the number of cores it can saturate, i.e.
+ the average number of threads it expects to have runnable at a time.
+
+ @return num of virtual cpu cores of the resource]]>
+
+
+
+
+
+ number of virtual cpu cores of the resource.
+
+ Virtual cores are a unit for expressing CPU parallelism. A node's capacity
+ should be configured with virtual cores equal to its number of physical cores.
+ A container should be requested with the number of cores it can saturate, i.e.
+ the average number of threads it expects to have runnable at a time.
+
+ @param vCores number of virtual cpu cores of the resource]]>
+
+
+
+
+
+
+
+
+
+
+ Resource
models a set of computer resources in the
+ cluster.
+
+ Currently it models both memory and CPU.
+
+ The unit for memory is megabytes. CPU is modeled with virtual cores
+ (vcores), a unit for expressing parallelism. A node's capacity should
+ be configured with virtual cores equal to its number of physical cores. A
+ container should be requested with the number of cores it can saturate, i.e.
+ the average number of threads it expects to have runnable at a time.
+
+ Virtual cores take integer values and thus currently CPU-scheduling is
+ very coarse. A complementary axis for CPU requests that represents processing
+ power will likely be added in the future to enable finer-grained resource
+ configuration.
+
+ Typically, applications request Resource
of suitable
+ capability to run their component tasks.
+
+ @see ResourceRequest
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
+
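+ A minimal illustration of how an application would typically build such a
+ capability (the memory and vcore values are examples only):
+
+   // 2048 MB of memory and 1 virtual core per container.
+   Resource capability = Resource.newInstance(2048, 1);
+   int mb = capability.getMemory();            // 2048
+   int vcores = capability.getVirtualCores();  // 1
+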
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ blacklist of resources
+ for the application.
+
+ @see ResourceRequest
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ host/rack string represents an arbitrary
+ host name.
+
+ @param hostName host/rack on which the allocation is desired
+ @return whether the given host/rack string represents an arbitrary
+ host name]]>
+
+
+
+
+ Priority of the request.
+ @return Priority
of the request]]>
+
+
+
+
+
+ Priority of the request
+ @param priority Priority
of the request]]>
+
+
+
+
+ host/rack) on which the allocation
+ is desired.
+
+ A special value of * signifies that any resource
+ (host/rack) is acceptable.
+
+ @return resource (e.g. host/rack) on which the allocation
+ is desired]]>
+
+
+
+
+
+ host/rack) on which the allocation
+ is desired.
+
+ A special value of * signifies that any resource name
+ (e.g. host/rack) is acceptable.
+
+ @param resourceName (e.g. host/rack) on which the
+ allocation is desired]]>
+
+
+
+
+ Resource capability of the request.
+ @return Resource
capability of the request]]>
+
+
+
+
+
+ Resource capability of the request
+ @param capability Resource
capability of the request]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ResourceRequest. Defaults to true.
+
+ @return whether locality relaxation is enabled with this
+ ResourceRequest
.]]>
+
+
+
+
+
+ For a request at a network hierarchy level, set whether locality can be relaxed
+ to that level and beyond.
+
+
If the flag is off on a rack-level ResourceRequest
,
+ containers at that request's priority will not be assigned to nodes on that
+ request's rack unless requests specifically for those nodes have also been
+ submitted.
+
+
If the flag is off on an {@link ResourceRequest#ANY}-level
+ ResourceRequest
, containers at that request's priority will
+ only be assigned on racks for which specific requests have also been
+ submitted.
+
+
For example, to request a container strictly on a specific node, the
+ corresponding rack-level and any-level requests should have locality
+ relaxation set to false. Similarly, to request a container strictly on a
+ specific rack, the corresponding any-level request should have locality
+ relaxation set to false.
+
+ @param relaxLocality whether locality relaxation is enabled with this
+ ResourceRequest
.]]>
+
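+ A sketch of the rule above, requesting one container strictly on a specific
+ node (the host and rack names are hypothetical):
+
+   Priority pri = Priority.newInstance(1);
+   Resource cap = Resource.newInstance(1024, 1);
+
+   // Node-level request; relaxLocality defaults to true.
+   ResourceRequest nodeReq = ResourceRequest.newInstance(pri, "host1", cap, 1);
+   // Rack-level and ANY-level requests with locality relaxation disabled,
+   // so the container may only be placed on the named node.
+   ResourceRequest rackReq = ResourceRequest.newInstance(pri, "/rack1", cap, 1, false);
+   ResourceRequest anyReq = ResourceRequest.newInstance(pri, ResourceRequest.ANY, cap, 1, false);
+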
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ It includes:
+
+ - {@link Priority} of the request.
+ -
+ The name of the machine or rack on which the allocation is
+ desired. A special value of * signifies that
+ any host/rack is acceptable to the application.
+
+ - {@link Resource} required for each request.
+ -
+ Number of containers, of above specifications, which are required
+ by the application.
+
+ -
+ A boolean relaxLocality flag, defaulting to {@code true},
+ which tells the {@code ResourceManager} if the application wants
+ locality to be loose (i.e. allows fall-through to rack or any)
+ or strict (i.e. specify hard constraint on resource allocation).
+
+
+
+ @see Resource
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationMaster that may be reclaimed by the
+ ResourceManager
.
+ @return the set of {@link ContainerId} to be preempted.]]>
+
+
+
+ ApplicationMaster (AM)
+ may attempt to checkpoint work or adjust its execution plan to accommodate
+ it. In contrast to {@link PreemptionContract}, the AM has no flexibility in
+ selecting which resources to return to the cluster.
+ @see PreemptionMessage]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Token
is the security entity used by the framework
+ to verify authenticity of any resource.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ URL
represents a serializable {@link java.net.URL}.]]>
+
+
+
+
+
+
+
+
+
+
+
+ RMAppAttempt.]]>
+
+
+
+
+
+
+
+
+
+
+
+ ApplicationMaster.]]>
+
+
+
+
+
+
+
+
+
+ NodeManagers in the cluster.
+ @return number of NodeManager
s in the cluster]]>
+
+
+
+ YarnClusterMetrics
represents cluster metrics.
+
+ Currently only number of NodeManager
s is provided.
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ This class contains the information about a timeline domain, which is used
+ by a user to host a number of timeline entities, isolating them from others'.
+ The user can also define the reader and writer users/groups for the
+ domain, which is used to control the access to its entities.
+
+
+
+ The reader and writer users/groups pattern that the user can supply is the
+ same as what AccessControlList takes.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The class that contains the meta information of some conceptual entity
+ and its related events. The entity can be an application, an application
+ attempt, a container or any other user-defined object.
+
+
+
+ Primary filters will be used to index the entities in the
+ TimelineStore, so users should carefully choose the
+ information they want to store as the primary filters. The rest can be
+ stored as other information.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ name property as a
+ InetSocketAddress
. On a HA cluster,
+ this fetches the address corresponding to the RM identified by
+ {@link #RM_HA_ID}.
+ @param name property name.
+ @param defaultAddress the default value
+ @param defaultPort the default port
+ @return InetSocketAddress]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Default platform-specific CLASSPATH for YARN applications. A
+ comma-separated list of CLASSPATH entries constructed based on the client
+ OS environment expansion syntax.
+
+
+ Note: Use {@link #DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH} for
+ cross-platform use, i.e. when submitting an application from a Windows client
+ to a Linux/Unix server or vice versa.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The information is passed along to applications via
+ {@link StartContainersResponse#getAllServicesMetaData()} that is returned by
+ {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
+
+
+ @return meta-data for this service that should be made available to
+ applications.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The method used by administrators to ask the SharedCacheManager (SCM) to
+ run the cleaner task right away.
+
+
+ @param request request for the SharedCacheManager to run a cleaner task
+ @return the SharedCacheManager returns an empty response
+ on success and throws an exception on rejecting the request
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+ The protocol between administrators and the SharedCacheManager
+ ]]>
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml
new file mode 100644
index 0000000000..aa11aea255
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client.2.6.0.xml
@@ -0,0 +1,2427 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Get a report of the given Application.
+
+
+
+ In secure mode, YARN
verifies access to the application, queue
+ etc. before accepting the request.
+
+
+
+ If the user does not have VIEW_APP
access then the following
+ fields in the report will be set to stubbed values:
+
+ - host - set to "N/A"
+ - RPC port - set to -1
+ - client token - set to "N/A"
+ - diagnostics - set to "N/A"
+ - tracking URL - set to "N/A"
+ - original tracking URL - set to "N/A"
+ - resource usage report - all values are -1
+
+
+
+ @param appId
+ {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get a report (ApplicationReport) of all Applications in the cluster.
+
+
+
+ If the user does not have VIEW_APP
access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @return a list of reports for all applications
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of the given ApplicationAttempt.
+
+
+
+ In secure mode, YARN
verifies access to the application, queue
+ etc. before accepting the request.
+
+
+ @param applicationAttemptId
+ {@link ApplicationAttemptId} of the application attempt that needs
+ a report
+ @return application attempt report
+ @throws YarnException
+ @throws {@link ApplicationAttemptNotFoundException} if application attempt
+ not found
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of all (ApplicationAttempts) of Application in the cluster.
+
+
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+ application
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of the given Container.
+
+
+
+ In secure mode, YARN
verifies access to the application, queue
+ etc. before accepting the request.
+
+
+ @param containerId
+ {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws {@link ContainerNotFoundException} if container not found
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of all (Containers) of ApplicationAttempt in the cluster.
+
+
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+ attempt
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+ {@code
+ AMRMClient.createAMRMClientContainerRequest()
+ }
+ @return the newly create AMRMClient instance.]]>
+
+
+
+
+
+
+
+
+
+ RegisterApplicationMasterResponse
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ addContainerRequest are sent to the
+ ResourceManager. New containers assigned to the master are
+ retrieved. Status of completed containers and node health updates are also
+ retrieved. This also doubles up as a heartbeat to the ResourceManager and
+ must be made periodically. The call may not always return any new
+ allocations of containers. The application should not make concurrent
+ allocate requests; doing so may cause request loss.
+
+
+ Note: If the user has not removed container requests that have already
+ been satisfied, then a re-register may end up sending the entire set of
+ container requests to the RM (including matched requests), which could
+ cause the RM to allocate a large number of new containers.
+
+
+ @param progressIndicator Indicates progress made by the master
+ @return the response of the allocate request
+ @throws YarnException
+ @throws IOException]]>
+
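+ A sketch of the resulting heartbeat loop, assuming the AMRMClient has already
+ been started and registered (the amRMClient, progress and done variables are
+ hypothetical):
+
+   while (!done) {
+     // Doubles as the periodic heartbeat; progress is a float in [0, 1].
+     AllocateResponse response = amRMClient.allocate(progress);
+     for (Container allocated : response.getAllocatedContainers()) {
+       // Remove the matching ContainerRequest, then launch work on the container.
+     }
+     for (ContainerStatus completed : response.getCompletedContainersStatuses()) {
+       // Update bookkeeping / overall progress.
+     }
+     // Throttle to the configured heartbeat interval before the next call.
+   }
+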
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ allocate
+ @param req Resource request]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerRequests matching the given
+ parameters. These ContainerRequests should have been added via
+ addContainerRequest
earlier in the lifecycle. For performance,
+ the AMRMClient may return its internal collection directly without creating
+ a copy. Users should not perform mutable operations on the return value.
+ Each collection in the list contains requests with identical
+ Resource
size that fit in the given capability. In a
+ collection, requests will be returned in the same order as they were added.
+ @return Collection of request matching the parameters]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ AMRMClient. This cache must
+ be shared with the {@link NMClient} used to manage containers for the
+ AMRMClient
+
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+
+
+
+
+ AMRMClient. This cache must be
+ shared with the {@link NMClient} used to manage containers for the
+ AMRMClient
.
+
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache.]]>
+
+
+
+
+
+
+ check to return true for each 1000 ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int)}
+ and {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check]]>
+
+
+
+
+
+
+
+ check to return true for each
+ checkEveryMillis
ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call check
]]>
+
+
+
+
+
+
+
+
+ check to return true for each
+ checkEveryMillis
ms. In the main loop, this method will log
+ the message "waiting in main loop" for each logInterval
times
+ iteration to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call check
+ @param logInterval interval to log for each]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Within a priority, all container requests must have the same value for
+ locality relaxation: either enabled or disabled.
+ If locality relaxation is disabled, then across requests, locations at
+ different network levels may not be specified. E.g. it is invalid to make a
+ request for a specific node and another request for a specific rack.
+ If locality relaxation is disabled, then only within the same request
+ may a node and its rack be specified together. This allows for a specific
+ rack with a preference for a specific node within that rack.
+
+
+ To re-enable locality relaxation at a given priority, all pending requests
+ with locality relaxation disabled must be first removed. Then they can be
+ added back with locality relaxation enabled.
+
+ All getters return immutable values.]]>
+
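+ A minimal sketch of a rack-strict request built with this class (the host and
+ rack names are hypothetical, and amRMClient is assumed to be a started
+ AMRMClient):
+
+   Priority pri = Priority.newInstance(0);
+   Resource cap = Resource.newInstance(1024, 1);
+   // relaxLocality = false with both a node and its rack:
+   // strict to the rack, preferring the named node within it.
+   AMRMClient.ContainerRequest strictRequest = new AMRMClient.ContainerRequest(
+       cap, new String[] {"host1"}, new String[] {"/rack1"}, pri, false);
+   amRMClient.addContainerRequest(strictRequest);
+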
+
+
+
+
+
+
+
+
+
+
+
+ AMRMClient.ContainerRequest in an invalid way.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Start an allocated container.
+
+ The ApplicationMaster
or other applications that use the
+ client must provide the details of the allocated container, including the
+ Id, the assigned node's Id and the token via {@link Container}. In
+ addition, the AM needs to provide the {@link ContainerLaunchContext} as
+ well.
+
+ @param container the allocated container
+ @param containerLaunchContext the context information needed by the
+ NodeManager
to launch the
+ container
+ @return a map between the auxiliary service names and their outputs
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Stop a started container.
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the NodeManager
+
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Query the status of a container.
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the NodeManager
+
+ @return the status of a container
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+ Set whether the containers that are started by this client and are
+ still running should be stopped when the client stops. By default, the
+ feature is enabled. However, containers will be stopped only
+ when the service is stopped, i.e. after {@link NMClient#stop()}.
+
+ @param enabled whether the feature is enabled or not]]>
+
+
+
+
+
+ NMClient. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this NMClient
+
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+
+
+
+
+ NMClient. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this NMClient
+
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ By default Yarn client libraries {@link AMRMClient} and {@link NMClient} use
+ {@link #getSingleton()} instance of the cache.
+
+ - Using the singleton instance of the cache is appropriate when running a
+ single ApplicationMaster in the same JVM.
+ - When using the singleton, users don't need to do anything special,
+ {@link AMRMClient} and {@link NMClient} are already set up to use the default
+ singleton {@link NMTokenCache}
+
+
+ If running multiple Application Masters in the same JVM, a different cache
+ instance should be used for each Application Master.
+
+
+ -
+ If using the {@link AMRMClient} and the {@link NMClient}, setting up and using
+ an instance cache is as follows:
+
+
+
+ NMTokenCache nmTokenCache = new NMTokenCache();
+ AMRMClient rmClient = AMRMClient.createAMRMClient();
+ NMClient nmClient = NMClient.createNMClient();
+ nmClient.setNMTokenCache(nmTokenCache);
+ ...
+
+
+ -
+ If using the {@link AMRMClientAsync} and the {@link NMClientAsync}, setting up
+ and using an instance cache is as follows:
+
+
+
+ NMTokenCache nmTokenCache = new NMTokenCache();
+ AMRMClient rmClient = AMRMClient.createAMRMClient();
+ NMClient nmClient = NMClient.createNMClient();
+ nmClient.setNMTokenCache(nmTokenCache);
+ AMRMClientAsync rmClientAsync = new AMRMClientAsync(rmClient, 1000, [AMRM_CALLBACK]);
+ NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]);
+ ...
+
+
+ -
+ If using {@link ApplicationMasterProtocol} and
+ {@link ContainerManagementProtocol} directly, setting up and using an
+ instance cache is as follows:
+
+
+
+ NMTokenCache nmTokenCache = new NMTokenCache();
+ ...
+ ApplicationMasterProtocol amPro = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
+ ...
+ AllocateRequest allocateRequest = ...
+ ...
+ AllocateResponse allocateResponse = rmClient.allocate(allocateRequest);
+ for (NMToken token : allocateResponse.getNMTokens()) {
+ nmTokenCache.setToken(token.getNodeId().toString(), token.getToken());
+ }
+ ...
+ ContainerManagementProtocolProxy nmPro = ContainerManagementProtocolProxy(conf, nmTokenCache);
+ ...
+ nmPro.startContainer(container, containerContext);
+ ...
+
+
+
+ It is also possible to mix the usage of a client (AMRMClient
or
+ NMClient
, or the async versions of them) with a protocol proxy (
+ ContainerManagementProtocolProxy
or
+ ApplicationMasterProtocol
).]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Obtain a {@link YarnClientApplication} for a new application,
+ which in turn contains the {@link ApplicationSubmissionContext} and
+ {@link org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse}
+ objects.
+
+
+ @return {@link YarnClientApplication} built for a new application
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Submit a new application to YARN. It is a blocking call - it
+ will not return the {@link ApplicationId} until the application is
+ successfully submitted and accepted by the ResourceManager.
+
+
+
+ Users should provide an {@link ApplicationId} as part of the parameter
+ {@link ApplicationSubmissionContext} when submitting a new application,
+ otherwise it will throw the {@link ApplicationIdNotProvidedException}.
+
+
+ This internally calls {@link ApplicationClientProtocol#submitApplication
+ (SubmitApplicationRequest)}, and after that, it internally invokes
+ {@link ApplicationClientProtocol#getApplicationReport
+ (GetApplicationReportRequest)} and waits till it can make sure that the
+ application gets properly submitted. If RM fails over or RM restart
+ happens before ResourceManager saves the application's state,
+ {@link ApplicationClientProtocol
+ #getApplicationReport(GetApplicationReportRequest)} will throw
+ the {@link ApplicationNotFoundException}. This API automatically resubmits
+ the application with the same {@link ApplicationSubmissionContext} when it
+ catches the {@link ApplicationNotFoundException}.
+
+ @param appContext
+ {@link ApplicationSubmissionContext} containing all the details
+ needed to submit a new application
+ @return {@link ApplicationId} of the accepted application
+ @throws YarnException
+ @throws IOException
+ @see #createApplication()]]>
+
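+ A minimal submission sketch under these semantics; the Configuration object
+ conf, the application name, the AM ContainerLaunchContext (amContainerSpec)
+ and the requested Resource are placeholders to be filled in by the caller:
+
+   YarnClient yarnClient = YarnClient.createYarnClient();
+   yarnClient.init(conf);
+   yarnClient.start();
+
+   YarnClientApplication app = yarnClient.createApplication();
+   ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
+   appContext.setApplicationName("my-app");          // hypothetical name
+   appContext.setAMContainerSpec(amContainerSpec);   // ContainerLaunchContext built elsewhere
+   appContext.setResource(Resource.newInstance(1024, 1));
+
+   // Blocks until the application is accepted by the ResourceManager.
+   ApplicationId appId = yarnClient.submitApplication(appContext);
+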
+
+
+
+
+
+
+
+ Kill an application identified by given ID.
+
+
+ @param applicationId
+ {@link ApplicationId} of the application that needs to be killed
+ @throws YarnException
+ in case of errors or if YARN rejects the request due to
+ access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+
+
+
+
+
+
+
+
+ Get a report of the given Application.
+
+
+
+ In secure mode, YARN
verifies access to the application, queue
+ etc. before accepting the request.
+
+
+
+ If the user does not have VIEW_APP
access then the following
+ fields in the report will be set to stubbed values:
+
+ - host - set to "N/A"
+ - RPC port - set to -1
+ - client token - set to "N/A"
+ - diagnostics - set to "N/A"
+ - tracking URL - set to "N/A"
+ - original tracking URL - set to "N/A"
+ - resource usage report - all values are -1
+
+
+
+ @param appId
+ {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The AMRM token is required for AM-to-RM scheduling operations. For
+ managed Application Masters, YARN takes care of injecting it. For unmanaged
+ Application Masters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+
+ The AMRM token will be returned only if all the following conditions are
+ met:
+
+ - the requester is the owner of the ApplicationMaster
+ - the application master is an unmanaged ApplicationMaster
+ - the application master is in the ACCEPTED state
+
+ Otherwise this method returns null.
+
+ @param appId {@link ApplicationId} of the application to get the AMRM token
+ @return the AMRM token if available
+ @throws YarnException
+ @throws IOException]]>
+
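+ A sketch of how an unmanaged AM launcher might use this, assuming yarnClient
+ and appId are already available; the token type follows the YarnClient
+ signature:
+
+   org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
+       yarnClient.getAMRMToken(appId);
+   if (amrmToken != null) {
+     // Make the token visible to the unmanaged AM's RPC layer.
+     UserGroupInformation.getCurrentUser().addToken(amrmToken);
+   }
+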
+
+
+
+
+
+
+ Get a report (ApplicationReport) of all Applications in the cluster.
+
+
+
+ If the user does not have VIEW_APP
access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @return a list of reports of all running applications
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report (ApplicationReport) of Applications
+ matching the given application types in the cluster.
+
+
+
+ If the user does not have VIEW_APP
access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @param applicationTypes
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report (ApplicationReport) of Applications matching the given
+ application states in the cluster.
+
+
+
+ If the user does not have VIEW_APP
access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @param applicationStates
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+ Get a report (ApplicationReport) of Applications matching the given
+ application types and application states in the cluster.
+
+
+
+ If the user does not have VIEW_APP
access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @param applicationTypes
+ @param applicationStates
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get metrics ({@link YarnClusterMetrics}) about the cluster.
+
+
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of nodes ({@link NodeReport}) in the cluster.
+
+
+ @param states The {@link NodeState}s to filter on. If no filter states are
+ given, nodes in all states will be returned.
+ @return A list of node reports
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a delegation token so as to be able to talk to YARN using those tokens.
+
+ @param renewer
+ Address of the renewer who can renew these tokens when needed by
+ securely talking to YARN.
+ @return a delegation token ({@link Token}) that can be used to
+ talk to YARN
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get information ({@link QueueInfo}) about a given queue.
+
+
+ @param queueName
+ Name of the queue whose information is needed
+ @return queue information
+ @throws YarnException
+ in case of errors or if YARN rejects the request due to
+ access-control restrictions.
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get information ({@link QueueInfo}) about all queues, recursively if there
+ is a hierarchy
+
+
+ @return a list of queue-information for all queues
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get information ({@link QueueInfo}) about top level queues.
+
+
+ @return a list of queue-information for all the top-level queues
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get information ({@link QueueInfo}) about all the immediate children queues
+ of the given queue
+
+
+ @param parent
+ Name of the queue whose child-queues' information is needed
+ @return a list of queue-information for all queues who are direct children
+ of the given parent queue.
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get information about acls for current user on all the
+ existing queues.
+
+
+ @return a list of queue acls ({@link QueueUserACLInfo}) for
+ current user
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of the given ApplicationAttempt.
+
+
+
+ In secure mode, YARN
verifies access to the application, queue
+ etc. before accepting the request.
+
+
+ @param applicationAttemptId
+ {@link ApplicationAttemptId} of the application attempt that needs
+ a report
+ @return application attempt report
+ @throws YarnException
+ @throws {@link ApplicationAttemptNotFoundException} if application attempt
+ not found
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of all (ApplicationAttempts) of Application in the cluster.
+
+
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+ application.
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of the given Container.
+
+
+
+ In secure mode, YARN
verifies access to the application, queue
+ etc. before accepting the request.
+
+
+ @param containerId
+ {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws {@link ContainerNotFoundException} if container not found.
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of all (Containers) of ApplicationAttempt in the cluster.
+
+
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+ attempts
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+ Attempts to move the given application to the given queue.
+
+
+ @param appId
+ Application to move.
+ @param queue
+ Queue to place it in to.
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+
+
+
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and gang needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+
+
+
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies that
+ the user's requests can be fulfilled and that they respect a sharing policy
+ (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined
+ that the ReservationRequest is satisfiable, the {@code ResourceManager}
+ answers with a {@link ReservationSubmissionResponse} that includes a
+ {@link ReservationId}. Upon failure to find a valid allocation, the response
+ is an exception with a message detailing the reason for the failure.
+
+
+
+ The semantics guarantee that the {@link ReservationId} returned
+ corresponds to a valid reservation existing in the time range requested by
+ the user. The amount of capacity dedicated to such a reservation can vary
+ over time, depending on the allocation that has been determined. But it is
+ guaranteed to satisfy all the constraints expressed by the user in the
+ {@link ReservationDefinition}.
+
+
+ @param request request to submit a new Reservation
+ @return response contains the {@link ReservationId} on accepting the
+ submission
+ @throws YarnException if the reservation cannot be created successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user that has
+ previously submitted a Reservation asks to modify it.
+
+
+
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationDefinition}. Upon success the previous allocation is
+ atomically substituted by the new one, and on failure (i.e., if the system
+ cannot find a valid allocation for the updated request), the previous
+ allocation remains valid.
+
+
+ @param request to update an existing Reservation (the
+ {@link ReservationUpdateRequest} should refer to an existing valid
+ {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+ updated successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to remove an existing Reservation.
+
+
+ @param request to remove an existing Reservation (the
+ {@link ReservationDeleteRequest} should refer to an existing valid
+ {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+ deleted successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+ The interface used by clients to get node-to-labels mappings in the existing cluster.
+
+
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ The interface used by clients to get node labels in the cluster.
+
+
+ @return cluster node labels collection
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ allocate
+ @param req Resource request]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ check to return true for each 1000 ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int)}
+ and {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check]]>
+
+
+
+
+
+
+
+ check to return true for each
+ checkEveryMillis
ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call check
]]>
+
+
+
+
+
+
+
+
+ check to return true for each
+ checkEveryMillis
ms. In the main loop, this method will log
+ the message "waiting in main loop" for each logInterval
times
+ iteration to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call check
+ @param logInterval interval to log for each]]>
+
+
+
+
+
+
+
+
+
+ AMRMClientAsync handles communication with the ResourceManager
+ and provides asynchronous updates on events such as container allocations and
+ completions. It contains a thread that sends periodic heartbeats to the
+ ResourceManager.
+
+ It should be used by implementing a CallbackHandler:
+
+ {@code
+ class MyCallbackHandler implements AMRMClientAsync.CallbackHandler {
+ public void onContainersAllocated(List containers) {
+ [run tasks on the containers]
+ }
+
+ public void onContainersCompleted(List statuses) {
+ [update progress, check whether app is done]
+ }
+
+ public void onNodesUpdated(List updated) {}
+
+ public void onReboot() {}
+ }
+ }
+
+
+ The client's lifecycle should be managed similarly to the following:
+
+
+ {@code
+ AMRMClientAsync asyncClient =
+ createAMRMClientAsync(appAttId, 1000, new MyCallbackhandler());
+ asyncClient.init(conf);
+ asyncClient.start();
+ RegisterApplicationMasterResponse response = asyncClient
+ .registerApplicationMaster(appMasterHostname, appMasterRpcPort,
+ appMasterTrackingUrl);
+ asyncClient.addContainerRequest(containerRequest);
+ [... wait for application to complete]
+ asyncClient.unregisterApplicationMaster(status, appMsg, trackingUrl);
+ asyncClient.stop();
+ }
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ NMClientAsync handles communication with all the NodeManagers
+ and provides asynchronous updates on getting responses from them. It
+ maintains a thread pool to communicate with individual NMs where a number of
+ worker threads process requests to NMs by using {@link NMClientImpl}. The max
+ size of the thread pool is configurable through
+ {@link YarnConfiguration#NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE}.
+
+ It should be used in conjunction with a CallbackHandler. For example
+
+
+ {@code
+ class MyCallbackHandler implements NMClientAsync.CallbackHandler {
+ public void onContainerStarted(ContainerId containerId,
+ Map allServiceResponse) {
+ [post process after the container is started, process the response]
+ }
+
+ public void onContainerStatusReceived(ContainerId containerId,
+ ContainerStatus containerStatus) {
+ [make use of the status of the container]
+ }
+
+ public void onContainerStopped(ContainerId containerId) {
+ [post process after the container is stopped]
+ }
+
+ public void onStartContainerError(
+ ContainerId containerId, Throwable t) {
+ [handle the raised exception]
+ }
+
+ public void onGetContainerStatusError(
+ ContainerId containerId, Throwable t) {
+ [handle the raised exception]
+ }
+
+ public void onStopContainerError(
+ ContainerId containerId, Throwable t) {
+ [handle the raised exception]
+ }
+ }
+ }
+
+
+ The client's life-cycle should be managed like the following:
+
+
+ {@code
+ NMClientAsync asyncClient =
+ NMClientAsync.createNMClientAsync(new MyCallbackhandler());
+ asyncClient.init(conf);
+ asyncClient.start();
+ asyncClient.startContainer(container, containerLaunchContext);
+ [... wait for container being started]
+ asyncClient.getContainerStatus(container.getId(), container.getNodeId(),
+ container.getContainerToken());
+ [... handle the status in the callback instance]
+ asyncClient.stopContainer(container.getId(), container.getNodeId(),
+ container.getContainerToken());
+ [... wait for container being stopped]
+ asyncClient.stop();
+ }
+
]]>
+
+
+
+
+
+
+
+
+
+ NodeManager responds to indicate its
+ acceptance of the starting container request
+ @param containerId the Id of the container
+ @param allServiceResponse a Map between the auxiliary service names and
+ their outputs]]>
+
+
+
+
+
+
+ NodeManager responds with the status
+ of the container
+ @param containerId the Id of the container
+ @param containerStatus the status of the container]]>
+
+
+
+
+
+ NodeManager responds to indicate the
+ container is stopped.
+ @param containerId the Id of the container]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The callback interface needs to be implemented by {@link NMClientAsync}
+ users. The APIs are called when responses from NodeManager are
+ available.
+
+
+
+ Once a callback happens, the users can choose to act on it in a blocking or
+ non-blocking manner. If the action on a callback is done in a blocking
+ manner, some of the threads performing requests on NodeManagers may get
+ blocked depending on how many threads in the pool are busy.
+
+
+
+ The implementation of the callback function should not throw
+ unexpected exceptions. Otherwise, {@link NMClientAsync} will just
+ catch, log and then ignore them.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
new file mode 100644
index 0000000000..158528d6cf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_2.7.2.xml
@@ -0,0 +1,2581 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ In secure mode, YARN verifies access to the application, queue
+ etc. before accepting the request.
+
+ If the user does not have VIEW_APP access then the following
+ fields in the report will be set to stubbed values:
+
+ - host - set to "N/A"
+ - RPC port - set to -1
+ - client token - set to "N/A"
+ - diagnostics - set to "N/A"
+ - tracking URL - set to "N/A"
+ - original tracking URL - set to "N/A"
+ - resource usage report - all values are -1
+
+
+ @param appId
+ {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get a report (ApplicationReport) of all Applications in the cluster.
+
+
+
+ If the user does not have VIEW_APP access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @return a list of reports for all applications
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of the given ApplicationAttempt.
+
+
+
+ In secure mode, YARN verifies access to the application, queue
+ etc. before accepting the request.
+
+
+ @param applicationAttemptId
+ {@link ApplicationAttemptId} of the application attempt that needs
+ a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+ not found
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of all (ApplicationAttempts) of Application in the cluster.
+
+
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+ application
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of the given Container.
+
+
+
+ In secure mode, YARN verifies access to the application, queue
+ etc. before accepting the request.
+
+
+ @param containerId
+ {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of all (Containers) of ApplicationAttempt in the cluster.
+
+
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+ attempt
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+ {@code
+ AMRMClient.createAMRMClientContainerRequest()
+ }
+ @return the newly create AMRMClient instance.]]>
+
+
+
+
+
+
+
+
+
+ RegisterApplicationMasterResponse
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ addContainerRequest are sent to the ResourceManager.
+ New containers assigned to the master are retrieved. Status of completed
+ containers and node health updates are also retrieved. This also doubles
+ up as a heartbeat to the ResourceManager and must be made periodically.
+ The call may not always return any new allocations of containers.
+ The app should not make concurrent allocate requests, as doing so may
+ cause request loss.
+
+
+ Note: If the user has not removed container requests that have already
+ been satisfied, then a re-register may end up re-sending all container
+ requests to the RM (including matched requests), which means the RM could
+ end up allocating a large number of new containers to it.
+
+
+ @param progressIndicator Indicates progress made by the master
+ @return the response of the allocate request
+ @throws YarnException
+ @throws IOException]]>
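To make the heartbeat contract above concrete, here is a hedged sketch of a single-threaded allocate loop for the blocking AMRMClient (conf, progress and the appDone flag are illustrative, not part of this patch):

  AMRMClient<ContainerRequest> amrmClient = AMRMClient.createAMRMClient();
  amrmClient.init(conf);
  amrmClient.start();
  amrmClient.registerApplicationMaster("am-host", 0, "");

  amrmClient.addContainerRequest(new ContainerRequest(
      Resource.newInstance(1024, 1), null, null, Priority.newInstance(0)));

  while (!appDone) {
    // Heartbeat: never issue concurrent allocate calls.
    AllocateResponse response = amrmClient.allocate(progress);
    for (Container allocated : response.getAllocatedContainers()) {
      // hand the container to an NMClient for launching
    }
    Thread.sleep(1000);
  }
  amrmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, "", "");
  amrmClient.stop();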
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ allocate
+ @param req Resource request]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ContainerRequests matching the given
+ parameters. These ContainerRequests should have been added via
+ addContainerRequest earlier in the lifecycle. For performance,
+ the AMRMClient may return its internal collection directly without creating
+ a copy. Users should not perform mutable operations on the return value.
+ Each collection in the list contains requests with identical
+ Resource size that fit in the given capability. In a
+ collection, requests will be returned in the same order as they were added.
+ @return Collection of requests matching the parameters]]>
+
+
+
+
+
+
+
+
+
+
+
+
+ AMRMClient. This cache must
+ be shared with the {@link NMClient} used to manage containers for the
+ AMRMClient
+
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+
+
+
+
+ AMRMClient. This cache must be
+ shared with the {@link NMClient} used to manage containers for the
+ AMRMClient.
+
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache.]]>
+
+
+
+
+
+
+ check to return true for each 1000 ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int)}
+ and {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check]]>
+
+
+
+
+
+
+
+ check to return true for each checkEveryMillis ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call check]]>
+
+
+
+
+
+
+
+
+ check to return true for each checkEveryMillis ms. In the main loop,
+ this method will log the message "waiting in main loop" every
+ logInterval iterations to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call check
+ @param logInterval interval to log for each]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Within a priority, all container requests must have the same value for
+ locality relaxation, either enabled or disabled.
+ If locality relaxation is disabled, then across requests, locations at
+ different network levels may not be specified. E.g., it is invalid to make a
+ request for a specific node and another request for a specific rack.
+ If locality relaxation is disabled, then only within the same request,
+ a node and its rack may be specified together. This allows for a specific
+ rack with a preference for a specific node within that rack.
+
+
+ To re-enable locality relaxation at a given priority, all pending requests
+ with locality relaxation disabled must be first removed. Then they can be
+ added back with locality relaxation enabled.
+
+ All getters return immutable values.]]>
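A hedged sketch of a request that follows the locality-relaxation rules above: with relaxation disabled, a node and its rack can be named together in one request (host and rack names are illustrative):

  Priority p = Priority.newInstance(1);
  ContainerRequest nodeWithRackFallback = new ContainerRequest(
      Resource.newInstance(2048, 1),
      new String[] {"host1.example.com"},   // preferred node
      new String[] {"/default-rack"},       // its rack
      p,
      false);                               // locality relaxation disabled
  amrmClient.addContainerRequest(nodeWithRackFallback);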
+
+
+
+
+
+
+
+
+
+
+
+
+ AMRMClient.ContainerRequest in an invalid way.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Start an allocated container.
+
+ The ApplicationMaster or other applications that use the
+ client must provide the details of the allocated container, including the
+ Id, the assigned node's Id and the token via {@link Container}. In
+ addition, the AM needs to provide the {@link ContainerLaunchContext} as
+ well.
+
+ @param container the allocated container
+ @param containerLaunchContext the context information needed by the
+ NodeManager to launch the
+ container
+ @return a map between the auxiliary service names and their outputs
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Stop a started container.
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the NodeManager
+
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Query the status of a container.
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the NodeManager
+
+ @return the status of a container
+ @throws YarnException
+ @throws IOException]]>
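A hedged end-to-end sketch of the three blocking NMClient calls documented above (container is assumed to come from an AMRMClient allocation; the launch command is illustrative):

  NMClient nmClient = NMClient.createNMClient();
  nmClient.init(conf);
  nmClient.start();

  ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(
      Collections.<String, LocalResource>emptyMap(),
      Collections.<String, String>emptyMap(),
      Collections.singletonList("sleep 60"),   // illustrative command
      null, null, null);

  // Start, query, then stop the allocated container.
  Map<String, ByteBuffer> serviceData = nmClient.startContainer(container, ctx);
  ContainerStatus status =
      nmClient.getContainerStatus(container.getId(), container.getNodeId());
  nmClient.stopContainer(container.getId(), container.getNodeId());
  nmClient.stop();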
+
+
+
+
+
+ Set whether the containers that are started by this client, and are
+ still running, should be stopped when the client stops. By default, the
+ feature is enabled. However, containers will be stopped only
+ when the service is stopped, i.e. after {@link NMClient#stop()}.
+
+ @param enabled whether the feature is enabled or not]]>
+
+
+
+
+
+ NMClient. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this NMClient
+
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+
+
+
+
+ NMClient. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this NMClient
+
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ By default Yarn client libraries {@link AMRMClient} and {@link NMClient} use
+ {@link #getSingleton()} instance of the cache.
+
+ -
+ Using the singleton instance of the cache is appropriate when running a
+ single ApplicationMaster in the same JVM.
+
+ -
+ When using the singleton, users don't need to do anything special,
+ {@link AMRMClient} and {@link NMClient} are already set up to use the
+ default singleton {@link NMTokenCache}
+
+
+ If running multiple Application Masters in the same JVM, a different cache
+ instance should be used for each Application Master.
+
+ -
+ If using the {@link AMRMClient} and the {@link NMClient}, setting up
+ and using an instance cache is as follows:
+
+ NMTokenCache nmTokenCache = new NMTokenCache();
+ AMRMClient rmClient = AMRMClient.createAMRMClient();
+ NMClient nmClient = NMClient.createNMClient();
+ nmClient.setNMTokenCache(nmTokenCache);
+ ...
+
+
+ -
+ If using the {@link AMRMClientAsync} and the {@link NMClientAsync},
+ setting up and using an instance cache is as follows:
+
+ NMTokenCache nmTokenCache = new NMTokenCache();
+ AMRMClient rmClient = AMRMClient.createAMRMClient();
+ NMClient nmClient = NMClient.createNMClient();
+ nmClient.setNMTokenCache(nmTokenCache);
+ AMRMClientAsync rmClientAsync = new AMRMClientAsync(rmClient, 1000, [AMRM_CALLBACK]);
+ NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]);
+ ...
+
+
+ -
+ If using {@link ApplicationMasterProtocol} and
+ {@link ContainerManagementProtocol} directly, setting up and using an
+ instance cache is as follows:
+
+ NMTokenCache nmTokenCache = new NMTokenCache();
+ ...
+ ApplicationMasterProtocol amPro = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
+ ...
+ AllocateRequest allocateRequest = ...
+ ...
+ AllocateResponse allocateResponse = rmClient.allocate(allocateRequest);
+ for (NMToken token : allocateResponse.getNMTokens()) {
+ nmTokenCache.setToken(token.getNodeId().toString(), token.getToken());
+ }
+ ...
+ ContainerManagementProtocolProxy nmPro = ContainerManagementProtocolProxy(conf, nmTokenCache);
+ ...
+ nmPro.startContainer(container, containerContext);
+ ...
+
+
+
+ It is also possible to mix the usage of a client ({@code AMRMClient} or
+ {@code NMClient}, or the async versions of them) with a protocol proxy
+ ({@code ContainerManagementProtocolProxy} or
+ {@code ApplicationMasterProtocol}).]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The method to claim a resource with the SharedCacheManager.
+ The client uses a checksum to identify the resource and an
+ {@link ApplicationId} to identify which application will be using the
+ resource.
+
+
+
+ The SharedCacheManager responds with whether or not the
+ resource exists in the cache. If the resource exists, a Path
+ to the resource in the shared cache is returned. If the resource does not
+ exist, null is returned instead.
+
+
+ @param applicationId ApplicationId of the application using the resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource
+ @return Path to the resource, or null if it does not exist]]>
+
+
+
+
+
+
+
+
+ The method to release a resource with the SharedCacheManager.
+ This method is called once an application is no longer using a claimed
+ resource in the shared cache. The client uses a checksum to identify the
+ resource and an {@link ApplicationId} to identify which application is
+ releasing the resource.
+
+
+
+ Note: This method is an optimization and the client is not required to call
+ it for correctness.
+
+
+ @param applicationId ApplicationId of the application releasing the
+ resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource]]>
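A hedged sketch tying the use/release pair above together (appId, conf and the local jar path are illustrative; the checksum doubles as the resource key):

  SharedCacheClient scClient = SharedCacheClient.createSharedCacheClient();
  scClient.init(conf);
  scClient.start();

  String resourceKey = scClient.getFileChecksum(new Path("/tmp/job.jar"));
  Path cached = scClient.use(appId, resourceKey);
  if (cached == null) {
    // Not in the shared cache: upload the resource the conventional way.
  }

  // Optional optimization once the application no longer needs the resource.
  scClient.release(appId, resourceKey);
  scClient.stop();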
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Obtain a {@link YarnClientApplication} for a new application,
+ which in turn contains the {@link ApplicationSubmissionContext} and
+ {@link org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse}
+ objects.
+
+
+ @return {@link YarnClientApplication} built for a new application
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Submit a new application to YARN. It is a blocking call; it
+ will not return an {@link ApplicationId} until the application is
+ successfully submitted to and accepted by the ResourceManager.
+
+
+
+ Users should provide an {@link ApplicationId} as part of the parameter
+ {@link ApplicationSubmissionContext} when submitting a new application,
+ otherwise it will throw the {@link ApplicationIdNotProvidedException}.
+
+
+ This internally calls {@link ApplicationClientProtocol#submitApplication
+ (SubmitApplicationRequest)}, and after that, it internally invokes
+ {@link ApplicationClientProtocol#getApplicationReport
+ (GetApplicationReportRequest)} and waits till it can make sure that the
+ application gets properly submitted. If RM fails over or RM restart
+ happens before ResourceManager saves the application's state,
+ {@link ApplicationClientProtocol
+ #getApplicationReport(GetApplicationReportRequest)} will throw
+ the {@link ApplicationNotFoundException}. This API automatically resubmits
+ the application with the same {@link ApplicationSubmissionContext} when it
+ catches the {@link ApplicationNotFoundException}
+
+ @param appContext
+ {@link ApplicationSubmissionContext} containing all the details
+ needed to submit a new application
+ @return {@link ApplicationId} of the accepted application
+ @throws YarnException
+ @throws IOException
+ @see #createApplication()]]>
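A hedged sketch of the createApplication/submitApplication flow described above (queue, resources and the AM command are illustrative placeholders):

  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();

  YarnClientApplication app = yarnClient.createApplication();
  ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
  appContext.setApplicationName("my-app");
  appContext.setQueue("default");
  appContext.setResource(Resource.newInstance(1024, 1));
  appContext.setAMContainerSpec(ContainerLaunchContext.newInstance(
      Collections.<String, LocalResource>emptyMap(),
      Collections.<String, String>emptyMap(),
      Collections.singletonList("my-am-launch-command"),   // illustrative
      null, null, null));

  ApplicationId appId = yarnClient.submitApplication(appContext);
  ApplicationReport report = yarnClient.getApplicationReport(appId);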
+
+
+
+
+
+
+
+
+ Kill an application identified by given ID.
+
+
+ @param applicationId
+ {@link ApplicationId} of the application that needs to be killed
+ @throws YarnException
+ in case of errors or if YARN rejects the request due to
+ access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+
+
+
+
+
+
+
+
+ Get a report of the given Application.
+
+
+
+ In secure mode, YARN verifies access to the application, queue
+ etc. before accepting the request.
+
+
+
+ If the user does not have VIEW_APP access then the following
+ fields in the report will be set to stubbed values:
+
+ - host - set to "N/A"
+ - RPC port - set to -1
+ - client token - set to "N/A"
+ - diagnostics - set to "N/A"
+ - tracking URL - set to "N/A"
+ - original tracking URL - set to "N/A"
+ - resource usage report - all values are -1
+
+
+ @param appId
+ {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The AMRM token is required for AM to RM scheduling operations. For
+ managed ApplicationMasters, YARN takes care of injecting it. For unmanaged
+ ApplicationMasters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+
+ The AMRM token will be returned only if all the following conditions are
+ met:
+
+ - the requester is the owner of the ApplicationMaster
+ - the application master is an unmanaged ApplicationMaster
+ - the application master is in ACCEPTED state
+
+ Otherwise, this method returns null.
+
+ @param appId {@link ApplicationId} of the application to get the AMRM token
+ @return the AMRM token if available
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get a report (ApplicationReport) of all Applications in the cluster.
+
+
+
+ If the user does not have VIEW_APP access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @return a list of reports of all running applications
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report (ApplicationReport) of Applications
+ matching the given application types in the cluster.
+
+
+
+ If the user does not have VIEW_APP access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @param applicationTypes
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report (ApplicationReport) of Applications matching the given
+ application states in the cluster.
+
+
+
+ If the user does not have VIEW_APP access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @param applicationStates
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+ Get a report (ApplicationReport) of Applications matching the given
+ application types and application states in the cluster.
+
+
+
+ If the user does not have VIEW_APP access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+
+
+ @param applicationTypes
+ @param applicationStates
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
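A hedged sketch of the filtered query above, listing running applications of one type (the type string and yarnClient are illustrative):

  List<ApplicationReport> reports = yarnClient.getApplications(
      Collections.singleton("MAPREDUCE"),
      EnumSet.of(YarnApplicationState.RUNNING));
  for (ApplicationReport r : reports) {
    System.out.println(r.getApplicationId() + " " + r.getProgress());
  }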
+
+
+
+
+
+
+
+ Get metrics ({@link YarnClusterMetrics}) about the cluster.
+
+
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of nodes ({@link NodeReport}) in the cluster.
+
+
+ @param states The {@link NodeState}s to filter on. If no filter states are
+ given, nodes in all states will be returned.
+ @return A list of node reports
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a delegation token so as to be able to talk to YARN using those tokens.
+
+ @param renewer
+ Address of the renewer who can renew these tokens when needed by
+ securely talking to YARN.
+ @return a delegation token ({@link Token}) that can be used to
+ talk to YARN
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get information ({@link QueueInfo}) about a given queue.
+
+
+ @param queueName
+ Name of the queue whose information is needed
+ @return queue information
+ @throws YarnException
+ in case of errors or if YARN rejects the request due to
+ access-control restrictions.
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get information ({@link QueueInfo}) about all queues, recursively if there
+ is a hierarchy
+
+
+ @return a list of queue-information for all queues
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get information ({@link QueueInfo}) about top level queues.
+
+
+ @return a list of queue-information for all the top-level queues
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get information ({@link QueueInfo}) about all the immediate children queues
+ of the given queue
+
+
+ @param parent
+ Name of the queue whose child-queues' information is needed
+ @return a list of queue-information for all queues who are direct children
+ of the given parent queue.
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ Get information about acls for current user on all the
+ existing queues.
+
+
+ @return a list of queue acls ({@link QueueUserACLInfo}) for
+ current user
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of the given ApplicationAttempt.
+
+
+
+ In secure mode, YARN verifies access to the application, queue
+ etc. before accepting the request.
+
+
+ @param applicationAttemptId
+ {@link ApplicationAttemptId} of the application attempt that needs
+ a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+ not found
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of all (ApplicationAttempts) of Application in the cluster.
+
+
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+ application.
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of the given Container.
+
+
+
+ In secure mode, YARN verifies access to the application, queue
+ etc. before accepting the request.
+
+
+ @param containerId
+ {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found.
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ Get a report of all (Containers) of ApplicationAttempt in the cluster.
+
+
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+ attempts
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+ Attempts to move the given application to the given queue.
+
+
+ @param appId
+ Application to move.
+ @param queue
+ Queue to place it into.
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+
+
+
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and gang needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+
+
+
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies that
+ the user's requests can be fulfilled, and that they respect a sharing policy
+ (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined
+ that the ReservationRequest is satisfiable, the {@code ResourceManager}
+ answers with a {@link ReservationSubmissionResponse} that includes a
+ {@link ReservationId}. Upon failure to find a valid allocation the response
+ is an exception with the message detailing the reason of failure.
+
+
+
+ The semantics guarantee that the {@link ReservationId} returned
+ corresponds to a valid reservation existing in the time range requested by
+ the user. The amount of capacity dedicated to such a reservation can vary
+ over time, depending on the allocation that has been determined. But it is
+ guaranteed to satisfy all the constraints expressed by the user in the
+ {@link ReservationDefinition}.
+
+
+ @param request request to submit a new Reservation
+ @return response contains the {@link ReservationId} on accepting the
+ submission
+ @throws YarnException if the reservation cannot be created successfully
+ @throws IOException]]>
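A hedged sketch of building and submitting the reservation request described above, against the 2.6/2.7 records documented in this file (queue name, time window and sizes are illustrative):

  long now = System.currentTimeMillis();
  ReservationRequest stage = ReservationRequest.newInstance(
      Resource.newInstance(1024, 1), 10);   // 10 x 1GB containers
  ReservationRequests stages = ReservationRequests.newInstance(
      Collections.singletonList(stage), ReservationRequestInterpreter.R_ALL);
  ReservationDefinition definition = ReservationDefinition.newInstance(
      now + 5 * 60 * 1000L,    // arrival: 5 minutes from now
      now + 65 * 60 * 1000L,   // deadline: one hour later
      stages, "my-reservation");

  ReservationSubmissionRequest request =
      ReservationSubmissionRequest.newInstance(definition, "default");
  ReservationId reservationId =
      yarnClient.submitReservation(request).getReservationId();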
+
+
+
+
+
+
+
+
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user that has
+ previously submitted a Reservation asks to modify it.
+
+
+
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationDefinition}. Upon success the previous allocation is
+ atomically substituted by the new one, and on failure (i.e., if the system
+ cannot find a valid allocation for the updated request), the previous
+ allocation remains valid.
+
+
+ @param request to update an existing Reservation (the
+ {@link ReservationUpdateRequest} should refer to an existing valid
+ {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+ updated successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to remove an existing Reservation.
+
+
+ @param request to remove an existing Reservation (the
+ {@link ReservationDeleteRequest} should refer to an existing valid
+ {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+ deleted successfully
+ @throws IOException]]>
+
+
+
+
+
+
+
+ The interface used by clients to get node-to-labels mappings in the existing cluster
+
+
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ The interface used by clients to get the labels-to-nodes mapping
+ in the existing cluster
+
+
+ @return labels to nodes mappings
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+ The interface used by clients to get the labels-to-nodes mapping
+ for specified labels in the existing cluster
+
+
+ @param labels labels for which labels to nodes mapping has to be retrieved
+ @return labels to nodes mappings for specific labels
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+ The interface used by clients to get node labels in the cluster
+
+
+ @return cluster node labels collection
+ @throws YarnException
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ allocate
+ @param req Resource request]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ check to return true for each 1000 ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int)}
+ and {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check]]>
+
+
+
+
+
+
+
+ check to return true for each checkEveryMillis ms.
+ See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call check]]>
+
+
+
+
+
+
+
+
+ check to return true for each checkEveryMillis ms. In the main loop,
+ this method will log the message "waiting in main loop" every
+ logInterval iterations to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call check
+ @param logInterval interval to log for each]]>
+
+
+
+
+
+
+
+
+
+ AMRMClientAsync handles communication with the ResourceManager
+ and provides asynchronous updates on events such as container allocations and
+ completions. It contains a thread that sends periodic heartbeats to the
+ ResourceManager.
+
+ It should be used by implementing a CallbackHandler:
+
+ {@code
+ class MyCallbackHandler implements AMRMClientAsync.CallbackHandler {
+ public void onContainersAllocated(List containers) {
+ [run tasks on the containers]
+ }
+
+ public void onContainersCompleted(List statuses) {
+ [update progress, check whether app is done]
+ }
+
+ public void onNodesUpdated(List updated) {}
+
+ public void onReboot() {}
+ }
+ }
+
+
+ The client's lifecycle should be managed similarly to the following:
+
+
+ {@code
+ AMRMClientAsync asyncClient =
+ createAMRMClientAsync(appAttId, 1000, new MyCallbackhandler());
+ asyncClient.init(conf);
+ asyncClient.start();
+ RegisterApplicationMasterResponse response = asyncClient
+ .registerApplicationMaster(appMasterHostname, appMasterRpcPort,
+ appMasterTrackingUrl);
+ asyncClient.addContainerRequest(containerRequest);
+ [... wait for application to complete]
+ asyncClient.unregisterApplicationMaster(status, appMsg, trackingUrl);
+ asyncClient.stop();
+ }
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ NMClientAsync handles communication with all the NodeManagers
+ and provides asynchronous updates on getting responses from them. It
+ maintains a thread pool to communicate with individual NMs where a number of
+ worker threads process requests to NMs by using {@link NMClientImpl}. The max
+ size of the thread pool is configurable through
+ {@link YarnConfiguration#NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE}.
+
+ It should be used in conjunction with a CallbackHandler. For example
+
+
+ {@code
+ class MyCallbackHandler implements NMClientAsync.CallbackHandler {
+ public void onContainerStarted(ContainerId containerId,
+ Map allServiceResponse) {
+ [post process after the container is started, process the response]
+ }
+
+ public void onContainerStatusReceived(ContainerId containerId,
+ ContainerStatus containerStatus) {
+ [make use of the status of the container]
+ }
+
+ public void onContainerStopped(ContainerId containerId) {
+ [post process after the container is stopped]
+ }
+
+ public void onStartContainerError(
+ ContainerId containerId, Throwable t) {
+ [handle the raised exception]
+ }
+
+ public void onGetContainerStatusError(
+ ContainerId containerId, Throwable t) {
+ [handle the raised exception]
+ }
+
+ public void onStopContainerError(
+ ContainerId containerId, Throwable t) {
+ [handle the raised exception]
+ }
+ }
+ }
+
+
+ The client's life-cycle should be managed like the following:
+
+
+ {@code
+ NMClientAsync asyncClient =
+ NMClientAsync.createNMClientAsync(new MyCallbackhandler());
+ asyncClient.init(conf);
+ asyncClient.start();
+ asyncClient.startContainer(container, containerLaunchContext);
+ [... wait for container being started]
+ asyncClient.getContainerStatus(container.getId(), container.getNodeId(),
+ container.getContainerToken());
+ [... handle the status in the callback instance]
+ asyncClient.stopContainer(container.getId(), container.getNodeId(),
+ container.getContainerToken());
+ [... wait for container being stopped]
+ asyncClient.stop();
+ }
+
]]>
+
+
+
+
+
+
+
+
+
+ NodeManager responds to indicate its
+ acceptance of the starting container request
+ @param containerId the Id of the container
+ @param allServiceResponse a Map between the auxiliary service names and
+ their outputs]]>
+
+
+
+
+
+
+ NodeManager responds with the status
+ of the container
+ @param containerId the Id of the container
+ @param containerStatus the status of the container]]>
+
+
+
+
+
+ NodeManager responds to indicate the
+ container is stopped.
+ @param containerId the Id of the container]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The callback interface needs to be implemented by {@link NMClientAsync}
+ users. The APIs are called when responses from NodeManager are
+ available.
+
+
+
+ Once a callback happens, the users can choose to act on it in a blocking or
+ non-blocking manner. If the action on a callback is done in a blocking
+ manner, some of the threads performing requests on NodeManagers may get
+ blocked depending on how many threads in the pool are busy.
+
+
+
+ The implementation of the callback function should not throw
+ unexpected exceptions. Otherwise, {@link NMClientAsync} will just
+ catch, log and then ignore them.
+
]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml
new file mode 100644
index 0000000000..9e37e8a059
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.6.0.xml
@@ -0,0 +1,2870 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Type of proxy.
+ @return Proxy to the ResourceManager for the specified client protocol.
+ @throws IOException]]>
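Assuming the factory method documented here is ClientRMProxy#createRMProxy, a hedged usage sketch (the RM address and HA settings come from the configuration):

  Configuration conf = new YarnConfiguration();
  ApplicationClientProtocol appClient =
      ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
  GetNewApplicationResponse newApp =
      appClient.getNewApplication(GetNewApplicationRequest.newInstance());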
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Type information of the proxy
+ @return Proxy to the RM
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+
+
+ @param entities
+ the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException
+ @throws YarnException]]>
+
+
+
+
+
+
+
+
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+
+
+ @param domain
+ an {@link TimelineDomain} object
+ @throws IOException
+ @throws YarnException]]>
+
+
+
+
+
+
+
+
+ Get a delegation token so as to be able to talk to the timeline server in a
+ secure way.
+
+
+ @param renewer
+ Address of the renewer who can renew these tokens when needed by
+ securely talking to the timeline server
+ @return a delegation token ({@link Token}) that can be used to talk to the
+ timeline server
+ @throws IOException
+ @throws YarnException]]>
+
+
+
+
+
+
+
+
+ Renew a timeline delegation token.
+
+
+ @param timelineDT
+ the delegation token to renew
+ @return the new expiration time
+ @throws IOException
+ @throws YarnException]]>
+
+
+
+
+
+
+
+
+ Cancel a timeline delegation token.
+
+
+ @param timelineDT
+ the delegation token to cancel
+ @throws IOException
+ @throws YarnException]]>
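A hedged sketch of publishing a single entity through the TimelineClient methods documented above (entity type and id are illustrative; conf is assumed to be a YarnConfiguration):

  TimelineClient timelineClient = TimelineClient.createTimelineClient();
  timelineClient.init(conf);
  timelineClient.start();

  TimelineEntity entity = new TimelineEntity();
  entity.setEntityType("MY_APP_EVENT");
  entity.setEntityId("event_001");
  entity.setStartTime(System.currentTimeMillis());

  TimelinePutResponse response = timelineClient.putEntities(entity);
  if (!response.getErrors().isEmpty()) {
    // Inspect the TimelinePutResponse.TimelinePutError entries.
  }
  timelineClient.stop();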
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ parameterized event of type T]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ labels map]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ labels map]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ labels map]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Host can have multiple Nodes]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ label]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The object type on which this state machine operates.
+ @param The state of the entity.
+ @param The external eventType to be handled.
+ @param The event object.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
new file mode 100644
index 0000000000..f8773363f1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_2.7.2.xml
@@ -0,0 +1,3323 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Type of proxy.
+ @return Proxy to the ResourceManager for the specified client protocol.
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Type information of the proxy
+ @return Proxy to the RM
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+
+
+ @param entities
+ the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException
+ @throws YarnException]]>
+
+
+
+
+
+
+
+
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+
+
+ @param domain
+ an {@link TimelineDomain} object
+ @throws IOException
+ @throws YarnException]]>
+
+
+
+
+
+
+
+
+ Get a delegation token so as to be able to talk to the timeline server in a
+ secure way.
+
+
+ @param renewer
+ Address of the renewer who can renew these tokens when needed by
+ securely talking to the timeline server
+ @return a delegation token ({@link Token}) that can be used to talk to the
+ timeline server
+ @throws IOException
+ @throws YarnException]]>
+
+
+
+
+
+
+
+
+ Renew a timeline delegation token.
+
+
+ @param timelineDT
+ the delegation token to renew
+ @return the new expiration time
+ @throws IOException
+ @throws YarnException]]>
+
+
+
+
+
+
+
+
+ Cancel a timeline delegation token.
+
+
+ @param timelineDT
+ the delegation token to cancel
+ @throws IOException
+ @throws YarnException]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ parameterized event of type T]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ } labels map]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ } labels map]]>
+
+
+
+
+
+
+
+
+
+
+ } labels map]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Host can have multiple Nodes]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ } label]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ InputStream to be checksummed
+ @return the message digest of the input stream
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+ SharedCacheChecksum object based on the configurable
+ algorithm implementation
+ (see yarn.sharedcache.checksum.algo.impl)
+
+ @return SharedCacheChecksum object]]>
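A hedged sketch of computing a shared-cache key with the factory above (the local file path is illustrative; conf is assumed):

  SharedCacheChecksum checksum = SharedCacheChecksumFactory.getChecksum(conf);
  try (InputStream in = FileSystem.getLocal(conf).open(new Path("/tmp/job.jar"))) {
    String key = checksum.computeChecksum(in);
    // The key can then be passed to SharedCacheClient#use / #release.
  }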
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ The object type on which this state machine operates.
+ @param The state of the entity.
+ @param The external eventType to be handled.
+ @param The event object.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.6.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.6.0.xml
new file mode 100644
index 0000000000..094962fc8f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.6.0.xml
@@ -0,0 +1,2059 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Type of proxy.
+ @return Proxy to the ResourceManager for the specified server protocol.
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true if the node is healthy, else false]]>
+
+
+
+
+ diagnostic health report of the node.
+ @return diagnostic health report of the node]]>
+
+
+
+
+ last timestamp at which the health report was received.
+ @return last timestamp at which the health report was received]]>
+
+
+
+ NodeHealthStatus is a summary of the health status of the
+ node.
+
+ It includes information such as:
+
+ -
+ An indicator of whether the node is healthy, as determined by the
+ health-check script.
+
+ - The previous time at which the health status was reported.
+ - A diagnostic report on the health status.
+
+
+
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true if the iteration has more elements.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
new file mode 100644
index 0000000000..1a1d88bc66
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_2.7.2.xml
@@ -0,0 +1,1801 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Type of proxy.
+ @return Proxy to the ResourceManager for the specified server protocol.
+ @throws IOException]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true if the node is healthy, else false]]>
+
+
+
+
+ diagnostic health report of the node.
+ @return diagnostic health report of the node]]>
+
+
+
+
+ last timestamp at which the health report was received.
+ @return last timestamp at which the health report was received]]>
+
+
+
+
+ It includes information such as:
+
+ -
+ An indicator of whether the node is healthy, as determined by the
+ health-check script.
+
+ - The previous time at which the health status was reported.
+ - A diagnostic report on the health status.
+
+
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ true if the iteration has more elements.]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Null.java b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Null.java
new file mode 100644
index 0000000000..7b00145ced
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Null.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class Null {
+ public Null() { }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
index 187dbbbc80..41aef33f89 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
@@ -30,6 +30,8 @@
${project.parent.basedir}
+ true
+ ../dev-support
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index d6ff6af862..df15c7ce1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -27,6 +27,8 @@
${project.parent.basedir}
+ true
+ ../dev-support
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index f13d6ece47..17fc6e2647 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -30,6 +30,8 @@
${project.parent.basedir}
+ true
+ ../dev-support
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index ad9f977363..f792ccd34b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -30,6 +30,8 @@
${project.parent.parent.basedir}
+ true
+ ../../dev-support
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml
index 0f79226401..3e31ec047f 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -29,6 +29,9 @@
true
600000
${basedir}
+
+
+ dev-support
${basedir}/../../../hadoop-common-project/hadoop-common/target
@@ -53,6 +56,7 @@
conf/slaves
conf/container-executor.cfg
+ dev-support/jdiff/**
@@ -98,6 +102,131 @@
+
+
+ docs
+
+ false
+
+
+ 2.7.2
+ -unstable
+
+ 512m
+
+
+
+
+ org.apache.maven.plugins
+ maven-javadoc-plugin
+
+
+
+ javadoc
+
+ prepare-package
+
+
+
+
+ org.apache.maven.plugins
+ maven-dependency-plugin
+
+
+ site
+ prepare-package
+
+ copy
+
+
+
+
+ jdiff
+ jdiff
+ ${jdiff.version}
+ false
+ ${project.build.directory}
+ jdiff.jar
+
+
+ org.apache.hadoop
+ hadoop-annotations
+ ${project.version}
+ false
+ ${project.build.directory}
+ hadoop-annotations.jar
+
+
+ xerces
+ xercesImpl
+ ${xerces.version.jdiff}
+ false
+ ${project.build.directory}
+ xerces.jar
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+
+
+ site
+ prepare-package
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
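With the docs profile above wired into the YARN module poms, the jdiff comparison against the stored 2.6.0/2.7.2 API files is produced during packaging; assuming the profile id is docs as shown, an invocation along the lines of mvn prepare-package -Pdocs -DskipTests from hadoop-yarn-project/hadoop-yarn should drive the javadoc, dependency-copy and antrun executions (exact flags may vary with the build environment).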