YARN-11362: Fix several typos in YARN codebase of misspelled resource (#6474) Contributed by EremenkoValentin.

Reviewed-by: Shilun Fan <slfan1989@apache.org>
Signed-off-by: Shilun Fan <slfan1989@apache.org>
This commit is contained in:
Eremenko Valentin 2024-02-03 14:34:42 +03:00 committed by GitHub
parent 20d8596af2
commit 141627778d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 56 additions and 58 deletions

View File

@@ -29,18 +29,18 @@
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SchedulerInvalidResoureRequestException extends YarnRuntimeException {
public class SchedulerInvalidResourceRequestException extends YarnRuntimeException {
private static final long serialVersionUID = 10081123982L;
public SchedulerInvalidResoureRequestException(String message) {
public SchedulerInvalidResourceRequestException(String message) {
super(message);
}
public SchedulerInvalidResoureRequestException(Throwable cause) {
public SchedulerInvalidResourceRequestException(Throwable cause) {
super(cause);
}
public SchedulerInvalidResoureRequestException(String message,
public SchedulerInvalidResourceRequestException(String message,
Throwable cause) {
super(message, cause);
}

View File

@@ -133,17 +133,17 @@ public void testGetApplicationReport() throws Exception {
ApplicationCLI cli = createAndGetAppCLI();
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
Map<String, Long> resourceSecondsMap = new HashMap<>();
Map<String, Long> preemptedResoureSecondsMap = new HashMap<>();
Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), 123456L);
resourceSecondsMap.put(ResourceInformation.VCORES.getName(), 4567L);
preemptedResoureSecondsMap
preemptedResourceSecondsMap
.put(ResourceInformation.MEMORY_MB.getName(), 1111L);
preemptedResoureSecondsMap
preemptedResourceSecondsMap
.put(ResourceInformation.VCORES.getName(), 2222L);
ApplicationResourceUsageReport usageReport = i == 0 ? null :
ApplicationResourceUsageReport
.newInstance(2, 0, null, null, null, resourceSecondsMap, 0, 0,
preemptedResoureSecondsMap);
preemptedResourceSecondsMap);
ApplicationReport newApplicationReport = ApplicationReport.newInstance(
applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
"user", "queue", "appname", "host", 124, null,

View File

@@ -395,19 +395,19 @@ public static ApplicationReport convertToApplicationReport(
}
}
Map<String, Long> resourceSecondsMap = new HashMap<>();
Map<String, Long> preemptedResoureSecondsMap = new HashMap<>();
Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
resourceSecondsMap
.put(ResourceInformation.MEMORY_MB.getName(), memorySeconds);
resourceSecondsMap
.put(ResourceInformation.VCORES.getName(), vcoreSeconds);
preemptedResoureSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
preemptedResourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
preemptedMemorySeconds);
preemptedResoureSecondsMap
preemptedResourceSecondsMap
.put(ResourceInformation.VCORES.getName(), preemptedVcoreSeconds);
appResources = ApplicationResourceUsageReport
.newInstance(0, 0, null, null, null, resourceSecondsMap, 0, 0,
preemptedResoureSecondsMap);
preemptedResourceSecondsMap);
}
NavigableSet<TimelineEvent> events = entity.getEvents();

View File

@@ -345,19 +345,19 @@ private static ApplicationReportExt convertToApplicationReport(
long preemptedVcoreSeconds = parseLong(entityInfo,
ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS);
Map<String, Long> resourceSecondsMap = new HashMap<>();
Map<String, Long> preemptedResoureSecondsMap = new HashMap<>();
Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
resourceSecondsMap
.put(ResourceInformation.MEMORY_MB.getName(), memorySeconds);
resourceSecondsMap
.put(ResourceInformation.VCORES.getName(), vcoreSeconds);
preemptedResoureSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
preemptedResourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
preemptedMemorySeconds);
preemptedResoureSecondsMap
preemptedResourceSecondsMap
.put(ResourceInformation.VCORES.getName(), preemptedVcoreSeconds);
appResources = ApplicationResourceUsageReport
.newInstance(0, 0, null, null, null, resourceSecondsMap, 0, 0,
preemptedResoureSecondsMap);
preemptedResourceSecondsMap);
}
if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {

View File

@@ -55,7 +55,7 @@
import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -301,7 +301,7 @@ public void allocate(ApplicationAttemptId appAttemptId,
allocation = getScheduler().allocate(appAttemptId, ask,
request.getSchedulingRequests(), release,
blacklistAdditions, blacklistRemovals, containerUpdateRequests);
} catch (SchedulerInvalidResoureRequestException e) {
} catch (SchedulerInvalidResourceRequestException e) {
LOG.warn("Exceptions caught when scheduler handling requests");
throw new YarnException(e);
}

View File

@@ -43,8 +43,7 @@
import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException
.InvalidResourceType;
import org.apache.hadoop.yarn.exceptions
.SchedulerInvalidResoureRequestException;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.AccessType;
@@ -430,7 +429,7 @@ static void checkResourceRequestAgainstAvailableResource(Resource reqResource,
public static MaxResourceValidationResult
validateResourceRequestsAgainstQueueMaxResource(
ResourceRequest resReq, Resource availableResource)
throws SchedulerInvalidResoureRequestException {
throws SchedulerInvalidResourceRequestException {
final Resource reqResource = resReq.getCapability();
Map<String, ResourceInformation> resourcesWithZeroAmount =
getZeroResources(availableResource);

View File

@@ -370,11 +370,11 @@ private ContainerAllocation assignNodeLocalContainers(
Resource clusterResource, PendingAsk nodeLocalAsk,
FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
RMContainer reservedContainer, SchedulingMode schedulingMode,
ResourceLimits currentResoureLimits) {
ResourceLimits currentResourceLimits) {
if (canAssign(schedulerKey, node, NodeType.NODE_LOCAL, reservedContainer)) {
return assignContainer(clusterResource, node, schedulerKey,
nodeLocalAsk, NodeType.NODE_LOCAL, reservedContainer,
schedulingMode, currentResoureLimits);
schedulingMode, currentResourceLimits);
}
// Skip node-local request, go to rack-local request
@@ -385,11 +385,11 @@ private ContainerAllocation assignRackLocalContainers(
Resource clusterResource, PendingAsk rackLocalAsk,
FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
RMContainer reservedContainer, SchedulingMode schedulingMode,
ResourceLimits currentResoureLimits) {
ResourceLimits currentResourceLimits) {
if (canAssign(schedulerKey, node, NodeType.RACK_LOCAL, reservedContainer)) {
return assignContainer(clusterResource, node, schedulerKey,
rackLocalAsk, NodeType.RACK_LOCAL, reservedContainer,
schedulingMode, currentResoureLimits);
schedulingMode, currentResourceLimits);
}
// Skip rack-local request, go to off-switch request
@@ -400,11 +400,11 @@ private ContainerAllocation assignOffSwitchContainers(
Resource clusterResource, PendingAsk offSwitchAsk,
FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
RMContainer reservedContainer, SchedulingMode schedulingMode,
ResourceLimits currentResoureLimits) {
ResourceLimits currentResourceLimits) {
if (canAssign(schedulerKey, node, NodeType.OFF_SWITCH, reservedContainer)) {
return assignContainer(clusterResource, node, schedulerKey,
offSwitchAsk, NodeType.OFF_SWITCH, reservedContainer,
schedulingMode, currentResoureLimits);
schedulingMode, currentResourceLimits);
}
application.updateAppSkipNodeDiagnostics(
@@ -419,7 +419,7 @@ private ContainerAllocation assignOffSwitchContainers(
private ContainerAllocation assignContainersOnNode(Resource clusterResource,
FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
RMContainer reservedContainer, SchedulingMode schedulingMode,
ResourceLimits currentResoureLimits) {
ResourceLimits currentResourceLimits) {
ContainerAllocation allocation;
NodeType requestLocalityType = null;
@@ -431,7 +431,7 @@ private ContainerAllocation assignContainersOnNode(Resource clusterResource,
allocation =
assignNodeLocalContainers(clusterResource, nodeLocalAsk,
node, schedulerKey, reservedContainer, schedulingMode,
currentResoureLimits);
currentResourceLimits);
if (Resources.greaterThan(rc, clusterResource,
allocation.getResourceToBeAllocated(), Resources.none())) {
allocation.requestLocalityType = requestLocalityType;
@@ -458,7 +458,7 @@ private ContainerAllocation assignContainersOnNode(Resource clusterResource,
allocation =
assignRackLocalContainers(clusterResource, rackLocalAsk,
node, schedulerKey, reservedContainer, schedulingMode,
currentResoureLimits);
currentResourceLimits);
if (Resources.greaterThan(rc, clusterResource,
allocation.getResourceToBeAllocated(), Resources.none())) {
allocation.requestLocalityType = requestLocalityType;
@@ -485,7 +485,7 @@ private ContainerAllocation assignContainersOnNode(Resource clusterResource,
allocation =
assignOffSwitchContainers(clusterResource, offSwitchAsk,
node, schedulerKey, reservedContainer, schedulingMode,
currentResoureLimits);
currentResourceLimits);
// When a returned allocation is LOCALITY_SKIPPED, since we're in
// off-switch request now, we will skip this app w.r.t priorities
@@ -507,7 +507,7 @@ private ContainerAllocation assignContainer(Resource clusterResource,
private ContainerAllocation assignContainer(Resource clusterResource,
FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
PendingAsk pendingAsk, NodeType type, RMContainer rmContainer,
SchedulingMode schedulingMode, ResourceLimits currentResoureLimits) {
SchedulingMode schedulingMode, ResourceLimits currentResourceLimits) {
if (LOG.isDebugEnabled()) {
LOG.debug("assignContainers: node=" + node.getNodeName()
@@ -547,8 +547,8 @@ private ContainerAllocation assignContainer(Resource clusterResource,
// max(required - headroom, amountNeedUnreserve)
Resource resourceNeedToUnReserve =
Resources.max(rc, clusterResource,
Resources.subtract(capability, currentResoureLimits.getHeadroom()),
currentResoureLimits.getAmountNeededUnreserve());
Resources.subtract(capability, currentResourceLimits.getHeadroom()),
currentResourceLimits.getAmountNeededUnreserve());
boolean needToUnreserve =
rc.isAnyMajorResourceAboveZero(resourceNeedToUnReserve);
@@ -559,7 +559,7 @@ private ContainerAllocation assignContainer(Resource clusterResource,
// Check if we need to kill some containers to allocate this one
List<RMContainer> toKillContainers = null;
if (availableContainers == 0 && currentResoureLimits.isAllowPreemption()) {
if (availableContainers == 0 && currentResourceLimits.isAllowPreemption()) {
Resource availableAndKillable = Resources.clone(available);
for (RMContainer killableContainer : node
.getKillableContainers().values()) {
@@ -590,7 +590,7 @@ private ContainerAllocation assignContainer(Resource clusterResource,
if (!needToUnreserve) {
// If we shouldn't allocate/reserve new container then we should
// unreserve one the same size we are asking for since the
// currentResoureLimits.getAmountNeededUnreserve could be zero. If
// currentResourceLimits.getAmountNeededUnreserve could be zero. If
// the limit was hit then use the amount we need to unreserve to be
// under the limit.
resourceNeedToUnReserve = capability;

View File

@@ -41,8 +41,7 @@
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions
.SchedulerInvalidResoureRequestException;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
@@ -912,7 +911,7 @@ public Allocation allocate(ApplicationAttemptId appAttemptId,
// scheduler would clear them right away and AM
// would not get this information.
if (!invalidAsks.isEmpty()) {
throw new SchedulerInvalidResoureRequestException(String.format(
throw new SchedulerInvalidResourceRequestException(String.format(
"Resource request is invalid for application %s because queue %s "
+ "has 0 amount of resource for a resource type! "
+ "Validation result: %s",

View File

@@ -23,7 +23,7 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
@@ -172,8 +172,8 @@ public PendingAskUpdateResult updatePendingAsk(
SchedulerRequestKey schedulerRequestKey,
SchedulingRequest schedulingRequest,
boolean recoverPreemptedRequestForAContainer)
throws SchedulerInvalidResoureRequestException {
throw new SchedulerInvalidResoureRequestException(this.getClass().getName()
throws SchedulerInvalidResourceRequestException {
throw new SchedulerInvalidResourceRequestException(this.getClass().getName()
+ " not be able to handle SchedulingRequest, there exists a "
+ "ResourceRequest with the same scheduler key=" + schedulerRequestKey
+ ", please send SchedulingRequest with a different allocationId and "

View File

@@ -28,7 +28,7 @@
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.api.records.impl.pb.SchedulingRequestPBImpl;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
@@ -81,7 +81,7 @@ public PendingAskUpdateResult updatePendingAsk(
Collection<ResourceRequest> requests,
boolean recoverPreemptedRequestForAContainer) {
if (requests != null && !requests.isEmpty()) {
throw new SchedulerInvalidResoureRequestException(
throw new SchedulerInvalidResourceRequestException(
this.getClass().getName()
+ " not be able to handle ResourceRequest, there exists a "
+ "SchedulingRequest with the same scheduler key="
@@ -98,7 +98,7 @@ private PendingAskUpdateResult internalUpdatePendingAsk(
SchedulingRequest newSchedulingRequest, boolean recoverContainer) {
// When it is a recover container, there must exists an schedulingRequest.
if (recoverContainer && schedulingRequest == null) {
throw new SchedulerInvalidResoureRequestException("Trying to recover a "
throw new SchedulerInvalidResourceRequestException("Trying to recover a "
+ "container request=" + newSchedulingRequest.toString() + ", however"
+ "there's no existing scheduling request, this should not happen.");
}
@@ -127,7 +127,7 @@ private PendingAskUpdateResult internalUpdatePendingAsk(
if (!schedulingRequest.equals(newSchedulingRequest)) {
// Rollback #numAllocations
sizing.setNumAllocations(newNumAllocations);
throw new SchedulerInvalidResoureRequestException(
throw new SchedulerInvalidResourceRequestException(
"Invalid updated SchedulingRequest added to scheduler, "
+ " we only allows changing numAllocations for the updated "
+ "SchedulingRequest. Old=" + schedulingRequest.toString()
@@ -148,7 +148,7 @@ private PendingAskUpdateResult internalUpdatePendingAsk(
// Basic sanity check
if (newNumAllocations < 0) {
throw new SchedulerInvalidResoureRequestException(
throw new SchedulerInvalidResourceRequestException(
"numAllocation in ResourceSizing field must be >= 0, "
+ "updating schedulingRequest failed.");
}
@@ -197,12 +197,12 @@ private String throwExceptionWithMetaInfo(String message) {
sb.append("AppId=").append(appSchedulingInfo.getApplicationId()).append(
" Key=").append(this.schedulerRequestKey).append(". Exception message:")
.append(message);
throw new SchedulerInvalidResoureRequestException(sb.toString());
throw new SchedulerInvalidResourceRequestException(sb.toString());
}
private void validateAndSetSchedulingRequest(SchedulingRequest
newSchedulingRequest)
throws SchedulerInvalidResoureRequestException {
throws SchedulerInvalidResourceRequestException {
// Check sizing exists
if (newSchedulingRequest.getResourceSizing() == null
|| newSchedulingRequest.getResourceSizing().getResources() == null) {

View File

@@ -814,19 +814,19 @@ private static ApplicationAttemptStateData createFinishedAttempt(
ApplicationAttemptId attemptId, Container container, long startTime,
int amExitStatus) {
Map<String, Long> resourceSecondsMap = new HashMap<>();
Map<String, Long> preemptedResoureSecondsMap = new HashMap<>();
Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
resourceSecondsMap
.put(ResourceInformation.MEMORY_MB.getName(), 0L);
resourceSecondsMap
.put(ResourceInformation.VCORES.getName(), 0L);
preemptedResoureSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
preemptedResourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
0L);
preemptedResoureSecondsMap
preemptedResourceSecondsMap
.put(ResourceInformation.VCORES.getName(), 0L);
return ApplicationAttemptStateData.newInstance(attemptId,
container, null, startTime, RMAppAttemptState.FINISHED,
"myTrackingUrl", "attemptDiagnostics", FinalApplicationStatus.SUCCEEDED,
amExitStatus, 0, resourceSecondsMap, preemptedResoureSecondsMap, 0);
amExitStatus, 0, resourceSecondsMap, preemptedResourceSecondsMap, 0);
}
private ApplicationAttemptId storeAttempt(RMStateStore store,

View File

@@ -1486,7 +1486,7 @@ public void testHierarchyQueuesCurrentLimits() throws Exception {
waitContainerAllocated(am1, 1 * GB, 1, 2, rm1, nm1);
// Maximum resoure of b1 is 100 * 0.895 * 0.792 = 71 GB
// Maximum resource of b1 is 100 * 0.895 * 0.792 = 71 GB
// 2 GBs used by am, so it's 71 - 2 = 69G.
Assert.assertEquals(69 * GB,
am1.doHeartbeat().getAvailableResources().getMemorySize());

View File

@@ -46,7 +46,7 @@
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
@@ -5513,7 +5513,7 @@ private void testSchedulingRejectedToQueueZeroCapacityOfResource(
+ resource + " and requested resource capabilities are: "
+ requests.stream().map(ResourceRequest::getCapability)
.collect(Collectors.toList()));
} catch (SchedulerInvalidResoureRequestException e) {
} catch (SchedulerInvalidResourceRequestException e) {
assertTrue(
"The thrown exception is not the expected one. Exception message: "
+ e.getMessage(),

View File

@@ -21,7 +21,7 @@
import org.apache.hadoop.yarn.api.records.*;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTags;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
@@ -104,7 +104,7 @@ private void assertInvalidSchedulingRequest(
allocator.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);
}
allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, false);
} catch (SchedulerInvalidResoureRequestException e) {
} catch (SchedulerInvalidResourceRequestException e) {
// Expected
return;
}