YARN-1892. Improved some logs in the scheduler. Contributed by Jian He.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1587717 13f79535-47bb-0310-9956-ffa450edef68
commit 44b6261bfa (parent 161d97807f)
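The diff below applies one pattern throughout the scheduler: verbose, per-heartbeat log messages are either trimmed or demoted from INFO to DEBUG and wrapped in an isDebugEnabled() guard so the message string is never built when DEBUG is off. A minimal sketch of that pattern, assuming a commons-logging Log as Hadoop used at the time; the class, method, and field names here are illustrative only, not taken from the patch:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

// Illustrative sketch only; not part of the patch.
public class ReservationLogging {
  private static final Log LOG = LogFactory.getLog(ReservationLogging.class);

  void logReservation(String attemptId, String containerId, int reservedCount) {
    // Low-volume, operator-facing events stay at INFO.
    LOG.info("Reserved container " + containerId
        + " for application attempt " + attemptId);

    // Per-heartbeat detail goes to DEBUG; the guard skips the string
    // concatenation entirely when DEBUG logging is disabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Attempt " + attemptId + " now holds " + reservedCount
          + " reserved containers");
    }
  }
}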
@@ -73,6 +73,8 @@ Release 2.4.1 - UNRELEASED
 
   IMPROVEMENTS
 
+    YARN-1892. Improved some logs in the scheduler. (Jian He via zjshen)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -343,10 +343,6 @@ private NodeStatus getNodeStatus(int responseId) {
           + ", " + nodeHealthStatus.getHealthReport());
     }
     List<ContainerStatus> containersStatuses = getContainerStatuses();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(this.nodeId + " sending out status for "
-          + containersStatuses.size() + " containers");
-    }
     NodeStatus nodeStatus =
         NodeStatus.newInstance(nodeId, responseId, containersStatuses,
           createKeepAliveApplicationList(), nodeHealthStatus);
@@ -373,7 +369,8 @@ protected List<ContainerStatus> getContainerStatuses() {
       }
     }
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending out container statuses: " + containerStatuses);
+      LOG.debug("Sending out " + containerStatuses.size()
+          + " container statuses: " + containerStatuses);
     }
     return containerStatuses;
   }
@@ -255,13 +255,15 @@ public synchronized RMContainer reserve(SchedulerNode node, Priority priority,
       this.reservedContainers.put(priority, reservedContainers);
     }
     reservedContainers.put(node.getNodeID(), rmContainer);
 
-    LOG.info("Application " + getApplicationId()
-        + " reserved container " + rmContainer
-        + " on node " + node + ", currently has " + reservedContainers.size()
-        + " at priority " + priority
-        + "; currentReservation " + currentReservation.getMemory());
-
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Application attempt " + getApplicationAttemptId()
+          + " reserved container " + rmContainer + " on node " + node
+          + ". This attempt currently has " + reservedContainers.size()
+          + " reserved containers at priority " + priority
+          + "; currentReservation " + currentReservation.getMemory());
+    }
+
     return rmContainer;
   }
 
@@ -107,8 +107,6 @@ public class LeafQueue implements CSQueue {
   private final Resource maximumAllocation;
   private final float minimumAllocationFactor;
 
-  private RMContainerTokenSecretManager containerTokenSecretManager;
-
   private Map<String, User> users = new HashMap<String, User>();
 
   private final QueueMetrics metrics;
@@ -149,7 +147,6 @@ public LeafQueue(CapacitySchedulerContext cs,
         Resources.ratio(resourceCalculator,
             Resources.subtract(maximumAllocation, minimumAllocation),
             maximumAllocation);
-    this.containerTokenSecretManager = cs.getContainerTokenSecretManager();
 
     float capacity =
       (float)cs.getConfiguration().getCapacity(getQueuePath()) / 100;
@@ -560,7 +557,7 @@ public String toString() {
     return queueName + ": " +
         "capacity=" + capacity + ", " +
         "absoluteCapacity=" + absoluteCapacity + ", " +
-        "usedResources=" + usedResources +
+        "usedResources=" + usedResources + ", " +
         "usedCapacity=" + getUsedCapacity() + ", " +
         "absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + ", " +
         "numApps=" + getNumApplications() + ", " +
@@ -949,15 +946,16 @@ private synchronized boolean assignToQueue(Resource clusterResource,
             Resources.add(usedResources, required),
             clusterResource);
     if (potentialNewCapacity > absoluteMaxCapacity) {
-      LOG.info(getQueueName() +
-          " usedResources: " + usedResources +
-          " clusterResources: " + clusterResource +
-          " currentCapacity " +
-          Resources.divide(resourceCalculator, clusterResource,
-              usedResources, clusterResource) +
-          " required " + required +
-          " potentialNewCapacity: " + potentialNewCapacity + " ( " +
-          " max-capacity: " + absoluteMaxCapacity + ")");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(getQueueName()
+            + " usedResources: " + usedResources
+            + " clusterResources: " + clusterResource
+            + " currentCapacity "
+            + Resources.divide(resourceCalculator, clusterResource,
+              usedResources, clusterResource) + " required " + required
+            + " potentialNewCapacity: " + potentialNewCapacity + " ( "
+            + " max-capacity: " + absoluteMaxCapacity + ")");
+      }
       return false;
     }
     return true;
@@ -1300,7 +1298,7 @@ private Resource assignContainer(Resource clusterResource, FiCaSchedulerNode nod
       ResourceRequest request, NodeType type, RMContainer rmContainer) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("assignContainers: node=" + node.getNodeName()
-        + " application=" + application.getApplicationId().getId()
+        + " application=" + application.getApplicationId()
         + " priority=" + priority.getPriority()
         + " request=" + request + " type=" + type);
     }
@@ -1352,14 +1350,10 @@ private Resource assignContainer(Resource clusterResource, FiCaSchedulerNode nod
             allocatedContainer);
 
         LOG.info("assignedContainer" +
-            " application=" + application.getApplicationId() +
+            " application attempt=" + application.getApplicationAttemptId() +
             " container=" + container +
-            " containerId=" + container.getId() +
             " queue=" + this +
-            " usedCapacity=" + getUsedCapacity() +
-            " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
-            " used=" + usedResources +
-            " cluster=" + clusterResource);
+            " clusterResource=" + clusterResource);
 
         return container.getResource();
       } else {
@@ -1367,13 +1361,11 @@ private Resource assignContainer(Resource clusterResource, FiCaSchedulerNode nod
         reserve(application, priority, node, rmContainer, container);
 
         LOG.info("Reserved container " +
-            " application=" + application.getApplicationId() +
+            " application attempt=" + application.getApplicationAttemptId() +
             " resource=" + request.getCapability() +
             " queue=" + this.toString() +
-            " usedCapacity=" + getUsedCapacity() +
-            " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
-            " used=" + usedResources +
-            " cluster=" + clusterResource);
+            " node=" + node +
+            " clusterResource=" + clusterResource);
 
         return request.getCapability();
       }
@@ -1440,11 +1432,7 @@ public void completedContainer(Resource clusterResource,
           application, container.getResource());
       LOG.info("completedContainer" +
           " container=" + container +
-          " resource=" + container.getResource() +
           " queue=" + this +
-          " usedCapacity=" + getUsedCapacity() +
-          " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() +
-          " used=" + usedResources +
           " cluster=" + clusterResource);
     }
   }
@@ -231,12 +231,19 @@ public synchronized void reserveResource(
             " on node " + this);
       }
 
-      LOG.info("Updated reserved container " +
-          reservedContainer.getContainer().getId() + " on node " +
-          this + " for application " + application);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Updated reserved container "
+            + reservedContainer.getContainer().getId() + " on node " + this
+            + " for application attempt "
+            + application.getApplicationAttemptId());
+      }
     } else {
-      LOG.info("Reserved container " + reservedContainer.getContainer().getId() +
-          " on node " + this + " for application " + application);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Reserved container "
+            + reservedContainer.getContainer().getId() + " on node " + this
+            + " for application attempt "
+            + application.getApplicationAttemptId());
+      }
     }
     this.reservedContainer = reservedContainer;
   }