HADOOP-12428. Fix inconsistency between log-level guards and statements. Contributed by Jagadesh Kiran N and Jackie Chang.
parent 06022b8fdc
commit dfd807afab
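The pattern fixed throughout this commit is a log statement whose level does not match the surrounding guard, typically a message built under LOG.isDebugEnabled() but emitted with LOG.info(), so it prints only when debug logging is on yet appears at INFO level. A minimal before/after sketch of that shape, using a hypothetical Example class with a commons-logging Log (class, method, and message names are illustrative, not taken from the patch):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class Example { // hypothetical class, for illustration only
      private static final Log LOG = LogFactory.getLog(Example.class);

      void before(String detail) {
        if (LOG.isDebugEnabled()) {
          // Inconsistent: guarded by the debug check but logged at INFO,
          // so it only shows up when debug is enabled, and then at the wrong level.
          LOG.info("Processing " + detail);
        }
      }

      void after(String detail) {
        if (LOG.isDebugEnabled()) {
          // Consistent: the guard still avoids the string concatenation cost,
          // and the statement is emitted at the level the guard checks for.
          LOG.debug("Processing " + detail);
        }
      }
    }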
@@ -788,6 +788,9 @@ Release 2.8.0 - UNRELEASED
     JarFile with other users when loading resource from URL in Configuration
     class. (zxu)
 
+    HADOOP-12428. Fix inconsistency between log-level guards and statements.
+    (Jagadesh Kiran N and Jackie Chang via ozawa)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp
@@ -941,6 +941,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9111. Move hdfs-client protobuf convert methods from PBHelper to
     PBHelperClient. (Mingliang Liu via wheat9)
 
+    HADOOP-12428. Fix inconsistency between log-level guards and statements.
+    (Jagadesh Kiran N and Jackie Chang via ozawa)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -752,7 +752,7 @@ public void writeBlock(final ExtendedBlock block,
           mirrorInStatus = connectAck.getStatus();
           firstBadLink = connectAck.getFirstBadLink();
           if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
-            LOG.info("Datanode " + targets.length +
+            LOG.debug("Datanode " + targets.length +
                      " got response for connect ack " +
                      " from downstream datanode with firstbadlink as " +
                      firstBadLink);
@@ -791,7 +791,7 @@ public void writeBlock(final ExtendedBlock block,
       // send connect-ack to source for clients and not transfer-RBW/Finalized
       if (isClient && !isTransfer) {
         if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
-          LOG.info("Datanode " + targets.length +
+          LOG.debug("Datanode " + targets.length +
              " forwarding connect ack to upstream firstbadlink is " +
              firstBadLink);
         }
@@ -200,7 +200,7 @@ private synchronized void applyEdits(long firstTxId, int numTxns, byte[] data)
     assert backupInputStream.length() == 0 : "backup input stream is not empty";
     try {
       if (LOG.isTraceEnabled()) {
-        LOG.debug("data:" + StringUtils.byteToHexString(data));
+        LOG.trace("data:" + StringUtils.byteToHexString(data));
       }
 
       FSEditLogLoader logLoader =
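The same rule applies to other levels: a LOG.isTraceEnabled() guard should wrap a LOG.trace() call, as in the hunk above. A short sketch of that shape, assuming a commons-logging Log and the Hadoop StringUtils helper that appears in the diff (the surrounding class and method are illustrative):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.util.StringUtils;

    class TraceGuardExample { // illustrative class, not from the patch
      private static final Log LOG = LogFactory.getLog(TraceGuardExample.class);

      void applyData(byte[] data) {
        if (LOG.isTraceEnabled()) {
          // Hex-dumping the payload is expensive, so it is both guarded and
          // emitted at TRACE, the level the guard actually checks.
          LOG.trace("data:" + StringUtils.byteToHexString(data));
        }
      }
    }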
@@ -375,7 +375,7 @@ private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
         "or call saveNamespace on the active node.\n" +
         "Error: " + e.getLocalizedMessage();
     if (LOG.isDebugEnabled()) {
-      LOG.fatal(msg, e);
+      LOG.debug(msg, e);
     } else {
       LOG.fatal(msg);
     }
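The hunk above is a slightly different shape: the level check decides how much detail to log rather than whether to log at all. After the change, the message and exception go to DEBUG (with the full stack trace) when debug is enabled, and the message alone goes to FATAL otherwise. A minimal sketch of that pattern, with illustrative class, method, and variable names:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class ErrorDetailExample { // illustrative class, not from the patch
      private static final Log LOG = LogFactory.getLog(ErrorDetailExample.class);

      void reportFailure(String msg, Exception e) {
        if (LOG.isDebugEnabled()) {
          // Debug enabled: attach the exception so the stack trace is printed,
          // at the level the guard checks for.
          LOG.debug(msg, e);
        } else {
          // Otherwise keep the record terse: message only, at FATAL.
          LOG.fatal(msg);
        }
      }
    }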
@@ -260,7 +260,7 @@ void doTailEdits() throws IOException, InterruptedException {
       throw elie;
     } finally {
       if (editsLoaded > 0 || LOG.isDebugEnabled()) {
-        LOG.info(String.format("Loaded %d edits starting from txid %d ",
+        LOG.debug(String.format("Loaded %d edits starting from txid %d ",
            editsLoaded, lastTxnId));
       }
     }
@@ -387,6 +387,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6483. Replace deprecated method NameNode.getUri() with
     DFSUtilClient.getNNUri() in TestMRCredentials. (Mingliang Liu via wheat9)
 
+    HADOOP-12428. Fix inconsistency between log-level guards and statements.
+    (Jagadesh Kiran N and Jackie Chang via ozawa)
+
   OPTIMIZATIONS
 
     MAPREDUCE-6376. Add avro binary support for jhist files (Ray Chiang via
@@ -1122,7 +1122,7 @@ private void containerAssigned(Container allocated,
     assignedRequests.add(allocated, assigned.attemptID);
 
     if (LOG.isDebugEnabled()) {
-      LOG.info("Assigned container (" + allocated + ") "
+      LOG.debug("Assigned container (" + allocated + ") "
          + " to task " + assigned.attemptID + " on node "
          + allocated.getNodeId().toString());
     }
@@ -506,7 +506,7 @@ private void decResourceRequest(Priority priority, String resourceName,
     addResourceRequestToAsk(remoteRequest);
 
     if (LOG.isDebugEnabled()) {
-      LOG.info("AFTER decResourceRequest:" + " applicationId="
+      LOG.debug("AFTER decResourceRequest:" + " applicationId="
          + applicationId.getId() + " priority=" + priority.getPriority()
          + " resourceName=" + resourceName + " numContainers="
          + remoteRequest.getNumContainers() + " #asks=" + ask.size());
@@ -112,7 +112,8 @@ private List<InputSplit> getSplits(Configuration configuration, int numSplits,
         FileSplit split = new FileSplit(listingFilePath, lastSplitStart,
             lastPosition - lastSplitStart, null);
         if (LOG.isDebugEnabled()) {
-          LOG.info ("Creating split : " + split + ", bytes in split: " + currentSplitSize);
+          LOG.debug("Creating split : " + split + ", bytes in split: "
+              + currentSplitSize);
         }
         splits.add(split);
       }
@@ -130,7 +130,7 @@ public void run() {
           return;
         }
         if (LOG.isDebugEnabled()) {
-          LOG.info(" job " + job.getName() + " completed ");
+          LOG.debug(" job " + job.getName() + " completed ");
         }
         break;
       }
@@ -457,6 +457,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3920. FairScheduler container reservation on a node should be
     configurable to limit it to large containers (adhoot via asuresh)
 
+    HADOOP-12428. Fix inconsistency between log-level guards and statements.
+    (Jagadesh Kiran N and Jackie Chang via ozawa)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -748,7 +748,7 @@ private void decResourceRequest(Priority priority,
     }
 
     if (LOG.isDebugEnabled()) {
-      LOG.info("AFTER decResourceRequest:" + " applicationId="
+      LOG.debug("AFTER decResourceRequest:" + " applicationId="
          + " priority=" + priority.getPriority()
          + " resourceName=" + resourceName + " numContainers="
          + resourceRequestInfo.remoteRequest.getNumContainers()
@@ -46,7 +46,7 @@ public Token<ContainerTokenIdentifier> selectToken(Text service,
     }
     for (Token<? extends TokenIdentifier> token : tokens) {
       if (LOG.isDebugEnabled()) {
-        LOG.info("Looking for service: " + service + ". Current token is "
+        LOG.debug("Looking for service: " + service + ". Current token is "
            + token);
       }
       if (ContainerTokenIdentifier.KIND.equals(token.getKind()) &&
@@ -42,7 +42,7 @@ public Token<NMTokenIdentifier> selectToken(Text service,
     }
     for (Token<? extends TokenIdentifier> token : tokens) {
       if (LOG.isDebugEnabled()) {
-        LOG.info("Looking for service: " + service + ". Current token is "
+        LOG.debug("Looking for service: " + service + ". Current token is "
            + token);
       }
       if (NMTokenIdentifier.KIND.equals(token.getKind()) &&
@@ -1163,8 +1163,8 @@ synchronized void allocateResource(Resource clusterResource,
     metrics.setAvailableResourcesToUser(userName, application.getHeadroom());
 
     if (LOG.isDebugEnabled()) {
-      LOG.info(getQueueName() +
-          " user=" + userName +
+      LOG.debug(getQueueName() +
+          " user=" + userName +
          " used=" + queueUsage.getUsed() + " numContainers=" + numContainers +
          " headroom = " + application.getHeadroom() +
          " user-resources=" + user.getUsed()