HDFS-8522. Change heavily recorded NN logs from INFO to DEBUG level. (Contributed by Xiaoyu Yao)

commit 3841d09765
parent bc11e158b1
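
The diff below replaces DEBUG messages that were built by string concatenation inside NameNode.stateChangeLog.isDebugEnabled() guards with SLF4J parameterized logging, and downgrades several frequently emitted INFO messages to DEBUG. As a minimal standalone sketch of the before/after pattern (the class LoggingSketch and its logger are illustrative only, not Hadoop code):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical illustration class; not part of Hadoop.
public class LoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

  // Before: the message is concatenated, so it must be wrapped in a guard
  // to avoid building the string when DEBUG is disabled.
  void oldStyle(String src, long newLength) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("DIR* NameSystem.truncate: src=" + src + " newLength=" + newLength);
    }
  }

  // After: SLF4J parameterized logging; the {} arguments are only formatted
  // when DEBUG is enabled, so no explicit guard is needed on hot paths.
  void newStyle(String src, long newLength) {
    LOG.debug("DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
  }

  public static void main(String[] args) {
    LoggingSketch s = new LoggingSketch();
    s.oldStyle("/tmp/file", 1024L);
    s.newStyle("/tmp/file", 1024L);
  }
}
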
@@ -957,6 +957,8 @@ Release 2.7.1 - UNRELEASED
     HDFS-8523. Remove usage information on unsupported operation
     "fsck -showprogress" from branch-2 (J.Andreina via vinayakumarb)
 
+    HDFS-8522. Change heavily recorded NN logs from INFO to DEBUG level. (xyao)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
@@ -1956,10 +1956,8 @@ boolean truncateInt(String srcArg, long newLength,
       long mtime)
       throws IOException, UnresolvedLinkException {
     String src = srcArg;
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src="
-          + src + " newLength=" + newLength);
-    }
+    NameNode.stateChangeLog.debug(
+        "DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
     if (newLength < 0) {
       throw new HadoopIllegalArgumentException(
           "Cannot truncate to a negative file size: " + newLength + ".");
@@ -2108,10 +2106,10 @@ Block prepareFileForTruncate(INodesInPath iip,
       file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
       getBlockManager().addBlockCollection(truncatedBlockUC, file);
 
-      NameNode.stateChangeLog.info("BLOCK* prepareFileForTruncate: "
-          + "Scheduling copy-on-truncate to new size "
-          + truncatedBlockUC.getNumBytes() + " new block " + newBlock
-          + " old block " + truncatedBlockUC.getTruncateBlock());
+      NameNode.stateChangeLog.debug(
+          "BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new" +
+          " size {} new block {} old block {}", truncatedBlockUC.getNumBytes(),
+          newBlock, truncatedBlockUC.getTruncateBlock());
     } else {
       // Use new generation stamp for in-place truncate recovery
       blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
@@ -2124,10 +2122,10 @@ Block prepareFileForTruncate(INodesInPath iip,
       truncatedBlockUC.getTruncateBlock().setGenerationStamp(
           newBlock.getGenerationStamp());
 
-      NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: "
-          + "Scheduling in-place block truncate to new size "
-          + truncatedBlockUC.getTruncateBlock().getNumBytes()
-          + " block=" + truncatedBlockUC);
+      NameNode.stateChangeLog.debug(
+          "BLOCK* prepareFileForTruncate: {} Scheduling in-place block " +
+          "truncate to new size {}",
+          truncatedBlockUC.getTruncateBlock().getNumBytes(), truncatedBlockUC);
     }
     if (shouldRecoverNow) {
       truncatedBlockUC.initializeBlockRecovery(newBlock.getGenerationStamp());
@@ -2774,11 +2772,9 @@ private LastBlockWithStatus appendFileInt(final String srcArg, String holder,
       String clientMachine, boolean newBlock, boolean logRetryCache)
       throws IOException {
     String src = srcArg;
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
-          + ", holder=" + holder
-          + ", clientMachine=" + clientMachine);
-    }
+    NameNode.stateChangeLog.debug(
+        "DIR* NameSystem.appendFile: src={}, holder={}, clientMachine={}",
+        src, holder, clientMachine);
     boolean skipSync = false;
     LocatedBlock lb = null;
     HdfsFileStatus stat = null;
@@ -2806,12 +2802,10 @@ private LastBlockWithStatus appendFileInt(final String srcArg, String holder,
         }
       }
       if (lb != null) {
-        if (NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file "
-              +src+" for "+holder+" at "+clientMachine
-              +" block " + lb.getBlock()
-              +" block size " + lb.getBlock().getNumBytes());
-        }
+        NameNode.stateChangeLog.debug(
+            "DIR* NameSystem.appendFile: file {} for {} at {} block {} block" +
+            " size {}", src, holder, clientMachine, lb.getBlock(),
+            lb.getBlock().getNumBytes());
       }
       logAuditEvent(true, "append", srcArg);
       return new LastBlockWithStatus(lb, stat);
@@ -2840,10 +2834,8 @@ void setBlockPoolId(String bpid) {
   LocatedBlock getAdditionalBlock(
       String src, long fileId, String clientName, ExtendedBlock previous,
       DatanodeInfo[] excludedNodes, String[] favoredNodes) throws IOException {
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: "
-          + src + " inodeId " + fileId + " for " + clientName);
-    }
+    NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: {} inodeId {}" +
+        " for {}", src, fileId, clientName);
 
     waitForLoadingFSImage();
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
@@ -2950,10 +2942,8 @@ LocatedBlock getAdditionalDatanode(String src, long fileId,
    */
   void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
       throws IOException {
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b
-          + "of file " + src);
-    }
+    NameNode.stateChangeLog.debug(
+        "BLOCK* NameSystem.abandonBlock: {} of file {}", b, src);
     waitForLoadingFSImage();
     checkOperation(OperationCategory.WRITE);
     FSPermissionChecker pc = getPermissionChecker();
@@ -2962,10 +2952,8 @@ void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot abandon block " + b + " for file" + src);
       FSDirWriteFileOp.abandonBlock(dir, pc, b, fileId, src, holder);
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
-            + b + " is removed from pendingCreates");
-      }
+      NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: {} is " +
+          "removed from pendingCreates", b);
     } finally {
       writeUnlock();
     }
@@ -2973,7 +2961,8 @@ void abandonBlock(ExtendedBlock b, long fileId, String src, String holder)
   }
 
   INodeFile checkLease(
-      String src, String holder, INode inode, long fileId) throws LeaseExpiredException, FileNotFoundException {
+      String src, String holder, INode inode, long fileId)
+      throws LeaseExpiredException, FileNotFoundException {
     assert hasReadLock();
     final String ident = src + " (inode " + fileId + ")";
     if (inode == null) {
@@ -4039,11 +4028,8 @@ private void closeFile(String path, INodeFile file) {
     waitForLoadingFSImage();
     // file is closed
     getEditLog().logCloseFile(path, file);
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("closeFile: "
-          +path+" with "+ file.getBlocks().length
-          +" blocks is persisted to the file system");
-    }
+    NameNode.stateChangeLog.debug("closeFile: {} with {} blocks is persisted" +
+        " to the file system", path, file.getBlocks().length);
   }
 
   /**
@@ -5903,7 +5889,9 @@ Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
       if (cookieTab[0] == null) {
         cookieTab[0] = String.valueOf(getIntCookie(cookieTab[0]));
       }
-      LOG.info("there are no corrupt file blocks.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("there are no corrupt file blocks.");
+      }
       return corruptFiles;
     }
 
@@ -5938,7 +5926,9 @@ Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
         }
       }
       cookieTab[0] = String.valueOf(skip);
-      LOG.info("list corrupt file blocks returned: " + count);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("list corrupt file blocks returned: " + count);
+      }
       return corruptFiles;
     } finally {
       readUnlock();
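
Note that the two listCorruptFileBlocks messages above are downgraded from INFO to DEBUG but keep an explicit LOG.isDebugEnabled() guard, since they still build the message with string concatenation. A minimal sketch of that variant (GuardedConcatSketch is a hypothetical name used only for illustration):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical illustration class; not part of Hadoop.
public class GuardedConcatSketch {
  private static final Logger LOG = LoggerFactory.getLogger(GuardedConcatSketch.class);

  void report(int count) {
    // The message is still assembled with '+', so the guard keeps the
    // concatenation from running while DEBUG is disabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("list corrupt file blocks returned: " + count);
    }
  }

  public static void main(String[] args) {
    new GuardedConcatSketch().report(0);
  }
}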