From ed72daa5df97669906234e8ac9a406d78136b206 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Wed, 1 Apr 2015 12:53:25 -0700
Subject: [PATCH] HDFS-7978. Add LOG.isDebugEnabled() guard for some
 LOG.debug(..). Contributed by Walter Su.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../hadoop/hdfs/BlockReaderFactory.java         | 24 +++++++++----------
 .../java/org/apache/hadoop/hdfs/HAUtil.java     | 12 ++++++----
 .../hdfs/server/datanode/BPServiceActor.java    |  8 ++++---
 .../fsdataset/impl/FsDatasetCache.java          |  8 ++++---
 .../server/namenode/FileJournalManager.java     | 22 ++++++++++-------
 .../hadoop/hdfs/server/namenode/NameNode.java   |  4 +++-
 .../hdfs/shortcircuit/ShortCircuitCache.java    |  4 +++-
 .../offlineImageViewer/FSImageLoader.java       |  6 +++--
 .../hadoop/hdfs/util/LightWeightHashSet.java    |  6 +++--
 .../apache/hadoop/hdfs/web/TokenAspect.java     |  8 +++++--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      | 16 +++++++++----
 12 files changed, 78 insertions(+), 43 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cba53a38bc..435fdd7b81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -373,6 +373,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8009. Signal congestion on the DataNode. (wheat9)
 
+    HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..).
+    (Walter Su via wang)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 1e915b2812..8f33899ea0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -369,9 +369,9 @@ private BlockReader getLegacyBlockReaderLocal() throws IOException {
       return null;
     }
     if (clientContext.getDisableLegacyBlockReaderLocal()) {
-      PerformanceAdvisory.LOG.debug(this + ": can't construct " +
-          "BlockReaderLocalLegacy because " +
-          "disableLegacyBlockReaderLocal is set.");
+      PerformanceAdvisory.LOG.debug("{}: can't construct " +
+          "BlockReaderLocalLegacy because " +
+          "disableLegacyBlockReaderLocal is set.", this);
       return null;
     }
     IOException ioe = null;
@@ -410,8 +410,8 @@ private BlockReader getBlockReaderLocal() throws InvalidToken {
           getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForShortCircuit()) {
-      PerformanceAdvisory.LOG.debug(this + ": " + pathInfo + " is not " +
-          "usable for short circuit; giving up on BlockReaderLocal.");
+      PerformanceAdvisory.LOG.debug("{}: {} is not usable for short circuit; " +
+          "giving up on BlockReaderLocal.", this, pathInfo);
       return null;
     }
     ShortCircuitCache cache = clientContext.getShortCircuitCache();
@@ -426,11 +426,9 @@ private BlockReader getBlockReaderLocal() throws InvalidToken {
       throw exc;
     }
     if (info.getReplica() == null) {
-      if (LOG.isTraceEnabled()) {
-        PerformanceAdvisory.LOG.debug(this + ": failed to get " +
-            "ShortCircuitReplica. Cannot construct " +
-            "BlockReaderLocal via " + pathInfo.getPath());
-      }
+      PerformanceAdvisory.LOG.debug("{}: failed to get " +
+          "ShortCircuitReplica. Cannot construct " +
+          "BlockReaderLocal via {}", this, pathInfo.getPath());
       return null;
     }
     return new BlockReaderLocal.Builder(conf).
@@ -610,9 +608,9 @@ private BlockReader getRemoteBlockReaderFromDomain() throws IOException {
           getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForDataTransfer()) {
-      PerformanceAdvisory.LOG.debug(this + ": not trying to create a " +
-          "remote block reader because the UNIX domain socket at " +
-          pathInfo + " is not usable.");
+      PerformanceAdvisory.LOG.debug("{}: not trying to create a " +
+          "remote block reader because the UNIX domain socket at {}" +
+          " is not usable.", this, pathInfo);
       return null;
     }
     if (LOG.isTraceEnabled()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index f91f7094bb..0ee57c2416 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -319,12 +319,16 @@ public static void cloneDelegationTokenForLogicalUri(
             buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
                 + "//" + specificToken.getService());
         ugi.addToken(alias, specificToken);
-        LOG.debug("Mapped HA service delegation token for logical URI " +
-            haUri + " to namenode " + singleNNAddr);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Mapped HA service delegation token for logical URI " +
+              haUri + " to namenode " + singleNNAddr);
+        }
       }
     } else {
-      LOG.debug("No HA service delegation token found for logical URI " +
-          haUri);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No HA service delegation token found for logical URI " +
+            haUri);
+      }
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index df582f1846..dd6f9ac032 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -580,9 +580,11 @@ DatanodeCommand cacheReport() throws IOException {
       long createCost = createTime - startTime;
       long sendCost = sendTime - createTime;
       dn.getMetrics().addCacheReport(sendCost);
-      LOG.debug("CacheReport of " + blockIds.size()
-          + " block(s) took " + createCost + " msec to generate and "
-          + sendCost + " msecs for RPC and NN processing");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("CacheReport of " + blockIds.size()
+            + " block(s) took " + createCost + " msec to generate and "
+            + sendCost + " msecs for RPC and NN processing");
+      }
     }
     return cmd;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
index c6408e6d5e..e0df0f2e96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -319,9 +319,11 @@ synchronized void uncacheBlock(String bpid, long blockId) {
       mappableBlockMap.put(key,
           new Value(prevValue.mappableBlock, State.UNCACHING));
       if (deferred) {
-        LOG.debug("{} is anchored, and can't be uncached now. Scheduling it " +
-            "for uncaching in {} ",
-            key, DurationFormatUtils.formatDurationHMS(revocationPollingMs));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("{} is anchored, and can't be uncached now. Scheduling it " +
+              "for uncaching in {} ",
+              key, DurationFormatUtils.formatDurationHMS(revocationPollingMs));
+        }
         deferredUncachingExecutor.schedule(
             new UncachingTask(key, revocationMs),
             revocationPollingMs, TimeUnit.MILLISECONDS);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 12733fd0d3..b760dd80ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -320,9 +320,11 @@ synchronized public void selectInputStreams(
       Collection<EditLogInputStream> streams, long fromTxId,
       boolean inProgressOk) throws IOException {
     List<EditLogFile> elfs = matchEditLogs(sd.getCurrentDir());
-    LOG.debug(this + ": selecting input streams starting at " + fromTxId +
-        (inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
-        "from among " + elfs.size() + " candidate file(s)");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(this + ": selecting input streams starting at " + fromTxId +
+          (inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
+          "from among " + elfs.size() + " candidate file(s)");
+    }
     addStreamsToCollectionFromFiles(elfs, streams, fromTxId, inProgressOk);
   }
 
@@ -331,8 +333,10 @@ static void addStreamsToCollectionFromFiles(Collection<EditLogFile> elfs,
     for (EditLogFile elf : elfs) {
       if (elf.isInProgress()) {
         if (!inProgressOk) {
-          LOG.debug("passing over " + elf + " because it is in progress " +
-              "and we are ignoring in-progress logs.");
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("passing over " + elf + " because it is in progress " +
+                "and we are ignoring in-progress logs.");
+          }
           continue;
         }
         try {
@@ -345,9 +349,11 @@ static void addStreamsToCollectionFromFiles(Collection<EditLogFile> elfs,
       }
       if (elf.lastTxId < fromTxId) {
         assert elf.lastTxId != HdfsConstants.INVALID_TXID;
-        LOG.debug("passing over " + elf + " because it ends at " +
-            elf.lastTxId + ", but we only care about transactions " +
-            "as new as " + fromTxId);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("passing over " + elf + " because it ends at " +
+              elf.lastTxId + ", but we only care about transactions " +
+              "as new as " + fromTxId);
+        }
         continue;
       }
       EditLogFileInputStream elfis = new EditLogFileInputStream(elf.getFile(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 04be1adcaa..4575d64724 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1534,7 +1534,9 @@ public static void initializeGenericKeys(Configuration conf,
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
       conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
-      LOG.debug("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
+      }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index bccfd6ead8..73c52d56e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -446,7 +446,9 @@ void unref(ShortCircuitReplica replica) {
         purgeReason = "purging replica because it is stale.";
       }
       if (purgeReason != null) {
-        LOG.debug(this + ": " + purgeReason);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(this + ": " + purgeReason);
+        }
         purge(replica);
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
index fd29106192..351ff03224 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
@@ -147,8 +147,10 @@ public int compare(FsImageProto.FileSummary.Section s1,
             summary.getCodec(), new BufferedInputStream(new LimitInputStream(
                 fin, s.getLength())));
-        LOG.debug("Loading section " + s.getName() + " length: " + s.getLength
-            ());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Loading section " + s.getName() + " length: " + s.getLength
+              ());
+        }
         switch (FSImageFormatProtobuf.SectionName.fromString(s.getName())) {
           case STRING_TABLE:
             stringTable = loadStringTable(is);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
index b60ef7025c..55655ecfc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
@@ -126,8 +126,10 @@ public LightWeightHashSet(int initCapacity, float maxLoadFactor,
     this.shrinkThreshold = (int) (capacity * minLoadFactor);
 
     entries = new LinkedElement[capacity];
-    LOG.debug("initial capacity=" + initialCapacity + ", max load factor= "
-        + maxLoadFactor + ", min load factor= " + minLoadFactor);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("initial capacity=" + initialCapacity + ", max load factor= "
+          + maxLoadFactor + ", min load factor= " + minLoadFactor);
+    }
   }
 
   public LightWeightHashSet() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
index fc18dbe72c..164692e8af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
@@ -136,7 +136,9 @@ synchronized void ensureTokenInitialized() throws IOException {
       if (token != null) {
         fs.setDelegationToken(token);
         addRenewAction(fs);
-        LOG.debug("Created new DT for " + token.getService());
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Created new DT for " + token.getService());
+        }
       }
       hasInitedToken = true;
     }
@@ -149,7 +151,9 @@ public synchronized void reset() {
   synchronized void initDelegationToken(UserGroupInformation ugi) {
     Token token = selectDelegationToken(ugi);
     if (token != null) {
-      LOG.debug("Found existing DT for " + token.getService());
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Found existing DT for " + token.getService());
+      }
       fs.setDelegationToken(token);
       hasInitedToken = true;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 12adb05ab7..6a55899180 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -224,12 +224,16 @@ protected synchronized Token getDelegationToken() throws IOException {
       // refetch tokens. even if ugi has credentials, don't attempt
       // to get another token to match hdfs/rpc behavior
       if (token != null) {
-        LOG.debug("Using UGI token: " + token);
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Using UGI token: " + token);
+        }
         canRefreshDelegationToken = false;
       } else {
         token = getDelegationToken(null);
         if (token != null) {
-          LOG.debug("Fetched new token: " + token);
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("Fetched new token: " + token);
+          }
         } else { // security is disabled
           canRefreshDelegationToken = false;
         }
@@ -244,7 +248,9 @@ synchronized boolean replaceExpiredDelegationToken() throws IOException {
     boolean replaced = false;
     if (canRefreshDelegationToken) {
       Token token = getDelegationToken(null);
-      LOG.debug("Replaced expired token: " + token);
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Replaced expired token: " + token);
+      }
       setDelegationToken(token);
       replaced = (token != null);
     }
@@ -1188,7 +1194,9 @@ public synchronized void close() throws IOException {
           cancelDelegationToken(delegationToken);
         }
       } catch (IOException ioe) {
-        LOG.debug("Token cancel failed: "+ioe);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Token cancel failed: " + ioe);
+        }
       } finally {
         super.close();
       }
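
Reviewer note (not part of the patch): two idioms appear in the hunks above. Most call sites gain an explicit LOG.isDebugEnabled() guard, so message construction (string concatenation, toString(), and other argument evaluation) is skipped entirely when debug logging is disabled. The PerformanceAdvisory.LOG call sites instead switch to SLF4J-style "{}" placeholders, which defer message formatting until the logger is known to be enabled. A minimal sketch of both patterns follows; the class and variable names are hypothetical and the snippet is illustrative only, not code from this patch.

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class DebugLoggingPatterns {
      // commons-logging style logger (hypothetical example class)
      private static final Log LOG =
          LogFactory.getLog(DebugLoggingPatterns.class);
      // SLF4J logger, the API style used by PerformanceAdvisory.LOG above
      private static final Logger SLF4J_LOG =
          LoggerFactory.getLogger(DebugLoggingPatterns.class);

      void report(String key, long costMs) {
        // Guard pattern: without the guard, the concatenation below would run
        // even when debug logging is off; with it, the call is essentially free.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Processing " + key + " took " + costMs + " msec");
        }

        // Parameterized pattern: the message is only formatted if debug is
        // enabled, so no guard is needed. Note that argument expressions are
        // still evaluated, which is why a guard can still pay off when an
        // argument is expensive to compute.
        SLF4J_LOG.debug("Processing {} took {} msec", key, costMs);
      }
    }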