From 5f6e22516668ff94a76737ad5e2cdcb2ff9f6dfd Mon Sep 17 00:00:00 2001
From: Eric Yang
Date: Mon, 18 Mar 2019 13:57:18 -0400
Subject: [PATCH] YARN-9363. Replaced debug logging with SLF4J parameterized
 log message. Contributed by Prabhu Joseph

---
 .../hadoop/fs/DelegationTokenRenewer.java     |  4 +-
 .../yarn/service/provider/ProviderUtils.java  |  7 +--
 .../api/impl/FileSystemTimelineWriter.java    |  4 +-
 .../hadoop/yarn/csi/client/CsiGrpcClient.java |  7 +--
 .../hadoop/yarn/csi/client/FakeCsiDriver.java |  5 +-
 .../util/timeline/TimelineServerUtils.java    |  7 +--
 .../WindowsSecureContainerExecutor.java       | 32 ++++---------
 .../RecoverPausedContainerLaunch.java         |  6 +--
 .../CGroupElasticMemoryController.java        |  8 ++--
 .../linux/resources/CGroupsHandlerImpl.java   |  7 +--
 .../resources/CGroupsResourceCalculator.java  | 18 ++++---
 .../resources/CombinedResourceCalculator.java |  8 ++--
 .../linux/resources/DefaultOOMHandler.java    |  8 ++--
 .../NetworkTagMappingManagerFactory.java      |  6 +--
 .../resources/fpga/FpgaResourceAllocator.java |  7 +--
 .../fpga/FpgaResourceHandlerImpl.java         |  7 +--
 .../resources/gpu/GpuResourceAllocator.java   |  7 +--
 .../resources/gpu/GpuResourceHandlerImpl.java |  8 ++--
 .../resources/numa/NumaNodeResource.java      |  7 +--
 .../resources/numa/NumaResourceAllocator.java |  7 +--
 .../numa/NumaResourceHandlerImpl.java         |  8 ++--
 .../deviceframework/DeviceMappingManager.java | 24 ++++------
 .../deviceframework/DevicePluginAdapter.java  |  7 +--
 ...DeviceResourceDockerRuntimePluginImpl.java | 47 +++++++------------
 .../DeviceResourceHandlerImpl.java            | 20 +++-----
 .../DeviceResourceUpdaterImpl.java            |  7 +--
 .../fpga/FpgaResourcePlugin.java              |  7 +--
 .../gpu/NvidiaDockerV1CommandPlugin.java      | 21 ++++-----
 .../gpu/NvidiaDockerV2CommandPlugin.java      |  7 +--
 .../nodemanager/TestNodeManagerMXBean.java    |  8 ++--
 .../TestCGroupElasticMemoryController.java    |  8 ++--
 .../AbstractAutoCreatedLeafQueue.java         |  4 +-
 .../scheduler/capacity/CapacityScheduler.java |  6 +--
 .../scheduler/capacity/ParentQueue.java       | 12 ++---
 .../QueueManagementDynamicEditPolicy.java     | 13 ++---
 ...uaranteedOrZeroCapacityOverTimePolicy.java | 12 ++---
 .../constraint/PlacementConstraintsUtil.java  | 28 ++++-------
 .../scheduler/fair/FSQueue.java               |  6 +--
 .../scheduler/fifo/FifoScheduler.java         | 11 ++---
 .../LocalityAppPlacementAllocator.java        |  6 +--
 .../server/resourcemanager/Application.java   | 29 +++++-------
 .../storage/flow/FlowScanner.java             |  4 +-
 42 files changed, 188 insertions(+), 277 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
index 09c3a8ad3d..2feb937525 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java
@@ -242,9 +242,7 @@ public void removeRenewAction(
       } catch (InterruptedException ie) {
         LOG.error("Interrupted while canceling token for " + fs.getUri()
             + "filesystem");
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Exception in removeRenewAction: ", ie);
-        }
+        LOG.debug("Exception in removeRenewAction: {}", ie);
       }
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
index ea1fb0c51d..5fc96a09df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
@@ -212,11 +212,8 @@ public static synchronized void createConfigFileAndAddLocalResource(
       log.info("Component instance conf dir already exists: " + compInstanceDir);
     }
-    if (log.isDebugEnabled()) {
-      log.debug("Tokens substitution for component instance: " + instance
-          .getCompInstanceName() + System.lineSeparator()
-          + tokensForSubstitution);
-    }
+    log.debug("Tokens substitution for component instance: {}{}{}", instance
+        .getCompInstanceName(), System.lineSeparator(), tokensForSubstitution);
 
     for (ConfigFile originalFile : compLaunchContext.getConfiguration()
         .getFiles()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
index e60518422e..b92f4e4123 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/FileSystemTimelineWriter.java
@@ -287,9 +287,7 @@ private void writeDomain(ApplicationAttemptId appAttemptId,
     Path domainLogPath =
         new Path(attemptDirCache.getAppAttemptDir(appAttemptId),
             DOMAIN_LOG_PREFIX + appAttemptId.toString());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
-    }
+    LOG.debug("Writing domains for {} to {}", appAttemptId, domainLogPath);
     this.logFDsCache.writeDomainLog(
         fs, domainLogPath, objMapper, domain, isAppendSupported);
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
index 5dc1b3f794..af6eec2356 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
@@ -27,8 +27,8 @@ import io.netty.channel.epoll.EpollEventLoopGroup;
 import io.netty.channel.unix.DomainSocketAddress;
 import io.netty.util.concurrent.DefaultThreadFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.SocketAddress;
@@ -39,7 +39,8 @@
  */
 public final class CsiGrpcClient implements AutoCloseable {
 
-  private static final Log LOG = LogFactory.getLog(CsiGrpcClient.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CsiGrpcClient.class);
 
   private final ManagedChannel channel;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java index e4d4da2788..0c6de32e51 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiDriver.java @@ -25,7 +25,8 @@ import org.apache.hadoop.yarn.csi.utils.GrpcHelper; import java.io.IOException; -import java.util.logging.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A fake implementation of CSI driver. @@ -33,7 +34,7 @@ */ public class FakeCsiDriver { - private static final Logger LOG = Logger + private static final Logger LOG = LoggerFactory .getLogger(FakeCsiDriver.class.getName()); private Server server; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java index 3021def7b2..15c6d3dadb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java @@ -21,8 +21,8 @@ import java.util.LinkedHashSet; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.AuthenticationFilterInitializer; import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter; @@ -33,7 +33,8 @@ * Set of utility methods to be used across timeline reader and collector. 
*/ public final class TimelineServerUtils { - private static final Log LOG = LogFactory.getLog(TimelineServerUtils.class); + private static final Logger LOG = + LoggerFactory.getLogger(TimelineServerUtils.class); private TimelineServerUtils() { } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java index 5a39cc3788..c4d6918cf1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java @@ -36,8 +36,8 @@ import java.util.Map; import org.apache.commons.lang3.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DelegateToFileSystem; import org.apache.hadoop.fs.FileContext; @@ -68,8 +68,8 @@ */ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor { - private static final Log LOG = LogFactory - .getLog(WindowsSecureContainerExecutor.class); + private static final Logger LOG = LoggerFactory + .getLogger(WindowsSecureContainerExecutor.class); public static final String LOCALIZER_PID_FORMAT = "STAR_LOCALIZER_%s"; @@ -591,10 +591,7 @@ protected LocalWrapperScriptBuilder getLocalWrapperScriptBuilder( @Override protected void copyFile(Path src, Path dst, String owner) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("copyFile: %s -> %s owner:%s", src.toString(), - dst.toString(), owner)); - } + LOG.debug("copyFile: {} -> {} owner:{}", src, dst, owner); Native.Elevated.copy(src, dst, true); Native.Elevated.chown(dst, owner, nodeManagerGroup); } @@ -607,10 +604,7 @@ protected void createDir(Path dirPath, FsPermission perms, // This is similar to how LCE creates dirs // perms = new FsPermission(DIR_PERM); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("createDir: %s perm:%s owner:%s", - dirPath.toString(), perms.toString(), owner)); - } + LOG.debug("createDir: {} perm:{} owner:{}", dirPath, perms, owner); super.createDir(dirPath, perms, createParent, owner); lfs.setOwner(dirPath, owner, nodeManagerGroup); @@ -619,10 +613,7 @@ protected void createDir(Path dirPath, FsPermission perms, @Override protected void setScriptExecutable(Path script, String owner) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("setScriptExecutable: %s owner:%s", - script.toString(), owner)); - } + LOG.debug("setScriptExecutable: {} owner:{}", script, owner); super.setScriptExecutable(script, owner); Native.Elevated.chown(script, owner, nodeManagerGroup); } @@ -630,10 +621,7 @@ protected void setScriptExecutable(Path script, String owner) @Override public Path localizeClasspathJar(Path jarPath, Path target, String owner) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("localizeClasspathJar: %s %s o:%s", - jarPath, target, owner)); - } + LOG.debug("localizeClasspathJar: {} {} o:{}", jarPath, target, owner); createDir(target, new 
FsPermission(DIR_PERM), true, owner); String fileName = jarPath.getName(); Path dst = new Path(target, fileName); @@ -669,9 +657,7 @@ public void startLocalizer(LocalizerStartContext ctx) throws IOException, copyFile(nmPrivateContainerTokensPath, tokenDst, user); File cwdApp = new File(appStorageDir.toString()); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("cwdApp: %s", cwdApp)); - } + LOG.debug("cwdApp: {}", cwdApp); List command ; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java index 761fe3b11e..c678c91860 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/RecoverPausedContainerLaunch.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -42,7 +42,7 @@ */ public class RecoverPausedContainerLaunch extends ContainerLaunch { - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( RecoveredContainerLaunch.class); public RecoverPausedContainerLaunch(Context context, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java index 752c3a6b2d..e6a5999f7c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupElasticMemoryController.java @@ -19,8 +19,8 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.IOUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.ApplicationConstants; @@ -56,8 +56,8 @@ * a container to kill. The algorithm that picks the container is a plugin. 
*/ public class CGroupElasticMemoryController extends Thread { - protected static final Log LOG = LogFactory - .getLog(CGroupElasticMemoryController.class); + protected static final Logger LOG = LoggerFactory + .getLogger(CGroupElasticMemoryController.class); private final Clock clock = new MonotonicClock(); private String yarnCGroupPath; private String oomListenerPath; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java index 20e0fc1506..fab1490833 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java @@ -561,11 +561,8 @@ public void updateCGroupParam(CGroupController controller, String cGroupId, String cGroupParamPath = getPathForCGroupParam(controller, cGroupId, param); PrintWriter pw = null; - if (LOG.isDebugEnabled()) { - LOG.debug( - String.format("updateCGroupParam for path: %s with value %s", - cGroupParamPath, value)); - } + LOG.debug("updateCGroupParam for path: {} with value {}", + cGroupParamPath, value); try { File file = new File(cGroupParamPath); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java index 50ce3eac14..0b25db4ecc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java @@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.util.CpuTimeTracker; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.SysInfoLinux; @@ -63,8 +63,8 @@ enum Result { Continue, Exit } - protected static final Log LOG = LogFactory - .getLog(CGroupsResourceCalculator.class); + protected static final Logger LOG = LoggerFactory + .getLogger(CGroupsResourceCalculator.class); private static final String PROCFS = "/proc"; static final String CGROUP = "cgroup"; static final String CPU_STAT = "cpuacct.stat"; @@ -145,9 +145,7 @@ public void initialize() throws YarnException { @Override public float getCpuUsagePercent() { - if (LOG.isDebugEnabled()) { - LOG.debug("Process " + pid + " jiffies:" + 
processTotalJiffies); - } + LOG.debug("Process {} jiffies:{}", pid, processTotalJiffies); return cpuTimeTracker.getCpuTrackerUsagePercent(); } @@ -187,9 +185,9 @@ public void updateProcessTree() { processPhysicalMemory = getMemorySize(memStat); if (memswStat.exists()) { processVirtualMemory = getMemorySize(memswStat); - } else if(LOG.isDebugEnabled()) { - LOG.debug("Swap cgroups monitoring is not compiled into the kernel " + - memswStat.getAbsolutePath().toString()); + } else { + LOG.debug("Swap cgroups monitoring is not compiled into the kernel {}", + memswStat.getAbsolutePath()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java index 84b3ed005d..5d118182a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CombinedResourceCalculator.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree; import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree; @@ -29,8 +29,8 @@ * it is backward compatible with procfs in terms of virtual memory usage. 
*/ public class CombinedResourceCalculator extends ResourceCalculatorProcessTree { - protected static final Log LOG = LogFactory - .getLog(CombinedResourceCalculator.class); + protected static final Logger LOG = LoggerFactory + .getLogger(CombinedResourceCalculator.class); private ProcfsBasedProcessTree procfs; private CGroupsResourceCalculator cgroup; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java index 844bb6c414..6d74809dcc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/DefaultOOMHandler.java @@ -19,8 +19,8 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.api.records.ExecutionType; @@ -46,8 +46,8 @@ @InterfaceAudience.Public @InterfaceStability.Evolving public class DefaultOOMHandler implements Runnable { - protected static final Log LOG = LogFactory - .getLog(DefaultOOMHandler.class); + protected static final Logger LOG = LoggerFactory + .getLogger(DefaultOOMHandler.class); private final Context context; private final String memoryStatFile; private final CGroupsHandler cgroups; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java index 17e2e21744..cc7fc13afe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/NetworkTagMappingManagerFactory.java @@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -32,7 +32,7 @@ * */ public final class NetworkTagMappingManagerFactory { - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( 
NetworkTagMappingManagerFactory.class); private NetworkTagMappingManagerFactory() {} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java index 62dd3c497f..334c6bd4c3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceAllocator.java @@ -22,8 +22,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.Context; @@ -44,7 +44,8 @@ * */ public class FpgaResourceAllocator { - static final Log LOG = LogFactory.getLog(FpgaResourceAllocator.class); + static final Logger LOG = LoggerFactory. + getLogger(FpgaResourceAllocator.class); private List allowedFpgas = new LinkedList<>(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java index 61ffd355d2..d9ca8d1041 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/fpga/FpgaResourceHandlerImpl.java @@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.fpga; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -50,7 +50,8 @@ @InterfaceAudience.Private public class FpgaResourceHandlerImpl implements ResourceHandler { - static final Log LOG = LogFactory.getLog(FpgaResourceHandlerImpl.class); + static final Logger LOG = LoggerFactory. 
+ getLogger(FpgaResourceHandlerImpl.class); private final String REQUEST_FPGA_IP_ID_KEY = "REQUESTED_FPGA_IP_ID"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java index 2496ac851c..67936ba3b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java @@ -21,8 +21,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; @@ -52,7 +52,8 @@ * Allocate GPU resources according to requirements */ public class GpuResourceAllocator { - final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class); + final static Logger LOG = LoggerFactory. + getLogger(GpuResourceAllocator.class); private static final int WAIT_MS_PER_LOOP = 1000; private Set allowedGpuDevices = new TreeSet<>(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java index 2c9baf2305..9474b0f847 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -41,8 +41,8 @@ import java.util.List; public class GpuResourceHandlerImpl implements ResourceHandler { - final static Log LOG = LogFactory - .getLog(GpuResourceHandlerImpl.class); + final static Logger LOG = LoggerFactory + .getLogger(GpuResourceHandlerImpl.class); // This will be used by container-executor to add necessary clis public static final String EXCLUDED_GPUS_CLI_OPTION = "--excluded_gpus"; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java index f434412ac2..7cb720dbc4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaNodeResource.java @@ -20,8 +20,8 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; @@ -36,7 +36,8 @@ public class NumaNodeResource { private long usedMemory; private int usedCpus; - private static final Log LOG = LogFactory.getLog(NumaNodeResource.class); + private static final Logger LOG = LoggerFactory. + getLogger(NumaNodeResource.class); private Map containerVsMemUsage = new ConcurrentHashMap<>(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java index e152bdab87..08c328278f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java @@ -29,8 +29,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.Shell.ShellCommandExecutor; import org.apache.hadoop.util.StringUtils; @@ -51,7 +51,8 @@ */ public class NumaResourceAllocator { - private static final Log LOG = LogFactory.getLog(NumaResourceAllocator.class); + private static final Logger LOG = LoggerFactory. 
+ getLogger(NumaResourceAllocator.class); // Regex to find node ids, Ex: 'available: 2 nodes (0-1)' private static final String NUMA_NODEIDS_REGEX = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java index e6e3159b48..8a6ebda860 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceHandlerImpl.java @@ -20,8 +20,8 @@ import java.util.ArrayList; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -39,8 +39,8 @@ */ public class NumaResourceHandlerImpl implements ResourceHandler { - private static final Log LOG = LogFactory - .getLog(NumaResourceHandlerImpl.class); + private static final Logger LOG = LoggerFactory + .getLogger(NumaResourceHandlerImpl.class); private final NumaResourceAllocator numaResourceAllocator; private final String numaCtlCmd; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java index b620620dc9..ed80d3f80e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceMappingManager.java @@ -22,8 +22,8 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; @@ -52,7 +52,8 @@ * scheduler. * */ public class DeviceMappingManager { - static final Log LOG = LogFactory.getLog(DeviceMappingManager.class); + static final Logger LOG = LoggerFactory. 
+ getLogger(DeviceMappingManager.class); private Context nmContext; private static final int WAIT_MS_PER_LOOP = 1000; @@ -163,10 +164,7 @@ private synchronized DeviceAllocation internalAssignDevices( ContainerId containerId = container.getContainerId(); int requestedDeviceCount = getRequestedDeviceCount(resourceName, requestedResource); - if (LOG.isDebugEnabled()) { - LOG.debug("Try allocating " + requestedDeviceCount - + " " + resourceName); - } + LOG.debug("Try allocating {} {}", requestedDeviceCount, resourceName); // Assign devices to container if requested some. if (requestedDeviceCount > 0) { if (requestedDeviceCount > getAvailableDevices(resourceName)) { @@ -266,10 +264,8 @@ public synchronized void cleanupAssignedDevices(String resourceName, while (iter.hasNext()) { entry = iter.next(); if (entry.getValue().equals(containerId)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Recycle devices: " + entry.getKey() - + ", type: " + resourceName + " from " + containerId); - } + LOG.debug("Recycle devices: {}, type: {} from {}", entry.getKey(), + resourceName, containerId); iter.remove(); } } @@ -317,10 +313,8 @@ private void pickAndDoSchedule(Set allowed, ContainerId containerId = c.getContainerId(); Map env = c.getLaunchContext().getEnvironment(); if (null == dps) { - if (LOG.isDebugEnabled()) { - LOG.debug("Customized device plugin scheduler is preferred " - + "but not implemented, use default logic"); - } + LOG.debug("Customized device plugin scheduler is preferred " + + "but not implemented, use default logic"); defaultScheduleAction(allowed, used, assigned, containerId, count); } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java index 462e45a52a..a99cc966e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DevicePluginAdapter.java @@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.Context; @@ -47,7 +47,8 @@ * * */ public class DevicePluginAdapter implements ResourcePlugin { - private final static Log LOG = LogFactory.getLog(DevicePluginAdapter.class); + private final static Logger LOG = LoggerFactory. 
+ getLogger(DevicePluginAdapter.class); private final String resourceName; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java index aaa11bd858..285ed05655 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceDockerRuntimePluginImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device; import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePlugin; @@ -47,7 +47,7 @@ public class DeviceResourceDockerRuntimePluginImpl implements DockerCommandPlugin { - final static Log LOG = LogFactory.getLog( + final static Logger LOG = LoggerFactory.getLogger( DeviceResourceDockerRuntimePluginImpl.class); private String resourceName; @@ -73,9 +73,7 @@ public DeviceResourceDockerRuntimePluginImpl(String resourceName, public void updateDockerRunCommand(DockerRunCommand dockerRunCommand, Container container) throws ContainerExecutionException { String containerId = container.getContainerId().toString(); - if (LOG.isDebugEnabled()) { - LOG.debug("Try to update docker run command for: " + containerId); - } + LOG.debug("Try to update docker run command for: {}", containerId); if(!requestedDevice(resourceName, container)) { return; } @@ -89,17 +87,12 @@ public void updateDockerRunCommand(DockerRunCommand dockerRunCommand, } // handle runtime dockerRunCommand.addRuntime(deviceRuntimeSpec.getContainerRuntime()); - if (LOG.isDebugEnabled()) { - LOG.debug("Handle docker container runtime type: " - + deviceRuntimeSpec.getContainerRuntime() + " for container: " - + containerId); - } + LOG.debug("Handle docker container runtime type: {} for container: {}", + deviceRuntimeSpec.getContainerRuntime(), containerId); // handle device mounts Set deviceMounts = deviceRuntimeSpec.getDeviceMounts(); - if (LOG.isDebugEnabled()) { - LOG.debug("Handle device mounts: " + deviceMounts + " for container: " - + containerId); - } + LOG.debug("Handle device mounts: {} for container: {}", deviceMounts, + containerId); for (MountDeviceSpec mountDeviceSpec : deviceMounts) { dockerRunCommand.addDevice( mountDeviceSpec.getDevicePathInHost(), @@ -107,10 +100,8 @@ public void updateDockerRunCommand(DockerRunCommand dockerRunCommand, } // handle volume mounts Set mountVolumeSpecs = deviceRuntimeSpec.getVolumeMounts(); - if (LOG.isDebugEnabled()) { - LOG.debug("Handle volume mounts: " + mountVolumeSpecs + " for container: " - + containerId); - } + LOG.debug("Handle volume mounts: {} for 
container: {}", mountVolumeSpecs, + containerId); for (MountVolumeSpec mountVolumeSpec : mountVolumeSpecs) { if (mountVolumeSpec.getReadOnly()) { dockerRunCommand.addReadOnlyMountLocation( @@ -124,10 +115,8 @@ public void updateDockerRunCommand(DockerRunCommand dockerRunCommand, } // handle envs dockerRunCommand.addEnv(deviceRuntimeSpec.getEnvs()); - if (LOG.isDebugEnabled()) { - LOG.debug("Handle envs: " + deviceRuntimeSpec.getEnvs() - + " for container: " + containerId); - } + LOG.debug("Handle envs: {} for container: {}", + deviceRuntimeSpec.getEnvs(), containerId); } @Override @@ -147,10 +136,8 @@ public DockerVolumeCommand getCreateDockerVolumeCommand(Container container) DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND); command.setDriverName(volumeSec.getVolumeDriver()); command.setVolumeName(volumeSec.getVolumeName()); - if (LOG.isDebugEnabled()) { - LOG.debug("Get volume create request from plugin:" + volumeClaims - + " for container: " + container.getContainerId().toString()); - } + LOG.debug("Get volume create request from plugin:{} for container: {}", + volumeClaims, container.getContainerId()); return command; } } @@ -195,10 +182,8 @@ private Set getAllocatedDevices(Container container) { allocated = devicePluginAdapter .getDeviceMappingManager() .getAllocatedDevices(resourceName, containerId); - if (LOG.isDebugEnabled()) { - LOG.debug("Get allocation from deviceMappingManager: " - + allocated + ", " + resourceName + " for container: " + containerId); - } + LOG.debug("Get allocation from deviceMappingManager: {}, {} for" + + " container: {}", allocated, resourceName, containerId); cachedAllocation.put(containerId, allocated); return allocated; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java index 0e2a6f8464..97ff94f788 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceHandlerImpl.java @@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -53,7 +53,8 @@ * */ public class DeviceResourceHandlerImpl implements ResourceHandler { - static final Log LOG = LogFactory.getLog(DeviceResourceHandlerImpl.class); + static final Logger LOG = LoggerFactory. 
+ getLogger(DeviceResourceHandlerImpl.class); private final String resourceName; private final DevicePlugin devicePlugin; @@ -134,10 +135,7 @@ public synchronized List preStart(Container container) String containerIdStr = container.getContainerId().toString(); DeviceMappingManager.DeviceAllocation allocation = deviceMappingManager.assignDevices(resourceName, container); - if (LOG.isDebugEnabled()) { - LOG.debug("Allocated to " - + containerIdStr + ": " + allocation); - } + LOG.debug("Allocated to {}: {}", containerIdStr, allocation); DeviceRuntimeSpec spec; try { spec = devicePlugin.onDevicesAllocated( @@ -291,13 +289,9 @@ public DeviceType getDeviceType(Device device) { } DeviceType deviceType; try { - if (LOG.isDebugEnabled()) { - LOG.debug("Try to get device type from device path: " + devName); - } + LOG.debug("Try to get device type from device path: {}", devName); String output = shellWrapper.getDeviceFileType(devName); - if (LOG.isDebugEnabled()) { - LOG.debug("stat output:" + output); - } + LOG.debug("stat output:{}", output); deviceType = output.startsWith("c") ? DeviceType.CHAR : DeviceType.BLOCK; } catch (IOException e) { String msg = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java index e5ef578058..da81cbbac8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/deviceframework/DeviceResourceUpdaterImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.Device; @@ -33,7 +33,8 @@ * */ public class DeviceResourceUpdaterImpl extends NodeResourceUpdaterPlugin { - final static Log LOG = LogFactory.getLog(DeviceResourceUpdaterImpl.class); + final static Logger LOG = LoggerFactory. 
+ getLogger(DeviceResourceUpdaterImpl.class); private String resourceName; private DevicePlugin devicePlugin; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java index 9add3d218b..4dab1a47a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaResourcePlugin.java @@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.fpga; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -37,7 +37,8 @@ import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NMResourceInfo; public class FpgaResourcePlugin implements ResourcePlugin { - private static final Log LOG = LogFactory.getLog(FpgaResourcePlugin.class); + private static final Logger LOG = LoggerFactory. + getLogger(FpgaResourcePlugin.class); private ResourceHandler fpgaResourceHandler = null; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java index c2e315a51c..36a0d55ea7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV1CommandPlugin.java @@ -20,8 +20,8 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.IOUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -50,7 +50,8 @@ * Implementation to use nvidia-docker v1 as GPU docker command plugin. */ public class NvidiaDockerV1CommandPlugin implements DockerCommandPlugin { - final static Log LOG = LogFactory.getLog(NvidiaDockerV1CommandPlugin.class); + final static Logger LOG = LoggerFactory. 
+ getLogger(NvidiaDockerV1CommandPlugin.class); private Configuration conf; private Map> additionalCommands = null; @@ -121,9 +122,7 @@ private void init() throws ContainerExecutionException { addToCommand(DEVICE_OPTION, getValue(str)); } else if (str.startsWith(VOLUME_DRIVER_OPTION)) { volumeDriver = getValue(str); - if (LOG.isDebugEnabled()) { - LOG.debug("Found volume-driver:" + volumeDriver); - } + LOG.debug("Found volume-driver:{}", volumeDriver); } else if (str.startsWith(MOUNT_RO_OPTION)) { String mount = getValue(str); if (!mount.endsWith(":ro")) { @@ -286,15 +285,11 @@ public DockerVolumeCommand getCreateDockerVolumeCommand(Container container) if (VOLUME_NAME_PATTERN.matcher(mountSource).matches()) { // This is a valid named volume newVolumeName = mountSource; - if (LOG.isDebugEnabled()) { - LOG.debug("Found volume name for GPU:" + newVolumeName); - } + LOG.debug("Found volume name for GPU:{}", newVolumeName); break; } else{ - if (LOG.isDebugEnabled()) { - LOG.debug("Failed to match " + mountSource - + " to named-volume regex pattern"); - } + LOG.debug("Failed to match {} to named-volume regex pattern", + mountSource); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java index ff25eb6ced..f5844858b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/NvidiaDockerV2CommandPlugin.java @@ -19,8 +19,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings; @@ -41,7 +41,8 @@ * Implementation to use nvidia-docker v2 as GPU docker command plugin. */ public class NvidiaDockerV2CommandPlugin implements DockerCommandPlugin { - final static Log LOG = LogFactory.getLog(NvidiaDockerV2CommandPlugin.class); + final static Logger LOG = LoggerFactory. 
+      getLogger(NvidiaDockerV2CommandPlugin.class);
   private String nvidiaRuntime = "nvidia";
   private String nvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
index 80b915c5f9..7a6cc6770c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerMXBean.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -33,8 +33,8 @@
  * Class for testing {@link NodeManagerMXBean} implementation.
  */
 public class TestNodeManagerMXBean {
-  public static final Log LOG = LogFactory.getLog(
-      TestNodeManagerMXBean.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestNodeManagerMXBean.class);
   @Test
   public void testNodeManagerMXBean() throws Exception {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
index 40a296cf22..f10ec50f3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -43,8 +43,8 @@
  * Test for elastic non-strict memory controller based on cgroups.
  */
 public class TestCGroupElasticMemoryController {
-  protected static final Log LOG = LogFactory
-      .getLog(TestCGroupElasticMemoryController.class);
+  protected static final Logger LOG = LoggerFactory
+      .getLogger(TestCGroupElasticMemoryController.class);
   private YarnConfiguration conf = new YarnConfiguration();
   private File script = new File("target/" +
       TestCGroupElasticMemoryController.class.getName());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
index 1ce67d660b..9e5bdb0bc0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractAutoCreatedLeafQueue.java
@@ -92,10 +92,8 @@ public void setEntitlement(String nodeLabel, QueueEntitlement entitlement)
     // note: we currently set maxCapacity to capacity
     // this might be revised later
     setMaxCapacity(nodeLabel, entitlement.getMaxCapacity());
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("successfully changed to {} for queue {}", capacity, this
+    LOG.debug("successfully changed to {} for queue {}", capacity, this
         .getQueueName());
-    }
     //update queue used capacity etc
     CSQueueUtils.updateQueueStatistics(resourceCalculator,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 49f1954710..4baf405385 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1053,10 +1053,8 @@ private void addApplicationAttempt(
         + " to scheduler from user " + application.getUser() + " in queue "
         + queue.getQueueName());
     if (isAttemptRecovering) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(applicationAttemptId
-            + " is recovering. Skipping notifying ATTEMPT_ADDED");
-      }
+      LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
+          applicationAttemptId);
     } else{
       rmContext.getDispatcher().getEventHandler().handle(
           new RMAppAttemptEvent(applicationAttemptId,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index cb6fc28f70..53e8fd2429 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -593,10 +593,8 @@ public CSAssignment assignContainers(Resource clusterResource,
             NodeType.NODE_LOCAL);
     while (canAssign(clusterResource, node)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Trying to assign containers to child-queue of "
-            + getQueueName());
-      }
+      LOG.debug("Trying to assign containers to child-queue of {}",
+          getQueueName());
       // Are we over maximum-capacity for this queue?
       // This will also consider parent's limits and also continuous reservation
@@ -781,10 +779,8 @@ private CSAssignment assignContainersToChildQueues(Resource cluster,
     for (Iterator<CSQueue> iter = sortAndGetChildrenAllocationIterator(
         candidates.getPartition()); iter.hasNext(); ) {
       CSQueue childQueue = iter.next();
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Trying to assign to queue: " + childQueue.getQueuePath()
-            + " stats: " + childQueue);
-      }
+      LOG.debug("Trying to assign to queue: {} stats: {}",
+          childQueue.getQueuePath(), childQueue);
       // Get ResourceLimits of child queue before assign containers
       ResourceLimits childLimits =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
index ea43ac82f5..9602558848 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/QueueManagementDynamicEditPolicy.java
@@ -221,15 +221,10 @@ List manageAutoCreatedLeafQueues()
             + parentQueue.getQueueName(), e);
       }
     } else{
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-            "Skipping queue management updates for parent queue "
-                + parentQueue
-                .getQueuePath() + " "
-                + "since configuration for auto creating queues beyond "
-                + "parent's "
-                + "guaranteed capacity is disabled");
-      }
+      LOG.debug("Skipping queue management updates for parent queue {} "
+          + "since configuration for auto creating queues beyond "
+          + "parent's guaranteed capacity is disabled",
+          parentQueue.getQueuePath());
     }
     return queueManagementChanges;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
index b1d3f747ff..d91f4887db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/GuaranteedOrZeroCapacityOverTimePolicy.java
@@ -669,19 +669,15 @@ public void commitQueueManagementChanges(
         if (updatedQueueTemplate.getQueueCapacities().
             getCapacity(nodeLabel) > 0) {
           if (isActive(leafQueue, nodeLabel)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Queue is already active." + " Skipping activation : "
-                  + leafQueue.getQueueName());
-            }
+            LOG.debug("Queue is already active. Skipping activation : {}",
+                leafQueue.getQueueName());
           } else{
             activate(leafQueue, nodeLabel);
           }
         } else{
           if (!isActive(leafQueue, nodeLabel)) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Queue is already de-activated. Skipping "
-                  + "de-activation : " + leafQueue.getQueueName());
-            }
+            LOG.debug("Queue is already de-activated. Skipping "
+                + "de-activation : {}", leafQueue.getQueueName());
           } else{
             deactivate(leafQueue, nodeLabel);
           }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index 8711cb4074..d04cf9c253 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -154,18 +154,13 @@ private static boolean getNodeConstraintEvaluatedResult(
     if (schedulerNode.getNodeAttributes() == null ||
         !schedulerNode.getNodeAttributes().contains(requestAttribute)) {
       if (opCode == NodeAttributeOpCode.NE) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Incoming requestAttribute:" + requestAttribute
-              + "is not present in " + schedulerNode.getNodeID()
-              + ", however opcode is NE. Hence accept this node.");
-        }
+        LOG.debug("Incoming requestAttribute:{} is not present in {},"
+            + " however opcode is NE. Hence accept this node.",
+            requestAttribute, schedulerNode.getNodeID());
         return true;
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Incoming requestAttribute:" + requestAttribute
-            + "is not present in " + schedulerNode.getNodeID()
-            + ", skip such node.");
-      }
+      LOG.debug("Incoming requestAttribute:{} is not present in {},"
+          + " skip such node.", requestAttribute, schedulerNode.getNodeID());
       return false;
     }
@@ -183,21 +178,16 @@ private static boolean getNodeConstraintEvaluatedResult(
       }
       if (requestAttribute.equals(nodeAttribute)) {
         if (isOpCodeMatches(requestAttribute, nodeAttribute, opCode)) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "Incoming requestAttribute:" + requestAttribute
-                    + " matches with node:" + schedulerNode.getNodeID());
-          }
+          LOG.debug("Incoming requestAttribute:{} matches with node:{}",
+              requestAttribute, schedulerNode.getNodeID());
           found = true;
           return found;
         }
       }
     }
     if (!found) {
-      if (LOG.isDebugEnabled()) {
-        LOG.info("skip this node:" + schedulerNode.getNodeID()
-            + " for requestAttribute:" + requestAttribute);
-      }
+      LOG.debug("skip this node:{} for requestAttribute:{}",
+          schedulerNode.getNodeID(), requestAttribute);
       return false;
     }
     return true;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 1bf3618f3e..c22fdb05c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -426,10 +426,8 @@ public abstract void collectSchedulerApplications(
    */
   boolean assignContainerPreCheck(FSSchedulerNode node) {
     if (node.getReservedContainer() != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Assigning container failed on node '" + node.getNodeName()
-            + " because it has reserved containers.");
-      }
+      LOG.debug("Assigning container failed on node '{}' because it has"
+          + " reserved containers.", node.getNodeName());
       return false;
     } else if (!Resources.fitsIn(getResourceUsage(), getMaxShare())) {
       if (LOG.isDebugEnabled()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index dd64f6d9bf..9bd2a111cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -397,9 +397,8 @@ public synchronized void addApplication(ApplicationId applicationId,
     LOG.info("Accepted application " + applicationId + " from user: " + user
         + ", currently num of applications: " + applications.size());
     if (isAppRecovering) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(applicationId + " is recovering. Skip notifying APP_ACCEPTED");
-      }
+      LOG.debug("{} is recovering. Skip notifying APP_ACCEPTED",
+          applicationId);
     } else {
       rmContext.getDispatcher().getEventHandler()
          .handle(new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
@@ -429,10 +428,8 @@ public synchronized void addApplication(ApplicationId applicationId,
     LOG.info("Added Application Attempt " + appAttemptId
         + " to scheduler from user " + application.getUser());
     if (isAttemptRecovering) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(appAttemptId
-            + " is recovering. Skipping notifying ATTEMPT_ADDED");
-      }
+      LOG.debug("{} is recovering. Skipping notifying ATTEMPT_ADDED",
+          appAttemptId);
     } else {
       rmContext.getDispatcher().getEventHandler().handle(
           new RMAppAttemptEvent(appAttemptId,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
index d0677c37ca..5c9ce50550 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
@@ -396,10 +396,8 @@ public boolean precheckNode(SchedulerNode schedulerNode,
       SchedulingMode schedulingMode) {
     // We will only look at node label = nodeLabelToLookAt according to
     // schedulingMode and partition of node.
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("precheckNode is invoked for " + schedulerNode.getNodeID() + ","
-          + schedulingMode);
-    }
+    LOG.debug("precheckNode is invoked for {},{}", schedulerNode.getNodeID(),
+        schedulingMode);
     String nodePartitionToLookAt;
     if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
       nodePartitionToLookAt = schedulerNode.getPartition();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
index b686a9c14d..94e8933bb7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
@@ -223,10 +223,8 @@ public synchronized void addTask(Task task) {
     if (requests == null) {
       requests = new HashMap<String, ResourceRequest>();
       this.requests.put(schedulerKey, requests);
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Added priority=" + schedulerKey.getPriority()
-            + " application="+ applicationId);
-      }
+      LOG.debug("Added priority={} application={}", schedulerKey.getPriority(),
+          applicationId);
     }
     final Resource capability = requestSpec.get(schedulerKey);
@@ -242,10 +240,7 @@ public synchronized void addTask(Task task) {
     LOG.info("Added task " + task.getTaskId() + " to application " +
         applicationId + " at priority " + schedulerKey.getPriority());
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("addTask: application=" + applicationId
-          + " #asks=" + ask.size());
-    }
+    LOG.debug("addTask: application={} #asks={}", applicationId, ask.size());
     // Create resource requests
     for (String host : task.getHosts()) {
@@ -320,12 +315,12 @@ private synchronized void addResourceRequest(
   public synchronized List<Container> getResources() throws IOException {
     if(LOG.isDebugEnabled()) {
-      LOG.debug("getResources begin:" + " application=" + applicationId
-          + " #ask=" + ask.size());
+      LOG.debug("getResources begin: application={} #ask={}",
+          applicationId, ask.size());
       for (ResourceRequest request : ask) {
-        LOG.debug("getResources:" + " application=" + applicationId
-            + " ask-request=" + request);
+        LOG.debug("getResources: application={} ask-request={}",
+            applicationId, request);
       }
     }
@@ -346,8 +341,8 @@ public synchronized List<Container> getResources() throws IOException {
     ask.clear();
     if(LOG.isDebugEnabled()) {
-      LOG.debug("getResources() for " + applicationId + ":"
-          + " ask=" + ask.size() + " received=" + containers.size());
+      LOG.debug("getResources() for {}: ask={} received={}",
+          applicationId, ask.size(), containers.size());
     }
     return containers;
@@ -451,10 +446,8 @@ private void updateResourceRequests(Map<String, ResourceRequest> requests,
     updateResourceRequest(requests.get(ResourceRequest.ANY));
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("updateResourceDemands:" + " application=" + applicationId
-          + " #asks=" + ask.size());
-    }
+    LOG.debug("updateResourceDemands: application={} #asks={}",
+        applicationId, ask.size());
   }
   private void updateResourceRequest(ResourceRequest request) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
index 31122cae65..9ecb6f6023 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -107,9 +107,7 @@ class FlowScanner implements RegionScanner, Closeable {
           YarnConfiguration.APP_FINAL_VALUE_RETENTION_THRESHOLD,
           YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD);
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" batch size=" + batchSize);
-    }
+    LOG.debug(" batch size={}", batchSize);
   }
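
For reference, a minimal sketch of the SLF4J parameterized-logging idiom applied in the hunks above. The Example class below is hypothetical and illustrative only; it is not part of this patch.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Example {
  private static final Logger LOG = LoggerFactory.getLogger(Example.class);

  void assign(String queuePath, int containers) {
    // Old idiom: explicit guard plus string concatenation.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Assigning " + containers + " containers to " + queuePath);
    }
    // New idiom: parameterized message. The message is only formatted when
    // debug logging is enabled, so the explicit guard can be dropped.
    LOG.debug("Assigning {} containers to {}", containers, queuePath);
  }

  void fail(String queuePath, Exception e) {
    // Pass a Throwable as the last argument, without a placeholder for it,
    // so SLF4J also logs the stack trace.
    LOG.debug("Assignment failed for {}", queuePath, e);
  }
}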