From 90de1ff151ede83a6f963aaf2407d3eb6220ae40 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Mon, 13 Feb 2023 11:24:06 -0800 Subject: [PATCH] HADOOP-18206 Cleanup the commons-logging references and restrict its usage in future (#5315) --- LICENSE-binary | 1 - hadoop-common-project/hadoop-common/pom.xml | 5 -- .../java/org/apache/hadoop/io/IOUtils.java | 25 ------ .../java/org/apache/hadoop/log/LogLevel.java | 40 ++-------- .../hadoop/service/ServiceOperations.java | 6 +- .../org/apache/hadoop/util/LogAdapter.java | 78 ------------------- .../apache/hadoop/util/ReflectionUtils.java | 13 ++-- .../org/apache/hadoop/util/SignalLogger.java | 24 +++--- .../org/apache/hadoop/util/StringUtils.java | 28 ++----- .../TestViewFileSystemLocalFileSystem.java | 9 +-- ...leSystemOverloadSchemeLocalFileSystem.java | 8 +- .../hadoop/http/TestHttpServerWithSpnego.java | 4 - .../org/apache/hadoop/log/TestLog4Json.java | 4 - .../org/apache/hadoop/log/TestLogLevel.java | 6 +- .../apache/hadoop/test/GenericTestUtils.java | 71 ++--------------- .../org/apache/hadoop/util/TestJarFinder.java | 4 +- .../apache/hadoop/util/TestSignalLogger.java | 4 +- hadoop-common-project/hadoop-nfs/pom.xml | 5 -- .../hadoop-hdfs-client/pom.xml | 4 - hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 5 -- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 4 - .../federation/router/TestRouterRpc.java | 6 +- .../router/TestRouterRpcMultiDestination.java | 2 +- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 5 -- .../hdfs/server/common/MetricsLoggerTask.java | 27 ++----- .../hdfs/server/datanode/BlockReceiver.java | 11 ++- .../hdfs/server/datanode/BlockSender.java | 9 +-- .../hadoop/hdfs/server/datanode/DataNode.java | 14 ++-- .../server/datanode/DataSetLockManager.java | 7 +- .../hdfs/server/datanode/DataXceiver.java | 28 +++---- .../hdfs/server/namenode/FSNamesystem.java | 20 ++--- .../server/namenode/FsImageValidation.java | 16 +--- .../hadoop/hdfs/server/namenode/NameNode.java | 9 +-- .../hdfs/TestBlockTokenWrappingQOP.java | 6 +- .../org/apache/hadoop/hdfs/TestDFSRename.java | 2 +- .../hadoop/hdfs/TestStripedFileAppend.java | 6 +- .../datanode/TestDataNodeMetricsLogger.java | 10 +-- .../server/namenode/TestAuditLogAtDebug.java | 5 +- .../hdfs/server/namenode/TestAuditLogger.java | 8 +- .../namenode/TestAuditLoggerWithCommands.java | 2 +- .../hdfs/server/namenode/TestAuditLogs.java | 11 ++- .../hadoop/hdfs/server/namenode/TestFsck.java | 5 +- .../namenode/TestNameNodeMetricsLogger.java | 10 +-- .../ha/TestDNFencingWithReplication.java | 2 +- .../TestSimpleExponentialForecast.java | 10 +-- .../v2/TestSpeculativeExecOnCluster.java | 7 +- .../hadoop-mapreduce-client/pom.xml | 5 -- .../hadoop-mapreduce-examples/pom.xml | 4 - hadoop-project/pom.xml | 6 -- hadoop-tools/hadoop-archive-logs/pom.xml | 5 -- .../hadoop/fs/azure/PageBlobInputStream.java | 6 +- .../hadoop/fs/azure/PageBlobOutputStream.java | 7 +- .../hadoop/fs/azure/SelfRenewingLease.java | 6 +- .../fs/azure/SelfThrottlingIntercept.java | 7 +- .../hadoop/fs/azure/SendRequestIntercept.java | 4 - .../hadoop/fs/azure/SimpleKeyProvider.java | 7 +- .../azure/metrics/BandwidthGaugeUpdater.java | 6 +- .../ResponseReceivedMetricUpdater.java | 4 - .../ITestFileSystemOperationsWithThreads.java | 6 +- ...estNativeAzureFileSystemClientLogging.java | 12 +-- .../azure/NativeAzureFileSystemBaseTest.java | 6 +- .../TestShellDecryptionKeyProvider.java | 7 +- .../application/AppCatalogSolrClient.java | 6 +- .../application/YarnServiceClient.java | 6 +- .../linux/runtime/RuncContainerRuntime.java | 7 +- 
.../runc/ImageTagToManifestPlugin.java | 7 +- .../yarn/server/resourcemanager/RMInfo.java | 7 +- .../webapp/JAXBContextResolver.java | 7 +- ...citySchedulerMultiNodesWithPreemption.java | 8 +- .../pom.xml | 16 ++++ pom.xml | 7 ++ 71 files changed, 223 insertions(+), 532 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java diff --git a/LICENSE-binary b/LICENSE-binary index 432dc5d28f..60fdcb45e8 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -250,7 +250,6 @@ commons-codec:commons-codec:1.11 commons-collections:commons-collections:3.2.2 commons-daemon:commons-daemon:1.0.13 commons-io:commons-io:2.8.0 -commons-logging:commons-logging:1.1.3 commons-net:commons-net:3.9.0 de.ruedigermoeller:fst:2.50 io.grpc:grpc-api:1.26.0 diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 5f0302fd07..41efc183c3 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -180,11 +180,6 @@ jersey-server compile - - commons-logging - commons-logging - compile - log4j log4j diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java index f0a9b0b695..7d792f8dc7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java @@ -32,7 +32,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -246,30 +245,6 @@ public static void skipFully(InputStream in, long len) throws IOException { } } - /** - * Close the Closeable objects and ignore any {@link Throwable} or - * null pointers. Must only be used for cleanup in exception handlers. - * - * @param log the log to record problems to at debug level. Can be null. - * @param closeables the objects to close - * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)} - * instead - */ - @Deprecated - public static void cleanup(Log log, java.io.Closeable... closeables) { - for (java.io.Closeable c : closeables) { - if (c != null) { - try { - c.close(); - } catch(Throwable e) { - if (log != null && log.isDebugEnabled()) { - log.debug("Exception in closing " + c, e); - } - } - } - } - } - /** * Close the Closeable objects and ignore any {@link Throwable} or * null pointers. Must only be used for cleanup in exception handlers. 
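The removed `IOUtils.cleanup(Log, Closeable...)` has a drop-in slf4j replacement that this patch standardizes on. A minimal caller sketch (hypothetical class, assuming only hadoop-common and slf4j on the classpath):

```java
import java.io.FileInputStream;
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CleanupMigrationExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(CleanupMigrationExample.class);

  public static void main(String[] args) throws Exception {
    InputStream in = null;
    try {
      in = new FileInputStream(args[0]);
      // ... read from the stream ...
    } finally {
      // slf4j-based replacement for the removed cleanup(Log, Closeable...):
      // closes each argument, swallowing any Throwable and logging it
      // at debug level when a logger is supplied.
      IOUtils.cleanupWithLogger(LOG, in);
    }
  }
}
```

As with the removed overload, a null logger is tolerated and close failures are only surfaced at debug level, so the quiet-cleanup semantics carry over unchanged.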
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java index e2ad16fce2..6785e2f672 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java @@ -34,10 +34,6 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Jdk14Logger; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -51,6 +47,8 @@ import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; /** * Change log level in runtime. @@ -340,22 +338,14 @@ public void doGet(HttpServletRequest request, HttpServletResponse response out.println(MARKER + "Submitted Class Name: " + logName + "
"); - Log log = LogFactory.getLog(logName); + Logger log = Logger.getLogger(logName); out.println(MARKER + "Log Class: " + log.getClass().getName() +"
"); if (level != null) { out.println(MARKER + "Submitted Level: " + level + "
"); } - if (log instanceof Log4JLogger) { - process(((Log4JLogger)log).getLogger(), level, out); - } - else if (log instanceof Jdk14Logger) { - process(((Jdk14Logger)log).getLogger(), level, out); - } - else { - out.println("Sorry, " + log.getClass() + " not supported.
"); - } + process(log, level, out); } out.println(FORMS); @@ -371,14 +361,14 @@ else if (log instanceof Jdk14Logger) { + "" + ""; - private static void process(org.apache.log4j.Logger log, String level, + private static void process(Logger log, String level, PrintWriter out) throws IOException { if (level != null) { - if (!level.equalsIgnoreCase(org.apache.log4j.Level.toLevel(level) + if (!level.equalsIgnoreCase(Level.toLevel(level) .toString())) { out.println(MARKER + "Bad Level : " + level + "
"); } else { - log.setLevel(org.apache.log4j.Level.toLevel(level)); + log.setLevel(Level.toLevel(level)); out.println(MARKER + "Setting Level to " + level + " ...
"); } } @@ -386,21 +376,5 @@ private static void process(org.apache.log4j.Logger log, String level, + "Effective Level: " + log.getEffectiveLevel() + "
"); } - private static void process(java.util.logging.Logger log, String level, - PrintWriter out) throws IOException { - if (level != null) { - String levelToUpperCase = level.toUpperCase(); - try { - log.setLevel(java.util.logging.Level.parse(levelToUpperCase)); - } catch (IllegalArgumentException e) { - out.println(MARKER + "Bad Level : " + level + "
"); - } - out.println(MARKER + "Setting Level to " + level + " ...
"); - } - - java.util.logging.Level lev; - for(; (lev = log.getLevel()) == null; log = log.getParent()); - out.println(MARKER + "Effective Level: " + lev + "
"); - } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java index 726a83da25..57f91886f4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.slf4j.Logger; @@ -75,9 +74,10 @@ public static Exception stopQuietly(Service service) { * @param log the log to warn at * @param service a service; may be null * @return any exception that was caught; null if none was. - * @see ServiceOperations#stopQuietly(Service) + * @deprecated to be removed with 3.4.0. Use {@link #stopQuietly(Logger, Service)} instead. */ - public static Exception stopQuietly(Log log, Service service) { + @Deprecated + public static Exception stopQuietly(org.apache.commons.logging.Log log, Service service) { try { stop(service); } catch (Exception e) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java deleted file mode 100644 index b2bcbf57ef..0000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.util; - -import org.apache.commons.logging.Log; -import org.slf4j.Logger; - -class LogAdapter { - private Log LOG; - private Logger LOGGER; - - private LogAdapter(Log LOG) { - this.LOG = LOG; - } - - private LogAdapter(Logger LOGGER) { - this.LOGGER = LOGGER; - } - - /** - * @deprecated use {@link #create(Logger)} instead - */ - @Deprecated - public static LogAdapter create(Log LOG) { - return new LogAdapter(LOG); - } - - public static LogAdapter create(Logger LOGGER) { - return new LogAdapter(LOGGER); - } - - public void info(String msg) { - if (LOG != null) { - LOG.info(msg); - } else if (LOGGER != null) { - LOGGER.info(msg); - } - } - - public void warn(String msg, Throwable t) { - if (LOG != null) { - LOG.warn(msg, t); - } else if (LOGGER != null) { - LOGGER.warn(msg, t); - } - } - - public void debug(Throwable t) { - if (LOG != null) { - LOG.debug(t); - } else if (LOGGER != null) { - LOGGER.debug("", t); - } - } - - public void error(String msg) { - if (LOG != null) { - LOG.error(msg); - } else if (LOGGER != null) { - LOGGER.error(msg); - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java index 155c4f9c5f..26bcd4a41c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java @@ -36,7 +36,6 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; @@ -222,16 +221,18 @@ public synchronized static void printThreadInfo(PrintStream stream, } private static long previousLogTime = 0; - + /** * Log the current thread stacks at INFO level. * @param log the logger that logs the stack trace * @param title a descriptive title for the call stacks - * @param minInterval the minimum time from the last + * @param minInterval the minimum time from the last + * @deprecated to be removed with 3.4.0. Use {@link #logThreadInfo(Logger, String, long)} instead. */ - public static void logThreadInfo(Log log, - String title, - long minInterval) { + @Deprecated + public static void logThreadInfo(org.apache.commons.logging.Log log, + String title, + long minInterval) { boolean dumpStack = false; if (log.isInfoEnabled()) { synchronized (ReflectionUtils.class) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java index 605352443e..9f112906b2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java @@ -18,10 +18,10 @@ package org.apache.hadoop.util; +import org.slf4j.Logger; import sun.misc.Signal; import sun.misc.SignalHandler; -import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -42,11 +42,11 @@ public enum SignalLogger { * Our signal handler. 
*/ private static class Handler implements SignalHandler { - final private LogAdapter LOG; + final private Logger log; final private SignalHandler prevHandler; - Handler(String name, LogAdapter LOG) { - this.LOG = LOG; + Handler(String name, Logger log) { + this.log = log; prevHandler = Signal.handle(new Signal(name), this); } @@ -57,7 +57,7 @@ private static class Handler implements SignalHandler { */ @Override public void handle(Signal signal) { - LOG.error("RECEIVED SIGNAL " + signal.getNumber() + + log.error("RECEIVED SIGNAL " + signal.getNumber() + ": SIG" + signal.getName()); prevHandler.handle(signal); } @@ -66,13 +66,9 @@ public void handle(Signal signal) { /** * Register some signal handlers. * - * @param LOG The log4j logfile to use in the signal handlers. + * @param log The log4j logfile to use in the signal handlers. */ - public void register(final Log LOG) { - register(LogAdapter.create(LOG)); - } - - void register(final LogAdapter LOG) { + public void register(final Logger log) { if (registered) { throw new IllegalStateException("Can't re-install the signal handlers."); } @@ -83,15 +79,15 @@ void register(final LogAdapter LOG) { String separator = ""; for (String signalName : SIGNALS) { try { - new Handler(signalName, LOG); + new Handler(signalName, log); bld.append(separator) .append(signalName); separator = ", "; } catch (Exception e) { - LOG.debug(e); + log.debug("Error: ", e); } } bld.append("]"); - LOG.info(bld.toString()); + log.info(bld.toString()); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index b620ba7322..3debd36da7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -740,42 +740,26 @@ public static String toStartupShutdownString(String prefix, String[] msg) { * Print a log message for starting up and shutting down * @param clazz the class of the server * @param args arguments - * @param LOG the target log object + * @param log the target log object */ public static void startupShutdownMessage(Class clazz, String[] args, - final org.apache.commons.logging.Log LOG) { - startupShutdownMessage(clazz, args, LogAdapter.create(LOG)); - } - - /** - * Print a log message for starting up and shutting down - * @param clazz the class of the server - * @param args arguments - * @param LOG the target log object - */ - public static void startupShutdownMessage(Class clazz, String[] args, - final org.slf4j.Logger LOG) { - startupShutdownMessage(clazz, args, LogAdapter.create(LOG)); - } - - static void startupShutdownMessage(Class clazz, String[] args, - final LogAdapter LOG) { + final org.slf4j.Logger log) { final String hostname = NetUtils.getHostname(); final String classname = clazz.getSimpleName(); - LOG.info(createStartupShutdownMessage(classname, hostname, args)); + log.info(createStartupShutdownMessage(classname, hostname, args)); if (SystemUtils.IS_OS_UNIX) { try { - SignalLogger.INSTANCE.register(LOG); + SignalLogger.INSTANCE.register(log); } catch (Throwable t) { - LOG.warn("failed to register any UNIX signal loggers: ", t); + log.warn("failed to register any UNIX signal loggers: ", t); } } ShutdownHookManager.get().addShutdownHook( new Runnable() { @Override public void run() { - LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ + 
log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ "Shutting down " + classname + " at " + hostname})); LogManager.shutdown(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java index adc5db87e7..d88730b005 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java @@ -25,8 +25,6 @@ import java.io.IOException; import java.net.URI; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -39,7 +37,8 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; - +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * @@ -51,8 +50,8 @@ */ public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest { - private static final Log LOG = - LogFactory.getLog(TestViewFileSystemLocalFileSystem.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestViewFileSystemLocalFileSystem.class); @Override @Before diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java index ac7a1a6899..1e86a91c14 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java @@ -21,8 +21,6 @@ import java.net.URI; import java.net.URISyntaxException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -35,6 +33,8 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * @@ -43,8 +43,8 @@ */ public class TestViewFileSystemOverloadSchemeLocalFileSystem { private static final String FILE = "file"; - private static final Log LOG = - LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestViewFileSystemOverloadSchemeLocalFileSystem.class); private FileSystem fsTarget; private Configuration conf; private Path targetTestRoot; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java index ea7c8cd4e6..dfcd98801d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.http; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.minikdc.MiniKdc; @@ -53,8 +51,6 @@ */ public class TestHttpServerWithSpnego { - static final Log LOG = LogFactory.getLog(TestHttpServerWithSpnego.class); - private static final String SECRET_STR = "secret"; private static final String HTTP_USER = "HTTP"; private static final String PREFIX = "hadoop.http.authentication."; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java index d41a58782d..519f14b7fd 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java @@ -22,8 +22,6 @@ import com.fasterxml.jackson.databind.node.ContainerNode; import org.junit.Test; import static org.junit.Assert.*; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.util.Time; import org.apache.log4j.Appender; import org.apache.log4j.Category; @@ -44,8 +42,6 @@ public class TestLog4Json { - private static final Log LOG = LogFactory.getLog(TestLog4Json.class); - @Test public void testConstruction() throws Throwable { Log4Json l4j = new Log4Json(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java index 3af70e9554..636c03a16d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java @@ -22,9 +22,6 @@ import java.net.URI; import java.util.concurrent.Callable; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -70,8 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase { private final String logName = TestLogLevel.class.getName(); private String clientPrincipal; private String serverPrincipal; - private final Log testlog = LogFactory.getLog(logName); - private final Logger log = ((Log4JLogger)testlog).getLogger(); + private final Logger log = Logger.getLogger(logName); private final static String PRINCIPAL = "loglevel.principal"; private final static String KEYTAB = "loglevel.keytab"; private static final String PREFIX = "hadoop.http.authentication."; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 61d5938494..e54971e491 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -49,8 +49,6 @@ import java.util.regex.Pattern; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; @@ -117,29 +115,11 @@ public abstract class 
GenericTestUtils { public static final String ERROR_INVALID_ARGUMENT = "Total wait time should be greater than check interval time"; - /** - * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead - */ - @Deprecated - @SuppressWarnings("unchecked") - public static void disableLog(Log log) { - // We expect that commons-logging is a wrapper around Log4j. - disableLog((Log4JLogger) log); - } - @Deprecated public static Logger toLog4j(org.slf4j.Logger logger) { return LogManager.getLogger(logger.getName()); } - /** - * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead - */ - @Deprecated - public static void disableLog(Log4JLogger log) { - log.getLogger().setLevel(Level.OFF); - } - /** * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead */ @@ -152,45 +132,6 @@ public static void disableLog(org.slf4j.Logger logger) { disableLog(toLog4j(logger)); } - /** - * @deprecated - * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead - */ - @Deprecated - @SuppressWarnings("unchecked") - public static void setLogLevel(Log log, Level level) { - // We expect that commons-logging is a wrapper around Log4j. - setLogLevel((Log4JLogger) log, level); - } - - /** - * A helper used in log4j2 migration to accept legacy - * org.apache.commons.logging apis. - *
<p>
- * And will be removed after migration. - * - * @param log a log - * @param level level to be set - */ - @Deprecated - public static void setLogLevel(Log log, org.slf4j.event.Level level) { - setLogLevel(log, Level.toLevel(level.toString())); - } - - /** - * @deprecated - * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead - */ - @Deprecated - public static void setLogLevel(Log4JLogger log, Level level) { - log.getLogger().setLevel(level); - } - - /** - * @deprecated - * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead - */ - @Deprecated public static void setLogLevel(Logger logger, Level level) { logger.setLevel(level); } @@ -535,13 +476,15 @@ public static class LogCapturer { private WriterAppender appender; private Logger logger; - public static LogCapturer captureLogs(Log l) { - Logger logger = ((Log4JLogger)l).getLogger(); - return new LogCapturer(logger); + public static LogCapturer captureLogs(org.slf4j.Logger logger) { + if (logger.getName().equals("root")) { + return new LogCapturer(org.apache.log4j.Logger.getRootLogger()); + } + return new LogCapturer(toLog4j(logger)); } - public static LogCapturer captureLogs(org.slf4j.Logger logger) { - return new LogCapturer(toLog4j(logger)); + public static LogCapturer captureLogs(Logger logger) { + return new LogCapturer(logger); } private LogCapturer(Logger logger) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java index e58fb3bffd..109cb191b4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java @@ -18,10 +18,10 @@ package org.apache.hadoop.util; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; import org.junit.Test; +import org.slf4j.LoggerFactory; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -43,7 +43,7 @@ public class TestJarFinder { public void testJar() throws Exception { //picking a class that is for sure in a JAR in the classpath - String jar = JarFinder.getJar(LogFactory.class); + String jar = JarFinder.getJar(LoggerFactory.class); Assert.assertTrue(new File(jar).exists()); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java index b61cebc0a6..f6b272e1c6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java @@ -32,9 +32,9 @@ public class TestSignalLogger { @Test(timeout=60000) public void testInstall() throws Exception { Assume.assumeTrue(SystemUtils.IS_OS_UNIX); - SignalLogger.INSTANCE.register(LogAdapter.create(LOG)); + SignalLogger.INSTANCE.register(LOG); try { - SignalLogger.INSTANCE.register(LogAdapter.create(LOG)); + SignalLogger.INSTANCE.register(LOG); Assert.fail("expected IllegalStateException from double registration"); } catch (IllegalStateException e) { // fall through diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index 33d8b3710c..1da5a25ad1 100644 --- a/hadoop-common-project/hadoop-nfs/pom.xml +++ 
b/hadoop-common-project/hadoop-nfs/pom.xml @@ -63,11 +63,6 @@ mockito-core test - - commons-logging - commons-logging - compile - javax.servlet javax.servlet-api diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index 3337f7d408..b362e001ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -61,10 +61,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common provided - - commons-logging - commons-logging - log4j log4j diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index 24599a2b24..39bc6683fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -133,11 +133,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-io compile - - commons-logging - commons-logging - compile - commons-daemon commons-daemon diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index d4d5c1eb33..9fb868f79f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -49,10 +49,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common provided - - commons-logging - commons-logging - log4j log4j diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index 35b640d406..cd98b635b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -2054,7 +2054,7 @@ private DFSClient getFileDFSClient(final String path) { @Test public void testMkdirsWithCallerContext() throws IOException { GenericTestUtils.LogCapturer auditlog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); + GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // Current callerContext is null assertNull(CallerContext.getCurrent()); @@ -2092,7 +2092,7 @@ public void testSetBalancerBandwidth() throws Exception { @Test public void testAddClientIpPortToCallerContext() throws IOException { GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); + GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // 1. ClientIp and ClientPort are not set on the client. // Set client context. @@ -2127,7 +2127,7 @@ public void testAddClientIpPortToCallerContext() throws IOException { @Test public void testAddClientIdAndCallIdToCallerContext() throws IOException { GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); + GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // 1. ClientId and ClientCallId are not set on the client. // Set client context. 
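The `captureLogs` call sites updated above rely on the reworked `GenericTestUtils.LogCapturer`, which now accepts either an slf4j logger (resolved by name to the underlying log4j logger) or a raw log4j logger such as `FSNamesystem.AUDIT_LOG`. A self-contained sketch of the capture pattern, using a hypothetical logger name and assuming the log4j 1.x binding used by the Hadoop test tree:

```java
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCapturerExample {
  // Hypothetical logger name standing in for the ".audit" loggers in the tests.
  private static final Logger LOG = LoggerFactory.getLogger("example.audit");

  public static void main(String[] args) {
    // Attaches a WriterAppender to the named log4j logger underneath.
    GenericTestUtils.LogCapturer capturer =
        GenericTestUtils.LogCapturer.captureLogs(LOG);
    try {
      LOG.info("allowed=true cmd=mkdirs src=/tmp/t");
      // Assert on the captured text, as the router and audit-log tests do.
      if (!capturer.getOutput().contains("cmd=mkdirs")) {
        throw new AssertionError("expected audit line was not captured");
      }
    } finally {
      capturer.stopCapturing();
    }
  }
}
```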
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 6ade57326e..30a2bc1102 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -440,7 +440,7 @@ public void testSubclusterDown() throws Exception { @Test public void testCallerContextWithMultiDestinations() throws IOException { GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); + GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // set client context CallerContext.setCurrent( diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index ab8934f936..5f156499ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -117,11 +117,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-io compile - - commons-logging - commons-logging - compile - commons-daemon commons-daemon diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java index 051e2d2c52..66685f6cc1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java @@ -31,8 +31,6 @@ import javax.management.MalformedObjectNameException; import javax.management.ObjectName; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.impl.Log4JLogger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.metrics2.util.MBeans; @@ -58,13 +56,12 @@ public class MetricsLoggerTask implements Runnable { } } - private Log metricsLog; + private org.apache.log4j.Logger metricsLog; private String nodeName; private short maxLogLineLength; - public MetricsLoggerTask(Log metricsLog, String nodeName, - short maxLogLineLength) { - this.metricsLog = metricsLog; + public MetricsLoggerTask(String metricsLog, String nodeName, short maxLogLineLength) { + this.metricsLog = org.apache.log4j.Logger.getLogger(metricsLog); this.nodeName = nodeName; this.maxLogLineLength = maxLogLineLength; } @@ -118,13 +115,8 @@ private String trimLine(String valueStr) { .substring(0, maxLogLineLength) + "..."); } - private static boolean hasAppenders(Log logger) { - if (!(logger instanceof Log4JLogger)) { - // Don't bother trying to determine the presence of appenders. - return true; - } - Log4JLogger log4JLogger = ((Log4JLogger) logger); - return log4JLogger.getLogger().getAllAppenders().hasMoreElements(); + private static boolean hasAppenders(org.apache.log4j.Logger logger) { + return logger.getAllAppenders().hasMoreElements(); } /** @@ -150,13 +142,8 @@ private static Set getFilteredAttributes(MBeanInfo mBeanInfo) { * Make the metrics logger async and add all pre-existing appenders to the * async appender. 
*/ - public static void makeMetricsLoggerAsync(Log metricsLog) { - if (!(metricsLog instanceof Log4JLogger)) { - LOG.warn("Metrics logging will not be async since " - + "the logger is not log4j"); - return; - } - org.apache.log4j.Logger logger = ((Log4JLogger) metricsLog).getLogger(); + public static void makeMetricsLoggerAsync(String metricsLog) { + org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(metricsLog); logger.setAdditivity(false); // Don't pollute actual logs with metrics dump @SuppressWarnings("unchecked") diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 77e0be6c7b..1c077098a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -35,7 +35,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.zip.Checksum; -import org.apache.commons.logging.Log; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSOutputSummer; import org.apache.hadoop.fs.StorageType; @@ -73,7 +72,7 @@ **/ class BlockReceiver implements Closeable { public static final Logger LOG = DataNode.LOG; - static final Log ClientTraceLog = DataNode.ClientTraceLog; + static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG; @VisibleForTesting static long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024; @@ -1398,7 +1397,7 @@ public void close() { public void run() { datanode.metrics.incrDataNodePacketResponderCount(); boolean lastPacketInBlock = false; - final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; + final long startTime = CLIENT_TRACE_LOG.isInfoEnabled() ? System.nanoTime() : 0; while (isRunning() && !lastPacketInBlock) { long totalAckTimeNanos = 0; boolean isInterrupted = false; @@ -1553,7 +1552,7 @@ private void finalizeBlock(long startTime) throws IOException { // Hold a volume reference to finalize block. try (ReplicaHandler handler = BlockReceiver.this.claimReplicaHandler()) { BlockReceiver.this.close(); - endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; + endTime = CLIENT_TRACE_LOG.isInfoEnabled() ? 
System.nanoTime() : 0; block.setNumBytes(replicaInfo.getNumBytes()); datanode.data.finalizeBlock(block, dirSyncOnFinalize); } @@ -1564,11 +1563,11 @@ private void finalizeBlock(long startTime) throws IOException { datanode.closeBlock(block, null, replicaInfo.getStorageUuid(), replicaInfo.isOnTransientStorage()); - if (ClientTraceLog.isInfoEnabled() && isClient) { + if (CLIENT_TRACE_LOG.isInfoEnabled() && isClient) { long offset = 0; DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block .getBlockPoolId()); - ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr, + CLIENT_TRACE_LOG.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr, myAddr, replicaInfo.getVolume(), block.getNumBytes(), "HDFS_WRITE", clientname, offset, dnR.getDatanodeUuid(), block, endTime - startTime)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index 5c4212fea5..b2bc09bc39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -32,7 +32,6 @@ import java.util.Arrays; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.hdfs.DFSUtilClient; @@ -103,7 +102,7 @@ */ class BlockSender implements java.io.Closeable { static final Logger LOG = DataNode.LOG; - static final Log ClientTraceLog = DataNode.ClientTraceLog; + static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG; private static final boolean is32Bit = System.getProperty("sun.arch.data.model").equals("32"); /** @@ -784,7 +783,7 @@ private long doSendBlock(DataOutputStream out, OutputStream baseStream, // Trigger readahead of beginning of file if configured. manageOsCache(); - final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0; + final long startTime = CLIENT_TRACE_LOG.isDebugEnabled() ? 
System.nanoTime() : 0; try { int maxChunksPerPacket; int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN; @@ -831,9 +830,9 @@ private long doSendBlock(DataOutputStream out, OutputStream baseStream, sentEntireByteRange = true; } } finally { - if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) { + if ((clientTraceFmt != null) && CLIENT_TRACE_LOG.isDebugEnabled()) { final long endTime = System.nanoTime(); - ClientTraceLog.debug(String.format(clientTraceFmt, totalRead, + CLIENT_TRACE_LOG.debug(String.format(clientTraceFmt, totalRead, initialOffset, endTime - startTime)); } close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index d8149b6f3e..ce56688598 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -140,8 +140,6 @@ import javax.management.ObjectName; import javax.net.SocketFactory; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -318,9 +316,9 @@ public class DataNode extends ReconfigurableBase ", srvID: %s" + // DatanodeRegistration ", blockid: %s" + // block id ", duration(ns): %s"; // duration time - - static final Log ClientTraceLog = - LogFactory.getLog(DataNode.class.getName() + ".clienttrace"); + + static final Logger CLIENT_TRACE_LOG = + LoggerFactory.getLogger(DataNode.class.getName() + ".clienttrace"); private static final String USAGE = "Usage: hdfs datanode [-regular | -rollback | -rollingupgrade rollback" + @@ -360,7 +358,7 @@ public class DataNode extends ReconfigurableBase FS_GETSPACEUSED_JITTER_KEY, FS_GETSPACEUSED_CLASSNAME)); - public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog"); + public static final String METRICS_LOG_NAME = "DataNodeMetricsLog"; private static final String DATANODE_HTRACE_PREFIX = "datanode.htrace."; private final FileIoProvider fileIoProvider; @@ -4060,12 +4058,12 @@ protected void startMetricsLogger() { return; } - MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG); + MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME); // Schedule the periodic logging. 
metricsLoggerTimer = new ScheduledThreadPoolExecutor(1); metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); - metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG, + metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME, "DataNode", (short) 0), metricsLoggerPeriodSec, metricsLoggerPeriodSec, TimeUnit.SECONDS); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java index eac1259fb8..913c289cfe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdfs.server.datanode; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock; @@ -29,11 +27,14 @@ import java.util.Stack; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * Class for maintain a set of lock for fsDataSetImpl. */ public class DataSetLockManager implements DataNodeLockManager { - public static final Log LOG = LogFactory.getLog(DataSetLockManager.class); + public static final Logger LOG = LoggerFactory.getLogger(DataSetLockManager.class); private final HashMap threadCountMap = new HashMap<>(); private final LockMap lockMap = new LockMap(); private boolean isFair = true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 7704102301..3cf4bde3d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -21,7 +21,6 @@ import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.protobuf.ByteString; import javax.crypto.SecretKey; -import org.apache.commons.logging.Log; import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtilClient; @@ -105,7 +104,7 @@ */ class DataXceiver extends Receiver implements Runnable { public static final Logger LOG = DataNode.LOG; - static final Log ClientTraceLog = DataNode.ClientTraceLog; + static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG; private Peer peer; private final String remoteAddress; // address of remote side @@ -426,10 +425,10 @@ public void requestShortCircuitFds(final ExtendedBlock blk, registeredSlotId); datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId); } - if (ClientTraceLog.isInfoEnabled()) { + if (CLIENT_TRACE_LOG.isInfoEnabled()) { DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk .getBlockPoolId()); - BlockSender.ClientTraceLog.info(String.format( + BlockSender.CLIENT_TRACE_LOG.info(String.format( "src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," + " blockid: %s, srvID: %s, success: %b", blk.getBlockId(), dnR.getDatanodeUuid(), success)); @@ -466,8 +465,8 @@ public void 
releaseShortCircuitFds(SlotId slotId) throws IOException { bld.build().writeDelimitedTo(socketOut); success = true; } finally { - if (ClientTraceLog.isInfoEnabled()) { - BlockSender.ClientTraceLog.info(String.format( + if (CLIENT_TRACE_LOG.isInfoEnabled()) { + BlockSender.CLIENT_TRACE_LOG.info(String.format( "src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," + " shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b", slotId.getShmId().getHi(), slotId.getShmId().getLo(), @@ -526,9 +525,9 @@ public void requestShortCircuitShm(String clientName) throws IOException { sendShmSuccessResponse(sock, shmInfo); success = true; } finally { - if (ClientTraceLog.isInfoEnabled()) { + if (CLIENT_TRACE_LOG.isInfoEnabled()) { if (success) { - BlockSender.ClientTraceLog.info(String.format( + BlockSender.CLIENT_TRACE_LOG.info(String.format( "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " + "op: REQUEST_SHORT_CIRCUIT_SHM," + " shmId: %016x%016x, srvID: %s, success: true", @@ -536,7 +535,7 @@ public void requestShortCircuitShm(String clientName) throws IOException { shmInfo.getShmId().getLo(), datanode.getDatanodeUuid())); } else { - BlockSender.ClientTraceLog.info(String.format( + BlockSender.CLIENT_TRACE_LOG.info(String.format( "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " + "op: REQUEST_SHORT_CIRCUIT_SHM, " + "shmId: n/a, srvID: %s, success: false", @@ -587,13 +586,10 @@ public void readBlock(final ExtendedBlock block, BlockSender blockSender = null; DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block.getBlockPoolId()); - final String clientTraceFmt = - clientName.length() > 0 && ClientTraceLog.isInfoEnabled() - ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, - "", "%d", "HDFS_READ", clientName, "%d", - dnR.getDatanodeUuid(), block, "%d") - : dnR + " Served block " + block + " to " + - remoteAddress; + final String clientTraceFmt = clientName.length() > 0 && CLIENT_TRACE_LOG.isInfoEnabled() ? 
+ String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "", "%d", "HDFS_READ", + clientName, "%d", dnR.getDatanodeUuid(), block, "%d") : + dnR + " Served block " + block + " to " + remoteAddress; try { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 5e4f0d520a..95b855e8af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -185,9 +185,6 @@ import javax.management.ObjectName; import javax.management.StandardMBean; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -405,7 +402,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, private final String contextFieldSeparator; boolean isAuditEnabled() { - return (!isDefaultAuditLogger || auditLog.isInfoEnabled()) + return (!isDefaultAuditLogger || AUDIT_LOG.isInfoEnabled()) && !auditLoggers.isEmpty(); } @@ -491,8 +488,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ * perm=<permissions (optional)> * */ - public static final Log auditLog = LogFactory.getLog( - FSNamesystem.class.getName() + ".audit"); + public static final Logger AUDIT_LOG = Logger.getLogger(FSNamesystem.class.getName() + ".audit"); private final int maxCorruptFileBlocksReturn; private final boolean isPermissionEnabled; @@ -8783,8 +8779,8 @@ public void logAuditEvent(boolean succeeded, String userName, FileStatus status, CallerContext callerContext, UserGroupInformation ugi, DelegationTokenSecretManager dtSecretManager) { - if (auditLog.isDebugEnabled() || - (auditLog.isInfoEnabled() && !debugCmdSet.contains(cmd))) { + if (AUDIT_LOG.isDebugEnabled() || + (AUDIT_LOG.isInfoEnabled() && !debugCmdSet.contains(cmd))) { final StringBuilder sb = STRING_BUILDER.get(); src = escapeJava(src); dst = escapeJava(dst); @@ -8853,16 +8849,12 @@ public void logAuditEvent(boolean succeeded, String userName, } public void logAuditMessage(String message) { - auditLog.info(message); + AUDIT_LOG.info(message); } } private static void enableAsyncAuditLog(Configuration conf) { - if (!(auditLog instanceof Log4JLogger)) { - LOG.warn("Log4j is required to enable async auditlog"); - return; - } - Logger logger = ((Log4JLogger)auditLog).getLogger(); + Logger logger = AUDIT_LOG; @SuppressWarnings("unchecked") List appenders = Collections.list(logger.getAllAppenders()); // failsafe against trying to async it more than once diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java index 3325222267..067ea5e9a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java @@ -17,9 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; 
-import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -125,15 +122,10 @@ static String memoryInfo() { } static void setLogLevel(Class clazz, Level level) { - final Log log = LogFactory.getLog(clazz); - if (log instanceof Log4JLogger) { - final org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger(); - logger.setLevel(level); - LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", - clazz.getName(), level, logger.getEffectiveLevel()); - } else { - LOG.warn("Failed setLogLevel {} to {}", clazz.getName(), level); - } + final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz); + logger.setLevel(level); + LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level, + logger.getEffectiveLevel()); } static String toCommaSeparatedNumber(long n) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 36ea5c2f64..ddd9fd8087 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -25,8 +25,6 @@ import org.apache.hadoop.util.Preconditions; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -427,8 +425,7 @@ public long getProtocolVersion(String protocol, private static final String NAMENODE_HTRACE_PREFIX = "namenode.htrace."; - public static final Log MetricsLog = - LogFactory.getLog("NameNodeMetricsLog"); + public static final String METRICS_LOG_NAME = "NameNodeMetricsLog"; protected FSNamesystem namesystem; protected final NamenodeRole role; @@ -949,13 +946,13 @@ protected void startMetricsLogger(Configuration conf) { return; } - MetricsLoggerTask.makeMetricsLoggerAsync(MetricsLog); + MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME); // Schedule the periodic logging. 
metricsLoggerTimer = new ScheduledThreadPoolExecutor(1); metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy( false); - metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(MetricsLog, + metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME, "NameNode", (short) 128), metricsLoggerPeriodSec, metricsLoggerPeriodSec, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java index c224c4916b..5a9dcb2465 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java @@ -21,8 +21,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; @@ -41,6 +39,8 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import static org.junit.Assert.*; @@ -52,7 +52,7 @@ */ @RunWith(Parameterized.class) public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase { - public static final Log LOG = LogFactory.getLog(TestPermission.class); + public static final Logger LOG = LoggerFactory.getLogger(TestPermission.class); private HdfsConfiguration conf; private MiniDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java index 427dc43d3b..b16f0237b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java @@ -190,7 +190,7 @@ public void testRename2Options() throws Exception { Path path = new Path("/test"); dfs.mkdirs(path); GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog); + GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); dfs.rename(path, new Path("/dir1"), new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH}); String auditOut = auditLog.getOutput(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java index a00f67ac3b..848cedd989 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -29,6 +27,8 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.slf4j.event.Level; import java.io.IOException; @@ -47,7 +47,7 @@ * Tests append on 
erasure coded file. */ public class TestStripedFileAppend { - public static final Log LOG = LogFactory.getLog(TestStripedFileAppend.class); + public static final Logger LOG = LoggerFactory.getLogger(TestStripedFileAppend.class); static { DFSTestUtil.setNameNodeLogLevel(Level.TRACE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java index 8443c36835..29619cc4e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java @@ -32,8 +32,6 @@ import java.util.concurrent.TimeoutException; import java.util.regex.Pattern; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.impl.Log4JLogger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -135,8 +133,7 @@ public void testDisableMetricsLogger() throws IOException { public void testMetricsLoggerIsAsync() throws IOException { startDNForTest(true); assertNotNull(dn); - org.apache.log4j.Logger logger = ((Log4JLogger) DataNode.METRICS_LOG) - .getLogger(); + org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME); @SuppressWarnings("unchecked") List<Appender> appenders = Collections.list(logger.getAllAppenders()); assertTrue(appenders.get(0) instanceof AsyncAppender); @@ -156,7 +153,7 @@ public void testMetricsLogOutput() throws IOException, InterruptedException, assertNotNull(dn); final PatternMatchingAppender appender = new PatternMatchingAppender( "^.*FakeMetric.*$"); - addAppender(DataNode.METRICS_LOG, appender); + addAppender(org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME), appender); // Ensure that the supplied pattern was matched.
GenericTestUtils.waitFor(new Supplier<Boolean>() { @@ -169,8 +166,7 @@ public Boolean get() { dn.shutdown(); } - private void addAppender(Log log, Appender appender) { - org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger(); + private void addAppender(org.apache.log4j.Logger logger, Appender appender) { @SuppressWarnings("unchecked") List<Appender> appenders = Collections.list(logger.getAllAppenders()); ((AsyncAppender) appenders.get(0)).addAppender(appender); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java index c86b04cb77..dad4fa306c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java @@ -26,10 +26,11 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.FSNamesystemAuditLogger; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.log4j.Level; + import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; -import org.slf4j.event.Level; import java.net.Inet4Address; import java.util.Arrays; @@ -61,7 +62,7 @@ private DefaultAuditLogger makeSpyLogger( Joiner.on(",").join(debugCommands.get())); } logger.initialize(conf); - GenericTestUtils.setLogLevel(FSNamesystem.auditLog, level); + GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, level); return spy(logger); }
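The audit-log tests in this patch all migrate the same way: the capture idiom is unchanged, only the logger handle moves from FSNamesystem.auditLog to FSNamesystem.AUDIT_LOG. A minimal sketch of that idiom, assuming the LogCapturer API used in these diffs (the helper class and the asserted substring are illustrative only):

    import static org.junit.Assert.assertTrue;

    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.test.GenericTestUtils.LogCapturer;

    public class AuditLogCaptureSketch {   // hypothetical test helper
      public void checkAudited() throws Exception {
        // Attach a capturing appender to the audit logger, run the
        // operation under test, then assert on the captured output.
        LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
        try {
          // ... perform a FileSystem operation that should be audited ...
          assertTrue(auditLog.getOutput().contains("cmd=rename"));
        } finally {
          auditLog.stopCapturing();
        }
      }
    }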
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index 1cc950723b..c00649a9db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -258,7 +258,7 @@ public void testAuditLoggerWithCallContext() throws IOException { conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) { - LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog); + LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); cluster.waitClusterUp(); final FileSystem fs = cluster.getFileSystem(); final long time = System.currentTimeMillis(); @@ -568,7 +568,7 @@ public void testAuditLogWithRemotePort() throws Exception { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster1 = new MiniDFSCluster.Builder(conf).build(); try { - LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.auditLog); + LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); cluster1.waitClusterUp(); FileSystem fs = cluster1.getFileSystem(); long time = System.currentTimeMillis(); @@ -585,7 +585,7 @@ conf.setBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY, true); MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build(); try { - LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.auditLog); + LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); cluster2.waitClusterUp(); FileSystem fs = cluster2.getFileSystem(); long time = System.currentTimeMillis(); @@ -606,7 +606,7 @@ public void testCallerContextCharacterEscape() throws IOException { conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) { - LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog); + LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); cluster.waitClusterUp(); final FileSystem fs = cluster.getFileSystem(); final long time = System.currentTimeMillis(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java index 4d379b177a..d34d6ca737 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java @@ -93,7 +93,7 @@ public void initialize() throws Exception { user2 = UserGroupInformation.createUserForTesting("theEngineer", new String[]{"hadoop"}); - auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog); + auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); proto = cluster.getNameNodeRpc(); fileSys = DFSTestUtil.getFileSystemAs(user1, conf); fs2 = DFSTestUtil.getFileSystemAs(user2, conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java index 2832782caf..54fcc17cdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java @@ -32,7 +32,6 @@ import java.util.List; import java.util.regex.Pattern; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -130,7 +129,7 @@ public void setupCluster() throws Exception { util.createFiles(fs, fileName); // make sure the appender is what it's supposed to be - Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + Logger logger = FSNamesystem.AUDIT_LOG; @SuppressWarnings("unchecked") List<Appender> appenders = Collections.list(logger.getAllAppenders()); assertEquals(1, appenders.size()); @@ -283,7 +282,7 @@ public void testAuditCharacterEscape() throws Exception { /** Sets up log4j logger for auditlogs */ private void setupAuditLogs() throws IOException { - Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + Logger logger = FSNamesystem.AUDIT_LOG; // enable logging now that the test is ready to run logger.setLevel(Level.INFO); } @@ -303,7 +302,7 @@ private void configureAuditLogs() throws IOException { disableAuditLog(); PatternLayout layout = new PatternLayout("%m%n"); RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile); - Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + Logger logger = FSNamesystem.AUDIT_LOG; logger.addAppender(appender); } @@ -319,7 +318,7 @@ private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe) disableAuditLog(); // Close the appenders and force all logs to be flushed - Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + Logger logger = FSNamesystem.AUDIT_LOG; Enumeration<?>
appenders = logger.getAllAppenders(); while (appenders.hasMoreElements()) { Appender appender = (Appender)appenders.nextElement(); @@ -352,7 +351,7 @@ private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Patte disableAuditLog(); // Close the appenders and force all logs to be flushed - Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + Logger logger = FSNamesystem.AUDIT_LOG; Enumeration<?> appenders = logger.getAllAppenders(); while (appenders.hasMoreElements()) { Appender appender = (Appender)appenders.nextElement(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 60442c6bd0..0f8ca10174 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -61,7 +61,6 @@ import java.util.regex.Pattern; import java.util.function.Supplier; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSDataOutputStream; @@ -252,7 +251,7 @@ private void setupAuditLogs() throws IOException { if (file.exists()) { file.delete(); } - Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + Logger logger = FSNamesystem.AUDIT_LOG; logger.removeAllAppenders(); logger.setLevel(Level.INFO); PatternLayout layout = new PatternLayout("%m%n"); @@ -291,7 +290,7 @@ private void verifyAuditLogs() throws IOException { if (reader != null) { reader.close(); } - Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + Logger logger = FSNamesystem.AUDIT_LOG; if (logger != null) { logger.removeAllAppenders(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java index 9b5e9884c5..7548adbd5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode; import java.util.function.Supplier; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.impl.Log4JLogger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -70,8 +68,7 @@ public void testDisableMetricsLogger() throws IOException { @Test public void testMetricsLoggerIsAsync() throws IOException { makeNameNode(true); - org.apache.log4j.Logger logger = - ((Log4JLogger) NameNode.MetricsLog).getLogger(); + org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME); @SuppressWarnings("unchecked") List<Appender> appenders = Collections.list(logger.getAllAppenders()); assertTrue(appenders.get(0) instanceof AsyncAppender); @@ -90,7 +87,7 @@ public void testMetricsLogOutput() makeNameNode(true); // Log metrics early and often.
final PatternMatchingAppender appender = new PatternMatchingAppender("^.*FakeMetric42.*$"); - addAppender(NameNode.MetricsLog, appender); + addAppender(org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME), appender); // Ensure that the supplied pattern was matched. GenericTestUtils.waitFor(new Supplier<Boolean>() { @@ -118,8 +115,7 @@ private NameNode makeNameNode(boolean enableMetricsLogging) return new TestNameNode(conf); } - private void addAppender(Log log, Appender appender) { - org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger(); + private void addAppender(org.apache.log4j.Logger logger, Appender appender) { @SuppressWarnings("unchecked") List<Appender> appenders = Collections.list(logger.getAllAppenders()); ((AsyncAppender) appenders.get(0)).addAppender(appender); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java index 3f86d4521e..a243255cda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java @@ -45,7 +45,7 @@ */ public class TestDNFencingWithReplication { static { - GenericTestUtils.setLogLevel(FSNamesystem.auditLog, Level.WARN); + GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, org.apache.log4j.Level.WARN); GenericTestUtils.setLogLevel(Server.LOG, Level.ERROR); GenericTestUtils.setLogLevel(RetryInvocationHandler.LOG, Level.ERROR); }
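With DataNode.METRICS_LOG and NameNode.MetricsLog removed, the metrics-logger tests above can no longer unwrap a Log4JLogger; they look the log4j logger up by the new *_LOG_NAME constants instead. A sketch of that lookup under the same assumptions as the tests (log4j 1.x on the classpath, an AsyncAppender already attached; the helper class is hypothetical):

    import java.util.Collections;
    import java.util.List;

    import org.apache.log4j.Appender;
    import org.apache.log4j.AsyncAppender;
    import org.apache.log4j.Logger;

    final class MetricsLogTestSupport {   // hypothetical helper
      static void addAppender(String metricsLogName, Appender appender) {
        // Resolve the async metrics logger by name rather than by casting
        // the (now removed) commons-logging wrapper.
        Logger logger = Logger.getLogger(metricsLogName);
        @SuppressWarnings("unchecked")
        List<Appender> appenders = Collections.list(logger.getAllAppenders());
        ((AsyncAppender) appenders.get(0)).addAppender(appender);
      }
    }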
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java index 5324e0cff7..4bd2314130 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java @@ -18,18 +18,18 @@ package org.apache.hadoop.mapreduce.v2.app.speculate.forecast; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.yarn.util.ControlledClock; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.yarn.util.ControlledClock; /** * Testing the statistical model of simple exponential estimator. */ public class TestSimpleExponentialForecast { - private static final Log LOG = - LogFactory.getLog(TestSimpleExponentialForecast.class); + private static final Logger LOG = LoggerFactory.getLogger(TestSimpleExponentialForecast.class); private static long clockTicks = 1000L; private ControlledClock clock; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java index 02e4358a07..36dcce4dfa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java @@ -28,8 +28,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -66,6 +64,8 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Test speculation on Mini Cluster. @@ -73,8 +73,7 @@ @Ignore @RunWith(Parameterized.class) public class TestSpeculativeExecOnCluster { - private static final Log LOG = LogFactory - .getLog(TestSpeculativeExecOnCluster.class); + private static final Logger LOG = LoggerFactory.getLogger(TestSpeculativeExecOnCluster.class); private static final int NODE_MANAGERS_COUNT = 2; private static final boolean ENABLE_SPECULATIVE_MAP = true; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index fdcab2f2ff..b9e181d1ae 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -132,11 +132,6 @@ <groupId>io.netty</groupId> <artifactId>netty-all</artifactId> </dependency> -<dependency> -<groupId>commons-logging</groupId> -<artifactId>commons-logging</artifactId> -<scope>provided</scope> -</dependency> <dependency> <groupId>org.apache.hadoop.thirdparty</groupId> <artifactId>hadoop-shaded-guava</artifactId> diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index e5426a08b3..fac2ac0561 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -38,10 +38,6 @@ <groupId>commons-cli</groupId> <artifactId>commons-cli</artifactId> </dependency> -<dependency> -<groupId>commons-logging</groupId> -<artifactId>commons-logging</artifactId> -</dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-mapreduce-client-jobclient</artifactId> diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 73dc8eb290..5887d5b7fa 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -121,7 +121,6 @@ <commons-io.version>2.11.0</commons-io.version> <commons-lang3.version>3.12.0</commons-lang3.version> <commons-logging.version>1.1.3</commons-logging.version> -<commons-logging-api.version>1.1</commons-logging-api.version> <commons-math3.version>3.6.1</commons-math3.version> <commons-net.version>3.9.0</commons-net.version> <commons-text.version>1.10.0</commons-text.version> @@ -1094,11 +1093,6 @@ </dependency> -<dependency> -<groupId>commons-logging</groupId> -<artifactId>commons-logging-api</artifactId> -<version>${commons-logging-api.version}</version> -</dependency> <dependency> <groupId>log4j</groupId> <artifactId>log4j</artifactId> diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml index f6154484ad..bd64495dca 100644 --- a/hadoop-tools/hadoop-archive-logs/pom.xml +++ b/hadoop-tools/hadoop-archive-logs/pom.xml @@ -101,11 +101,6 @@ <artifactId>commons-io</artifactId> <scope>provided</scope> </dependency> -<dependency> -<groupId>commons-logging</groupId> -<artifactId>commons-logging</artifactId> -<scope>provided</scope> -</dependency> <dependency> <groupId>commons-cli</groupId> <artifactId>commons-cli</artifactId>
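The Java side of this patch is deliberately mechanical, and the pom entries above disappear because nothing is left compiling against commons-logging. The before/after shape of every logger migration in this patch, with MyService standing in as a hypothetical example class:

    // Before: commons-logging, which the enforcer rule at the end of this
    // patch now bans.
    //   import org.apache.commons.logging.Log;
    //   import org.apache.commons.logging.LogFactory;
    //   private static final Log LOG = LogFactory.getLog(MyService.class);

    // After: the slf4j facade.
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class MyService {   // hypothetical class
      private static final Logger LOG = LoggerFactory.getLogger(MyService.class);

      void report(String resource, Exception e) {
        // slf4j parameterized messages defer string construction until the
        // level is known to be enabled, so no isDebugEnabled() guard is needed.
        LOG.debug("Failed to close {}", resource, e);
      }
    }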
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java index 40bf6f4ae1..0d8936582e 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java @@ -30,8 +30,6 @@ import java.io.InputStream; import java.util.ArrayList; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper; @@ -39,6 +37,8 @@ import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.BlobRequestOptions; import com.microsoft.azure.storage.blob.PageRange; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * An input stream that reads file data from a page blob stored @@ -46,7 +46,7 @@ */ final class PageBlobInputStream extends InputStream { - private static final Log LOG = LogFactory.getLog(PageBlobInputStream.class); + private static final Logger LOG = LoggerFactory.getLogger(PageBlobInputStream.class); // The blob we're reading from. private final CloudPageBlobWrapper blob; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java index 3c98405ff9..f77a6b8051 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java @@ -39,8 +39,6 @@ import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper; import org.apache.commons.lang3.exception.ExceptionUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.classification.VisibleForTesting; @@ -48,7 +46,8 @@ import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.BlobRequestOptions; import com.microsoft.azure.storage.blob.CloudPageBlob; - +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * An output stream that write file data to a page blob stored using ASV's @@ -120,7 +119,7 @@ final class PageBlobOutputStream extends OutputStream implements Syncable, Strea // Whether the stream has been closed. private boolean closed = false; - public static final Log LOG = LogFactory.getLog(AzureNativeFileSystemStore.class); + public static final Logger LOG = LoggerFactory.getLogger(AzureNativeFileSystemStore.class); // Set the minimum page blob file size to 128MB, which is >> the default // block size of 32MB.
This default block size is often used as the diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java index 01ab06cb02..989c3ba6d9 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java @@ -18,8 +18,6 @@ package org.apache.hadoop.fs.azure; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper; import org.apache.hadoop.classification.VisibleForTesting; @@ -27,6 +25,8 @@ import com.microsoft.azure.storage.AccessCondition; import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.CloudBlob; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.concurrent.atomic.AtomicInteger; @@ -58,7 +58,7 @@ public class SelfRenewingLease { // Time to wait to renew lease in milliseconds public static final int LEASE_RENEWAL_PERIOD = 40000; - private static final Log LOG = LogFactory.getLog(SelfRenewingLease.class); + private static final Logger LOG = LoggerFactory.getLogger(SelfRenewingLease.class); // Used to allocate thread serial numbers in thread name private static AtomicInteger threadNumber = new AtomicInteger(0); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java index a9e3df907f..ad71016a74 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java @@ -21,8 +21,6 @@ import java.net.HttpURLConnection; import java.util.Date; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import com.microsoft.azure.storage.OperationContext; @@ -30,6 +28,8 @@ import com.microsoft.azure.storage.ResponseReceivedEvent; import com.microsoft.azure.storage.SendingRequestEvent; import com.microsoft.azure.storage.StorageEvent; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /* * Self throttling is implemented by hooking into send & response callbacks @@ -63,8 +63,7 @@ */ @InterfaceAudience.Private public class SelfThrottlingIntercept { - public static final Log LOG = LogFactory - .getLog(SelfThrottlingIntercept.class); + public static final Logger LOG = LoggerFactory.getLogger(SelfThrottlingIntercept.class); private final float readFactor; private final float writeFactor; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java index 924ecd30b3..98f9de7bff 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java @@ -21,8 +21,6 @@ import java.net.HttpURLConnection; import java.security.InvalidKeyException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import 
com.microsoft.azure.storage.Constants.HeaderConstants; @@ -40,8 +38,6 @@ @InterfaceAudience.Private public final class SendRequestIntercept extends StorageEvent<SendingRequestEvent> { - public static final Log LOG = LogFactory.getLog(SendRequestIntercept.class); - private static final String ALLOW_ALL_REQUEST_PRECONDITIONS = "*"; /** diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java index 5596f7e67c..64811e13ee 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java @@ -20,8 +20,9 @@ import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.ProviderUtils; @@ -32,7 +33,7 @@ */ @InterfaceAudience.Private public class SimpleKeyProvider implements KeyProvider { - private static final Log LOG = LogFactory.getLog(SimpleKeyProvider.class); + private static final Logger LOG = LoggerFactory.getLogger(SimpleKeyProvider.class); protected static final String KEY_ACCOUNT_KEY_PREFIX = "fs.azure.account.key."; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java index 699fde7dee..d3fe4aefeb 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java @@ -21,8 +21,6 @@ import java.util.ArrayList; import java.util.Date; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; /** @@ -31,9 +29,7 @@ */ @InterfaceAudience.Private public final class BandwidthGaugeUpdater { - public static final Log LOG = LogFactory - .getLog(BandwidthGaugeUpdater.class); - + public static final String THREAD_NAME = "AzureNativeFilesystemStore-UploadBandwidthUpdater"; private static final int DEFAULT_WINDOW_SIZE_MS = 1000; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java index de503bf190..4c61f6817c 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java @@ -20,8 +20,6 @@ import java.net.HttpURLConnection; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import com.microsoft.azure.storage.Constants.HeaderConstants; @@ -38,8 +36,6 @@ @InterfaceAudience.Private public final class ResponseReceivedMetricUpdater extends StorageEvent<ResponseReceivedEvent> { - public static final Log LOG = LogFactory.getLog(ResponseReceivedMetricUpdater.class); - private final AzureFileSystemInstrumentation instrumentation; private final BandwidthGaugeUpdater blockUploadGaugeUpdater; diff --git
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java index 4389fda393..1e7330fbd0 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java @@ -28,7 +28,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -41,6 +40,8 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations. @@ -70,8 +71,7 @@ public void setUp() throws Exception { fs.initialize(uri, conf); // Capture logs - logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger - .getRootLogger())); + logs = LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME)); } /* diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java index f73a7638a3..476d7a4f01 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java @@ -21,12 +21,13 @@ import java.net.URI; import java.util.StringTokenizer; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.log4j.Logger; + import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Test to validate Azure storage client side logging. Tests works only when @@ -94,8 +95,8 @@ private void performWASBOperations() throws Exception { @Test public void testLoggingEnabled() throws Exception { - LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger - .getRootLogger())); + LogCapturer logs = + LogCapturer.captureLogs(LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME)); // Update configuration based on the Test. updateFileSystemConfiguration(true); @@ -116,8 +117,7 @@ protected String getLogOutput(LogCapturer logs) { @Test public void testLoggingDisabled() throws Exception { - LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger - .getRootLogger())); + LogCapturer logs = LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME)); // Update configuration based on the Test. 
updateFileSystemConfiguration(false); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java index 8ac36c299b..9a75ef5533 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java @@ -30,8 +30,6 @@ import java.util.EnumSet; import java.util.TimeZone; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -49,6 +47,8 @@ import com.microsoft.azure.storage.AccessCondition; import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.CloudBlob; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.readStringFromFile; import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.writeStringToFile; @@ -73,7 +73,7 @@ public abstract class NativeAzureFileSystemBaseTest private static final EnumSet<XAttrSetFlag> CREATE_FLAG = EnumSet.of(XAttrSetFlag.CREATE); private static final EnumSet<XAttrSetFlag> REPLACE_FLAG = EnumSet.of(XAttrSetFlag.REPLACE); - public static final Log LOG = LogFactory.getLog(NativeAzureFileSystemBaseTest.class); + public static final Logger LOG = LoggerFactory.getLogger(NativeAzureFileSystemBaseTest.class); protected NativeAzureFileSystem fs; @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java index b8df38eed0..1f02741172 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java @@ -23,10 +23,10 @@ import org.junit.Assert; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.KeyProviderException; @@ -39,8 +39,7 @@ * */ public class TestShellDecryptionKeyProvider { - public static final Log LOG = LogFactory - .getLog(TestShellDecryptionKeyProvider.class); + public static final Logger LOG = LoggerFactory.getLogger(TestShellDecryptionKeyProvider.class); private static final File TEST_ROOT_DIR = new File(System.getProperty( "test.build.data", "/tmp"), "TestShellDecryptionKeyProvider");
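Capturing the root logger follows the same pattern: instead of wrapping org.apache.log4j.Logger.getRootLogger() in a Log4JLogger, the Azure tests above address the slf4j root logger by its well-known name. A sketch, assuming the same LogCapturer API (the surrounding class is hypothetical):

    import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class RootLogCaptureSketch {   // hypothetical test helper
      public void run() {
        // Logger.ROOT_LOGGER_NAME ("ROOT") resolves to the root logger of
        // whatever logging backend sits behind the slf4j facade.
        LogCapturer logs = LogCapturer.captureLogs(
            LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
        try {
          // ... exercise the code whose logging is under test ...
        } finally {
          logs.stopCapturing();
        }
      }
    }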
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java index b1515a5b6f..ac8dbbac61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java @@ -34,8 +34,6 @@ import org.apache.hadoop.yarn.appcatalog.utils.RandomWord; import org.apache.hadoop.yarn.appcatalog.utils.WordLengthException; import org.apache.hadoop.yarn.service.api.records.Service; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.solr.client.solrj.SolrClient; import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.SolrQuery.ORDER; @@ -48,13 +46,15 @@ import com.fasterxml.jackson.databind.DeserializationFeature; import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Driver class for accessing Solr. */ public class AppCatalogSolrClient { - private static final Log LOG = LogFactory.getLog(AppCatalogSolrClient.class); + private static final Logger LOG = LoggerFactory.getLogger(AppCatalogSolrClient.class); private static String urlString; public AppCatalogSolrClient() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java index 79838a9e4f..185b1c8dde 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java @@ -21,8 +21,6 @@ import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.appcatalog.model.AppEntry; @@ -39,13 +37,15 @@ import com.sun.jersey.api.client.UniformInterfaceException; import com.sun.jersey.api.client.config.ClientConfig; import com.sun.jersey.api.client.config.DefaultClientConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Driver class for calling YARN Resource Manager REST API.
*/ public class YarnServiceClient { - private static final Log LOG = LogFactory.getLog(YarnServiceClient.class); + private static final Logger LOG = LoggerFactory.getLogger(YarnServiceClient.class); private static Configuration conf = new Configuration(); private static ClientConfig getClientConfig() { ClientConfig config = new DefaultClientConfig(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java index e43f7788d7..2c327c04eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java @@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -81,6 +79,8 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_LAYER_MOUNTS_TO_KEEP; @@ -136,8 +136,7 @@ @InterfaceStability.Unstable public class RuncContainerRuntime extends OCIContainerRuntime { - private static final Log LOG = LogFactory.getLog( - RuncContainerRuntime.class); + private static final Logger LOG = LoggerFactory.getLogger(RuncContainerRuntime.class); @InterfaceAudience.Private private static final String RUNTIME_TYPE = "RUNC"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java index fbec3ee6f5..457939c9a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java @@ -20,8 +20,6 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.runc; import org.apache.commons.io.IOUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceStability; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -45,6 +43,8 @@ import java.util.concurrent.atomic.AtomicReference; import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_CACHE_REFRESH_INTERVAL; import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TOPLEVEL_DIR; @@ -78,8 +78,7 @@ public class ImageTagToManifestPlugin extends AbstractService private String manifestDir; private String localImageTagToHashFile; - private static final Log LOG = LogFactory.getLog( - ImageTagToManifestPlugin.class); + private static final Logger LOG = LoggerFactory.getLogger(ImageTagToManifestPlugin.class); private static final int SHA256_HASH_LENGTH = 64; private static final String ALPHA_NUMERIC = "[a-zA-Z0-9]+"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java index 84d49cd25b..f58dd911fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.yarn.server.resourcemanager; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -27,11 +25,14 @@ import javax.management.ObjectName; import javax.management.StandardMBean; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * JMX bean for RM info. 
*/ public class RMInfo implements RMInfoMXBean { - private static final Log LOG = LogFactory.getLog(RMNMInfo.class); + private static final Logger LOG = LoggerFactory.getLogger(RMNMInfo.class); private ResourceManager resourceManager; private ObjectName rmStatusBeanName; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java index 24428b3bb8..2277c36beb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java @@ -22,6 +22,8 @@ import com.google.inject.Singleton; import com.sun.jersey.api.json.JSONConfiguration; import com.sun.jersey.api.json.JSONJAXBContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.*; @@ -29,8 +31,6 @@ import javax.ws.rs.ext.Provider; import javax.xml.bind.JAXBContext; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo; @@ -41,8 +41,7 @@ @Provider public class JAXBContextResolver implements ContextResolver<JAXBContext> { - private static final Log LOG = - LogFactory.getLog(JAXBContextResolver.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(JAXBContextResolver.class.getName()); private final Map<Class, JAXBContext> typesContextMap; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java index dc15857bfa..c895b58b29 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerMultiNodesWithPreemption.java @@ -22,8 +22,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Resource; @@ -49,14 +47,16 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; public class TestCapacitySchedulerMultiNodesWithPreemption { -
private static final Log LOG = LogFactory - .getLog(TestCapacitySchedulerMultiNodesWithPreemption.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestCapacitySchedulerMultiNodesWithPreemption.class); private CapacitySchedulerConfiguration conf; private static final String POLICY_CLASS_NAME = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement." diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml index a323eaff85..6f2fce097d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml @@ -104,6 +104,10 @@ <groupId>com.sun.jersey</groupId> <artifactId>jersey-json</artifactId> </exclusion> +<exclusion> +<groupId>commons-logging</groupId> +<artifactId>commons-logging</artifactId> +</exclusion> </exclusions> @@ -336,6 +340,10 @@ <groupId>com.sun.jersey</groupId> <artifactId>jersey-json</artifactId> </exclusion> +<exclusion> +<groupId>commons-logging</groupId> +<artifactId>commons-logging</artifactId> +</exclusion> </exclusions> @@ -351,6 +359,10 @@ <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs-client</artifactId> </exclusion> +<exclusion> +<groupId>commons-logging</groupId> +<artifactId>commons-logging</artifactId> +</exclusion> </exclusions> @@ -367,6 +379,10 @@ <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs-client</artifactId> </exclusion> +<exclusion> +<groupId>commons-logging</groupId> +<artifactId>commons-logging</artifactId> +</exclusion> </exclusions> diff --git a/pom.xml b/pom.xml index 4e2887f1df..51a80c7634 100644 --- a/pom.xml +++ b/pom.xml @@ -288,6 +288,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x <bannedImport>org.glassfish.grizzly.**</bannedImport> </bannedImports> </restrictImports> +<restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports"> +<includeTestCode>true</includeTestCode> +<reason>Use slf4j based Logger</reason> +<bannedImports> +<bannedImport>org.apache.commons.logging.**</bannedImport> +</bannedImports> +</restrictImports>
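The enforcer addition above is what keeps the cleanup from regressing: with org.apache.commons.logging.** on the banned list and includeTestCode set to true, any module that reintroduces the import fails the banned-illegal-imports execution of maven-enforcer at build time, in test code as well as main code. A hypothetical offender and its compliant rewrite:

    // This import would now fail the build:
    //   import org.apache.commons.logging.LogFactory;

    // The compliant equivalent:
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class SomeNewClass {   // hypothetical class
      private static final Logger LOG = LoggerFactory.getLogger(SomeNewClass.class);
    }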