HADOOP-18206 Cleanup the commons-logging references and restrict its usage in future (#5315)
commit 90de1ff151
parent 30f560554d
@@ -250,7 +250,6 @@ commons-codec:commons-codec:1.11
 commons-collections:commons-collections:3.2.2
 commons-daemon:commons-daemon:1.0.13
 commons-io:commons-io:2.8.0
-commons-logging:commons-logging:1.1.3
 commons-net:commons-net:3.9.0
 de.ruedigermoeller:fst:2.50
 io.grpc:grpc-api:1.26.0
@@ -180,11 +180,6 @@
       <artifactId>jersey-server</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
@@ -32,7 +32,6 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -246,30 +245,6 @@ public static void skipFully(InputStream in, long len) throws IOException {
     }
   }
 
-  /**
-   * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
-   * null pointers. Must only be used for cleanup in exception handlers.
-   *
-   * @param log the log to record problems to at debug level. Can be null.
-   * @param closeables the objects to close
-   * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
-   * instead
-   */
-  @Deprecated
-  public static void cleanup(Log log, java.io.Closeable... closeables) {
-    for (java.io.Closeable c : closeables) {
-      if (c != null) {
-        try {
-          c.close();
-        } catch(Throwable e) {
-          if (log != null && log.isDebugEnabled()) {
-            log.debug("Exception in closing " + c, e);
-          }
-        }
-      }
-    }
-  }
-
   /**
    * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
    * null pointers. Must only be used for cleanup in exception handlers.
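
For callers migrating off the removed cleanup(Log, Closeable...) overload, the replacement named in the javadoc above is cleanupWithLogger. A minimal sketch, assuming a hypothetical caller class Example:

```java
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical caller, shown only to illustrate the migration.
class Example {
  private static final Logger LOG = LoggerFactory.getLogger(Example.class);

  void closeQuietly(java.io.Closeable stream) {
    // Before: IOUtils.cleanup(commonsLog, stream);
    // After: same ignore-all-Throwables semantics, but with an slf4j logger.
    IOUtils.cleanupWithLogger(LOG, stream);
  }
}
```
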
@@ -34,10 +34,6 @@
 
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Jdk14Logger;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -51,6 +47,8 @@
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 
 /**
  * Change log level in runtime.
@@ -340,22 +338,14 @@ public void doGet(HttpServletRequest request, HttpServletResponse response
       out.println(MARKER
           + "Submitted Class Name: <b>" + logName + "</b><br />");
 
-      Log log = LogFactory.getLog(logName);
+      Logger log = Logger.getLogger(logName);
       out.println(MARKER
           + "Log Class: <b>" + log.getClass().getName() +"</b><br />");
       if (level != null) {
         out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />");
       }
 
-      if (log instanceof Log4JLogger) {
-        process(((Log4JLogger)log).getLogger(), level, out);
-      }
-      else if (log instanceof Jdk14Logger) {
-        process(((Jdk14Logger)log).getLogger(), level, out);
-      }
-      else {
-        out.println("Sorry, " + log.getClass() + " not supported.<br />");
-      }
+      process(log, level, out);
     }
 
     out.println(FORMS);
@@ -371,14 +361,14 @@ else if (log instanceof Jdk14Logger) {
         + "<input type='submit' value='Set Log Level' />"
         + "</form>";
 
-    private static void process(org.apache.log4j.Logger log, String level,
+    private static void process(Logger log, String level,
         PrintWriter out) throws IOException {
       if (level != null) {
-        if (!level.equalsIgnoreCase(org.apache.log4j.Level.toLevel(level)
+        if (!level.equalsIgnoreCase(Level.toLevel(level)
             .toString())) {
           out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
         } else {
-          log.setLevel(org.apache.log4j.Level.toLevel(level));
+          log.setLevel(Level.toLevel(level));
          out.println(MARKER + "Setting Level to " + level + " ...<br />");
        }
      }
@@ -386,21 +376,5 @@ private static void process(org.apache.log4j.Logger log, String level,
           + "Effective Level: <b>" + log.getEffectiveLevel() + "</b><br />");
     }
 
-    private static void process(java.util.logging.Logger log, String level,
-        PrintWriter out) throws IOException {
-      if (level != null) {
-        String levelToUpperCase = level.toUpperCase();
-        try {
-          log.setLevel(java.util.logging.Level.parse(levelToUpperCase));
-        } catch (IllegalArgumentException e) {
-          out.println(MARKER + "Bad Level : <b>" + level + "</b><br />");
-        }
-        out.println(MARKER + "Setting Level to " + level + " ...<br />");
-      }
-
-      java.util.logging.Level lev;
-      for(; (lev = log.getLevel()) == null; log = log.getParent());
-      out.println(MARKER + "Effective Level: <b>" + lev + "</b><br />");
-    }
   }
 }
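
The servlet now resolves loggers directly through log4j instead of probing the commons-logging wrapper type (Log4JLogger vs. Jdk14Logger). A standalone sketch of the equivalent level-setting logic; the class and method names here are illustrative:

```java
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

// Illustrative sketch of the simplified LogLevel flow above.
class SetLevelSketch {
  static String setLevel(String logName, String levelName) {
    Logger log = Logger.getLogger(logName);  // direct log4j lookup
    // Level.toLevel() falls back to DEBUG for unparseable input,
    // so round-trip the string to detect a bad level name.
    if (!levelName.equalsIgnoreCase(Level.toLevel(levelName).toString())) {
      return "Bad Level : " + levelName;
    }
    log.setLevel(Level.toLevel(levelName));
    return "Effective Level: " + log.getEffectiveLevel();
  }
}
```
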
@@ -21,7 +21,6 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.slf4j.Logger;
@@ -75,9 +74,10 @@ public static Exception stopQuietly(Service service) {
    * @param log the log to warn at
    * @param service a service; may be null
    * @return any exception that was caught; null if none was.
    * @see ServiceOperations#stopQuietly(Service)
+   * @deprecated to be removed with 3.4.0. Use {@link #stopQuietly(Logger, Service)} instead.
    */
-  public static Exception stopQuietly(Log log, Service service) {
+  @Deprecated
+  public static Exception stopQuietly(org.apache.commons.logging.Log log, Service service) {
     try {
       stop(service);
     } catch (Exception e) {
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.util;
-
-import org.apache.commons.logging.Log;
-import org.slf4j.Logger;
-
-class LogAdapter {
-  private Log LOG;
-  private Logger LOGGER;
-
-  private LogAdapter(Log LOG) {
-    this.LOG = LOG;
-  }
-
-  private LogAdapter(Logger LOGGER) {
-    this.LOGGER = LOGGER;
-  }
-
-  /**
-   * @deprecated use {@link #create(Logger)} instead
-   */
-  @Deprecated
-  public static LogAdapter create(Log LOG) {
-    return new LogAdapter(LOG);
-  }
-
-  public static LogAdapter create(Logger LOGGER) {
-    return new LogAdapter(LOGGER);
-  }
-
-  public void info(String msg) {
-    if (LOG != null) {
-      LOG.info(msg);
-    } else if (LOGGER != null) {
-      LOGGER.info(msg);
-    }
-  }
-
-  public void warn(String msg, Throwable t) {
-    if (LOG != null) {
-      LOG.warn(msg, t);
-    } else if (LOGGER != null) {
-      LOGGER.warn(msg, t);
-    }
-  }
-
-  public void debug(Throwable t) {
-    if (LOG != null) {
-      LOG.debug(t);
-    } else if (LOGGER != null) {
-      LOGGER.debug("", t);
-    }
-  }
-
-  public void error(String msg) {
-    if (LOG != null) {
-      LOG.error(msg);
-    } else if (LOGGER != null) {
-      LOGGER.error(msg);
-    }
-  }
-}
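
With LogAdapter deleted, call sites pass an org.slf4j.Logger directly instead of wrapping either logging API. A minimal sketch of the resulting pattern, assuming a hypothetical caller class:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical call site: previously LogAdapter.create(LOG), now plain slf4j.
class MyService {
  private static final Logger LOG = LoggerFactory.getLogger(MyService.class);

  void reportFailure(Throwable t) {
    // LogAdapter.debug(Throwable) took no message; slf4j requires one,
    // matching the log.debug("Error: ", e) rewrite in SignalLogger below.
    LOG.debug("Error: ", t);
  }
}
```
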
@@ -36,7 +36,6 @@
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
@@ -222,16 +221,18 @@ public synchronized static void printThreadInfo(PrintStream stream,
   }
 
   private static long previousLogTime = 0;
 
 
   /**
    * Log the current thread stacks at INFO level.
    * @param log the logger that logs the stack trace
    * @param title a descriptive title for the call stacks
-   * @param minInterval the minimum time from the last 
+   * @param minInterval the minimum time from the last
+   * @deprecated to be removed with 3.4.0. Use {@link #logThreadInfo(Logger, String, long)} instead.
    */
-  public static void logThreadInfo(Log log,
-      String title,
-      long minInterval) {
+  @Deprecated
+  public static void logThreadInfo(org.apache.commons.logging.Log log,
+      String title,
+      long minInterval) {
     boolean dumpStack = false;
     if (log.isInfoEnabled()) {
       synchronized (ReflectionUtils.class) {
@@ -18,10 +18,10 @@
 
 package org.apache.hadoop.util;
 
+import org.slf4j.Logger;
 import sun.misc.Signal;
 import sun.misc.SignalHandler;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -42,11 +42,11 @@ public enum SignalLogger {
    * Our signal handler.
    */
   private static class Handler implements SignalHandler {
-    final private LogAdapter LOG;
+    final private Logger log;
     final private SignalHandler prevHandler;
 
-    Handler(String name, LogAdapter LOG) {
-      this.LOG = LOG;
+    Handler(String name, Logger log) {
+      this.log = log;
       prevHandler = Signal.handle(new Signal(name), this);
     }
 
@@ -57,7 +57,7 @@ private static class Handler implements SignalHandler {
    */
   @Override
   public void handle(Signal signal) {
-    LOG.error("RECEIVED SIGNAL " + signal.getNumber() +
+    log.error("RECEIVED SIGNAL " + signal.getNumber() +
        ": SIG" + signal.getName());
     prevHandler.handle(signal);
   }
@@ -66,13 +66,9 @@ public void handle(Signal signal) {
   /**
    * Register some signal handlers.
    *
-   * @param LOG The log4j logfile to use in the signal handlers.
+   * @param log The log4j logfile to use in the signal handlers.
    */
-  public void register(final Log LOG) {
-    register(LogAdapter.create(LOG));
-  }
-
-  void register(final LogAdapter LOG) {
+  public void register(final Logger log) {
     if (registered) {
       throw new IllegalStateException("Can't re-install the signal handlers.");
     }
@@ -83,15 +79,15 @@ void register(final LogAdapter LOG) {
     String separator = "";
     for (String signalName : SIGNALS) {
       try {
-        new Handler(signalName, LOG);
+        new Handler(signalName, log);
         bld.append(separator)
            .append(signalName);
        separator = ", ";
       } catch (Exception e) {
-        LOG.debug(e);
+        log.debug("Error: ", e);
       }
     }
     bld.append("]");
-    LOG.info(bld.toString());
+    log.info(bld.toString());
   }
 }
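
SignalLogger.register now takes an slf4j Logger rather than a commons-logging Log or the internal LogAdapter. A usage sketch with an illustrative daemon class (a second register() call throws IllegalStateException, as TestSignalLogger later in this diff exercises):

```java
import org.apache.hadoop.util.SignalLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative daemon entry point.
class Daemon {
  private static final Logger LOG = LoggerFactory.getLogger(Daemon.class);

  static void installSignalHandlers() {
    // Registers handlers for the signals in SignalLogger.SIGNALS.
    SignalLogger.INSTANCE.register(LOG);
  }
}
```
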
@@ -740,42 +740,26 @@ public static String toStartupShutdownString(String prefix, String[] msg) {
    * Print a log message for starting up and shutting down
    * @param clazz the class of the server
    * @param args arguments
-   * @param LOG the target log object
+   * @param log the target log object
    */
   public static void startupShutdownMessage(Class<?> clazz, String[] args,
-      final org.apache.commons.logging.Log LOG) {
-    startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
-  }
-
-  /**
-   * Print a log message for starting up and shutting down
-   * @param clazz the class of the server
-   * @param args arguments
-   * @param LOG the target log object
-   */
-  public static void startupShutdownMessage(Class<?> clazz, String[] args,
-      final org.slf4j.Logger LOG) {
-    startupShutdownMessage(clazz, args, LogAdapter.create(LOG));
-  }
-
-  static void startupShutdownMessage(Class<?> clazz, String[] args,
-      final LogAdapter LOG) {
+      final org.slf4j.Logger log) {
     final String hostname = NetUtils.getHostname();
     final String classname = clazz.getSimpleName();
-    LOG.info(createStartupShutdownMessage(classname, hostname, args));
+    log.info(createStartupShutdownMessage(classname, hostname, args));
 
     if (SystemUtils.IS_OS_UNIX) {
       try {
-        SignalLogger.INSTANCE.register(LOG);
+        SignalLogger.INSTANCE.register(log);
       } catch (Throwable t) {
-        LOG.warn("failed to register any UNIX signal loggers: ", t);
+        log.warn("failed to register any UNIX signal loggers: ", t);
       }
     }
     ShutdownHookManager.get().addShutdownHook(
       new Runnable() {
         @Override
         public void run() {
-          LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
+          log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
             "Shutting down " + classname + " at " + hostname}));
           LogManager.shutdown();
         }
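
After this change, StringUtils.startupShutdownMessage keeps only the slf4j overload. A usage sketch with an illustrative server class:

```java
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative server main(); only the org.slf4j.Logger overload remains.
class MyServer {
  private static final Logger LOG = LoggerFactory.getLogger(MyServer.class);

  public static void main(String[] args) {
    StringUtils.startupShutdownMessage(MyServer.class, args, LOG);
  }
}
```
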
@@ -25,8 +25,6 @@
 import java.io.IOException;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -39,7 +37,8 @@
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -51,8 +50,8 @@
  */
 
 public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest {
-  private static final Log LOG =
-      LogFactory.getLog(TestViewFileSystemLocalFileSystem.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestViewFileSystemLocalFileSystem.class);
 
   @Override
   @Before
@@ -21,8 +21,6 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -35,6 +33,8 @@
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  *
@@ -43,8 +43,8 @@
  */
 public class TestViewFileSystemOverloadSchemeLocalFileSystem {
   private static final String FILE = "file";
-  private static final Log LOG =
-      LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestViewFileSystemOverloadSchemeLocalFileSystem.class);
   private FileSystem fsTarget;
   private Configuration conf;
   private Path targetTestRoot;
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.http;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.minikdc.MiniKdc;
@@ -53,8 +51,6 @@
  */
 public class TestHttpServerWithSpnego {
 
-  static final Log LOG = LogFactory.getLog(TestHttpServerWithSpnego.class);
-
   private static final String SECRET_STR = "secret";
   private static final String HTTP_USER = "HTTP";
   private static final String PREFIX = "hadoop.http.authentication.";
@@ -22,8 +22,6 @@
 import com.fasterxml.jackson.databind.node.ContainerNode;
 import org.junit.Test;
 import static org.junit.Assert.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Appender;
 import org.apache.log4j.Category;
@@ -44,8 +42,6 @@
 
 public class TestLog4Json {
 
-  private static final Log LOG = LogFactory.getLog(TestLog4Json.class);
-
   @Test
   public void testConstruction() throws Throwable {
     Log4Json l4j = new Log4Json();
@@ -22,9 +22,6 @@
 import java.net.URI;
 import java.util.concurrent.Callable;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -70,8 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
   private final String logName = TestLogLevel.class.getName();
   private String clientPrincipal;
   private String serverPrincipal;
-  private final Log testlog = LogFactory.getLog(logName);
-  private final Logger log = ((Log4JLogger)testlog).getLogger();
+  private final Logger log = Logger.getLogger(logName);
   private final static String PRINCIPAL = "loglevel.principal";
   private final static String KEYTAB = "loglevel.keytab";
   private static final String PREFIX = "hadoop.http.authentication.";
@@ -49,8 +49,6 @@
 import java.util.regex.Pattern;
 
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -117,29 +115,11 @@ public abstract class GenericTestUtils {
   public static final String ERROR_INVALID_ARGUMENT =
       "Total wait time should be greater than check interval time";
 
-  /**
-   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
-   */
-  @Deprecated
-  @SuppressWarnings("unchecked")
-  public static void disableLog(Log log) {
-    // We expect that commons-logging is a wrapper around Log4j.
-    disableLog((Log4JLogger) log);
-  }
-
   @Deprecated
   public static Logger toLog4j(org.slf4j.Logger logger) {
     return LogManager.getLogger(logger.getName());
   }
 
-  /**
-   * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
-   */
-  @Deprecated
-  public static void disableLog(Log4JLogger log) {
-    log.getLogger().setLevel(Level.OFF);
-  }
-
   /**
    * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
    */
@@ -152,45 +132,6 @@ public static void disableLog(org.slf4j.Logger logger) {
     disableLog(toLog4j(logger));
   }
 
-  /**
-   * @deprecated
-   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
-   */
-  @Deprecated
-  @SuppressWarnings("unchecked")
-  public static void setLogLevel(Log log, Level level) {
-    // We expect that commons-logging is a wrapper around Log4j.
-    setLogLevel((Log4JLogger) log, level);
-  }
-
-  /**
-   * A helper used in log4j2 migration to accept legacy
-   * org.apache.commons.logging apis.
-   * <p>
-   * And will be removed after migration.
-   *
-   * @param log a log
-   * @param level level to be set
-   */
-  @Deprecated
-  public static void setLogLevel(Log log, org.slf4j.event.Level level) {
-    setLogLevel(log, Level.toLevel(level.toString()));
-  }
-
-  /**
-   * @deprecated
-   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
-   */
-  @Deprecated
-  public static void setLogLevel(Log4JLogger log, Level level) {
-    log.getLogger().setLevel(level);
-  }
-
-  /**
-   * @deprecated
-   * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
-   */
-  @Deprecated
   public static void setLogLevel(Logger logger, Level level) {
     logger.setLevel(level);
   }
@@ -535,13 +476,15 @@ public static class LogCapturer {
     private WriterAppender appender;
     private Logger logger;
 
-    public static LogCapturer captureLogs(Log l) {
-      Logger logger = ((Log4JLogger)l).getLogger();
-      return new LogCapturer(logger);
+    public static LogCapturer captureLogs(org.slf4j.Logger logger) {
+      if (logger.getName().equals("root")) {
+        return new LogCapturer(org.apache.log4j.Logger.getRootLogger());
+      }
+      return new LogCapturer(toLog4j(logger));
     }
 
-    public static LogCapturer captureLogs(org.slf4j.Logger logger) {
-      return new LogCapturer(toLog4j(logger));
+    public static LogCapturer captureLogs(Logger logger) {
+      return new LogCapturer(logger);
     }
 
     private LogCapturer(Logger logger) {
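
LogCapturer.captureLogs now accepts an slf4j Logger (bridged internally to its log4j counterpart via toLog4j, with the root logger special-cased) as well as a raw log4j Logger. A test-side usage sketch, with an illustrative logger name:

```java
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.LoggerFactory;

// Illustrative capture of log output in a test.
class CaptureSketch {
  static String runAndCapture(Runnable codeUnderTest) {
    GenericTestUtils.LogCapturer capturer = GenericTestUtils.LogCapturer
        .captureLogs(LoggerFactory.getLogger("my.test.logger"));
    codeUnderTest.run();
    return capturer.getOutput();
  }
}
```
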
@@ -18,10 +18,10 @@
 
 package org.apache.hadoop.util;
 
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -43,7 +43,7 @@ public class TestJarFinder {
   public void testJar() throws Exception {
 
     //picking a class that is for sure in a JAR in the classpath
-    String jar = JarFinder.getJar(LogFactory.class);
+    String jar = JarFinder.getJar(LoggerFactory.class);
     Assert.assertTrue(new File(jar).exists());
   }
 
@@ -32,9 +32,9 @@ public class TestSignalLogger {
   @Test(timeout=60000)
   public void testInstall() throws Exception {
     Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
-    SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
+    SignalLogger.INSTANCE.register(LOG);
     try {
-      SignalLogger.INSTANCE.register(LogAdapter.create(LOG));
+      SignalLogger.INSTANCE.register(LOG);
       Assert.fail("expected IllegalStateException from double registration");
     } catch (IllegalStateException e) {
       // fall through
@@ -63,11 +63,6 @@
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>javax.servlet</groupId>
       <artifactId>javax.servlet-api</artifactId>
@@ -61,10 +61,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hadoop-common</artifactId>
       <scope>provided</scope>
       <exclusions>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>log4j</groupId>
           <artifactId>log4j</artifactId>
@@ -133,11 +133,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>commons-io</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>commons-daemon</groupId>
       <artifactId>commons-daemon</artifactId>
@@ -49,10 +49,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hadoop-common</artifactId>
       <scope>provided</scope>
       <exclusions>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>log4j</groupId>
           <artifactId>log4j</artifactId>
@@ -2054,7 +2054,7 @@ private DFSClient getFileDFSClient(final String path) {
   @Test
   public void testMkdirsWithCallerContext() throws IOException {
     GenericTestUtils.LogCapturer auditlog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // Current callerContext is null
     assertNull(CallerContext.getCurrent());
@@ -2092,7 +2092,7 @@ public void testSetBalancerBandwidth() throws Exception {
   @Test
   public void testAddClientIpPortToCallerContext() throws IOException {
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientIp and ClientPort are not set on the client.
     // Set client context.
@@ -2127,7 +2127,7 @@ public void testAddClientIpPortToCallerContext() throws IOException {
   @Test
   public void testAddClientIdAndCallIdToCallerContext() throws IOException {
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // 1. ClientId and ClientCallId are not set on the client.
     // Set client context.
@@ -440,7 +440,7 @@ public void testSubclusterDown() throws Exception {
   @Test
   public void testCallerContextWithMultiDestinations() throws IOException {
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
 
     // set client context
     CallerContext.setCurrent(
@@ -117,11 +117,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>commons-io</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>commons-daemon</groupId>
       <artifactId>commons-daemon</artifactId>
@@ -31,8 +31,6 @@
 import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.metrics2.util.MBeans;
@@ -58,13 +56,12 @@ public class MetricsLoggerTask implements Runnable {
     }
   }
 
-  private Log metricsLog;
+  private org.apache.log4j.Logger metricsLog;
   private String nodeName;
   private short maxLogLineLength;
 
-  public MetricsLoggerTask(Log metricsLog, String nodeName,
-      short maxLogLineLength) {
-    this.metricsLog = metricsLog;
+  public MetricsLoggerTask(String metricsLog, String nodeName, short maxLogLineLength) {
+    this.metricsLog = org.apache.log4j.Logger.getLogger(metricsLog);
     this.nodeName = nodeName;
     this.maxLogLineLength = maxLogLineLength;
   }
@@ -118,13 +115,8 @@ private String trimLine(String valueStr) {
         .substring(0, maxLogLineLength) + "...");
   }
 
-  private static boolean hasAppenders(Log logger) {
-    if (!(logger instanceof Log4JLogger)) {
-      // Don't bother trying to determine the presence of appenders.
-      return true;
-    }
-    Log4JLogger log4JLogger = ((Log4JLogger) logger);
-    return log4JLogger.getLogger().getAllAppenders().hasMoreElements();
+  private static boolean hasAppenders(org.apache.log4j.Logger logger) {
+    return logger.getAllAppenders().hasMoreElements();
   }
 
   /**
@@ -150,13 +142,8 @@ private static Set<String> getFilteredAttributes(MBeanInfo mBeanInfo) {
    * Make the metrics logger async and add all pre-existing appenders to the
    * async appender.
    */
-  public static void makeMetricsLoggerAsync(Log metricsLog) {
-    if (!(metricsLog instanceof Log4JLogger)) {
-      LOG.warn("Metrics logging will not be async since "
-          + "the logger is not log4j");
-      return;
-    }
-    org.apache.log4j.Logger logger = ((Log4JLogger) metricsLog).getLogger();
+  public static void makeMetricsLoggerAsync(String metricsLog) {
+    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(metricsLog);
     logger.setAdditivity(false); // Don't pollute actual logs with metrics dump
 
     @SuppressWarnings("unchecked")
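
MetricsLoggerTask is now constructed from a logger name rather than a Log instance, and makeMetricsLoggerAsync likewise takes the name. A sketch mirroring the DataNode.startMetricsLogger change later in this diff; the package in the import is an assumption and the log name is illustrative:

```java
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
// Assumed package for the class changed above; adjust to the actual location.
import org.apache.hadoop.hdfs.server.common.MetricsLoggerTask;

// Illustrative periodic metrics logging, as DataNode wires it up below.
class MetricsLoggingSketch {
  static ScheduledThreadPoolExecutor start(String logName, long periodSec) {
    MetricsLoggerTask.makeMetricsLoggerAsync(logName);
    ScheduledThreadPoolExecutor timer = new ScheduledThreadPoolExecutor(1);
    timer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    timer.scheduleWithFixedDelay(
        new MetricsLoggerTask(logName, "DataNode", (short) 0),
        periodSec, periodSec, TimeUnit.SECONDS);
    return timer;
  }
}
```
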
@@ -35,7 +35,6 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.zip.Checksum;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.StorageType;
@@ -73,7 +72,7 @@
  **/
 class BlockReceiver implements Closeable {
   public static final Logger LOG = DataNode.LOG;
-  static final Log ClientTraceLog = DataNode.ClientTraceLog;
+  static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
 
   @VisibleForTesting
   static long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024;
@@ -1398,7 +1397,7 @@ public void close() {
     public void run() {
       datanode.metrics.incrDataNodePacketResponderCount();
       boolean lastPacketInBlock = false;
-      final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
+      final long startTime = CLIENT_TRACE_LOG.isInfoEnabled() ? System.nanoTime() : 0;
       while (isRunning() && !lastPacketInBlock) {
         long totalAckTimeNanos = 0;
         boolean isInterrupted = false;
@@ -1553,7 +1552,7 @@ private void finalizeBlock(long startTime) throws IOException {
       // Hold a volume reference to finalize block.
       try (ReplicaHandler handler = BlockReceiver.this.claimReplicaHandler()) {
         BlockReceiver.this.close();
-        endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
+        endTime = CLIENT_TRACE_LOG.isInfoEnabled() ? System.nanoTime() : 0;
         block.setNumBytes(replicaInfo.getNumBytes());
         datanode.data.finalizeBlock(block, dirSyncOnFinalize);
       }
@@ -1564,11 +1563,11 @@ private void finalizeBlock(long startTime) throws IOException {
 
       datanode.closeBlock(block, null, replicaInfo.getStorageUuid(),
           replicaInfo.isOnTransientStorage());
-      if (ClientTraceLog.isInfoEnabled() && isClient) {
+      if (CLIENT_TRACE_LOG.isInfoEnabled() && isClient) {
         long offset = 0;
         DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block
             .getBlockPoolId());
-        ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
+        CLIENT_TRACE_LOG.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
             myAddr, replicaInfo.getVolume(), block.getNumBytes(),
             "HDFS_WRITE", clientname, offset, dnR.getDatanodeUuid(),
             block, endTime - startTime));
@@ -32,7 +32,6 @@
 import java.util.Arrays;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -103,7 +102,7 @@
  */
 class BlockSender implements java.io.Closeable {
   static final Logger LOG = DataNode.LOG;
-  static final Log ClientTraceLog = DataNode.ClientTraceLog;
+  static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
   private static final boolean is32Bit =
       System.getProperty("sun.arch.data.model").equals("32");
   /**
@@ -784,7 +783,7 @@ private long doSendBlock(DataOutputStream out, OutputStream baseStream,
     // Trigger readahead of beginning of file if configured.
     manageOsCache();
 
-    final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
+    final long startTime = CLIENT_TRACE_LOG.isDebugEnabled() ? System.nanoTime() : 0;
     try {
       int maxChunksPerPacket;
       int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
@@ -831,9 +830,9 @@ private long doSendBlock(DataOutputStream out, OutputStream baseStream,
         sentEntireByteRange = true;
       }
     } finally {
-      if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
+      if ((clientTraceFmt != null) && CLIENT_TRACE_LOG.isDebugEnabled()) {
         final long endTime = System.nanoTime();
-        ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
+        CLIENT_TRACE_LOG.debug(String.format(clientTraceFmt, totalRead,
             initialOffset, endTime - startTime));
       }
       close();
@@ -140,8 +140,6 @@
 import javax.management.ObjectName;
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -318,9 +316,9 @@ public class DataNode extends ReconfigurableBase
       ", srvID: %s" + // DatanodeRegistration
       ", blockid: %s" + // block id
       ", duration(ns): %s"; // duration time
 
-  static final Log ClientTraceLog =
-      LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
+  static final Logger CLIENT_TRACE_LOG =
+      LoggerFactory.getLogger(DataNode.class.getName() + ".clienttrace");
 
   private static final String USAGE =
       "Usage: hdfs datanode [-regular | -rollback | -rollingupgrade rollback" +
@@ -360,7 +358,7 @@ public class DataNode extends ReconfigurableBase
           FS_GETSPACEUSED_JITTER_KEY,
           FS_GETSPACEUSED_CLASSNAME));
 
-  public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog");
+  public static final String METRICS_LOG_NAME = "DataNodeMetricsLog";
 
   private static final String DATANODE_HTRACE_PREFIX = "datanode.htrace.";
   private final FileIoProvider fileIoProvider;
@@ -4060,12 +4058,12 @@ protected void startMetricsLogger() {
       return;
     }
 
-    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG);
+    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
 
     // Schedule the periodic logging.
     metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
     metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
-    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG,
+    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME,
         "DataNode", (short) 0), metricsLoggerPeriodSec, metricsLoggerPeriodSec,
         TimeUnit.SECONDS);
   }
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
@@ -29,11 +27,14 @@
 import java.util.Stack;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Class for maintain a set of lock for fsDataSetImpl.
 */
 public class DataSetLockManager implements DataNodeLockManager<AutoCloseDataSetLock> {
-  public static final Log LOG = LogFactory.getLog(DataSetLockManager.class);
+  public static final Logger LOG = LoggerFactory.getLogger(DataSetLockManager.class);
   private final HashMap<String, TrackLog> threadCountMap = new HashMap<>();
   private final LockMap lockMap = new LockMap();
   private boolean isFair = true;
@@ -21,7 +21,6 @@
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
 import javax.crypto.SecretKey;
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -105,7 +104,7 @@
  */
 class DataXceiver extends Receiver implements Runnable {
   public static final Logger LOG = DataNode.LOG;
-  static final Log ClientTraceLog = DataNode.ClientTraceLog;
+  static final Logger CLIENT_TRACE_LOG = DataNode.CLIENT_TRACE_LOG;
 
   private Peer peer;
   private final String remoteAddress; // address of remote side
@@ -426,10 +425,10 @@ public void requestShortCircuitFds(final ExtendedBlock blk,
             registeredSlotId);
         datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
       }
-      if (ClientTraceLog.isInfoEnabled()) {
+      if (CLIENT_TRACE_LOG.isInfoEnabled()) {
         DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk
             .getBlockPoolId());
-        BlockSender.ClientTraceLog.info(String.format(
+        BlockSender.CLIENT_TRACE_LOG.info(String.format(
             "src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," +
             " blockid: %s, srvID: %s, success: %b",
             blk.getBlockId(), dnR.getDatanodeUuid(), success));
@@ -466,8 +465,8 @@ public void releaseShortCircuitFds(SlotId slotId) throws IOException {
       bld.build().writeDelimitedTo(socketOut);
       success = true;
     } finally {
-      if (ClientTraceLog.isInfoEnabled()) {
-        BlockSender.ClientTraceLog.info(String.format(
+      if (CLIENT_TRACE_LOG.isInfoEnabled()) {
+        BlockSender.CLIENT_TRACE_LOG.info(String.format(
            "src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," +
            " shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b",
            slotId.getShmId().getHi(), slotId.getShmId().getLo(),
@@ -526,9 +525,9 @@ public void requestShortCircuitShm(String clientName) throws IOException {
       sendShmSuccessResponse(sock, shmInfo);
       success = true;
     } finally {
-      if (ClientTraceLog.isInfoEnabled()) {
+      if (CLIENT_TRACE_LOG.isInfoEnabled()) {
         if (success) {
-          BlockSender.ClientTraceLog.info(String.format(
+          BlockSender.CLIENT_TRACE_LOG.info(String.format(
             "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
             "op: REQUEST_SHORT_CIRCUIT_SHM," +
             " shmId: %016x%016x, srvID: %s, success: true",
@@ -536,7 +535,7 @@ public void requestShortCircuitShm(String clientName) throws IOException {
             shmInfo.getShmId().getLo(),
             datanode.getDatanodeUuid()));
         } else {
-          BlockSender.ClientTraceLog.info(String.format(
+          BlockSender.CLIENT_TRACE_LOG.info(String.format(
             "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
             "op: REQUEST_SHORT_CIRCUIT_SHM, " +
             "shmId: n/a, srvID: %s, success: false",
@@ -587,13 +586,10 @@ public void readBlock(final ExtendedBlock block,
     BlockSender blockSender = null;
     DatanodeRegistration dnR =
       datanode.getDNRegistrationForBP(block.getBlockPoolId());
-    final String clientTraceFmt =
-      clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
-        ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress,
-            "", "%d", "HDFS_READ", clientName, "%d",
-            dnR.getDatanodeUuid(), block, "%d")
-        : dnR + " Served block " + block + " to " +
-            remoteAddress;
+    final String clientTraceFmt = clientName.length() > 0 && CLIENT_TRACE_LOG.isInfoEnabled() ?
+        String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "", "%d", "HDFS_READ",
+            clientName, "%d", dnR.getDatanodeUuid(), block, "%d") :
+        dnR + " Served block " + block + " to " + remoteAddress;
 
     try {
       try {
@@ -185,9 +185,6 @@
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -405,7 +402,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private final String contextFieldSeparator;
 
   boolean isAuditEnabled() {
-    return (!isDefaultAuditLogger || auditLog.isInfoEnabled())
+    return (!isDefaultAuditLogger || AUDIT_LOG.isInfoEnabled())
         && !auditLoggers.isEmpty();
   }
 
@@ -491,8 +488,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){
    * perm=<permissions (optional)>
    * </code>
    */
-  public static final Log auditLog = LogFactory.getLog(
-      FSNamesystem.class.getName() + ".audit");
+  public static final Logger AUDIT_LOG = Logger.getLogger(FSNamesystem.class.getName() + ".audit");
 
   private final int maxCorruptFileBlocksReturn;
   private final boolean isPermissionEnabled;
@@ -8783,8 +8779,8 @@ public void logAuditEvent(boolean succeeded, String userName,
       FileStatus status, CallerContext callerContext, UserGroupInformation ugi,
       DelegationTokenSecretManager dtSecretManager) {
 
-    if (auditLog.isDebugEnabled() ||
-        (auditLog.isInfoEnabled() && !debugCmdSet.contains(cmd))) {
+    if (AUDIT_LOG.isDebugEnabled() ||
+        (AUDIT_LOG.isInfoEnabled() && !debugCmdSet.contains(cmd))) {
       final StringBuilder sb = STRING_BUILDER.get();
       src = escapeJava(src);
       dst = escapeJava(dst);
@@ -8853,16 +8849,12 @@ public void logAuditEvent(boolean succeeded, String userName,
     }
 
     public void logAuditMessage(String message) {
-      auditLog.info(message);
+      AUDIT_LOG.info(message);
     }
   }
 
   private static void enableAsyncAuditLog(Configuration conf) {
-    if (!(auditLog instanceof Log4JLogger)) {
-      LOG.warn("Log4j is required to enable async auditlog");
-      return;
-    }
-    Logger logger = ((Log4JLogger)auditLog).getLogger();
+    Logger logger = AUDIT_LOG;
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     // failsafe against trying to async it more than once
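
With AUDIT_LOG now a plain org.apache.log4j.Logger, tests and tools can attach appenders to it directly instead of unwrapping a Log4JLogger first. A hedged sketch of in-memory capture using standard log4j 1.2 APIs (the helper class is illustrative):

```java
import java.io.StringWriter;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.WriterAppender;

// Illustrative: route a log4j logger such as FSNamesystem.AUDIT_LOG
// into a StringWriter for inspection.
class AuditCaptureSketch {
  static StringWriter attach(Logger auditLog) {
    StringWriter out = new StringWriter();
    auditLog.setLevel(Level.INFO);
    auditLog.addAppender(new WriterAppender(new PatternLayout("%m%n"), out));
    return out;
  }
}
```
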
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -125,15 +122,10 @@ static String memoryInfo() {
   }
 
   static void setLogLevel(Class<?> clazz, Level level) {
-    final Log log = LogFactory.getLog(clazz);
-    if (log instanceof Log4JLogger) {
-      final org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
-      logger.setLevel(level);
-      LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}",
-          clazz.getName(), level, logger.getEffectiveLevel());
-    } else {
-      LOG.warn("Failed setLogLevel {} to {}", clazz.getName(), level);
-    }
+    final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz);
+    logger.setLevel(level);
+    LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level,
+        logger.getEffectiveLevel());
   }
 
   static String toCommaSeparatedNumber(long n) {
@@ -25,8 +25,6 @@
 import org.apache.hadoop.util.Preconditions;
 
 import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -427,8 +425,7 @@ public long getProtocolVersion(String protocol,
 
   private static final String NAMENODE_HTRACE_PREFIX = "namenode.htrace.";
 
-  public static final Log MetricsLog =
-      LogFactory.getLog("NameNodeMetricsLog");
+  public static final String METRICS_LOG_NAME = "NameNodeMetricsLog";
 
   protected FSNamesystem namesystem;
   protected final NamenodeRole role;
@@ -949,13 +946,13 @@ protected void startMetricsLogger(Configuration conf) {
       return;
     }
 
-    MetricsLoggerTask.makeMetricsLoggerAsync(MetricsLog);
+    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
 
     // Schedule the periodic logging.
     metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
     metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(
         false);
-    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(MetricsLog,
+    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG_NAME,
         "NameNode", (short) 128),
         metricsLoggerPeriodSec,
         metricsLoggerPeriodSec,
@@ -21,8 +21,6 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -41,6 +39,8 @@
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.junit.Assert.*;
@@ -52,7 +52,7 @@
  */
 @RunWith(Parameterized.class)
 public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
-  public static final Log LOG = LogFactory.getLog(TestPermission.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestPermission.class);
 
   private HdfsConfiguration conf;
   private MiniDFSCluster cluster;
@@ -190,7 +190,7 @@ public void testRename2Options() throws Exception {
     Path path = new Path("/test");
     dfs.mkdirs(path);
     GenericTestUtils.LogCapturer auditLog =
-        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.auditLog);
+        GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
     dfs.rename(path, new Path("/dir1"),
         new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
     String auditOut = auditLog.getOutput();
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -29,6 +27,8 @@
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 import java.io.IOException;
@@ -47,7 +47,7 @@
  * Tests append on erasure coded file.
  */
 public class TestStripedFileAppend {
-  public static final Log LOG = LogFactory.getLog(TestStripedFileAppend.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestStripedFileAppend.class);
 
   static {
     DFSTestUtil.setNameNodeLogLevel(Level.TRACE);
@@ -32,8 +32,6 @@
 import java.util.concurrent.TimeoutException;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -135,8 +133,7 @@ public void testDisableMetricsLogger() throws IOException {
   public void testMetricsLoggerIsAsync() throws IOException {
     startDNForTest(true);
     assertNotNull(dn);
-    org.apache.log4j.Logger logger = ((Log4JLogger) DataNode.METRICS_LOG)
-        .getLogger();
+    org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME);
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     assertTrue(appenders.get(0) instanceof AsyncAppender);
@@ -156,7 +153,7 @@ public void testMetricsLogOutput() throws IOException, InterruptedException,
     assertNotNull(dn);
     final PatternMatchingAppender appender = new PatternMatchingAppender(
         "^.*FakeMetric.*$");
-    addAppender(DataNode.METRICS_LOG, appender);
+    addAppender(org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME), appender);
 
     // Ensure that the supplied pattern was matched.
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -169,8 +166,7 @@ public Boolean get() {
     dn.shutdown();
   }
 
-  private void addAppender(Log log, Appender appender) {
-    org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
+  private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
     @SuppressWarnings("unchecked")
     List<Appender> appenders = Collections.list(logger.getAllAppenders());
     ((AsyncAppender) appenders.get(0)).addAppender(appender);
@@ -26,10 +26,11 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.FSNamesystemAuditLogger;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
 
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
+import org.slf4j.event.Level;
+
 import java.net.Inet4Address;
 import java.util.Arrays;
@@ -61,7 +62,7 @@ private DefaultAuditLogger makeSpyLogger(
           Joiner.on(",").join(debugCommands.get()));
     }
     logger.initialize(conf);
-    GenericTestUtils.setLogLevel(FSNamesystem.auditLog, level);
+    GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, level);
     return spy(logger);
   }
 
||||
|
@ -258,7 +258,7 @@ public void testAuditLoggerWithCallContext() throws IOException {
|
||||
conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40);
|
||||
|
||||
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
|
||||
LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog);
|
||||
LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
|
||||
cluster.waitClusterUp();
|
||||
final FileSystem fs = cluster.getFileSystem();
|
||||
final long time = System.currentTimeMillis();
|
||||
@ -568,7 +568,7 @@ public void testAuditLogWithRemotePort() throws Exception {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
MiniDFSCluster cluster1 = new MiniDFSCluster.Builder(conf).build();
|
||||
try {
|
||||
LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.auditLog);
|
||||
LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
|
||||
cluster1.waitClusterUp();
|
||||
FileSystem fs = cluster1.getFileSystem();
|
||||
long time = System.currentTimeMillis();
|
||||
@ -585,7 +585,7 @@ public void testAuditLogWithRemotePort() throws Exception {
|
||||
conf.setBoolean(HADOOP_CALLER_CONTEXT_ENABLED_KEY, true);
|
||||
MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
|
||||
try {
|
||||
LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.auditLog);
|
||||
LogCapturer auditLog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
|
||||
cluster2.waitClusterUp();
|
||||
FileSystem fs = cluster2.getFileSystem();
|
||||
long time = System.currentTimeMillis();
|
||||
@ -606,7 +606,7 @@ public void testCallerContextCharacterEscape() throws IOException {
|
||||
conf.setInt(HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY, 40);
|
||||
|
||||
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
|
||||
LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog);
|
||||
LogCapturer auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
|
||||
cluster.waitClusterUp();
|
||||
final FileSystem fs = cluster.getFileSystem();
|
||||
final long time = System.currentTimeMillis();
|
||||
|
@@ -93,7 +93,7 @@ public void initialize() throws Exception {
     user2 =
         UserGroupInformation.createUserForTesting("theEngineer",
            new String[]{"hadoop"});
-    auditlog = LogCapturer.captureLogs(FSNamesystem.auditLog);
+    auditlog = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
     proto = cluster.getNameNodeRpc();
     fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
     fs2 = DFSTestUtil.getFileSystemAs(user2, conf);
@ -32,7 +32,6 @@
import java.util.List;
import java.util.regex.Pattern;

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@ -130,7 +129,7 @@ public void setupCluster() throws Exception {
util.createFiles(fs, fileName);

// make sure the appender is what it's supposed to be
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
Logger logger = FSNamesystem.AUDIT_LOG;
@SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertEquals(1, appenders.size());
@ -283,7 +282,7 @@ public void testAuditCharacterEscape() throws Exception {

/** Sets up log4j logger for auditlogs */
private void setupAuditLogs() throws IOException {
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
Logger logger = FSNamesystem.AUDIT_LOG;
// enable logging now that the test is ready to run
logger.setLevel(Level.INFO);
}
@ -303,7 +302,7 @@ private void configureAuditLogs() throws IOException {
disableAuditLog();
PatternLayout layout = new PatternLayout("%m%n");
RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
Logger logger = FSNamesystem.AUDIT_LOG;
logger.addAppender(appender);
}

@ -319,7 +318,7 @@ private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe)
disableAuditLog();

// Close the appenders and force all logs to be flushed
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
Logger logger = FSNamesystem.AUDIT_LOG;
Enumeration<?> appenders = logger.getAllAppenders();
while (appenders.hasMoreElements()) {
Appender appender = (Appender)appenders.nextElement();
@ -352,7 +351,7 @@ private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Patte
disableAuditLog();

// Close the appenders and force all logs to be flushed
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
Logger logger = FSNamesystem.AUDIT_LOG;
Enumeration<?> appenders = logger.getAllAppenders();
while (appenders.hasMoreElements()) {
Appender appender = (Appender)appenders.nextElement();

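Several hunks above replace the Log4JLogger unwrap with direct use of the log4j logger before closing its appenders. A sketch of that flush idiom in plain log4j 1.x (the helper name is hypothetical):

import java.util.Enumeration;
import org.apache.log4j.Appender;
import org.apache.log4j.Logger;

final class AppenderFlushSketch {
  // Close every appender attached to the logger; closing a file-backed
  // appender forces buffered events to disk before the test reads the file.
  static void closeAppenders(Logger logger) {
    Enumeration<?> appenders = logger.getAllAppenders();
    while (appenders.hasMoreElements()) {
      Appender appender = (Appender) appenders.nextElement();
      appender.close();
    }
  }
}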
@ -61,7 +61,6 @@
import java.util.regex.Pattern;

import java.util.function.Supplier;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -252,7 +251,7 @@ private void setupAuditLogs() throws IOException {
if (file.exists()) {
file.delete();
}
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
Logger logger = FSNamesystem.AUDIT_LOG;
logger.removeAllAppenders();
logger.setLevel(Level.INFO);
PatternLayout layout = new PatternLayout("%m%n");
@ -291,7 +290,7 @@ private void verifyAuditLogs() throws IOException {
if (reader != null) {
reader.close();
}
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
Logger logger = FSNamesystem.AUDIT_LOG;
if (logger != null) {
logger.removeAllAppenders();
}

@ -19,8 +19,6 @@
package org.apache.hadoop.hdfs.server.namenode;

import java.util.function.Supplier;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@ -70,8 +68,7 @@ public void testDisableMetricsLogger() throws IOException {
@Test
public void testMetricsLoggerIsAsync() throws IOException {
makeNameNode(true);
org.apache.log4j.Logger logger =
((Log4JLogger) NameNode.MetricsLog).getLogger();
org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME);
@SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertTrue(appenders.get(0) instanceof AsyncAppender);
@ -90,7 +87,7 @@ public void testMetricsLogOutput()
makeNameNode(true); // Log metrics early and often.
final PatternMatchingAppender appender =
new PatternMatchingAppender("^.*FakeMetric42.*$");
addAppender(NameNode.MetricsLog, appender);
addAppender(org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME), appender);

// Ensure that the supplied pattern was matched.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@ -118,8 +115,7 @@ private NameNode makeNameNode(boolean enableMetricsLogging)
return new TestNameNode(conf);
}

private void addAppender(Log log, Appender appender) {
org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
@SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders());
((AsyncAppender) appenders.get(0)).addAppender(appender);

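The refactored addAppender helper above now takes the log4j Logger directly, so callers resolve NameNode.METRICS_LOG_NAME themselves instead of passing a commons-logging Log to be unwrapped. A sketch of the resulting shape (the standalone class and the logger name string are illustrative):

import java.util.Collections;
import java.util.List;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;

final class AddAppenderSketch {
  static void addAppender(Logger logger, Appender appender) {
    @SuppressWarnings("unchecked")
    List<Appender> appenders = Collections.list(logger.getAllAppenders());
    // The metrics logger is fronted by an AsyncAppender; hook the
    // test appender underneath it rather than onto the logger itself.
    ((AsyncAppender) appenders.get(0)).addAppender(appender);
  }

  static void wire(Appender testAppender) {
    addAppender(Logger.getLogger("NameNodeMetricsLog"), testAppender); // name is illustrative
  }
}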
@ -45,7 +45,7 @@
*/
public class TestDNFencingWithReplication {
static {
GenericTestUtils.setLogLevel(FSNamesystem.auditLog, Level.WARN);
GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, org.apache.log4j.Level.WARN);
GenericTestUtils.setLogLevel(Server.LOG, Level.ERROR);
GenericTestUtils.setLogLevel(RetryInvocationHandler.LOG, Level.ERROR);
}

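With the lower-case auditLog field gone, tests that only want to quiet the audit output go through GenericTestUtils.setLogLevel with the log4j types, as the static block above does. A sketch, assuming only the log4j-flavored overload shown in that hunk (the wrapper class is illustrative):

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.test.GenericTestUtils;

final class QuietAuditLogSketch {
  static void quietAuditLog() {
    // AUDIT_LOG is a log4j Logger after this change, so the
    // org.apache.log4j.Level overload is the one that applies.
    GenericTestUtils.setLogLevel(FSNamesystem.AUDIT_LOG, org.apache.log4j.Level.WARN);
  }
}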
@ -18,18 +18,18 @@

package org.apache.hadoop.mapreduce.v2.app.speculate.forecast;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.yarn.util.ControlledClock;

/**
* Testing the statistical model of simple exponential estimator.
*/
public class TestSimpleExponentialForecast {
private static final Log LOG =
LogFactory.getLog(TestSimpleExponentialForecast.class);
private static final Logger LOG = LoggerFactory.getLogger(TestSimpleExponentialForecast.class);

private static long clockTicks = 1000L;
private ControlledClock clock;

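Nearly every remaining hunk repeats one mechanical change: drop Log/LogFactory, import org.slf4j, and re-declare LOG via LoggerFactory. A reduced sketch of the before/after (Foo is a stand-in class, not from the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class Foo {
  // Before: private static final Log LOG = LogFactory.getLog(Foo.class);
  private static final Logger LOG = LoggerFactory.getLogger(Foo.class);

  void report(int count) {
    // slf4j {} placeholders defer string building until INFO is enabled,
    // where the commons-logging style concatenated strings eagerly.
    LOG.info("processed {} records", count);
  }
}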
@ -28,8 +28,6 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -66,6 +64,8 @@
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Test speculation on Mini Cluster.
@ -73,8 +73,7 @@
@Ignore
@RunWith(Parameterized.class)
public class TestSpeculativeExecOnCluster {
private static final Log LOG = LogFactory
.getLog(TestSpeculativeExecOnCluster.class);
private static final Logger LOG = LoggerFactory.getLogger(TestSpeculativeExecOnCluster.class);

private static final int NODE_MANAGERS_COUNT = 2;
private static final boolean ENABLE_SPECULATIVE_MAP = true;

@ -132,11 +132,6 @@
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-guava</artifactId>

@ -38,10 +38,6 @@
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-jobclient</artifactId>

@ -121,7 +121,6 @@
<commons-io.version>2.11.0</commons-io.version>
<commons-lang3.version>3.12.0</commons-lang3.version>
<commons-logging.version>1.1.3</commons-logging.version>
<commons-logging-api.version>1.1</commons-logging-api.version>
<commons-math3.version>3.6.1</commons-math3.version>
<commons-net.version>3.9.0</commons-net.version>
<commons-text.version>1.10.0</commons-text.version>
@ -1094,11 +1093,6 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging-api</artifactId>
<version>${commons-logging-api.version}</version>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>

@ -101,11 +101,6 @@
<artifactId>commons-io</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>

@ -30,8 +30,6 @@
import java.io.InputStream;
import java.util.ArrayList;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;

@ -39,6 +37,8 @@
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.PageRange;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* An input stream that reads file data from a page blob stored
@ -46,7 +46,7 @@
*/

final class PageBlobInputStream extends InputStream {
private static final Log LOG = LogFactory.getLog(PageBlobInputStream.class);
private static final Logger LOG = LoggerFactory.getLogger(PageBlobInputStream.class);

// The blob we're reading from.
private final CloudPageBlobWrapper blob;

@ -39,8 +39,6 @@
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.classification.VisibleForTesting;
@ -48,7 +46,8 @@
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.BlobRequestOptions;
import com.microsoft.azure.storage.blob.CloudPageBlob;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* An output stream that write file data to a page blob stored using ASV's
@ -120,7 +119,7 @@ final class PageBlobOutputStream extends OutputStream implements Syncable, Strea
// Whether the stream has been closed.
private boolean closed = false;

public static final Log LOG = LogFactory.getLog(AzureNativeFileSystemStore.class);
public static final Logger LOG = LoggerFactory.getLogger(AzureNativeFileSystemStore.class);

// Set the minimum page blob file size to 128MB, which is >> the default
// block size of 32MB. This default block size is often used as the

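One detail worth noticing in the PageBlobOutputStream hunk above: the migrated LOG keeps AzureNativeFileSystemStore.class as its name, so the stream keeps logging under the store's category exactly as before. A sketch of the two equivalent naming forms (the wrapper class and field names are illustrative):

import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class LoggerNamingSketch {
  // Class-based name, matching the hunk above:
  static final Logger STORE_LOG =
      LoggerFactory.getLogger(AzureNativeFileSystemStore.class);
  // The same category spelled out as a string:
  static final Logger STORE_LOG_BY_NAME =
      LoggerFactory.getLogger("org.apache.hadoop.fs.azure.AzureNativeFileSystemStore");
}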
@ -18,8 +18,6 @@

package org.apache.hadoop.fs.azure;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;

import org.apache.hadoop.classification.VisibleForTesting;
@ -27,6 +25,8 @@
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.atomic.AtomicInteger;

@ -58,7 +58,7 @@ public class SelfRenewingLease {

// Time to wait to renew lease in milliseconds
public static final int LEASE_RENEWAL_PERIOD = 40000;
private static final Log LOG = LogFactory.getLog(SelfRenewingLease.class);
private static final Logger LOG = LoggerFactory.getLogger(SelfRenewingLease.class);

// Used to allocate thread serial numbers in thread name
private static AtomicInteger threadNumber = new AtomicInteger(0);

@ -21,8 +21,6 @@
import java.net.HttpURLConnection;
import java.util.Date;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;

import com.microsoft.azure.storage.OperationContext;
@ -30,6 +28,8 @@
import com.microsoft.azure.storage.ResponseReceivedEvent;
import com.microsoft.azure.storage.SendingRequestEvent;
import com.microsoft.azure.storage.StorageEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/*
* Self throttling is implemented by hooking into send & response callbacks
@ -63,8 +63,7 @@
*/
@InterfaceAudience.Private
public class SelfThrottlingIntercept {
public static final Log LOG = LogFactory
.getLog(SelfThrottlingIntercept.class);
public static final Logger LOG = LoggerFactory.getLogger(SelfThrottlingIntercept.class);

private final float readFactor;
private final float writeFactor;

@ -21,8 +21,6 @@
import java.net.HttpURLConnection;
import java.security.InvalidKeyException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;

import com.microsoft.azure.storage.Constants.HeaderConstants;
@ -40,8 +38,6 @@
@InterfaceAudience.Private
public final class SendRequestIntercept extends StorageEvent<SendingRequestEvent> {

public static final Log LOG = LogFactory.getLog(SendRequestIntercept.class);

private static final String ALLOW_ALL_REQUEST_PRECONDITIONS = "*";

/**

@ -20,8 +20,9 @@

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ProviderUtils;
@ -32,7 +33,7 @@
*/
@InterfaceAudience.Private
public class SimpleKeyProvider implements KeyProvider {
private static final Log LOG = LogFactory.getLog(SimpleKeyProvider.class);
private static final Logger LOG = LoggerFactory.getLogger(SimpleKeyProvider.class);

protected static final String KEY_ACCOUNT_KEY_PREFIX =
"fs.azure.account.key.";

@ -21,8 +21,6 @@
import java.util.ArrayList;
import java.util.Date;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;

/**
@ -31,9 +29,7 @@
*/
@InterfaceAudience.Private
public final class BandwidthGaugeUpdater {
public static final Log LOG = LogFactory
.getLog(BandwidthGaugeUpdater.class);


public static final String THREAD_NAME = "AzureNativeFilesystemStore-UploadBandwidthUpdater";

private static final int DEFAULT_WINDOW_SIZE_MS = 1000;

@ -20,8 +20,6 @@

import java.net.HttpURLConnection;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;

import com.microsoft.azure.storage.Constants.HeaderConstants;
@ -38,8 +36,6 @@
@InterfaceAudience.Private
public final class ResponseReceivedMetricUpdater extends StorageEvent<ResponseReceivedEvent> {

public static final Log LOG = LogFactory.getLog(ResponseReceivedMetricUpdater.class);

private final AzureFileSystemInstrumentation instrumentation;
private final BandwidthGaugeUpdater blockUploadGaugeUpdater;

@ -28,7 +28,6 @@
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -41,6 +40,8 @@
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations.
@ -70,8 +71,7 @@ public void setUp() throws Exception {
fs.initialize(uri, conf);

// Capture logs
logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger
.getRootLogger()));
logs = LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
}

/*

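The WASB test above swaps the Log4JLogger wrapper around log4j's root logger for the slf4j root logger. A sketch, assuming LogCapturer accepts an slf4j Logger as in the hunk (the helper is illustrative):

import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class RootCaptureSketch {
  // Logger.ROOT_LOGGER_NAME is the slf4j constant "ROOT"; capturing it
  // buffers output from every logger in the JVM for later assertions.
  static LogCapturer captureEverything() {
    return LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));
  }
}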
@ -21,12 +21,13 @@
import java.net.URI;
import java.util.StringTokenizer;

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.log4j.Logger;

import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Test to validate Azure storage client side logging. Tests works only when
@ -94,8 +95,8 @@ private void performWASBOperations() throws Exception {
@Test
public void testLoggingEnabled() throws Exception {

LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
.getRootLogger()));
LogCapturer logs =
LogCapturer.captureLogs(LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME));

// Update configuration based on the Test.
updateFileSystemConfiguration(true);
@ -116,8 +117,7 @@ protected String getLogOutput(LogCapturer logs) {
@Test
public void testLoggingDisabled() throws Exception {

LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger
.getRootLogger()));
LogCapturer logs = LogCapturer.captureLogs(LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME));

// Update configuration based on the Test.
updateFileSystemConfiguration(false);

@ -30,8 +30,6 @@
import java.util.EnumSet;
import java.util.TimeZone;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -49,6 +47,8 @@
import com.microsoft.azure.storage.AccessCondition;
import com.microsoft.azure.storage.StorageException;
import com.microsoft.azure.storage.blob.CloudBlob;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.readStringFromFile;
import static org.apache.hadoop.fs.azure.integration.AzureTestUtils.writeStringToFile;
@ -73,7 +73,7 @@ public abstract class NativeAzureFileSystemBaseTest
private static final EnumSet<XAttrSetFlag> CREATE_FLAG = EnumSet.of(XAttrSetFlag.CREATE);
private static final EnumSet<XAttrSetFlag> REPLACE_FLAG = EnumSet.of(XAttrSetFlag.REPLACE);

public static final Log LOG = LogFactory.getLog(NativeAzureFileSystemBaseTest.class);
public static final Logger LOG = LoggerFactory.getLogger(NativeAzureFileSystemBaseTest.class);
protected NativeAzureFileSystem fs;

@Override

@ -23,10 +23,10 @@

import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys;
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.KeyProviderException;
@ -39,8 +39,7 @@
*
*/
public class TestShellDecryptionKeyProvider {
public static final Log LOG = LogFactory
.getLog(TestShellDecryptionKeyProvider.class);
public static final Logger LOG = LoggerFactory.getLogger(TestShellDecryptionKeyProvider.class);
private static final File TEST_ROOT_DIR = new File(System.getProperty(
"test.build.data", "/tmp"), "TestShellDecryptionKeyProvider");

@ -34,8 +34,6 @@
import org.apache.hadoop.yarn.appcatalog.utils.RandomWord;
import org.apache.hadoop.yarn.appcatalog.utils.WordLengthException;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrQuery.ORDER;
@ -48,13 +46,15 @@

import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Driver class for accessing Solr.
*/
public class AppCatalogSolrClient {

private static final Log LOG = LogFactory.getLog(AppCatalogSolrClient.class);
private static final Logger LOG = LoggerFactory.getLogger(AppCatalogSolrClient.class);
private static String urlString;

public AppCatalogSolrClient() {

@ -21,8 +21,6 @@
import java.io.IOException;


import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.appcatalog.model.AppEntry;
@ -39,13 +37,15 @@
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Driver class for calling YARN Resource Manager REST API.
*/
public class YarnServiceClient {

private static final Log LOG = LogFactory.getLog(YarnServiceClient.class);
private static final Logger LOG = LoggerFactory.getLogger(YarnServiceClient.class);
private static Configuration conf = new Configuration();
private static ClientConfig getClientConfig() {
ClientConfig config = new DefaultClientConfig();

@ -21,8 +21,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;

import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -81,6 +79,8 @@

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_LAYER_MOUNTS_TO_KEEP;
@ -136,8 +136,7 @@
@InterfaceStability.Unstable
public class RuncContainerRuntime extends OCIContainerRuntime {

private static final Log LOG = LogFactory.getLog(
RuncContainerRuntime.class);
private static final Logger LOG = LoggerFactory.getLogger(RuncContainerRuntime.class);

@InterfaceAudience.Private
private static final String RUNTIME_TYPE = "RUNC";

@ -20,8 +20,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.runc;

import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
@ -45,6 +43,8 @@
import java.util.concurrent.atomic.AtomicReference;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_CACHE_REFRESH_INTERVAL;
import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TOPLEVEL_DIR;
@ -78,8 +78,7 @@ public class ImageTagToManifestPlugin extends AbstractService
private String manifestDir;
private String localImageTagToHashFile;

private static final Log LOG = LogFactory.getLog(
ImageTagToManifestPlugin.class);
private static final Logger LOG = LoggerFactory.getLogger(ImageTagToManifestPlugin.class);

private static final int SHA256_HASH_LENGTH = 64;
private static final String ALPHA_NUMERIC = "[a-zA-Z0-9]+";

@ -17,8 +17,6 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
@ -27,11 +25,14 @@
import javax.management.ObjectName;
import javax.management.StandardMBean;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* JMX bean for RM info.
*/
public class RMInfo implements RMInfoMXBean {
private static final Log LOG = LogFactory.getLog(RMNMInfo.class);
private static final Logger LOG = LoggerFactory.getLogger(RMNMInfo.class);
private ResourceManager resourceManager;
private ObjectName rmStatusBeanName;

@ -22,6 +22,8 @@
import com.google.inject.Singleton;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;

@ -29,8 +31,6 @@
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
@ -41,8 +41,7 @@
@Provider
public class JAXBContextResolver implements ContextResolver<JAXBContext> {

private static final Log LOG =
LogFactory.getLog(JAXBContextResolver.class.getName());
private static final Logger LOG = LoggerFactory.getLogger(JAXBContextResolver.class.getName());

private final Map<Class, JAXBContext> typesContextMap;

@ -22,8 +22,6 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Resource;
@ -49,14 +47,16 @@
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public class TestCapacitySchedulerMultiNodesWithPreemption {

private static final Log LOG = LogFactory
.getLog(TestCapacitySchedulerMultiNodesWithPreemption.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestCapacitySchedulerMultiNodesWithPreemption.class);
private CapacitySchedulerConfiguration conf;
private static final String POLICY_CLASS_NAME =
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement."

@ -104,6 +104,10 @@
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions>
</dependency>

@ -336,6 +340,10 @@
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions>
</dependency>

@ -351,6 +359,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions>
</dependency>

@ -367,6 +379,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions>
</dependency>

pom.xml
@ -288,6 +288,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/x
<bannedImport>org.glassfish.grizzly.**</bannedImport>
</bannedImports>
</restrictImports>
<restrictImports>
<includeTestCode>true</includeTestCode>
<reason>Use slf4j based Logger</reason>
<bannedImports>
<bannedImport>org.apache.commons.logging.**</bannedImport>
</bannedImports>
</restrictImports>
</rules>
</configuration>
</execution>
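With this second restrict-imports rule in place, the enforcer fails the build for any commons-logging import, test code included. A hedged illustration of what now breaks and what passes (the class is hypothetical, not part of the patch):

// Fails the enforcer check introduced above:
//   import org.apache.commons.logging.Log;
//   import org.apache.commons.logging.LogFactory;
//
// Passes, using the slf4j API the rule steers everyone toward:
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class EnforcerExampleSketch {
  private static final Logger LOG = LoggerFactory.getLogger(EnforcerExampleSketch.class);
}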