diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml
index 9a060f7502..4deda43279 100644
--- a/hadoop-common-project/hadoop-auth-examples/pom.xml
+++ b/hadoop-common-project/hadoop-auth-examples/pom.xml
@@ -46,6 +46,16 @@
      <artifactId>slf4j-api</artifactId>
      <scope>compile</scope>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>runtime</scope>
+    </dependency>
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 4cdd6006a4..433a615c60 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -82,14 +82,14 @@
      <scope>compile</scope>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>runtime</scope>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>runtime</scope>
      <groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
index e18982d75f..f9c922caac 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
@@ -15,7 +15,8 @@
import java.util.Random;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
import org.junit.Assert;
import org.junit.Test;
@@ -29,8 +30,9 @@ public class TestRandomSignerSecretProvider {
private final int timeout = 500;
private final long rolloverFrequency = timeout / 2;
- static {
- HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
+ {
+ LogManager.getLogger(
+ RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
}
@Test
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index d81d1eb335..628342e40d 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -19,7 +19,8 @@
import javax.servlet.ServletContext;
import org.apache.curator.test.TestingServer;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -38,8 +39,9 @@ public class TestZKSignerSecretProvider {
private final int timeout = 100;
private final long rolloverFrequency = timeout / 2;
- static {
- HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG");
+ {
+ LogManager.getLogger(
+ RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG);
}
@Before
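Both secret-provider tests above now set the DEBUG level through the log4j 1.x API directly instead of the removed HadoopLoggerUtils facade. A minimal standalone sketch of that pattern, assuming an illustrative logger name not taken from the patch:

```java
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

public class LogLevelSetupSketch {
  public static void main(String[] args) {
    // Raise a named logger to DEBUG, as the test initializer blocks do.
    Logger log = LogManager.getLogger("org.example.SomeNoisyClass");
    log.setLevel(Level.DEBUG);
    // getEffectiveLevel() falls back to the nearest ancestor with a level set.
    System.out.println("effective level = " + log.getEffectiveLevel());
  }
}
```

Note the patch also moves this from a static initializer to an instance initializer; either way the level change lands on the shared log4j logger repository.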
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 426f7a4af4..a9e15d004d 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -419,16 +419,6 @@
      <artifactId>lz4-java</artifactId>
      <scope>provided</scope>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 086665151e..b4eec1fe2c 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -299,7 +299,7 @@ log4j.appender.NMAUDIT.MaxBackupIndex=${nm.audit.log.maxbackupindex}
yarn.ewma.cleanupInterval=300
yarn.ewma.messageAgeLimitSeconds=86400
yarn.ewma.maxUniqueMessages=250
-log4j.appender.EWMA=org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
index cf090eea00..32879597a9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
@@ -42,7 +42,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.ssl.SSLFactory;
@@ -51,6 +50,8 @@
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
/**
* Change log level in runtime.
@@ -348,7 +349,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response
}
if (GenericsUtil.isLog4jLogger(logName)) {
- process(logName, level, out);
+ process(Logger.getLogger(logName), level, out);
} else {
out.println("Sorry, setting log level is only supported for log4j loggers.
");
}
@@ -367,17 +368,19 @@ public void doGet(HttpServletRequest request, HttpServletResponse response
+ ""
+ "";
- private static void process(String log, String level, PrintWriter out) {
+ private static void process(Logger log, String level,
+ PrintWriter out) throws IOException {
if (level != null) {
- try {
- HadoopLoggerUtils.setLogLevel(log, level);
- out.println(MARKER + "Setting Level to " + level + " ...<br />");
- } catch (IllegalArgumentException e) {
+ if (!level.equalsIgnoreCase(Level.toLevel(level)
+ .toString())) {
out.println(MARKER + "Bad Level : " + level + "<br />");
+ } else {
+ log.setLevel(Level.toLevel(level));
+ out.println(MARKER + "Setting Level to " + level + " ...<br />");
}
}
- out.println(MARKER + "Effective Level: " + HadoopLoggerUtils.getEffectiveLevel(log)
- + "<br />");
+ out.println(MARKER
+ + "Effective Level: " + log.getEffectiveLevel() + "<br />");
}
}
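The rewritten process() validates the requested level by round-tripping it through Level.toLevel(): in log4j 1.x that method never throws and silently falls back to DEBUG for unrecognized names, so comparing the round-tripped string with the input is how a bad level is detected. A small sketch of that check, with a hypothetical helper name:

```java
import org.apache.log4j.Level;

public class LevelValidationSketch {
  /** True if the string names a real log4j level (case-insensitive). */
  static boolean isValidLevel(String level) {
    // Level.toLevel() returns Level.DEBUG for unknown input instead of
    // throwing, so "FOO" round-trips to "DEBUG" and fails the comparison.
    return level.equalsIgnoreCase(Level.toLevel(level).toString());
  }

  public static void main(String[] args) {
    System.out.println(isValidLevel("warn"));  // true
    System.out.println(isValidLevel("FOO"));   // false
    System.out.println(isValidLevel("debug")); // true (DEBUG happens to be the fallback too)
  }
}
```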
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 3c13feac3e..3debd36da7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -40,8 +40,8 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.net.NetUtils;
+import org.apache.log4j.LogManager;
import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
@@ -761,7 +761,7 @@ public static void startupShutdownMessage(Class<?> clazz, String[] args,
public void run() {
log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
"Shutting down " + classname + " at " + hostname}));
- HadoopLoggerUtils.shutdownLogManager();
+ LogManager.shutdown();
}
}, SHUTDOWN_HOOK_PRIORITY);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 913826f3ee..b3487ef309 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -68,7 +68,6 @@
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
@@ -77,8 +76,10 @@
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
import org.mockito.Mockito;
-import org.slf4j.LoggerFactory;
public class TestConfiguration {
@@ -219,7 +220,9 @@ public void testFinalWarnings() throws Exception {
InputStream in2 = new ByteArrayInputStream(bytes2);
// Attach our own log appender so we can verify output
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
try {
// Add the 2 different resources - this should generate a warning
@@ -227,13 +230,17 @@ public void testFinalWarnings() throws Exception {
conf.addResource(in2);
assertEquals("should see the first value", "A", conf.get("prop"));
- String renderedMessage = logCapturer.getOutput();
- assertTrue("did not see expected string inside message " + renderedMessage,
- renderedMessage.contains(
- "an attempt to override final parameter: " + "prop; Ignoring."));
+ List<LoggingEvent> events = appender.getLog();
+ assertEquals("overriding a final parameter should cause logging", 1,
+ events.size());
+ LoggingEvent loggingEvent = events.get(0);
+ String renderedMessage = loggingEvent.getRenderedMessage();
+ assertTrue("did not see expected string inside message "+ renderedMessage,
+ renderedMessage.contains("an attempt to override final parameter: "
+ + "prop; Ignoring."));
} finally {
// Make sure the appender is removed
- logCapturer.stopCapturing();
+ logger.removeAppender(appender);
}
}
@@ -251,7 +258,9 @@ public void testNoFinalWarnings() throws Exception {
InputStream in2 = new ByteArrayInputStream(bytes);
// Attach our own log appender so we can verify output
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
try {
// Add the resource twice from a stream - should not generate warnings
@@ -259,15 +268,20 @@ public void testNoFinalWarnings() throws Exception {
conf.addResource(in2);
assertEquals("A", conf.get("prop"));
- String appenderOutput = logCapturer.getOutput();
+ List<LoggingEvent> events = appender.getLog();
+ for (LoggingEvent loggingEvent : events) {
+ System.out.println("Event = " + loggingEvent.getRenderedMessage());
+ }
assertTrue("adding same resource twice should not cause logging",
- appenderOutput.isEmpty());
+ events.isEmpty());
} finally {
// Make sure the appender is removed
- logCapturer.stopCapturing();
+ logger.removeAppender(appender);
}
}
+
+
@Test
public void testFinalWarningsMultiple() throws Exception {
// Make a configuration file with a repeated final property
@@ -281,19 +295,24 @@ public void testFinalWarningsMultiple() throws Exception {
InputStream in1 = new ByteArrayInputStream(bytes);
// Attach our own log appender so we can verify output
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
try {
// Add the resource - this should not produce a warning
conf.addResource(in1);
assertEquals("should see the value", "A", conf.get("prop"));
- String appenderOutput = logCapturer.getOutput();
+ List<LoggingEvent> events = appender.getLog();
+ for (LoggingEvent loggingEvent : events) {
+ System.out.println("Event = " + loggingEvent.getRenderedMessage());
+ }
assertTrue("adding same resource twice should not cause logging",
- appenderOutput.isEmpty());
+ events.isEmpty());
} finally {
// Make sure the appender is removed
- logCapturer.stopCapturing();
+ logger.removeAppender(appender);
}
}
@@ -310,20 +329,48 @@ public void testFinalWarningsMultipleOverride() throws Exception {
InputStream in1 = new ByteArrayInputStream(bytes);
// Attach our own log appender so we can verify output
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
try {
// Add the resource - this should produce a warning
conf.addResource(in1);
assertEquals("should see the value", "A", conf.get("prop"));
- String renderedMessage = logCapturer.getOutput();
- assertTrue("did not see expected string inside message " + renderedMessage,
- renderedMessage.contains(
- "an attempt to override final parameter: " + "prop; Ignoring."));
+ List<LoggingEvent> events = appender.getLog();
+ assertEquals("overriding a final parameter should cause logging", 1,
+ events.size());
+ LoggingEvent loggingEvent = events.get(0);
+ String renderedMessage = loggingEvent.getRenderedMessage();
+ assertTrue("did not see expected string inside message "+ renderedMessage,
+ renderedMessage.contains("an attempt to override final parameter: "
+ + "prop; Ignoring."));
} finally {
// Make sure the appender is removed
- logCapturer.stopCapturing();
+ logger.removeAppender(appender);
+ }
+ }
+
+ /**
+ * A simple appender for white box testing.
+ */
+ private static class TestAppender extends AppenderSkeleton {
+ private final List<LoggingEvent> log = new ArrayList<>();
+
+ @Override public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override protected void append(final LoggingEvent loggingEvent) {
+ log.add(loggingEvent);
+ }
+
+ @Override public void close() {
+ }
+
+ public List<LoggingEvent> getLog() {
+ return new ArrayList<>(log);
}
}
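The LogCapturer-based assertions in TestConfiguration are replaced by an in-memory log4j appender that records raw LoggingEvent objects. A minimal sketch of the attach/assert/detach cycle those tests follow, assuming a placeholder logger and message:

```java
import java.util.ArrayList;
import java.util.List;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;

public class CaptureEventsSketch {
  static class RecordingAppender extends AppenderSkeleton {
    final List<LoggingEvent> events = new ArrayList<>();
    @Override protected void append(LoggingEvent event) { events.add(event); }
    @Override public void close() { }
    @Override public boolean requiresLayout() { return false; }
  }

  public static void main(String[] args) {
    RecordingAppender appender = new RecordingAppender();
    Logger root = Logger.getRootLogger();
    root.addAppender(appender);  // events from child loggers propagate to root
    try {
      Logger.getLogger("org.example").warn("an attempt to override final parameter: prop");
      // getRenderedMessage() yields the formatted text the tests assert on.
      System.out.println(appender.events.get(0).getRenderedMessage());
    } finally {
      root.removeAppender(appender);  // always detach so later tests are unaffected
    }
  }
}
```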
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
index 9e4405f6d1..c016ff0378 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java
@@ -36,9 +36,8 @@
import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.log4j.Logger;
import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -48,6 +47,9 @@
public class CompressDecompressTester {
+ private static final Logger logger = Logger
+ .getLogger(CompressDecompressTester.class);
+
private final byte[] originalRawData;
private ImmutableList> pairs = ImmutableList.of();
@@ -486,12 +488,12 @@ else if (compressor.getClass().isAssignableFrom(ZlibCompressor.class)) {
return false;
}
-
+
abstract static class TesterCompressionStrategy {
- protected final Logger logger = LoggerFactory.getLogger(getClass());
+ protected final Logger logger = Logger.getLogger(getClass());
- abstract void assertCompression(String name, Compressor compressor, Decompressor decompressor,
- byte[] originalRawData) throws Exception;
+ abstract void assertCompression(String name, Compressor compressor,
+ Decompressor decompressor, byte[] originalRawData) throws Exception;
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
index 99a1ff8181..636c03a16d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
@@ -29,7 +29,6 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.log.LogLevel.CLI;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
@@ -41,11 +40,12 @@
import org.junit.Assert;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.SSLException;
@@ -67,7 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
private final String logName = TestLogLevel.class.getName();
private String clientPrincipal;
private String serverPrincipal;
- private final Logger log = LoggerFactory.getLogger(logName);
+ private final Logger log = Logger.getLogger(logName);
private final static String PRINCIPAL = "loglevel.principal";
private final static String KEYTAB = "loglevel.keytab";
private static final String PREFIX = "hadoop.http.authentication.";
@@ -76,7 +76,7 @@ public class TestLogLevel extends KerberosSecurityTestcase {
public static void setUp() throws Exception {
org.slf4j.Logger logger =
LoggerFactory.getLogger(KerberosAuthenticator.class);
- HadoopLoggerUtils.setLogLevel(logger.getName(), "DEBUG");
+ GenericTestUtils.setLogLevel(logger, Level.DEBUG);
FileUtil.fullyDelete(BASEDIR);
if (!BASEDIR.mkdirs()) {
throw new Exception("unable to create the base directory for testing");
@@ -230,7 +230,7 @@ private void testDynamicLogLevel(final String bindProtocol,
final String connectProtocol, final boolean isSpnego)
throws Exception {
testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego,
- "DEBUG");
+ Level.DEBUG.toString());
}
/**
@@ -250,8 +250,9 @@ private void testDynamicLogLevel(final String bindProtocol,
if (!LogLevel.isValidProtocol(connectProtocol)) {
throw new Exception("Invalid client protocol " + connectProtocol);
}
- String oldLevel = HadoopLoggerUtils.getEffectiveLevel(log.getName());
- Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.", "ERROR", oldLevel);
+ Level oldLevel = log.getEffectiveLevel();
+ Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.",
+ Level.ERROR, oldLevel);
// configs needed for SPNEGO at server side
if (isSpnego) {
@@ -287,7 +288,7 @@ public Void call() throws Exception {
});
server.stop();
// restore log level
- HadoopLoggerUtils.setLogLevel(log.getName(), oldLevel.toString());
+ GenericTestUtils.setLogLevel(log, oldLevel);
}
/**
@@ -321,7 +322,7 @@ private void setLevel(String protocol, String authority, String newLevel)
cli.run(setLevelArgs);
assertEquals("new level not equal to expected: ", newLevel.toUpperCase(),
- HadoopLoggerUtils.getEffectiveLevel(log.getName()));
+ log.getEffectiveLevel().toString());
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
index 8cfa14cdab..0dabe468e4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java
@@ -42,9 +42,8 @@
import static org.apache.hadoop.metrics2.lib.Interns.info;
import static org.junit.Assert.assertEquals;
+import org.apache.log4j.Logger;
import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
@@ -242,7 +241,7 @@ private static class SourceUpdater implements Runnable {
private MetricsSourceAdapter sa = null;
private ScheduledFuture<?> future = null;
private AtomicBoolean hasError = null;
- private static final Logger LOG = LoggerFactory.getLogger(SourceUpdater.class);
+ private static final Logger LOG = Logger.getLogger(SourceUpdater.class);
public SourceUpdater(MetricsSourceAdapter sourceAdapter,
AtomicBoolean err) {
@@ -264,7 +263,7 @@ public void run() {
} catch (Exception e) {
// catch all errors
hasError.set(true);
- LOG.error("Something went wrong.", e);
+ LOG.error(e.getStackTrace());
} finally {
if (hasError.get()) {
LOG.error("Hit error, stopping now");
@@ -285,7 +284,7 @@ private static class SourceReader implements Runnable {
private int cnt = 0;
private ScheduledFuture<?> future = null;
private AtomicBoolean hasError = null;
- private static final Logger LOG = LoggerFactory.getLogger(SourceReader.class);
+ private static final Logger LOG = Logger.getLogger(SourceReader.class);
public SourceReader(
TestMetricsSource source, MetricsSourceAdapter sourceAdapter,
@@ -319,7 +318,7 @@ public void run() {
} catch (Exception e) {
// catch other errors
hasError.set(true);
- LOG.error("Something went wrong.", e);
+ LOG.error(e.getStackTrace());
} finally {
if (hasError.get()) {
future.cancel(false);
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
index b1399712e6..8c1339d38d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
@@ -41,8 +41,8 @@ public class TestShellBasedUnixGroupsMapping {
private static final Logger TESTLOG =
LoggerFactory.getLogger(TestShellBasedUnixGroupsMapping.class);
- private final LogCapturer shellMappingLog =
- LogCapturer.captureLogs(
+ private final GenericTestUtils.LogCapturer shellMappingLog =
+ GenericTestUtils.LogCapturer.captureLogs(
ShellBasedUnixGroupsMapping.LOG);
private class TestGroupUserNotExist
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java
index 6a6fff89c1..a0ce721ecf 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java
@@ -19,8 +19,6 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
import org.junit.BeforeClass;
import org.junit.Test;
@@ -44,7 +42,7 @@ public class TestReloadingX509KeyManager {
private static final String BASEDIR = GenericTestUtils.getTempPath(
TestReloadingX509TrustManager.class.getSimpleName());
- private final LogCapturer reloaderLog = LogCapturer.captureLogs(
+ private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs(
FileMonitoringTimerTask.LOG);
@BeforeClass
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
index 8d2a4c78f5..63589592f3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
@@ -19,7 +19,7 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import java.util.function.Supplier;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
index 839c51c5e1..b7b86b7aa0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.service;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
@@ -29,7 +29,7 @@
import java.io.PrintWriter;
-import static org.apache.hadoop.logging.LogCapturer.captureLogs;
+import static org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.times;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 825fc706f4..e54971e491 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -25,6 +25,7 @@
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.PrintStream;
+import java.io.StringWriter;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
@@ -37,6 +38,7 @@
import java.util.Objects;
import java.util.Random;
import java.util.Set;
+import java.util.Enumeration;
import java.util.TreeSet;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
@@ -51,11 +53,17 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
import org.apache.hadoop.util.DurationInfo;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
+import org.apache.log4j.Appender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+import org.apache.log4j.WriterAppender;
import org.junit.Assert;
import org.junit.Assume;
import org.mockito.invocation.InvocationOnMock;
@@ -107,17 +115,51 @@ public abstract class GenericTestUtils {
public static final String ERROR_INVALID_ARGUMENT =
"Total wait time should be greater than check interval time";
+ @Deprecated
+ public static Logger toLog4j(org.slf4j.Logger logger) {
+ return LogManager.getLogger(logger.getName());
+ }
+
+ /**
+ * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead
+ */
+ @Deprecated
+ public static void disableLog(Logger logger) {
+ logger.setLevel(Level.OFF);
+ }
+
public static void disableLog(org.slf4j.Logger logger) {
- HadoopLoggerUtils.setLogLevel(logger.getName(), "OFF");
+ disableLog(toLog4j(logger));
+ }
+
+ public static void setLogLevel(Logger logger, Level level) {
+ logger.setLevel(level);
+ }
+
+ /**
+ * @deprecated
+ * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead
+ */
+ @Deprecated
+ public static void setLogLevel(org.slf4j.Logger logger, Level level) {
+ setLogLevel(toLog4j(logger), level);
}
public static void setLogLevel(org.slf4j.Logger logger,
org.slf4j.event.Level level) {
- HadoopLoggerUtils.setLogLevel(logger.getName(), level.toString());
+ setLogLevel(toLog4j(logger), Level.toLevel(level.toString()));
}
public static void setRootLogLevel(org.slf4j.event.Level level) {
- HadoopLoggerUtils.setLogLevel("root", level.toString());
+ setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString()));
+ }
+
+ public static void setCurrentLoggersLogLevel(org.slf4j.event.Level level) {
+ for (Enumeration<?> loggers = LogManager.getCurrentLoggers();
+ loggers.hasMoreElements();) {
+ Logger logger = (Logger) loggers.nextElement();
+ logger.setLevel(Level.toLevel(level.toString()));
+ }
}
public static org.slf4j.event.Level toLevel(String level) {
@@ -429,6 +471,47 @@ public void close() throws Exception {
}
}
+ public static class LogCapturer {
+ private StringWriter sw = new StringWriter();
+ private WriterAppender appender;
+ private Logger logger;
+
+ public static LogCapturer captureLogs(org.slf4j.Logger logger) {
+ if (logger.getName().equals("root")) {
+ return new LogCapturer(org.apache.log4j.Logger.getRootLogger());
+ }
+ return new LogCapturer(toLog4j(logger));
+ }
+
+ public static LogCapturer captureLogs(Logger logger) {
+ return new LogCapturer(logger);
+ }
+
+ private LogCapturer(Logger logger) {
+ this.logger = logger;
+ Appender defaultAppender = Logger.getRootLogger().getAppender("stdout");
+ if (defaultAppender == null) {
+ defaultAppender = Logger.getRootLogger().getAppender("console");
+ }
+ final Layout layout = (defaultAppender == null) ? new PatternLayout() :
+ defaultAppender.getLayout();
+ this.appender = new WriterAppender(layout, sw);
+ logger.addAppender(this.appender);
+ }
+
+ public String getOutput() {
+ return sw.toString();
+ }
+
+ public void stopCapturing() {
+ logger.removeAppender(appender);
+ }
+
+ public void clearOutput() {
+ sw.getBuffer().setLength(0);
+ }
+ }
+
/**
* Mockito answer helper that triggers one latch as soon as the
* method is called, then waits on another before continuing.
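The LogCapturer restored to GenericTestUtils wraps a WriterAppender around a StringWriter and reuses the layout of the root logger's "stdout" or "console" appender when one is configured. A short usage sketch, assuming slf4j is bound to log4j (slf4j-log4j12, which this patch re-adds) and an illustrative class name:

```java
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCapturerUsageSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LogCapturerUsageSketch.class);

  public static void main(String[] args) {
    GenericTestUtils.LogCapturer capturer =
        GenericTestUtils.LogCapturer.captureLogs(LOG);
    try {
      LOG.warn("something worth asserting on");
      // getOutput() returns everything the WriterAppender has written so far.
      System.out.println(capturer.getOutput().contains("something worth asserting on"));
      capturer.clearOutput();  // reset between scenarios if needed
    } finally {
      capturer.stopCapturing();  // detach the appender from the logger
    }
  }
}
```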
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
index f6f4a448e0..8489e3d24f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java
@@ -26,8 +26,6 @@
import java.util.function.Supplier;
import org.slf4j.event.Level;
-import org.apache.hadoop.logging.LogCapturer;
-
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
index 8375864e5f..98e182236c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java
@@ -22,8 +22,8 @@
import org.junit.Assert;
+import org.apache.log4j.Logger;
import org.junit.Test;
-import org.slf4j.Logger;
public class TestClassUtil {
@Test(timeout=10000)
@@ -35,6 +35,6 @@ public void testFindContainingJar() {
Assert.assertTrue("Containing jar does not exist on file system ",
jarFile.exists());
Assert.assertTrue("Incorrect jar file " + containingJar,
- jarFile.getName().matches("slf4j-api.*[.]jar"));
+ jarFile.getName().matches("log4j.*[.]jar"));
}
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
index ec26af6601..1d1ce893a9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java
@@ -28,7 +28,7 @@
import static org.junit.Assert.*;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.assertj.core.api.Assertions;
import org.junit.Before;
import org.junit.Test;
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
index fb6221f270..f43930dd07 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
@@ -28,12 +28,10 @@
import java.util.Random;
import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.util.hash.Hash;
+import org.apache.log4j.Logger;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
@@ -115,7 +113,7 @@ public void test() {
}
interface FilterTesterStrategy {
- Logger logger = LoggerFactory.getLogger(FilterTesterStrategy.class);
+ final Logger logger = Logger.getLogger(FilterTesterStrategy.class);
void assertWhat(Filter filter, int numInsertions, int hashId,
ImmutableSet falsePositives);
diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml
index 8a04c4ebcf..96588a22b9 100644
--- a/hadoop-common-project/hadoop-kms/pom.xml
+++ b/hadoop-common-project/hadoop-kms/pom.xml
@@ -53,12 +53,6 @@
      <artifactId>hadoop-auth</artifactId>
      <scope>compile</scope>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
      <groupId>org.apache.hadoop.thirdparty</groupId>
      <artifactId>hadoop-shaded-guava</artifactId>
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 97d854285f..f4c7fbe0b3 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -49,7 +49,6 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.util.Time;
import org.apache.http.client.utils.URIBuilder;
import org.junit.After;
@@ -584,8 +583,8 @@ public Void run() throws Exception {
@Test
public void testStartStopHttpPseudo() throws Exception {
// Make sure bogus errors don't get emitted.
- LogCapturer logs =
- LogCapturer.captureLogs(LoggerFactory.getLogger(
+ GenericTestUtils.LogCapturer logs =
+ GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger(
"com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator"));
try {
testStartStop(false, false);
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
index 6e12d946ff..3d0fd7de64 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
@@ -18,24 +18,23 @@
package org.apache.hadoop.crypto.key.kms.server;
import java.io.ByteArrayOutputStream;
-import java.io.File;
import java.io.FilterOutputStream;
+import java.io.InputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.nio.file.Paths;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.reflect.FieldUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-
+import org.apache.hadoop.util.ThreadUtil;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.PropertyConfigurator;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -68,23 +67,24 @@ public void setOutputStream(OutputStream out) {
public final Timeout testTimeout = new Timeout(180000L, TimeUnit.MILLISECONDS);
@Before
- public void setUp() throws IOException, URISyntaxException {
+ public void setUp() throws IOException {
originalOut = System.err;
memOut = new ByteArrayOutputStream();
filterOut = new FilterOut(memOut);
capturedOut = new PrintStream(filterOut);
System.setErr(capturedOut);
- URL url = getClass().getClassLoader().getResource("log4j-kmsaudit.properties");
- File file = Paths.get(url.toURI()).toFile();
- HadoopLoggerUtils.updateLog4jConfiguration(KMSAudit.class, file.getAbsolutePath());
+ InputStream is =
+ ThreadUtil.getResourceAsStream("log4j-kmsaudit.properties");
+ PropertyConfigurator.configure(is);
+ IOUtils.closeStream(is);
Configuration conf = new Configuration();
this.kmsAudit = new KMSAudit(conf);
}
@After
- public void cleanUp() throws Exception {
+ public void cleanUp() {
System.setErr(originalOut);
- HadoopLoggerUtils.resetConfiguration();
+ LogManager.resetConfiguration();
kmsAudit.shutdown();
}
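TestKMSAudit now hands a classpath resource stream straight to log4j's PropertyConfigurator in setUp() and undoes it with LogManager.resetConfiguration() in cleanUp(). A sketch of that load/reset cycle using a plain classloader lookup; the properties file name here is a placeholder:

```java
import java.io.IOException;
import java.io.InputStream;

import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;

public class Log4jReconfigureSketch {
  public static void main(String[] args) throws IOException {
    try (InputStream is = Log4jReconfigureSketch.class.getClassLoader()
        .getResourceAsStream("log4j-test.properties")) {
      if (is == null) {
        throw new IOException("log4j-test.properties not found on the classpath");
      }
      // configure(InputStream) replaces the live log4j configuration in place.
      PropertyConfigurator.configure(is);
    }
    try {
      LogManager.getLogger(Log4jReconfigureSketch.class).info("configured");
    } finally {
      // Drop the custom configuration so later code sees the default setup again.
      LogManager.resetConfiguration();
    }
  }
}
```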
diff --git a/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 304d1e4515..0000000000
--- a/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/hadoop-common-project/hadoop-logging/pom.xml b/hadoop-common-project/hadoop-logging/pom.xml
deleted file mode 100644
index 20af2bee76..0000000000
--- a/hadoop-common-project/hadoop-logging/pom.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-
-
-
-
-
- hadoop-project
- org.apache.hadoop
- 3.4.0-SNAPSHOT
- ../../hadoop-project
-
- 4.0.0
-
- hadoop-logging
- 3.4.0-SNAPSHOT
- jar
-
- Apache Hadoop Logging
- Logging Support for Apache Hadoop project
-
-
- UTF-8
-
-
-
-
- org.apache.hadoop
- hadoop-annotations
- provided
-
-
- org.apache.commons
- commons-lang3
-
-
- org.slf4j
- slf4j-api
-
-
- junit
- junit
- test
-
-
- org.slf4j
- slf4j-log4j12
- test
-
-
- log4j
- log4j
- provided
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-source-plugin
-
-
- prepare-package
-
- jar
-
-
-
-
- true
-
-
-
- org.apache.maven.plugins
- maven-jar-plugin
-
-
- prepare-jar
- prepare-package
-
- jar
-
-
-
- prepare-test-jar
- prepare-package
-
- test-jar
-
-
-
-
-
- org.apache.rat
- apache-rat-plugin
-
-
- dev-support/findbugsExcludeFile.xml
-
-
-
-
- com.github.spotbugs
- spotbugs-maven-plugin
-
- ${basedir}/dev-support/findbugsExcludeFile.xml
-
-
-
-
-
-
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java
deleted file mode 100644
index b0bd2e31fc..0000000000
--- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.logging;
-
-import java.io.FileInputStream;
-import java.io.Flushable;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.util.Enumeration;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.log4j.Appender;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PropertyConfigurator;
-
-/**
- * Hadoop's internal class that access log4j APIs directly.
- *
- * This class will depend on log4j directly, so callers should not use this class directly to avoid
- * introducing log4j dependencies to downstream users. Please call the methods in
- * {@link HadoopLoggerUtils}, as they will call the methods here through reflection.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-final class HadoopInternalLog4jUtils {
-
- private HadoopInternalLog4jUtils() {
- }
-
- static void setLogLevel(String loggerName, String levelName) {
- if (loggerName == null) {
- throw new IllegalArgumentException("logger name cannot be null");
- }
- Logger logger = loggerName.equalsIgnoreCase("root") ?
- LogManager.getRootLogger() :
- LogManager.getLogger(loggerName);
- Level level = Level.toLevel(levelName.toUpperCase());
- if (!level.toString().equalsIgnoreCase(levelName)) {
- throw new IllegalArgumentException("Unsupported log level " + levelName);
- }
- logger.setLevel(level);
- }
-
- static void shutdownLogManager() {
- LogManager.shutdown();
- }
-
- static String getEffectiveLevel(String loggerName) {
- Logger logger = loggerName.equalsIgnoreCase("root") ?
- LogManager.getRootLogger() :
- LogManager.getLogger(loggerName);
- return logger.getEffectiveLevel().toString();
- }
-
- static void resetConfiguration() {
- LogManager.resetConfiguration();
- }
-
- static void updateLog4jConfiguration(Class<?> targetClass, String log4jPath) throws Exception {
- Properties customProperties = new Properties();
- try (FileInputStream fs = new FileInputStream(log4jPath);
- InputStream is = targetClass.getResourceAsStream("/log4j.properties")) {
- customProperties.load(fs);
- Properties originalProperties = new Properties();
- originalProperties.load(is);
- for (Map.Entry
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>compile</scope>
+    </dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml
index b0fb88874c..f167a079a9 100644
--- a/hadoop-common-project/pom.xml
+++ b/hadoop-common-project/pom.xml
@@ -38,7 +38,6 @@
    <module>hadoop-minikdc</module>
    <module>hadoop-kms</module>
    <module>hadoop-registry</module>
-   <module>hadoop-logging</module>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 9a1226ea38..b362e001ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -86,12 +86,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <artifactId>netty-all</artifactId>
      <scope>test</scope>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
      <groupId>org.mock-server</groupId>
      <artifactId>mockserver-netty</artifactId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
index d0b8653426..1fe6dcad93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
@@ -31,7 +31,6 @@
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.util.Lists;
import org.junit.Assert;
import org.junit.Test;
@@ -62,8 +61,8 @@ public HttpURLConnection configure(HttpURLConnection conn)
public void testSSLInitFailure() throws Exception {
Configuration conf = new Configuration();
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo");
- LogCapturer logs =
- LogCapturer.captureLogs(
+ GenericTestUtils.LogCapturer logs =
+ GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(URLConnectionFactory.class));
URLConnectionFactory.newDefaultURLConnectionFactory(conf);
Assert.assertTrue("Expected log for ssl init failure not found!",
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
index b9aae62bd8..a5bf5c1c31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml
@@ -182,12 +182,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <artifactId>junit-jupiter-params</artifactId>
      <scope>test</scope>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java
index 9f74337d7a..0741f1aed4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java
@@ -40,7 +40,6 @@
import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX;
import static org.junit.Assert.assertEquals;
@@ -49,8 +48,8 @@ public class TestRouterRefreshFairnessPolicyController {
private static final Logger LOG =
LoggerFactory.getLogger(TestRouterRefreshFairnessPolicyController.class);
- private final LogCapturer controllerLog =
- LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG);
+ private final GenericTestUtils.LogCapturer controllerLog =
+ GenericTestUtils.LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG);
private StateStoreDFSCluster cluster;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java
index d4f6827135..1f5770b1dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.federation.router.FederationUtil;
import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import org.slf4j.LoggerFactory;
@@ -179,7 +179,7 @@ public void testHandlerAllocationConcurrentConfigured() {
private void verifyInstantiationError(Configuration conf, int handlerCount,
int totalDedicatedHandlers) {
- LogCapturer logs = LogCapturer
+ GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer
.captureLogs(LoggerFactory.getLogger(
StaticRouterRpcFairnessPolicyController.class));
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
index bb81eaa070..9ee9692aad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
@@ -40,6 +40,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
@@ -54,7 +55,6 @@
import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
@@ -322,7 +322,11 @@ private void verifyUrlSchemes(String scheme, Configuration conf, int httpRequest
int httpsRequests, int requestsPerService) {
// Attach our own log appender so we can verify output
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ final LogVerificationAppender appender =
+ new LogVerificationAppender();
+ final org.apache.log4j.Logger logger =
+ org.apache.log4j.Logger.getRootLogger();
+ logger.addAppender(appender);
GenericTestUtils.setRootLogLevel(Level.DEBUG);
// Setup and start the Router
@@ -343,11 +347,8 @@ private void verifyUrlSchemes(String scheme, Configuration conf, int httpRequest
heartbeatService.getNamenodeStatusReport();
}
}
- assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
- "JMX URL: https://"));
- assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
- "JMX URL: http://"));
- logCapturer.stopCapturing();
+ assertEquals(httpsRequests * 2, appender.countLinesWithMessage("JMX URL: https://"));
+ assertEquals(httpRequests * 2, appender.countLinesWithMessage("JMX URL: http://"));
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index 3db20a6e18..d3d3421619 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -135,8 +135,6 @@
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
@@ -2069,8 +2067,8 @@ private DFSClient getFileDFSClient(final String path) {
@Test
public void testMkdirsWithCallerContext() throws IOException {
- LogCapturer auditlog =
- LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+ GenericTestUtils.LogCapturer auditlog =
+ GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// Current callerContext is null
assertNull(CallerContext.getCurrent());
@@ -2096,8 +2094,8 @@ public void testMkdirsWithCallerContext() throws IOException {
@Test
public void testRealUserPropagationInCallerContext()
throws IOException, InterruptedException {
- LogCapturer auditlog =
- LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+ GenericTestUtils.LogCapturer auditlog =
+ GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// Current callerContext is null
assertNull(CallerContext.getCurrent());
@@ -2141,8 +2139,8 @@ public void testSetBalancerBandwidth() throws Exception {
@Test
public void testAddClientIpPortToCallerContext() throws IOException {
- LogCapturer auditLog =
- LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+ GenericTestUtils.LogCapturer auditLog =
+ GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// 1. ClientIp and ClientPort are not set on the client.
// Set client context.
@@ -2176,8 +2174,8 @@ public void testAddClientIpPortToCallerContext() throws IOException {
@Test
public void testAddClientIdAndCallIdToCallerContext() throws IOException {
- LogCapturer auditLog =
- LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+ GenericTestUtils.LogCapturer auditLog =
+ GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// 1. ClientId and ClientCallId are not set on the client.
// Set client context.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index caecb697d6..336ea39138 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -72,8 +72,6 @@
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
import org.junit.Test;
import org.slf4j.event.Level;
@@ -278,10 +276,12 @@ public void testProxyRenameFiles() throws IOException, InterruptedException {
@Test
public void testPreviousBlockNotNull()
throws IOException, URISyntaxException {
- final LogCapturer stateChangeLog = LogCapturer.captureLogs(NameNode.stateChangeLog);
+ final GenericTestUtils.LogCapturer stateChangeLog =
+ GenericTestUtils.LogCapturer.captureLogs(NameNode.stateChangeLog);
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.DEBUG);
- final LogCapturer nameNodeLog = LogCapturer.captureLogs(NameNode.LOG);
+ final GenericTestUtils.LogCapturer nameNodeLog =
+ GenericTestUtils.LogCapturer.captureLogs(NameNode.LOG);
GenericTestUtils.setLogLevel(NameNode.LOG, Level.DEBUG);
final FederationRPCMetrics metrics = getRouterContext().
@@ -454,8 +454,8 @@ public void testSubclusterDown() throws Exception {
@Test
public void testCallerContextWithMultiDestinations() throws IOException {
- LogCapturer auditLog =
- LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+ GenericTestUtils.LogCapturer auditLog =
+ GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// set client context
CallerContext.setCurrent(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 5c2df9acf4..8632c567aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -310,4 +310,14 @@
+
+
+
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index a8922cbcff..5f156499ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -164,12 +164,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
      <artifactId>hadoop-minikdc</artifactId>
      <scope>test</scope>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-logging</artifactId>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
      <groupId>org.mockito</groupId>
      <artifactId>mockito-core</artifactId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
index a361a280e3..21c01cebd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
@@ -31,8 +31,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.metrics2.util.MBeans;
/**
@@ -113,8 +111,11 @@ private String trimLine(String valueStr) {
.substring(0, maxLogLineLength) + "...");
}
+ // TODO: once the hadoop-logging module hides the log4j implementation details,
+ // this method can directly call the corresponding utility from hadoop-logging.
private static boolean hasAppenders(Logger logger) {
- return HadoopLoggerUtils.hasAppenders(logger.getName());
+ return org.apache.log4j.Logger.getLogger(logger.getName()).getAllAppenders()
+ .hasMoreElements();
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
index 4e8daf319a..ab301104f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
@@ -32,11 +32,11 @@
import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor;
import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -110,13 +110,13 @@ static void setHaConf(String nsId, Configuration conf) {
}
static void initLogLevels() {
- Util.setLogLevel(FSImage.class, "TRACE");
- Util.setLogLevel(FileJournalManager.class, "TRACE");
+ Util.setLogLevel(FSImage.class, Level.TRACE);
+ Util.setLogLevel(FileJournalManager.class, Level.TRACE);
- Util.setLogLevel(GSet.class, "OFF");
- Util.setLogLevel(BlockManager.class, "OFF");
- Util.setLogLevel(DatanodeManager.class, "OFF");
- Util.setLogLevel(TopMetrics.class, "OFF");
+ Util.setLogLevel(GSet.class, Level.OFF);
+ Util.setLogLevel(BlockManager.class, Level.OFF);
+ Util.setLogLevel(DatanodeManager.class, Level.OFF);
+ Util.setLogLevel(TopMetrics.class, Level.OFF);
}
static class Util {
@@ -127,10 +127,11 @@ static String memoryInfo() {
+ ", max=" + StringUtils.byteDesc(runtime.maxMemory());
}
- static void setLogLevel(Class<?> clazz, String level) {
- HadoopLoggerUtils.setLogLevel(clazz.getName(), level);
+ static void setLogLevel(Class<?> clazz, Level level) {
+ final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz);
+ logger.setLevel(level);
LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level,
- HadoopLoggerUtils.getEffectiveLevel(clazz.getName()));
+ logger.getEffectiveLevel());
}
static String toCommaSeparatedNumber(long n) {
diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
similarity index 98%
rename from hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
index 2abfffb474..276e5b0987 100644
--- a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.logging.appenders;
+package org.apache.hadoop.hdfs.util;
import java.io.IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
new file mode 100644
index 0000000000..10ef47bbbc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.spi.LoggingEvent;
+import org.apache.log4j.spi.ThrowableInformation;
+
+/**
+ * Used to verify that certain exceptions or messages are present in log output.
+ */
+public class LogVerificationAppender extends AppenderSkeleton {
+ private final List<LoggingEvent> log = new ArrayList<>();
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ protected void append(final LoggingEvent loggingEvent) {
+ log.add(loggingEvent);
+ }
+
+ @Override
+ public void close() {
+ }
+
+ public List<LoggingEvent> getLog() {
+ return new ArrayList<>(log);
+ }
+
+ public int countExceptionsWithMessage(final String text) {
+ int count = 0;
+ for (LoggingEvent e: getLog()) {
+ ThrowableInformation t = e.getThrowableInformation();
+ if (t != null) {
+ String m = t.getThrowable().getMessage();
+ if (m.contains(text)) {
+ count++;
+ }
+ }
+ }
+ return count;
+ }
+
+ public int countLinesWithMessage(final String text) {
+ int count = 0;
+ for (LoggingEvent e: getLog()) {
+ String msg = e.getRenderedMessage();
+ if (msg != null && msg.contains(text)) {
+ count++;
+ }
+ }
+ return count;
+ }
+}
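
For reference, a minimal usage sketch of the restored appender; the logger name and the logged message below are illustrative assumptions, not values taken from this patch:

// Sketch only: attach LogVerificationAppender to the root log4j logger for the
// duration of a check and count matching lines afterwards (assumes log4j 1.x).
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.log4j.Logger;

public class LogVerificationAppenderSketch {
  public static void main(String[] args) {
    final LogVerificationAppender appender = new LogVerificationAppender();
    final Logger root = Logger.getRootLogger();
    root.addAppender(appender);
    try {
      // Illustrative log line; the real tests rely on messages emitted by HDFS code.
      Logger.getLogger("example").warn("fsimage is corrupt with MD5 checksum of 1234");
      int hits = appender.countLinesWithMessage(" is corrupt with MD5 checksum of ");
      System.out.println("matched lines: " + hits);
    } finally {
      root.removeAppender(appender);
    }
  }
}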
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
index 75ad5bd862..b16f0237b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
@@ -33,8 +33,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.logging.LogCapturer;
-
+import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
public class TestDFSRename {
@@ -190,8 +189,8 @@ public void testRename2Options() throws Exception {
final DistributedFileSystem dfs = cluster.getFileSystem();
Path path = new Path("/test");
dfs.mkdirs(path);
- LogCapturer auditLog =
- LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
+ GenericTestUtils.LogCapturer auditLog =
+ GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
dfs.rename(path, new Path("/dir1"),
new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH});
String auditOut = auditLog.getOutput();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index 80424a388b..5469ebbb75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -45,9 +45,9 @@
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.IllegalReservedPathException;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Logger;
import org.junit.Test;
import static org.junit.Assert.*;
@@ -317,7 +317,9 @@ public void testUpgradeFromCorruptRel22Image() throws IOException {
"imageMD5Digest", "22222222222222222222222222222222");
// Attach our own log appender so we can verify output
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ final LogVerificationAppender appender = new LogVerificationAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
// Upgrade should now fail
try {
@@ -329,10 +331,9 @@ public void testUpgradeFromCorruptRel22Image() throws IOException {
if (!msg.contains("Failed to load FSImage file")) {
throw ioe;
}
- int md5failures = org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
+ int md5failures = appender.countExceptionsWithMessage(
" is corrupt with MD5 checksum of ");
assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
- logCapturer.stopCapturing();
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
index c792386c0e..c57ef941f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -48,7 +48,7 @@ public static void setup() throws IOException {
@Test(timeout = 60000)
public void testDfsClient() throws IOException, InterruptedException {
- LogCapturer logs = LogCapturer.captureLogs(LoggerFactory
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LoggerFactory
.getLogger(DataStreamer.class));
byte[] toWrite = new byte[PACKET_SIZE];
new Random(1).nextBytes(toWrite);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index 4299c11196..f9336fcfdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -51,7 +51,7 @@
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.junit.After;
import org.junit.Before;
@@ -168,9 +168,9 @@ private void testEncryptedRead(String algorithm, String cipherSuite,
FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
- LogCapturer logs = LogCapturer.captureLogs(
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(SaslDataTransferServer.class));
- LogCapturer logs1 = LogCapturer.captureLogs(
+ LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(DataTransferSaslUtil.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@@ -239,7 +239,7 @@ public void testClientThatDoesNotSupportEncryption() throws IOException {
Mockito.doReturn(false).when(spyClient).shouldEncryptData();
DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
- LogCapturer logs = LogCapturer.captureLogs(
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(DataNode.class));
try {
assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
@@ -457,9 +457,9 @@ private void testEncryptedWrite(int numDns) throws IOException {
fs = getFileSystem(conf);
- LogCapturer logs = LogCapturer.captureLogs(
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(SaslDataTransferServer.class));
- LogCapturer logs1 = LogCapturer.captureLogs(
+ LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(DataTransferSaslUtil.class));
try {
writeTestDataToFile(fs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index c6561287bb..3dd0b7eb99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -54,7 +54,7 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.After;
import org.junit.Assert;
import org.junit.Rule;
@@ -138,7 +138,7 @@ public void testServerSaslNoClientSasl() throws Exception {
HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");
- LogCapturer logs = LogCapturer.captureLogs(
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(DataNode.class));
try {
doTest(clientConf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
index 84b7c8f224..82b8b58769 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
@@ -30,7 +30,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.PathUtils;
import org.junit.After;
import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index 5d2a927064..d69051c8d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -56,7 +56,7 @@
import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Test;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
index 7e926a994f..d32cde8347 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java
@@ -28,7 +28,6 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.Whitebox;
import org.assertj.core.api.Assertions;
@@ -236,8 +235,8 @@ public void testCheckSafeMode8() throws Exception {
public void testCheckSafeMode9() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, 3000);
- LogCapturer logs =
- LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
+ GenericTestUtils.LogCapturer logs =
+ GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
fsn, true, conf);
String content = logs.getOutput();
@@ -248,8 +247,8 @@ public void testCheckSafeMode9() throws Exception {
public void testCheckSafeMode10(){
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, -1);
- LogCapturer logs =
- LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
+ GenericTestUtils.LogCapturer logs =
+ GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG);
BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm,
fsn, true, conf);
String content = logs.getOutput();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index 87c83836e7..ea7347f9e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -58,7 +58,7 @@
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Test;
import org.mockito.Mockito;
import org.slf4j.LoggerFactory;
@@ -575,7 +575,7 @@ public void testPendingReConstructionBlocksForSameDN() throws Exception {
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
DFSTestUtil.setNameNodeLogLevel(Level.DEBUG);
- LogCapturer logs = LogCapturer
+ LogCapturer logs = GenericTestUtils.LogCapturer
.captureLogs(LoggerFactory.getLogger("BlockStateChange"));
BlockManager bm = cluster.getNamesystem().getBlockManager();
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index c4b5f7aa6a..20163cc5fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -21,6 +21,7 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
@@ -40,7 +41,6 @@
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
-import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.ContentSummary;
@@ -49,6 +49,7 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -66,15 +67,16 @@
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
-import org.slf4j.LoggerFactory;
@RunWith(Parameterized.class)
public class TestReplicationPolicy extends BaseReplicationPolicyTest {
@@ -505,26 +507,26 @@ public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
(HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}
-
- final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
-
+
+ final LogVerificationAppender appender = new LogVerificationAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+
// try to choose NUM_OF_DATANODES which is more than actually available
// nodes.
DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length);
assertEquals(targets.length, dataNodes.length - 2);
- boolean isFound = false;
- for (String logLine : logCapturer.getOutput().split("\n")) {
- // Suppose to place replicas on each node but two data nodes are not
- // available for placing replica, so here we expect a short of 2
- if(logLine.contains("WARN") && logLine.contains("in need of 2")) {
- isFound = true;
- break;
- }
- }
- assertTrue("Could not find the block placement log specific to 2 datanodes not being "
- + "available for placing replicas", isFound);
- logCapturer.stopCapturing();
+ final List<LoggingEvent> log = appender.getLog();
+ assertNotNull(log);
+ assertFalse(log.size() == 0);
+ final LoggingEvent lastLogEntry = log.get(log.size() - 1);
+
+ assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
+ // Supposed to place replicas on each node, but two data nodes are not
+ // available for placing replicas, so here we expect a shortfall of 2
+ assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
+
resetHeartbeatForStorages();
}
@@ -1708,14 +1710,17 @@ public void testMaxLoad() {
@Test
public void testChosenFailureForStorageType() {
- final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ final LogVerificationAppender appender = new LogVerificationAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+
DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1,
dataNodes[0], new ArrayList(), false, null,
BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy(
HdfsConstants.StoragePolicy.COLD.value()), null);
assertEquals(0, targets.length);
assertNotEquals(0,
- StringUtils.countMatches(logCapturer.getOutput(), "NO_REQUIRED_STORAGE_TYPE"));
+ appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE"));
}
@Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index 13efcf783a..73201ba605 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collections;
+import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeoutException;
@@ -38,15 +39,19 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AsyncAppender;
import org.junit.After;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
+import java.util.function.Supplier;
+
/**
* Test periodic logging of DataNode metrics.
*/
@@ -123,13 +128,13 @@ public void testDisableMetricsLogger() throws IOException {
}
@Test
- @SuppressWarnings("unchecked")
public void testMetricsLoggerIsAsync() throws IOException {
startDNForTest(true);
assertNotNull(dn);
- assertTrue(Collections.list(
- org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME).getAllAppenders())
- .get(0) instanceof org.apache.log4j.AsyncAppender);
+ org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME);
+ @SuppressWarnings("unchecked")
+ List<Appender> appenders = Collections.list(logger.getAllAppenders());
+ assertTrue(appenders.get(0) instanceof AsyncAppender);
}
/**
@@ -144,15 +149,27 @@ public void testMetricsLogOutput() throws IOException, InterruptedException,
metricsProvider);
startDNForTest(true);
assertNotNull(dn);
- LogCapturer logCapturer =
- LogCapturer.captureLogs(LoggerFactory.getLogger(DataNode.METRICS_LOG_NAME));
+ final PatternMatchingAppender appender =
+ (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME)
+ .getAppender("PATTERNMATCHERAPPENDER");
+
// Ensure that the supplied pattern was matched.
- GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
- 1000, 60000);
- logCapturer.stopCapturing();
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ return appender.isMatched();
+ }
+ }, 1000, 60000);
+
dn.shutdown();
}
+ private void addAppender(org.apache.log4j.Logger logger, Appender appender) {
+ @SuppressWarnings("unchecked")
+ List<Appender> appenders = Collections.list(logger.getAllAppenders());
+ ((AsyncAppender) appenders.get(0)).addAppender(appender);
+ }
+
public interface TestFakeMetricMXBean {
int getFakeMetric();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 82d7a81574..74c70cec76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -27,6 +27,7 @@
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
@@ -76,9 +77,10 @@
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.util.Time;
+import org.apache.log4j.SimpleLayout;
+import org.apache.log4j.WriterAppender;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
@@ -412,9 +414,14 @@ public void testRetainBlockOnPersistentStorage() throws Exception {
@Test(timeout=600000)
public void testScanDirectoryStructureWarn() throws Exception {
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
//add a logger stream to check what has printed to log
+ ByteArrayOutputStream loggerStream = new ByteArrayOutputStream();
+ org.apache.log4j.Logger rootLogger =
+ org.apache.log4j.Logger.getRootLogger();
GenericTestUtils.setRootLogLevel(Level.INFO);
+ WriterAppender writerAppender =
+ new WriterAppender(new SimpleLayout(), loggerStream);
+ rootLogger.addAppender(writerAppender);
Configuration conf = getConfiguration();
cluster = new MiniDFSCluster
@@ -445,7 +452,7 @@ public void testScanDirectoryStructureWarn() throws Exception {
scan(1, 1, 0, 1, 0, 0, 0);
//ensure the warn log not appear and missing block log do appear
- String logContent = logCapturer.getOutput();
+ String logContent = new String(loggerStream.toByteArray());
String missingBlockWarn = "Deleted a metadata file" +
" for the deleted block";
String dirStructureWarnLog = " found in invalid directory." +
@@ -457,7 +464,6 @@ public void testScanDirectoryStructureWarn() throws Exception {
LOG.info("check pass");
} finally {
- logCapturer.stopCapturing();
if (scanner != null) {
scanner.shutdown();
scanner = null;
@@ -520,7 +526,7 @@ public void testRegularBlock() throws Exception {
client = cluster.getFileSystem().getClient();
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
// log trace
- LogCapturer logCapturer = LogCapturer.
+ GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.
captureLogs(NameNode.stateChangeLog);
// Add files with 5 blocks
createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 5, false);
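
As context for the WriterAppender change above, a minimal standalone sketch of the capture pattern the test now uses; the logger and the message text here are illustrative assumptions:

// Sketch only: capture root-logger output into a byte stream via a log4j 1.x
// WriterAppender with a SimpleLayout, then inspect the captured text.
import java.io.ByteArrayOutputStream;
import org.apache.log4j.Logger;
import org.apache.log4j.SimpleLayout;
import org.apache.log4j.WriterAppender;

public class WriterAppenderCaptureSketch {
  public static void main(String[] args) {
    ByteArrayOutputStream loggerStream = new ByteArrayOutputStream();
    WriterAppender writerAppender = new WriterAppender(new SimpleLayout(), loggerStream);
    Logger rootLogger = Logger.getRootLogger();
    rootLogger.addAppender(writerAppender);
    try {
      Logger.getLogger("example").warn("Deleted a metadata file for the deleted block blk_1");
      String logContent = new String(loggerStream.toByteArray());
      System.out.println(logContent.contains("Deleted a metadata file"));
    } finally {
      rootLogger.removeAppender(writerAppender);
    }
  }
}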
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
index c7fc71f537..8b1a6c0814 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import net.jcip.annotations.NotThreadSafe;
-
-import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
@@ -53,6 +51,7 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
@@ -80,10 +79,10 @@
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MetricsAsserts;
+import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -394,7 +393,9 @@ public void testFilesExceedMaxLockedMemory() throws Exception {
}
// nth file should hit a capacity exception
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ final LogVerificationAppender appender = new LogVerificationAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -402,12 +403,11 @@ public void testFilesExceedMaxLockedMemory() throws Exception {
public Boolean get() {
// check the log reported by FsDataSetCache
// in the case that cache capacity is exceeded.
- int lines = StringUtils.countMatches(logCapturer.getOutput(),
+ int lines = appender.countLinesWithMessage(
"could not reserve more bytes in the cache: ");
return lines > 0;
}
}, 500, 30000);
- logCapturer.stopCapturing();
// Also check the metrics for the failure
assertTrue("Expected more than 0 failed cache attempts",
fsd.getNumBlocksFailedToCache() > 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index 8f3ef447a6..073bb532dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -16,7 +16,6 @@
*/
package org.apache.hadoop.hdfs.server.diskbalancer;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.util.Preconditions;
import java.util.function.Supplier;
import org.apache.commons.codec.digest.DigestUtils;
@@ -322,7 +321,7 @@ public void testDiskBalancerWithFedClusterWithOneNameServiceEmpty() throws
0);
DFSTestUtil.waitReplication(fs, filePath, (short) 1);
- LogCapturer logCapturer = LogCapturer
+ GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
.captureLogs(DiskBalancer.LOG);
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
new file mode 100644
index 0000000000..f099dfae73
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.regex.Pattern;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * An appender that matches logged messages against the given
+ * regular expression.
+ */
+public class PatternMatchingAppender extends AppenderSkeleton {
+ private final Pattern pattern;
+ private volatile boolean matched;
+
+ public PatternMatchingAppender() {
+ this.pattern = Pattern.compile("^.*FakeMetric.*$");
+ this.matched = false;
+ }
+
+ public boolean isMatched() {
+ return matched;
+ }
+
+ @Override
+ protected void append(LoggingEvent event) {
+ if (pattern.matcher(event.getMessage().toString()).matches()) {
+ matched = true;
+ }
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+}
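
The metrics-logger tests above look this appender up by name ("PATTERNMATCHERAPPENDER"), which implies it is attached to the metrics log through the test log4j configuration. As an illustration only, a programmatic equivalent of that wiring might look like the following; the logger name used here is an assumption:

// Sketch only: register PatternMatchingAppender under the name the tests expect
// and verify that a matching message flips isMatched(). Programmatic wiring is an
// assumption; in the test runs the appender is expected to come from log4j config.
import org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender;
import org.apache.log4j.Logger;

public class PatternMatchingAppenderSketch {
  public static void main(String[] args) {
    Logger metricsLog = Logger.getLogger("NameNodeMetricsLog"); // assumed logger name
    PatternMatchingAppender appender = new PatternMatchingAppender();
    appender.setName("PATTERNMATCHERAPPENDER");
    metricsLog.addAppender(appender);

    metricsLog.info("FakeMetric = 42"); // matches the built-in "^.*FakeMetric.*$" pattern
    PatternMatchingAppender found =
        (PatternMatchingAppender) metricsLog.getAppender("PATTERNMATCHERAPPENDER");
    System.out.println("matched: " + found.isMatched());
  }
}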
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index 617f38a63f..c00649a9db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -37,7 +37,7 @@
import org.apache.hadoop.security.authorize.ProxyServers;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.util.Lists;
import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
index fec16c13fd..d34d6ca737 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
@@ -41,7 +41,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 953d1ef7c0..0f73669675 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -24,6 +24,7 @@
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
@@ -38,9 +39,12 @@
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AsyncAppender;
+import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.AfterClass;
@@ -103,7 +107,6 @@ public TestAuditLogs(boolean useAsyncEdits) {
UserGroupInformation userGroupInfo;
@Before
- @SuppressWarnings("unchecked")
public void setupCluster() throws Exception {
// must configure prior to instantiating the namesystem because it
// will reconfigure the logger if async is enabled
@@ -119,9 +122,11 @@ public void setupCluster() throws Exception {
util.createFiles(fs, fileName);
// make sure the appender is what it's supposed to be
- assertTrue(Collections.list(org.apache.log4j.Logger.getLogger(
- "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit").getAllAppenders())
- .get(0) instanceof org.apache.log4j.AsyncAppender);
+ Logger logger = org.apache.log4j.Logger.getLogger(
+ "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit");
+ @SuppressWarnings("unchecked")
+ List<Appender> appenders = Collections.list(logger.getAllAppenders());
+ assertTrue(appenders.get(0) instanceof AsyncAppender);
fnames = util.getFileNames(fileName);
util.waitReplication(fs, fileName, (short)3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index ccc6be33c9..d675dcda98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -82,7 +82,7 @@
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;
@@ -863,7 +863,7 @@ public void testStorageAlreadyLockedErrorMessage() throws Exception {
savedSd = sd;
}
- LogCapturer logs = LogCapturer.captureLogs(
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(Storage.class));
try {
// try to lock the storage that's already locked
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index 73aee349da..771caefd20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -49,7 +49,7 @@
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index c68ad18570..17803a0786 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -83,7 +83,6 @@
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil;
@@ -91,6 +90,9 @@
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.spi.LoggingEvent;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -1715,13 +1717,36 @@ public void testResetThreadLocalCachedOps() throws IOException {
}
}
+ class TestAppender extends AppenderSkeleton {
+ private final List<LoggingEvent> log = new ArrayList<>();
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ protected void append(final LoggingEvent loggingEvent) {
+ log.add(loggingEvent);
+ }
+
+ @Override
+ public void close() {
+ }
+
+ public List<LoggingEvent> getLog() {
+ return new ArrayList<>(log);
+ }
+ }
+
/**
*
* @throws Exception
*/
@Test
public void testReadActivelyUpdatedLog() throws Exception {
- final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ final TestAppender appender = new TestAppender();
+ LogManager.getRootLogger().addAppender(appender);
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
// Set single handler thread, so all transactions hit same thread-local ops.
@@ -1769,16 +1794,21 @@ public void testReadActivelyUpdatedLog() throws Exception {
rwf.close();
events.poll();
- for (String logLine : logCapturer.getOutput().split("\n")) {
- if (logLine != null && logLine.contains("Caught exception after reading")) {
+ String pattern = "Caught exception after reading (.*) ops";
+ Pattern r = Pattern.compile(pattern);
+ final List<LoggingEvent> log = appender.getLog();
+ for (LoggingEvent event : log) {
+ Matcher m = r.matcher(event.getRenderedMessage());
+ if (m.find()) {
fail("Should not try to read past latest syned edit log op");
}
}
+
} finally {
if (cluster != null) {
cluster.shutdown();
}
- logCapturer.stopCapturing();
+ LogManager.getRootLogger().removeAppender(appender);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
index fb484cd3ea..3b15c2db7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
@@ -26,8 +26,6 @@
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
import org.junit.Assert;
import org.junit.Test;
@@ -120,8 +118,8 @@ public void testDumpEdits() throws IOException {
op3.setTransactionId(3);
buffer.writeOp(op3, fakeLogVersion);
- LogCapturer logs =
- LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
+ GenericTestUtils.LogCapturer logs =
+ GenericTestUtils.LogCapturer.captureLogs(EditsDoubleBuffer.LOG);
try {
buffer.close();
fail();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 860e6b0b25..89193ca663 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -64,7 +64,7 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.FakeTimer;
import org.slf4j.event.Level;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
index afb049156e..f0ae181016 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.hadoop.util.FakeTimer;
import org.apache.hadoop.util.Time;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
index 08c9240f26..9c77f9d92b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java
@@ -29,8 +29,6 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -60,7 +58,7 @@ private interface Procedure {
private MiniDFSCluster cluster;
private FileSystem fs;
private UserGroupInformation userGroupInfo;
- private LogCapturer logs;
+ private GenericTestUtils.LogCapturer logs;
@Before
public void setUp() throws Exception {
@@ -78,7 +76,7 @@ public void setUp() throws Exception {
userGroupInfo = UserGroupInformation.createUserForTesting("bob",
new String[] {"hadoop"});
- logs = LogCapturer.captureLogs(FSNamesystem.LOG);
+ logs = GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.LOG);
GenericTestUtils
.setLogLevel(LoggerFactory.getLogger(FSNamesystem.class.getName()),
org.slf4j.event.Level.INFO);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 96650a4d5e..a312b03168 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -115,7 +115,7 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.util.ToolRunner;
import org.junit.After;
import org.junit.AfterClass;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
index 651d4f31c9..464fdfcd6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
@@ -18,13 +18,15 @@
package org.apache.hadoop.hdfs.server.namenode;
+import java.util.function.Supplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AsyncAppender;
import org.junit.Rule;
import org.junit.Test;
@@ -32,6 +34,7 @@
import java.io.IOException;
import java.util.Collections;
+import java.util.List;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
@@ -61,12 +64,12 @@ public void testDisableMetricsLogger() throws IOException {
}
@Test
- @SuppressWarnings("unchecked")
public void testMetricsLoggerIsAsync() throws IOException {
makeNameNode(true);
org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME);
- assertTrue(Collections.list(logger.getAllAppenders()).get(0)
- instanceof org.apache.log4j.AsyncAppender);
+ @SuppressWarnings("unchecked")
+ List<Appender> appenders = Collections.list(logger.getAllAppenders());
+ assertTrue(appenders.get(0) instanceof AsyncAppender);
}
/**
@@ -77,14 +80,20 @@ public void testMetricsLoggerIsAsync() throws IOException {
public void testMetricsLogOutput()
throws IOException, InterruptedException, TimeoutException {
TestFakeMetric metricsProvider = new TestFakeMetric();
- MBeans.register(this.getClass().getSimpleName(), "DummyMetrics", metricsProvider);
+ MBeans.register(this.getClass().getSimpleName(),
+ "DummyMetrics", metricsProvider);
makeNameNode(true); // Log metrics early and often.
- LogCapturer logCapturer =
- LogCapturer.captureLogs(LoggerFactory.getLogger(NameNode.METRICS_LOG_NAME));
+ final PatternMatchingAppender appender =
+ (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME)
+ .getAppender("PATTERNMATCHERAPPENDER");
- GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
- 1000, 60000);
- logCapturer.stopCapturing();
+ // Ensure that the supplied pattern was matched.
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ return appender.isMatched();
+ }
+ }, 1000, 60000);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
index 8750154077..073ee37781 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
@@ -28,8 +28,7 @@
import org.junit.Test;
import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
public class TestNameNodeResourcePolicy {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 7ea0b24f2b..67c8f3c18f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -68,12 +69,12 @@
import org.apache.hadoop.hdfs.util.HostsFileWriter;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -523,8 +524,10 @@ private void testImageChecksum(boolean compress) throws Exception {
// Corrupt the md5 files in all the namedirs
corruptFSImageMD5(true);
- // Attach our own log appender so we can verify output
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ // Attach our own log appender so we can verify output
+ final LogVerificationAppender appender = new LogVerificationAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
// Try to start a new cluster
LOG.info("\n===========================================\n" +
@@ -538,13 +541,10 @@ private void testImageChecksum(boolean compress) throws Exception {
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Failed to load FSImage file", ioe);
-
- int md5failures =
- org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
- " is corrupt with MD5 checksum of ");
+ int md5failures = appender.countExceptionsWithMessage(
+ " is corrupt with MD5 checksum of ");
// Two namedirs, so should have seen two failures
assertEquals(2, md5failures);
- logCapturer.stopCapturing();
}
} finally {
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
index 7376237a4c..0e83bec11f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
@@ -43,7 +43,7 @@
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -197,7 +197,7 @@ public void testSharedEditsMissingLogs() throws Exception {
// Trying to bootstrap standby should now fail since the edit
// logs aren't available in the shared dir.
- LogCapturer logs = LogCapturer.captureLogs(
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(BootstrapStandby.class));
try {
assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, forceBootstrap(1));
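
The GenericTestUtils.LogCapturer pattern kept here follows the same shape throughout this patch: attach, exercise the code under test, assert on getOutput(), and always detach. A small self-contained sketch using only the methods referenced above:

    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogCaptureExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(LogCaptureExample.class);

      public static void main(String[] args) {
        GenericTestUtils.LogCapturer logs =
            GenericTestUtils.LogCapturer.captureLogs(LOG);
        try {
          LOG.warn("edit logs are not available in the shared dir");
          if (!logs.getOutput().contains("not available")) {
            throw new AssertionError("expected warning was not captured");
          }
        } finally {
          logs.stopCapturing();  // detach the capturing appender
        }
      }
    }
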
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index 6fa979d039..168273117b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -44,7 +44,6 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.Whitebox;
import org.junit.After;
import org.junit.Before;
@@ -144,7 +143,7 @@ public void testObserverReadProxyProviderWithDT() throws Exception {
() -> (DistributedFileSystem) FileSystem.get(conf));
GenericTestUtils.setLogLevel(ObserverReadProxyProvider.LOG, Level.DEBUG);
- LogCapturer logCapture = LogCapturer
+ GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer
.captureLogs(ObserverReadProxyProvider.LOG);
try {
dfs.access(new Path("/"), FsAction.READ);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 3dbadcaaf0..513f60cb1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.common.Util;
@@ -47,12 +48,12 @@
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.ipc.StandbyException;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ThreadUtil;
+import org.apache.log4j.spi.LoggingEvent;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -298,38 +299,39 @@ public void testStandbyAndObserverState() throws Exception {
@Test(timeout = 30000)
public void testCheckpointBeforeNameNodeInitializationIsComplete()
throws Exception {
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ final LogVerificationAppender appender = new LogVerificationAppender();
+ final org.apache.log4j.Logger logger = org.apache.log4j.Logger
+ .getRootLogger();
+ logger.addAppender(appender);
- try {
- // Transition 2 to observer
- cluster.transitionToObserver(2);
- doEdits(0, 10);
- // After a rollEditLog, Standby(nn1)'s next checkpoint would be
- // ahead of observer(nn2).
- nns[0].getRpcServer().rollEditLog();
+ // Transition 2 to observer
+ cluster.transitionToObserver(2);
+ doEdits(0, 10);
+ // After a rollEditLog, Standby(nn1)'s next checkpoint would be
+ // ahead of observer(nn2).
+ nns[0].getRpcServer().rollEditLog();
- NameNode nn2 = nns[2];
- FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
+ NameNode nn2 = nns[2];
+ FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
- // After standby creating a checkpoint, it will try to push the image to
- // active and all observer, updating it's own txid to the most recent.
- HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
- HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
+ // After standby creating a checkpoint, it will try to push the image to
+ // active and all observer, updating it's own txid to the most recent.
+ HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
+ HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
- NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
- cluster.transitionToStandby(2);
+ NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
+ cluster.transitionToStandby(2);
+ logger.removeAppender(appender);
- for (String logLine : logCapturer.getOutput().split("\n")) {
- if (logLine != null && logLine.contains("PutImage failed") && logLine.contains(
- "FSImage has not been set in the NameNode.")) {
- //Logs have the expected exception.
- return;
- }
+ for (LoggingEvent event : appender.getLog()) {
+ String message = event.getRenderedMessage();
+ if (message.contains("PutImage failed") &&
+ message.contains("FSImage has not been set in the NameNode.")) {
+ //Logs have the expected exception.
+ return;
}
- fail("Expected exception not present in logs.");
- } finally {
- logCapturer.stopCapturing();
}
+ fail("Expected exception not present in logs.");
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 3741bbf015..58d72f14d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -93,7 +93,7 @@
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.ExitUtil;
import org.junit.After;
@@ -1372,7 +1372,7 @@ public void testSPSWhenFileHasExcessRedundancyBlocks() throws Exception {
Path filePath = new Path("/zeroSizeFile");
DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0);
fs.setReplication(filePath, (short) 3);
- LogCapturer logs = LogCapturer.captureLogs(
+ LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class));
fs.setStoragePolicy(filePath, "COLD");
fs.satisfyStoragePolicy(filePath);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
index b739b25f35..368deef402 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
@@ -22,6 +22,9 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+# Only to be used for testing
+log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender
+
#
# NameNode metrics logging.
# The default is to retain two namenode-metrics.log files up to 64MB each.
@@ -29,10 +32,10 @@ log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%
# TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as
# log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref
-namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA
+namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA,PATTERNMATCHERAPPENDER
log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
log4j.additivity.NameNodeMetricsLog=false
-log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender
+log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender
log4j.appender.ASYNCNNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n
log4j.appender.ASYNCNNMETRICSRFA.maxFileSize=64MB
log4j.appender.ASYNCNNMETRICSRFA.fileName=${hadoop.log.dir}/namenode-metrics.log
@@ -45,10 +48,10 @@ log4j.appender.ASYNCNNMETRICSRFA.maxBackupIndex=1
# TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as
# log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref
-datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA
+datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA,PATTERNMATCHERAPPENDER
log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
log4j.additivity.DataNodeMetricsLog=false
-log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender
+log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender
log4j.appender.ASYNCDNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n
log4j.appender.ASYNCDNMETRICSRFA.maxFileSize=64MB
log4j.appender.ASYNCDNMETRICSRFA.fileName=${hadoop.log.dir}/datanode-metrics.log
@@ -69,7 +72,7 @@ hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.logging.appenders.AsyncRFAAppender
+log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.hdfs.util.AsyncRFAAppender
log4j.appender.ASYNCAUDITAPPENDER.blocking=false
log4j.appender.ASYNCAUDITAPPENDER.bufferSize=256
log4j.appender.ASYNCAUDITAPPENDER.conversionPattern=%m%n
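
The properties above route the NameNode and DataNode metrics loggers through a test-only PatternMatchingAppender. That class is not part of this hunk, so the following is only a guess at its shape: a log4j AppenderSkeleton that remembers whether any rendered message matched a regex (the pattern literal here is a placeholder):

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.regex.Pattern;

    import org.apache.log4j.AppenderSkeleton;
    import org.apache.log4j.spi.LoggingEvent;

    public class PatternMatchingAppenderSketch extends AppenderSkeleton {
      // Placeholder pattern; the real appender would match whatever the
      // metrics-logging tests wait for.
      private final Pattern pattern = Pattern.compile(".*MetricsSystem.*");
      private final AtomicBoolean matched = new AtomicBoolean(false);

      @Override
      protected void append(LoggingEvent event) {
        if (pattern.matcher(event.getRenderedMessage()).matches()) {
          matched.set(true);
        }
      }

      @Override
      public void close() {
      }

      @Override
      public boolean requiresLayout() {
        return false;
      }

      public boolean isMatched() {
        return matched.get();
      }
    }
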
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
index 142c1ab31d..e3b3511c0c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
@@ -124,12 +124,6 @@
<artifactId>assertj-core</artifactId>
<scope>test</scope>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-logging</artifactId>
- <scope>test</scope>
- <type>test-jar</type>
- </dependency>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
index cb5f3edd05..15682eeefc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
@@ -36,10 +36,9 @@
import java.util.Iterator;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
-
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent;
import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
import org.junit.After;
@@ -108,10 +107,12 @@
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -127,6 +128,29 @@ public FileStatus getFileStatus(Path f) throws IOException {
}
}
+ private static class TestAppender extends AppenderSkeleton {
+
+ private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ protected void append(LoggingEvent arg0) {
+ logEvents.add(arg0);
+ }
+
+ private List<LoggingEvent> getLogEvents() {
+ return logEvents;
+ }
+ }
+
@BeforeClass
public static void setupBeforeClass() {
ResourceUtils.resetResourceTypes(new Configuration());
@@ -1700,10 +1724,11 @@ public void testReducerMemoryRequestOverriding() {
for (String memoryName : ImmutableList.of(
MRJobConfig.RESOURCE_TYPE_NAME_MEMORY,
MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) {
- final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class);
- LogCapturer logCapturer = LogCapturer.captureLogs(logger);
+ TestAppender testAppender = new TestAppender();
+ final Logger logger = Logger.getLogger(TaskAttemptImpl.class);
try {
TaskAttemptImpl.RESOURCE_REQUEST_CACHE.clear();
+ logger.addAppender(testAppender);
EventHandler eventHandler = mock(EventHandler.class);
Clock clock = SystemClock.getInstance();
JobConf jobConf = new JobConf();
@@ -1716,11 +1741,13 @@ public void testReducerMemoryRequestOverriding() {
getResourceInfoFromContainerRequest(taImpl, eventHandler).
getMemorySize();
assertEquals(3072, memorySize);
- assertTrue(logCapturer.getOutput().contains(
- "Configuration " + "mapreduce.reduce.resource." + memoryName + "=3Gi is "
- + "overriding the mapreduce.reduce.memory.mb=2048 configuration"));
+ assertTrue(testAppender.getLogEvents().stream()
+ .anyMatch(e -> e.getLevel() == Level.WARN && ("Configuration " +
+ "mapreduce.reduce.resource." + memoryName + "=3Gi is " +
+ "overriding the mapreduce.reduce.memory.mb=2048 configuration")
+ .equals(e.getMessage())));
} finally {
- logCapturer.stopCapturing();
+ logger.removeAppender(testAppender);
}
}
}
@@ -1782,9 +1809,10 @@ public void testReducerCpuRequestDefaultMemory() {
@Test
public void testReducerCpuRequestOverriding() {
- final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class);
- final LogCapturer logCapturer = LogCapturer.captureLogs(logger);
+ TestAppender testAppender = new TestAppender();
+ final Logger logger = Logger.getLogger(TaskAttemptImpl.class);
try {
+ logger.addAppender(testAppender);
EventHandler eventHandler = mock(EventHandler.class);
Clock clock = SystemClock.getInstance();
JobConf jobConf = new JobConf();
@@ -1797,11 +1825,13 @@ public void testReducerCpuRequestOverriding() {
getResourceInfoFromContainerRequest(taImpl, eventHandler).
getVirtualCores();
assertEquals(7, vCores);
- assertTrue(logCapturer.getOutput().contains(
- "Configuration " + "mapreduce.reduce.resource.vcores=7 is overriding the "
- + "mapreduce.reduce.cpu.vcores=9 configuration"));
+ assertTrue(testAppender.getLogEvents().stream().anyMatch(
+ e -> e.getLevel() == Level.WARN && ("Configuration " +
+ "mapreduce.reduce.resource.vcores=7 is overriding the " +
+ "mapreduce.reduce.cpu.vcores=9 configuration").equals(
+ e.getMessage())));
} finally {
- logCapturer.stopCapturing();
+ logger.removeAppender(testAppender);
}
}
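
The assertions above scan the recorded LoggingEvents with a stream, matching on level and the exact message object. If the same check is needed in more than one test, a tiny helper (hypothetical, not part of the patch) keeps it readable:

    import java.util.List;

    import org.apache.log4j.Level;
    import org.apache.log4j.spi.LoggingEvent;

    final class LogAssertions {
      private LogAssertions() {
      }

      /** True if any recorded event is a WARN carrying exactly this message. */
      static boolean hasWarn(List<LoggingEvent> events, String expectedMessage) {
        return events.stream().anyMatch(e ->
            e.getLevel() == Level.WARN && expectedMessage.equals(e.getMessage()));
      }
    }
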
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index d124c97e9d..7530428d75 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -72,12 +72,6 @@
<artifactId>assertj-core</artifactId>
<scope>test</scope>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-logging</artifactId>
- <scope>test</scope>
- <type>test-jar</type>
- </dependency>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
index 43ab170160..a0223dedd6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
@@ -23,10 +23,12 @@
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
+import java.io.Flushable;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
+import java.util.Enumeration;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
@@ -42,13 +44,16 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SecureIOUtils;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.util.ProcessTree;
import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.log4j.Appender;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
@@ -271,7 +276,42 @@ public static synchronized void syncLogsShutdown(
}
// flush & close all appenders
- HadoopLoggerUtils.shutdownLogManager();
+ LogManager.shutdown();
+ }
+
+ @SuppressWarnings("unchecked")
+ public static synchronized void syncLogs() {
+ // flush standard streams
+ //
+ System.out.flush();
+ System.err.flush();
+
+ // flush flushable appenders
+ //
+ final Logger rootLogger = Logger.getRootLogger();
+ flushAppenders(rootLogger);
+ final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
+ getCurrentLoggers();
+ while (allLoggers.hasMoreElements()) {
+ final Logger l = allLoggers.nextElement();
+ flushAppenders(l);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ private static void flushAppenders(Logger l) {
+ final Enumeration<Appender> allAppenders = l.getAllAppenders();
+ while (allAppenders.hasMoreElements()) {
+ final Appender a = allAppenders.nextElement();
+ if (a instanceof Flushable) {
+ try {
+ ((Flushable) a).flush();
+ } catch (IOException ioe) {
+ System.err.println(a + ": Failed to flush!"
+ + StringUtils.stringifyException(ioe));
+ }
+ }
+ }
}
public static ScheduledExecutorService createLogSyncer() {
@@ -296,7 +336,7 @@ public void run() {
new Runnable() {
@Override
public void run() {
- HadoopLoggerUtils.syncLogs();
+ TaskLog.syncLogs();
}
}, 0L, 5L, TimeUnit.SECONDS);
return scheduler;
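
The restored syncLogs walks the root logger plus every logger in the repository and flushes any appender that implements java.io.Flushable; createLogSyncer schedules that walk every five seconds. Driving it looks roughly like this (the shutdown ordering is illustrative):

    import java.util.concurrent.ScheduledExecutorService;

    import org.apache.hadoop.mapred.TaskLog;

    public class LogSyncerExample {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService logSyncer = TaskLog.createLogSyncer();
        try {
          // Task work happens here; stdout/stderr and Flushable log4j
          // appenders are flushed every 5 seconds by TaskLog.syncLogs().
          Thread.sleep(10_000L);
        } finally {
          logSyncer.shutdown();
          TaskLog.syncLogs();  // one final explicit flush
        }
      }
    }
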
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
index f83835f538..e91b4c1e85 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java
@@ -28,19 +28,24 @@
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.io.LineNumberReader;
+import java.io.StringReader;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.mapred.TaskReport;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.WriterAppender;
import org.mockito.stubbing.Answer;
-import org.slf4j.LoggerFactory;
/**
* Test to make sure that command line output for
@@ -68,53 +73,55 @@ public void setUp() throws IOException {
@Test
public void testJobMonitorAndPrint() throws Exception {
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(Job.class));
- try {
- JobStatus jobStatus_1 =
- new JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, State.RUNNING,
- JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url",
- true);
- JobStatus jobStatus_2 =
- new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, State.SUCCEEDED, JobPriority.HIGH,
- "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", true);
+ JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f,
+ 0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
+ "tmp-queue", "tmp-jobfile", "tmp-url", true);
+ JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f,
+ 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname",
+ "tmp-queue", "tmp-jobfile", "tmp-url", true);
- doAnswer((Answer) invocation -> TaskCompletionEvent.EMPTY_ARRAY).when(
- job).getTaskCompletionEvents(anyInt(), anyInt());
+ doAnswer((Answer) invocation ->
+ TaskCompletionEvent.EMPTY_ARRAY).when(job)
+ .getTaskCompletionEvents(anyInt(), anyInt());
- doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
- when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
+ doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
+ when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
+ // setup the logger to capture all logs
+ Layout layout =
+ Logger.getRootLogger().getAppender("stdout").getLayout();
+ ByteArrayOutputStream os = new ByteArrayOutputStream();
+ WriterAppender appender = new WriterAppender(layout, os);
+ appender.setThreshold(Level.ALL);
+ Logger qlogger = Logger.getLogger(Job.class);
+ qlogger.addAppender(appender);
- job.monitorAndPrintJob();
+ job.monitorAndPrintJob();
- boolean foundHundred = false;
- boolean foundComplete = false;
- boolean foundUber = false;
- String uberModeMatch = "uber mode : true";
- String progressMatch = "map 100% reduce 100%";
- String completionMatch = "completed successfully";
- for (String logLine : logCapturer.getOutput().split("\n")) {
- if (logLine.contains(uberModeMatch)) {
- foundUber = true;
- }
- if (logLine.contains(progressMatch)) {
- foundHundred = true;
- }
- if (logLine.contains(completionMatch)) {
- foundComplete = true;
- }
- if (foundUber && foundHundred && foundComplete) {
- break;
- }
+ qlogger.removeAppender(appender);
+ LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
+ String line;
+ boolean foundHundred = false;
+ boolean foundComplete = false;
+ boolean foundUber = false;
+ String uberModeMatch = "uber mode : true";
+ String progressMatch = "map 100% reduce 100%";
+ String completionMatch = "completed successfully";
+ while ((line = r.readLine()) != null) {
+ if (line.contains(uberModeMatch)) {
+ foundUber = true;
}
- assertTrue(foundUber);
- assertTrue(foundHundred);
- assertTrue(foundComplete);
-
- System.out.println("The output of job.toString() is : \n" + job.toString());
- assertTrue(job.toString().contains("Number of maps: 5\n"));
- assertTrue(job.toString().contains("Number of reduces: 5\n"));
- } finally {
- logCapturer.stopCapturing();
+ foundHundred = line.contains(progressMatch);
+ if (foundHundred)
+ break;
}
+ line = r.readLine();
+ foundComplete = line.contains(completionMatch);
+ assertTrue(foundUber);
+ assertTrue(foundHundred);
+ assertTrue(foundComplete);
+
+ System.out.println("The output of job.toString() is : \n" + job.toString());
+ assertTrue(job.toString().contains("Number of maps: 5\n"));
+ assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
}
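
The rewritten test captures Job's output by pointing a WriterAppender at a ByteArrayOutputStream and reading the buffer back as text. The same capture pattern in isolation, with the appender detached in a finally block:

    import java.io.ByteArrayOutputStream;

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;
    import org.apache.log4j.SimpleLayout;
    import org.apache.log4j.WriterAppender;

    public class CaptureToBuffer {
      public static void main(String[] args) {
        Logger logger = Logger.getLogger(CaptureToBuffer.class);
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        WriterAppender appender = new WriterAppender(new SimpleLayout(), buffer);
        appender.setThreshold(Level.ALL);
        logger.addAppender(appender);
        try {
          logger.info("map 100% reduce 100%");
        } finally {
          logger.removeAppender(appender);
        }
        System.out.print("captured: " + buffer.toString());
      }
    }
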
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 632e972d5a..17358a37da 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -128,12 +128,6 @@
<artifactId>assertj-core</artifactId>
<scope>test</scope>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-logging</artifactId>
- <scope>test</scope>
- <type>test-jar</type>
- </dependency>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
index 063f185d3d..0bdc721217 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
@@ -34,6 +34,7 @@
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
+import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
@@ -44,6 +45,7 @@
import java.util.Arrays;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hadoop.conf.Configuration;
@@ -53,7 +55,6 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobPriority;
import org.apache.hadoop.mapreduce.JobStatus.State;
@@ -109,6 +110,13 @@
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Level;
+import org.apache.log4j.SimpleLayout;
+import org.apache.log4j.WriterAppender;
+import org.apache.log4j.spi.LoggingEvent;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -136,6 +144,29 @@ public class TestYARNRunner {
MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS.lastIndexOf("%"));
private static final String CUSTOM_RESOURCE_NAME = "a-custom-resource";
+ private static class TestAppender extends AppenderSkeleton {
+
+ private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ protected void append(LoggingEvent arg0) {
+ logEvents.add(arg0);
+ }
+
+ private List<LoggingEvent> getLogEvents() {
+ return logEvents;
+ }
+ }
+
private YARNRunner yarnRunner;
private ResourceMgrDelegate resourceMgrDelegate;
private YarnConfiguration conf;
@@ -518,48 +549,38 @@ public void testAMAdminCommandOpts() throws Exception {
assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex);
}
}
-
@Test(timeout=20000)
public void testWarnCommandOpts() throws Exception {
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class));
- try {
- JobConf jobConf = new JobConf();
-
- jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
- "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
- jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");
-
- YARNRunner yarnRunner = new YARNRunner(jobConf);
-
- @SuppressWarnings("unused")
- ApplicationSubmissionContext submissionContext = buildSubmitContext(yarnRunner, jobConf);
-
- boolean isFoundOne = false;
- boolean isFoundTwo = false;
- for (String logLine : logCapturer.getOutput().split("\n")) {
- if (logLine == null) {
- continue;
- }
- if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in "
- + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no "
- + "longer function if hadoop native libraries are used. These values "
- + "should be set as part of the LD_LIBRARY_PATH in the app master JVM "
- + "env using yarn.app.mapreduce.am.admin.user.env config settings.")) {
- isFoundOne = true;
- }
- if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in "
- + "yarn.app.mapreduce.am.command-opts can cause programs to no longer "
- + "function if hadoop native libraries are used. These values should "
- + "be set as part of the LD_LIBRARY_PATH in the app master JVM env "
- + "using yarn.app.mapreduce.am.env config settings.")) {
- isFoundTwo = true;
- }
- }
- assertTrue(isFoundOne);
- assertTrue(isFoundTwo);
- } finally {
- logCapturer.stopCapturing();
- }
+ org.apache.log4j.Logger logger =
+ org.apache.log4j.Logger.getLogger(YARNRunner.class);
+
+ ByteArrayOutputStream bout = new ByteArrayOutputStream();
+ Layout layout = new SimpleLayout();
+ Appender appender = new WriterAppender(layout, bout);
+ logger.addAppender(appender);
+
+ JobConf jobConf = new JobConf();
+
+ jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo");
+ jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar");
+
+ YARNRunner yarnRunner = new YARNRunner(jobConf);
+
+ @SuppressWarnings("unused")
+ ApplicationSubmissionContext submissionContext =
+ buildSubmitContext(yarnRunner, jobConf);
+
+ String logMsg = bout.toString();
+ assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " +
+ "yarn.app.mapreduce.am.admin-command-opts can cause programs to no " +
+ "longer function if hadoop native libraries are used. These values " +
+ "should be set as part of the LD_LIBRARY_PATH in the app master JVM " +
+ "env using yarn.app.mapreduce.am.admin.user.env config settings."));
+ assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " +
+ "yarn.app.mapreduce.am.command-opts can cause programs to no longer " +
+ "function if hadoop native libraries are used. These values should " +
+ "be set as part of the LD_LIBRARY_PATH in the app master JVM env " +
+ "using yarn.app.mapreduce.am.env config settings."));
}
@Test(timeout=20000)
@@ -975,7 +996,10 @@ public void testAMRMemoryRequestOverriding() throws Exception {
for (String memoryName : ImmutableList.of(
MRJobConfig.RESOURCE_TYPE_NAME_MEMORY,
MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) {
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class));
+ TestAppender testAppender = new TestAppender();
+ org.apache.log4j.Logger logger =
+ org.apache.log4j.Logger.getLogger(YARNRunner.class);
+ logger.addAppender(testAppender);
try {
JobConf jobConf = new JobConf();
jobConf.set(MRJobConfig.MR_AM_RESOURCE_PREFIX + memoryName, "3 Gi");
@@ -993,17 +1017,13 @@ public void testAMRMemoryRequestOverriding() throws Exception {
long memorySize = resourceRequest.getCapability().getMemorySize();
Assert.assertEquals(3072, memorySize);
- boolean isLogFound = false;
- for (String logLine : logCapturer.getOutput().split("\n")) {
- if (logLine != null && logLine.contains("WARN") && logLine.contains(
- "Configuration " + "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is "
- + "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + "configuration")) {
- isLogFound = true;
- }
- }
- assertTrue("Log line could not be found", isLogFound);
+ assertTrue(testAppender.getLogEvents().stream().anyMatch(
+ e -> e.getLevel() == Level.WARN && ("Configuration " +
+ "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is " +
+ "overriding the yarn.app.mapreduce.am.resource.mb=2048 " +
+ "configuration").equals(e.getMessage())));
} finally {
- logCapturer.stopCapturing();
+ logger.removeAppender(testAppender);
}
}
}
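
testWarnCommandOpts above matches on the "WARN - " prefix because SimpleLayout renders each event as the level name, a dash, and the message on a single line. A small check of that format assumption:

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;
    import org.apache.log4j.SimpleLayout;
    import org.apache.log4j.spi.LoggingEvent;

    public class SimpleLayoutFormat {
      public static void main(String[] args) {
        LoggingEvent event = new LoggingEvent(Logger.class.getName(),
            Logger.getLogger(SimpleLayoutFormat.class), Level.WARN,
            "Usage of -Djava.library.path ...", null);
        // Prints: WARN - Usage of -Djava.library.path ...
        System.out.print(new SimpleLayout().format(event));
      }
    }
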
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
index cc93e5629d..338f1172b0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java
@@ -29,6 +29,8 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.HadoopTestCase;
import org.apache.hadoop.mapred.JobConf;
+import org.apache.log4j.Level;
+import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
@@ -74,10 +76,12 @@ protected void setup(Context context) throws IOException,
mapJavaOpts,
mapJavaOpts, MAP_OPTS_VAL);
}
-
- String logLevel = conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "INFO");
- assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel,
- "OFF");
+
+ Level logLevel =
+ Level.toLevel(conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL,
+ Level.INFO.toString()));
+ assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " +
+ logLevel, logLevel, Level.OFF);
}
}
@@ -104,10 +108,12 @@ protected void setup(Context context)
reduceJavaOpts,
reduceJavaOpts, REDUCE_OPTS_VAL);
}
-
- String logLevel = conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "INFO");
- assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel,
- "OFF");
+
+ Level logLevel =
+ Level.toLevel(conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL,
+ Level.INFO.toString()));
+ assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " +
+ logLevel, logLevel, Level.OFF);
}
}
@@ -121,9 +127,9 @@ private Job submitAndValidateJob(JobConf conf, int numMaps, int numReds,
conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, MAP_OPTS_VAL);
conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, REDUCE_OPTS_VAL);
}
-
- conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "OFF");
- conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "OFF");
+
+ conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.OFF.toString());
+ conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, Level.OFF.toString());
Job job = MapReduceTestUtil.createJob(conf, inDir, outDir,
numMaps, numReds);
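
TestChild now stores and reads the task log levels as org.apache.log4j.Level values instead of bare strings; Level.toLevel parses the stored name back, and the conf default completes the round trip. A minimal round trip using the same JobConf key:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.log4j.Level;

    public class LogLevelRoundTrip {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.OFF.toString());

        Level parsed = Level.toLevel(
            conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.INFO.toString()));
        System.out.println(parsed);  // OFF
      }
    }
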
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
index d1fc8c04aa..9e58d460d1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
@@ -25,7 +25,6 @@
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.Assert;
@@ -51,6 +50,8 @@
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -63,7 +64,8 @@ public class TestJHSSecurity {
@Test
public void testDelegationToken() throws Exception {
- HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+ org.apache.log4j.Logger rootLogger = LogManager.getRootLogger();
+ rootLogger.setLevel(Level.DEBUG);
final YarnConfiguration conf = new YarnConfiguration(new JobConf());
// Just a random principle
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
index f653ce7c0c..43d3abe4f8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java
@@ -99,6 +99,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager.WorkflowPriorityMapping;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -556,9 +557,9 @@ private void testJobClassloader(boolean useCustomClasses) throws IOException,
systemClasses);
}
sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB);
- sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
- sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
- sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, "ALL");
+ sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
+ sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
+ sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString());
sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class");
final SleepJob sleepJob = new SleepJob();
sleepJob.setConf(sleepConf);
@@ -855,11 +856,11 @@ public void testContainerRollingLog() throws IOException,
final SleepJob sleepJob = new SleepJob();
final JobConf sleepConf = new JobConf(mrCluster.getConfig());
- sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL");
+ sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
final long userLogKb = 4;
sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
- sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL");
+ sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
final long amLogKb = 7;
sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 3ebab5a30b..c4dfd2f9d7 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1944,18 +1944,6 @@
<artifactId>log4j-web</artifactId>
<version>${log4j2.version}</version>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-logging</artifactId>
- <version>${hadoop.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-logging</artifactId>
- <version>${hadoop.version}</version>
- <scope>test</scope>
- <type>test-jar</type>
- </dependency>
diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml
index 373b5a07df..e8c5fb78ef 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -349,12 +349,7 @@
<artifactId>hamcrest-library</artifactId>
<scope>test</scope>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-logging</artifactId>
- <scope>test</scope>
- <type>test-jar</type>
- </dependency>
+
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
index 2a124c1c99..1e7330fbd0 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java
@@ -32,7 +32,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
index 6acab8fe2a..476d7a4f01 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.Test;
import org.slf4j.Logger;
diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml
index 06c2e192f0..5194e51d81 100644
--- a/hadoop-tools/hadoop-distcp/pom.xml
+++ b/hadoop-tools/hadoop-distcp/pom.xml
@@ -81,12 +81,6 @@
<artifactId>hadoop-hdfs-client</artifactId>
<scope>provided</scope>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-logging</artifactId>
- <scope>test</scope>
- <type>test-jar</type>
- </dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
index d54fbaa86f..aa42cb968d 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
@@ -41,7 +41,6 @@
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.tools.DistCpConstants;
@@ -702,8 +701,8 @@ public void testDistCpWithIterator() throws Exception {
GenericTestUtils
.createFiles(remoteFS, source, getDepth(), getWidth(), getWidth());
- LogCapturer log =
- LogCapturer.captureLogs(SimpleCopyListing.LOG);
+ GenericTestUtils.LogCapturer log =
+ GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG);
String options = "-useiterator -update -delete" + getDefaultCLIOptions();
DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(),
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
index 661573f9d8..02fd48a071 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
@@ -27,10 +27,11 @@
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.tools.rumen.datatypes.*;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
/**
* A default parser for MapReduce job configuration properties.
@@ -82,7 +83,7 @@ public class MapReduceJobPropertiesParser implements JobPropertyParser {
// turn off the warning w.r.t deprecated mapreduce keys
static {
- HadoopLoggerUtils.setLogLevel(Configuration.class.getName(), "OFF");
+ Logger.getLogger(Configuration.class).setLevel(Level.OFF);
}
// Accepts a key if there is a corresponding key in the current mapreduce
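
The static block above exists so that Rumen can replay configurations full of pre-MRv2 keys without flooding the output with deprecation warnings; with log4j as the slf4j backend (which the pom changes in this patch arrange), turning the Configuration logger off also silences its child deprecation logger. In isolation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;

    public class SilenceDeprecationWarnings {
      public static void main(String[] args) {
        Logger.getLogger(Configuration.class).setLevel(Level.OFF);

        Configuration conf = new Configuration(false);
        // Without the setLevel call this would log a deprecation WARN
        // such as "mapred.job.name is deprecated".
        conf.set("mapred.job.name", "rumen-replay");
      }
    }
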
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index d901513f2c..81e888472d 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -576,6 +576,16 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index b41923ef9d..a15c78e426 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -63,7 +63,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
@@ -127,6 +126,7 @@
import org.apache.hadoop.yarn.util.TimelineServiceHelper;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import org.apache.log4j.LogManager;
import org.apache.hadoop.classification.VisibleForTesting;
import com.sun.jersey.api.client.ClientHandlerException;
@@ -403,7 +403,7 @@ public static void main(String[] args) {
result = appMaster.finish();
} catch (Throwable t) {
LOG.error("Error running ApplicationMaster", t);
- HadoopLoggerUtils.shutdownLogManager();
+ LogManager.shutdown();
ExitUtil.terminate(1, t);
} finally {
if (appMaster != null) {
@@ -529,7 +529,7 @@ public boolean init(String[] args) throws ParseException, IOException {
//Check whether customer log4j.properties file exists
if (fileExist(log4jPath)) {
try {
- HadoopLoggerUtils.updateLog4jConfiguration(ApplicationMaster.class,
+ Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class,
log4jPath);
} catch (Exception e) {
LOG.warn("Can not set up custom log4j properties. " + e);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index dc23682f1a..098f3981cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -52,7 +52,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -452,7 +451,7 @@ public boolean init(String[] args) throws ParseException {
if (cliParser.hasOption("log_properties")) {
String log4jPath = cliParser.getOptionValue("log_properties");
try {
- HadoopLoggerUtils.updateLog4jConfiguration(Client.class, log4jPath);
+ Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath);
} catch (Exception e) {
LOG.warn("Can not set up custom log4j properties. " + e);
}
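
Both the distributed shell Client and ApplicationMaster now reconfigure log4j from a user-supplied properties file through the restored Log4jPropertyHelper (added below): the custom properties are overlaid on the bundled log4j.properties and reapplied with PropertyConfigurator. An illustrative call site:

    import org.apache.hadoop.yarn.applications.distributedshell.Client;
    import org.apache.hadoop.yarn.applications.distributedshell.Log4jPropertyHelper;

    public class CustomLog4jSetup {
      public static void main(String[] args) throws Exception {
        // Hypothetical path; in the patch this comes from the
        // "log_properties" CLI option.
        String log4jPath = "my-log4j.properties";
        Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath);
      }
    }
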
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java
new file mode 100644
index 0000000000..0301a6880f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.applications.distributedshell;
+
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.util.Map.Entry;
+import java.util.Properties;
+
+import org.apache.log4j.LogManager;
+import org.apache.log4j.PropertyConfigurator;
+
+public class Log4jPropertyHelper {
+
+ public static void updateLog4jConfiguration(Class<?> targetClass,
+ String log4jPath) throws Exception {
+ Properties customProperties = new Properties();
+ try (
+ FileInputStream fs = new FileInputStream(log4jPath);
+ InputStream is = targetClass.getResourceAsStream("/log4j.properties")) {
+ customProperties.load(fs);
+ Properties originalProperties = new Properties();
+ originalProperties.load(is);
+ for (Entry<Object, Object> entry : customProperties.entrySet()) {
+ originalProperties.put(entry.getKey(), entry.getValue());
+ }
+ LogManager.resetConfiguration();
+ PropertyConfigurator.configure(originalProperties);
+ }
+ }
+}
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-logging</artifactId>
- <scope>test</scope>
- <type>test-jar</type>
- </dependency>
-
<groupId>org.apache.hadoop</groupId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 80cc9fc8fd..dc69eba2bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.log4j.Logger;
import java.util.Collections;
import java.util.HashMap;
@@ -41,9 +42,6 @@
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.LongBinaryOperator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
* In-memory mapping between applications/container-tags and nodes/racks.
* Required by constrained affinity/anti-affinity and cardinality placement.
@@ -52,7 +50,8 @@
@InterfaceStability.Unstable
public class AllocationTagsManager {
- private static final Logger LOG = LoggerFactory.getLogger(AllocationTagsManager.class);
+ private static final Logger LOG = Logger.getLogger(
+ AllocationTagsManager.class);
private ReentrantReadWriteLock.ReadLock readLock;
private ReentrantReadWriteLock.WriteLock writeLock;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
index 15e2d34b00..c17d4f6d7b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
-import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender;
+import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
index 12b017a921..9a85315628 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
@@ -30,7 +30,6 @@
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -232,8 +231,8 @@ public void testFederationStateStoreServiceInitialHeartbeatDelay() throws Except
conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INITIAL_DELAY, 10);
conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId.getId());
- LogCapturer logCapture =
- LogCapturer.captureLogs(FederationStateStoreService.LOG);
+ GenericTestUtils.LogCapturer logCapture =
+ GenericTestUtils.LogCapturer.captureLogs(FederationStateStoreService.LOG);
final MockRM rm = new MockRM(conf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index dc2d18d552..a1989d5c0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -28,13 +28,17 @@
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import org.apache.hadoop.logging.LogCapturer;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
@@ -79,7 +83,6 @@
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.slf4j.LoggerFactory;
public class TestSystemMetricsPublisherForV2 {
@@ -298,15 +301,42 @@ public void testPublishContainerMetrics() throws Exception {
@Test(timeout = 10000)
public void testPutEntityWhenNoCollector() throws Exception {
// Validating the logs as DrainDispatcher won't throw exception
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ class TestAppender extends AppenderSkeleton {
+ private final List<LoggingEvent> log = new ArrayList<>();
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ protected void append(final LoggingEvent loggingEvent) {
+ log.add(loggingEvent);
+ }
+
+ @Override
+ public void close() {
+ }
+
+ public List<LoggingEvent> getLog() {
+ return new ArrayList<>(log);
+ }
+ }
+
+ TestAppender appender = new TestAppender();
+ final Logger logger = Logger.getRootLogger();
+ logger.addAppender(appender);
+
try {
RMApp app = createRMApp(ApplicationId.newInstance(0, 1));
metricsPublisher.appCreated(app, app.getStartTime());
dispatcher.await();
- assertFalse("Dispatcher Crashed",
- logCapturer.getOutput().contains("Error in dispatcher thread"));
+ for (LoggingEvent event : appender.getLog()) {
+ assertFalse("Dispatcher Crashed",
+ event.getRenderedMessage().contains("Error in dispatcher thread"));
+ }
} finally {
- logCapturer.stopCapturing();
+ logger.removeAppender(appender);
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
index 07630f5461..2e7b01ed50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
@@ -18,11 +18,12 @@
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
import org.junit.Test;
import java.io.IOException;
import java.util.Map;
@@ -156,7 +157,7 @@ public void testPreemptionToBalanceUsedPlusPendingLessThanGuaranteed()
@Test
public void testPreemptionToBalanceWithVcoreResource() throws IOException {
- HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+ Logger.getRootLogger().setLevel(Level.DEBUG);
String labelsConfig = "=100:100,true"; // default partition
String nodesConfig = "n1="; // only one node
String queuesConfig =
@@ -194,7 +195,7 @@ public void testPreemptionToBalanceWithVcoreResource() throws IOException {
@Test
public void testPreemptionToBalanceWithConfiguredTimeout() throws IOException {
- HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+ Logger.getRootLogger().setLevel(Level.DEBUG);
String labelsConfig = "=100:100,true"; // default partition
String nodesConfig = "n1="; // only one node
String queuesConfig =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
index c6066fd208..024ec86f7d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -16,7 +16,6 @@
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework;
-import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicyForNodePartitions;
import org.slf4j.Logger;
@@ -111,7 +110,8 @@ private void resetResourceInformationMap() {
public void setup() {
resetResourceInformationMap();
- HadoopLoggerUtils.setLogLevel("root", "DEBUG");
+ org.apache.log4j.Logger.getRootLogger().setLevel(
+ org.apache.log4j.Level.DEBUG);
conf = new CapacitySchedulerConfiguration(new Configuration(false));
conf.setLong(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
index c5add68f8e..6aaa15f3e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java
@@ -25,10 +25,9 @@
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.log4j.Logger;
import org.junit.Before;
import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import static junit.framework.TestCase.fail;
@@ -38,7 +37,8 @@
* the invariant throws in case the invariants are not respected.
*/
public class TestMetricsInvariantChecker {
- public final static Logger LOG = LoggerFactory.getLogger(TestMetricsInvariantChecker.class);
+ public final static Logger LOG =
+ Logger.getLogger(TestMetricsInvariantChecker.class);
private MetricsSystem metricsSystem;
private MetricsInvariantChecker ic;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 68bbc94f97..38fbcd8415 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -19,7 +19,6 @@
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceInformation;
@@ -30,13 +29,19 @@
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
-import org.slf4j.LoggerFactory;
import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration.parseResourceConfigValue;
import static org.junit.Assert.assertEquals;
@@ -49,6 +54,29 @@ public class TestFairSchedulerConfiguration {
private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
+ private static class TestAppender extends AppenderSkeleton {
+
+ private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>();
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ protected void append(LoggingEvent arg0) {
+ logEvents.add(arg0);
+ }
+
+ private List<LoggingEvent> getLogEvents() {
+ return logEvents;
+ }
+ }
+
@Rule
public ExpectedException exception = ExpectedException.none();
@@ -723,7 +751,9 @@ private void initResourceTypes() {
@Test
public void testMemoryIncrementConfiguredViaMultipleProperties() {
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ TestAppender testAppender = new TestAppender();
+ Logger logger = LogManager.getRootLogger();
+ logger.addAppender(testAppender);
try {
Configuration conf = new Configuration();
conf.set("yarn.scheduler.increment-allocation-mb", "7");
@@ -733,19 +763,23 @@ public void testMemoryIncrementConfiguredViaMultipleProperties() {
FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
Resource increment = fsc.getIncrementAllocation();
Assert.assertEquals(13L, increment.getMemorySize());
- assertTrue("Warning message is not logged when specifying memory "
- + "increment via multiple properties", logCapturer.getOutput().contains("Configuration "
- + "yarn.resource-types.memory-mb.increment-allocation=13 is "
- + "overriding the yarn.scheduler.increment-allocation-mb=7 "
- + "property"));
+ assertTrue("Warning message is not logged when specifying memory " +
+ "increment via multiple properties",
+ testAppender.getLogEvents().stream().anyMatch(
+ e -> e.getLevel() == Level.WARN && ("Configuration " +
+ "yarn.resource-types.memory-mb.increment-allocation=13 is " +
+ "overriding the yarn.scheduler.increment-allocation-mb=7 " +
+ "property").equals(e.getMessage())));
} finally {
- logCapturer.stopCapturing();
+ logger.removeAppender(testAppender);
}
}
@Test
public void testCpuIncrementConfiguredViaMultipleProperties() {
- LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
+ TestAppender testAppender = new TestAppender();
+ Logger logger = LogManager.getRootLogger();
+ logger.addAppender(testAppender);
try {
Configuration conf = new Configuration();
conf.set("yarn.scheduler.increment-allocation-vcores", "7");
@@ -755,13 +789,15 @@ public void testCpuIncrementConfiguredViaMultipleProperties() {
FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
Resource increment = fsc.getIncrementAllocation();
Assert.assertEquals(13, increment.getVirtualCores());
- assertTrue("Warning message is not logged when specifying CPU vCores "
- + "increment via multiple properties", logCapturer.getOutput().contains("Configuration "
- + "yarn.resource-types.vcores.increment-allocation=13 is "
- + "overriding the yarn.scheduler.increment-allocation-vcores=7 "
- + "property"));
+ assertTrue("Warning message is not logged when specifying CPU vCores " +
+ "increment via multiple properties",
+ testAppender.getLogEvents().stream().anyMatch(
+ e -> e.getLevel() == Level.WARN && ("Configuration " +
+ "yarn.resource-types.vcores.increment-allocation=13 is " +
+ "overriding the yarn.scheduler.increment-allocation-vcores=7 " +
+ "property").equals(e.getMessage())));
} finally {
- logCapturer.stopCapturing();
+ logger.removeAppender(testAppender);
}
}
}