From 03a499821c9676da0896ca864074dfb8fbdefd6e Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Fri, 2 Jun 2023 18:07:34 -0700 Subject: [PATCH] HADOOP-18207. Introduce hadoop-logging module (#5503) Reviewed-by: Duo Zhang --- .../hadoop-auth-examples/pom.xml | 10 -- hadoop-common-project/hadoop-auth/pom.xml | 12 +- .../util/TestRandomSignerSecretProvider.java | 8 +- .../util/TestZKSignerSecretProvider.java | 8 +- hadoop-common-project/hadoop-common/pom.xml | 10 ++ .../src/main/conf/log4j.properties | 2 +- .../java/org/apache/hadoop/log/LogLevel.java | 21 ++- .../org/apache/hadoop/util/StringUtils.java | 4 +- .../apache/hadoop/conf/TestConfiguration.java | 91 +++-------- .../io/compress/CompressDecompressTester.java | 14 +- .../org/apache/hadoop/log/TestLogLevel.java | 19 ++- .../impl/TestMetricsSourceAdapter.java | 11 +- .../TestShellBasedUnixGroupsMapping.java | 6 +- .../ssl/TestReloadingX509KeyManager.java | 4 +- .../ssl/TestReloadingX509TrustManager.java | 2 +- .../hadoop/service/TestServiceOperations.java | 4 +- .../apache/hadoop/test/GenericTestUtils.java | 91 +---------- .../hadoop/test/TestGenericTestUtils.java | 2 + .../org/apache/hadoop/util/TestClassUtil.java | 4 +- .../hadoop/util/TestReflectionUtils.java | 2 +- .../util/bloom/BloomFilterCommonTester.java | 6 +- hadoop-common-project/hadoop-kms/pom.xml | 6 + .../hadoop/crypto/key/kms/server/TestKMS.java | 5 +- .../crypto/key/kms/server/TestKMSAudit.java | 24 +-- .../dev-support/findbugsExcludeFile.xml | 23 +++ hadoop-common-project/hadoop-logging/pom.xml | 125 +++++++++++++++ .../logging/HadoopInternalLog4jUtils.java | 145 ++++++++++++++++++ .../hadoop/logging/HadoopLoggerUtils.java | 142 +++++++++++++++++ .../logging/appenders}/AsyncRFAAppender.java | 2 +- .../Log4jWarningErrorMetricsAppender.java | 49 +++--- .../apache/hadoop/logging/LogCapturer.java | 65 ++++++++ .../hadoop/logging/test/TestSyncLogs.java | 37 +++++ .../src/test/resources/log4j.properties | 18 +++ hadoop-common-project/hadoop-minikdc/pom.xml | 5 - hadoop-common-project/pom.xml | 1 + .../hadoop-hdfs-client/pom.xml | 6 + .../hdfs/web/TestURLConnectionFactory.java | 5 +- hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml | 6 + ...RouterRefreshFairnessPolicyController.java | 5 +- ...TestRouterRpcFairnessPolicyController.java | 4 +- .../router/TestRouterNamenodeMonitoring.java | 15 +- .../federation/router/TestRouterRpc.java | 18 ++- .../router/TestRouterRpcMultiDestination.java | 12 +- .../dev-support/findbugsExcludeFile.xml | 10 -- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 + .../hdfs/server/common/MetricsLoggerTask.java | 7 +- .../server/namenode/FsImageValidation.java | 21 ++- .../hadoop/hdfs/LogVerificationAppender.java | 75 --------- .../org/apache/hadoop/hdfs/TestDFSRename.java | 7 +- .../hadoop/hdfs/TestDFSUpgradeFromImage.java | 9 +- .../apache/hadoop/hdfs/TestDataStream.java | 4 +- .../hadoop/hdfs/TestEncryptedTransfer.java | 12 +- .../sasl/TestSaslDataTransfer.java | 4 +- .../server/TestJournaledEditsCache.java | 2 +- .../balancer/TestBalancerWithHANameNodes.java | 2 +- .../TestBlockManagerSafeMode.java | 9 +- .../TestPendingReconstruction.java | 4 +- .../TestReplicationPolicy.java | 45 +++--- .../datanode/TestDataNodeMetricsLogger.java | 37 ++--- .../server/datanode/TestDirectoryScanner.java | 16 +- .../fsdataset/impl/TestFsDatasetCache.java | 12 +- .../server/diskbalancer/TestDiskBalancer.java | 3 +- .../namenode/PatternMatchingAppender.java | 58 ------- .../hdfs/server/namenode/TestAuditLogger.java | 2 +- .../namenode/TestAuditLoggerWithCommands.java | 2 +- 
.../hdfs/server/namenode/TestAuditLogs.java | 15 +- .../hdfs/server/namenode/TestCheckpoint.java | 4 +- .../namenode/TestDiskspaceQuotaUpdate.java | 2 +- .../hdfs/server/namenode/TestEditLog.java | 40 +---- .../namenode/TestEditsDoubleBuffer.java | 6 +- .../server/namenode/TestFSEditLogLoader.java | 2 +- .../server/namenode/TestFSNamesystemLock.java | 2 +- .../namenode/TestFSNamesystemLockReport.java | 6 +- .../hadoop/hdfs/server/namenode/TestFsck.java | 2 +- .../namenode/TestNameNodeMetricsLogger.java | 29 ++-- .../namenode/TestNameNodeResourcePolicy.java | 3 +- .../hdfs/server/namenode/TestStartup.java | 16 +- .../namenode/ha/TestBootstrapStandby.java | 4 +- .../ha/TestDelegationTokensWithHA.java | 3 +- .../namenode/ha/TestStandbyCheckpoints.java | 54 ++++--- .../TestExternalStoragePolicySatisfier.java | 4 +- .../src/test/resources/log4j.properties | 13 +- .../hadoop-mapreduce-client-app/pom.xml | 6 + .../v2/app/job/impl/TestTaskAttempt.java | 62 ++------ .../hadoop-mapreduce-client-core/pom.xml | 6 + .../org/apache/hadoop/mapred/TaskLog.java | 46 +----- .../mapreduce/TestJobMonitorAndPrint.java | 95 ++++++------ .../hadoop-mapreduce-client-jobclient/pom.xml | 6 + .../apache/hadoop/mapred/TestYARNRunner.java | 124 +++++++-------- .../apache/hadoop/mapreduce/TestChild.java | 28 ++-- .../mapreduce/security/TestJHSSecurity.java | 6 +- .../hadoop/mapreduce/v2/TestMRJobs.java | 11 +- hadoop-project/pom.xml | 12 ++ hadoop-tools/hadoop-azure/pom.xml | 7 +- .../ITestFileSystemOperationsWithThreads.java | 2 +- ...estNativeAzureFileSystemClientLogging.java | 2 +- hadoop-tools/hadoop-distcp/pom.xml | 6 + .../contract/AbstractContractDistCpTest.java | 5 +- .../util/MapReduceJobPropertiesParser.java | 5 +- .../dev-support/findbugs-exclude.xml | 10 -- .../distributedshell/ApplicationMaster.java | 6 +- .../applications/distributedshell/Client.java | 3 +- .../distributedshell/Log4jPropertyHelper.java | 48 ------ .../client/TestSecureApiServiceClient.java | 6 +- .../yarn/service/component/TestComponent.java | 3 - .../TestAggregatedLogDeletionService.java | 4 +- .../TestLog4jWarningErrorMetricsAppender.java | 1 + .../webapp/NavBlock.java | 2 +- .../server/webapp/ErrorsAndWarningsBlock.java | 2 +- .../server/nodemanager/webapp/NavBlock.java | 2 +- .../TestContainersMonitorResourceChange.java | 6 +- .../pom.xml | 7 + .../constraint/AllocationTagsManager.java | 7 +- .../resourcemanager/webapp/NavBlock.java | 2 +- .../TestFederationRMStateStoreService.java | 5 +- .../TestSystemMetricsPublisherForV2.java | 42 +---- ...acityPreemptionPolicyPreemptToBalance.java | 7 +- ...CapacityPreemptionPolicyMockFramework.java | 4 +- .../TestMetricsInvariantChecker.java | 6 +- .../fair/TestFairSchedulerConfiguration.java | 68 ++------ 120 files changed, 1165 insertions(+), 1106 deletions(-) create mode 100644 hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml create mode 100644 hadoop-common-project/hadoop-logging/pom.xml create mode 100644 hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java create mode 100644 hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java rename {hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util => hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders}/AsyncRFAAppender.java (98%) rename {hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util => 
hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders}/Log4jWarningErrorMetricsAppender.java (93%) create mode 100644 hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java create mode 100644 hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java create mode 100644 hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java delete mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java diff --git a/hadoop-common-project/hadoop-auth-examples/pom.xml b/hadoop-common-project/hadoop-auth-examples/pom.xml index 4deda43279..9a060f7502 100644 --- a/hadoop-common-project/hadoop-auth-examples/pom.xml +++ b/hadoop-common-project/hadoop-auth-examples/pom.xml @@ -46,16 +46,6 @@ slf4j-api compile - - log4j - log4j - runtime - - - org.slf4j - slf4j-log4j12 - runtime - diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 433a615c60..4cdd6006a4 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -82,14 +82,14 @@ compile - log4j - log4j - runtime + org.apache.hadoop + hadoop-logging - org.slf4j - slf4j-log4j12 - runtime + org.apache.hadoop + hadoop-logging + test + test-jar org.apache.hadoop diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java index f9c922caac..e18982d75f 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java @@ -15,8 +15,7 @@ import java.util.Random; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.junit.Assert; import org.junit.Test; @@ -30,9 +29,8 @@ public class TestRandomSignerSecretProvider { private final int timeout = 500; private final long rolloverFrequency = timeout / 2; - { - LogManager.getLogger( - RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG); + static { + HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG"); } @Test diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java index 628342e40d..d81d1eb335 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java @@ -19,8 +19,7 @@ import javax.servlet.ServletContext; import org.apache.curator.test.TestingServer; 
-import org.apache.log4j.Level; -import org.apache.log4j.LogManager; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -39,9 +38,8 @@ public class TestZKSignerSecretProvider { private final int timeout = 100; private final long rolloverFrequency = timeout / 2; - { - LogManager.getLogger( - RolloverSignerSecretProvider.LOG.getName()).setLevel(Level.DEBUG); + static { + HadoopLoggerUtils.setLogLevel(RolloverSignerSecretProvider.LOG.getName(), "DEBUG"); } @Before diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 6c6d3ec5bf..58006c011d 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -411,6 +411,16 @@ lz4-java provided + + org.apache.hadoop + hadoop-logging + + + org.apache.hadoop + hadoop-logging + test + test-jar + diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index b4eec1fe2c..086665151e 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -299,7 +299,7 @@ log4j.appender.NMAUDIT.MaxBackupIndex=${nm.audit.log.maxbackupindex} yarn.ewma.cleanupInterval=300 yarn.ewma.messageAgeLimitSeconds=86400 yarn.ewma.maxUniqueMessages=250 -log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender +log4j.appender.EWMA=org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval} log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds} log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java index 32879597a9..cf090eea00 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java @@ -42,6 +42,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import org.apache.hadoop.security.ssl.SSLFactory; @@ -50,8 +51,6 @@ import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; /** * Change log level in runtime. @@ -349,7 +348,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response } if (GenericsUtil.isLog4jLogger(logName)) { - process(Logger.getLogger(logName), level, out); + process(logName, level, out); } else { out.println("Sorry, setting log level is only supported for log4j loggers.
"); } @@ -368,19 +367,17 @@ public void doGet(HttpServletRequest request, HttpServletResponse response + "" + ""; - private static void process(Logger log, String level, - PrintWriter out) throws IOException { + private static void process(String log, String level, PrintWriter out) { if (level != null) { - if (!level.equalsIgnoreCase(Level.toLevel(level) - .toString())) { - out.println(MARKER + "Bad Level : " + level + "
"); - } else { - log.setLevel(Level.toLevel(level)); + try { + HadoopLoggerUtils.setLogLevel(log, level); out.println(MARKER + "Setting Level to " + level + " ...
"); + } catch (IllegalArgumentException e) { + out.println(MARKER + "Bad Level : " + level + "
"); } } - out.println(MARKER - + "Effective Level: " + log.getEffectiveLevel() + "
"); + out.println(MARKER + "Effective Level: " + HadoopLoggerUtils.getEffectiveLevel(log) + + "
"); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index 3debd36da7..3c13feac3e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -40,8 +40,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.net.NetUtils; -import org.apache.log4j.LogManager; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; @@ -761,7 +761,7 @@ public static void startupShutdownMessage(Class clazz, String[] args, public void run() { log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ "Shutting down " + classname + " at " + hostname})); - LogManager.shutdown(); + HadoopLoggerUtils.shutdownLogManager(); } }, SHUTDOWN_HOOK_PRIORITY); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index b3487ef309..913826f3ee 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -68,6 +68,7 @@ import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.alias.CredentialProvider; import org.apache.hadoop.security.alias.CredentialProviderFactory; @@ -76,10 +77,8 @@ import static org.apache.hadoop.util.PlatformName.IBM_JAVA; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.mockito.Mockito; +import org.slf4j.LoggerFactory; public class TestConfiguration { @@ -220,9 +219,7 @@ public void testFinalWarnings() throws Exception { InputStream in2 = new ByteArrayInputStream(bytes2); // Attach our own log appender so we can verify output - TestAppender appender = new TestAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { // Add the 2 different resources - this should generate a warning @@ -230,17 +227,13 @@ public void testFinalWarnings() throws Exception { conf.addResource(in2); assertEquals("should see the first value", "A", conf.get("prop")); - List events = appender.getLog(); - assertEquals("overriding a final parameter should cause logging", 1, - events.size()); - LoggingEvent loggingEvent = events.get(0); - String renderedMessage = loggingEvent.getRenderedMessage(); - assertTrue("did not see expected string inside message "+ renderedMessage, - renderedMessage.contains("an attempt to override final parameter: " - + "prop; Ignoring.")); + String renderedMessage = logCapturer.getOutput(); + assertTrue("did not see expected string inside message " + renderedMessage, + renderedMessage.contains( + "an attempt to override final parameter: " + "prop; Ignoring.")); } finally { // Make sure the appender is removed - 
logger.removeAppender(appender); + logCapturer.stopCapturing(); } } @@ -258,9 +251,7 @@ public void testNoFinalWarnings() throws Exception { InputStream in2 = new ByteArrayInputStream(bytes); // Attach our own log appender so we can verify output - TestAppender appender = new TestAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { // Add the resource twice from a stream - should not generate warnings @@ -268,20 +259,15 @@ public void testNoFinalWarnings() throws Exception { conf.addResource(in2); assertEquals("A", conf.get("prop")); - List events = appender.getLog(); - for (LoggingEvent loggingEvent : events) { - System.out.println("Event = " + loggingEvent.getRenderedMessage()); - } + String appenderOutput = logCapturer.getOutput(); assertTrue("adding same resource twice should not cause logging", - events.isEmpty()); + appenderOutput.isEmpty()); } finally { // Make sure the appender is removed - logger.removeAppender(appender); + logCapturer.stopCapturing(); } } - - @Test public void testFinalWarningsMultiple() throws Exception { // Make a configuration file with a repeated final property @@ -295,24 +281,19 @@ public void testFinalWarningsMultiple() throws Exception { InputStream in1 = new ByteArrayInputStream(bytes); // Attach our own log appender so we can verify output - TestAppender appender = new TestAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { // Add the resource - this should not produce a warning conf.addResource(in1); assertEquals("should see the value", "A", conf.get("prop")); - List events = appender.getLog(); - for (LoggingEvent loggingEvent : events) { - System.out.println("Event = " + loggingEvent.getRenderedMessage()); - } + String appenderOutput = logCapturer.getOutput(); assertTrue("adding same resource twice should not cause logging", - events.isEmpty()); + appenderOutput.isEmpty()); } finally { // Make sure the appender is removed - logger.removeAppender(appender); + logCapturer.stopCapturing(); } } @@ -329,48 +310,20 @@ public void testFinalWarningsMultipleOverride() throws Exception { InputStream in1 = new ByteArrayInputStream(bytes); // Attach our own log appender so we can verify output - TestAppender appender = new TestAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { // Add the resource - this should produce a warning conf.addResource(in1); assertEquals("should see the value", "A", conf.get("prop")); - List events = appender.getLog(); - assertEquals("overriding a final parameter should cause logging", 1, - events.size()); - LoggingEvent loggingEvent = events.get(0); - String renderedMessage = loggingEvent.getRenderedMessage(); - assertTrue("did not see expected string inside message "+ renderedMessage, - renderedMessage.contains("an attempt to override final parameter: " - + "prop; Ignoring.")); + String renderedMessage = logCapturer.getOutput(); + assertTrue("did not see expected string inside message " + renderedMessage, + renderedMessage.contains( + "an attempt to override final parameter: " + "prop; Ignoring.")); } finally { // Make sure the appender is removed - logger.removeAppender(appender); - } - } - - /** - * A simple appender for white box testing. 
- */ - private static class TestAppender extends AppenderSkeleton { - private final List log = new ArrayList<>(); - - @Override public boolean requiresLayout() { - return false; - } - - @Override protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override public void close() { - } - - public List getLog() { - return new ArrayList<>(log); + logCapturer.stopCapturing(); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java index c016ff0378..9e4405f6d1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/CompressDecompressTester.java @@ -36,8 +36,9 @@ import org.apache.hadoop.io.compress.zlib.ZlibCompressor; import org.apache.hadoop.io.compress.zlib.ZlibFactory; import org.apache.hadoop.util.NativeCodeLoader; -import org.apache.log4j.Logger; import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @@ -47,9 +48,6 @@ public class CompressDecompressTester { - private static final Logger logger = Logger - .getLogger(CompressDecompressTester.class); - private final byte[] originalRawData; private ImmutableList> pairs = ImmutableList.of(); @@ -488,12 +486,12 @@ else if (compressor.getClass().isAssignableFrom(ZlibCompressor.class)) { return false; } - + abstract static class TesterCompressionStrategy { - protected final Logger logger = Logger.getLogger(getClass()); + protected final Logger logger = LoggerFactory.getLogger(getClass()); - abstract void assertCompression(String name, Compressor compressor, - Decompressor decompressor, byte[] originalRawData) throws Exception; + abstract void assertCompression(String name, Compressor compressor, Decompressor decompressor, + byte[] originalRawData) throws Exception; } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java index 636c03a16d..99a1ff8181 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.log.LogLevel.CLI; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.minikdc.KerberosSecurityTestcase; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AuthenticationFilterInitializer; @@ -40,12 +41,11 @@ import org.junit.Assert; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.net.ssl.SSLException; @@ -67,7 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase { private final String logName = TestLogLevel.class.getName(); private String clientPrincipal; private String serverPrincipal; - private final Logger 
log = Logger.getLogger(logName); + private final Logger log = LoggerFactory.getLogger(logName); private final static String PRINCIPAL = "loglevel.principal"; private final static String KEYTAB = "loglevel.keytab"; private static final String PREFIX = "hadoop.http.authentication."; @@ -76,7 +76,7 @@ public class TestLogLevel extends KerberosSecurityTestcase { public static void setUp() throws Exception { org.slf4j.Logger logger = LoggerFactory.getLogger(KerberosAuthenticator.class); - GenericTestUtils.setLogLevel(logger, Level.DEBUG); + HadoopLoggerUtils.setLogLevel(logger.getName(), "DEBUG"); FileUtil.fullyDelete(BASEDIR); if (!BASEDIR.mkdirs()) { throw new Exception("unable to create the base directory for testing"); @@ -230,7 +230,7 @@ private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, final boolean isSpnego) throws Exception { testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, - Level.DEBUG.toString()); + "DEBUG"); } /** @@ -250,9 +250,8 @@ private void testDynamicLogLevel(final String bindProtocol, if (!LogLevel.isValidProtocol(connectProtocol)) { throw new Exception("Invalid client protocol " + connectProtocol); } - Level oldLevel = log.getEffectiveLevel(); - Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.", - Level.ERROR, oldLevel); + String oldLevel = HadoopLoggerUtils.getEffectiveLevel(log.getName()); + Assert.assertNotEquals("Get default Log Level which shouldn't be ERROR.", "ERROR", oldLevel); // configs needed for SPNEGO at server side if (isSpnego) { @@ -288,7 +287,7 @@ public Void call() throws Exception { }); server.stop(); // restore log level - GenericTestUtils.setLogLevel(log, oldLevel); + HadoopLoggerUtils.setLogLevel(log.getName(), oldLevel.toString()); } /** @@ -322,7 +321,7 @@ private void setLevel(String protocol, String authority, String newLevel) cli.run(setLevelArgs); assertEquals("new level not equal to expected: ", newLevel.toUpperCase(), - log.getEffectiveLevel().toString()); + HadoopLoggerUtils.getEffectiveLevel(log.getName())); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java index 0dabe468e4..8cfa14cdab 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSourceAdapter.java @@ -42,8 +42,9 @@ import static org.apache.hadoop.metrics2.lib.Interns.info; import static org.junit.Assert.assertEquals; -import org.apache.log4j.Logger; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import javax.management.MBeanAttributeInfo; import javax.management.MBeanInfo; @@ -241,7 +242,7 @@ private static class SourceUpdater implements Runnable { private MetricsSourceAdapter sa = null; private ScheduledFuture future = null; private AtomicBoolean hasError = null; - private static final Logger LOG = Logger.getLogger(SourceUpdater.class); + private static final Logger LOG = LoggerFactory.getLogger(SourceUpdater.class); public SourceUpdater(MetricsSourceAdapter sourceAdapter, AtomicBoolean err) { @@ -263,7 +264,7 @@ public void run() { } catch (Exception e) { // catch all errors hasError.set(true); - LOG.error(e.getStackTrace()); + LOG.error("Something went wrong.", e); } finally { if (hasError.get()) { LOG.error("Hit 
error, stopping now"); @@ -284,7 +285,7 @@ private static class SourceReader implements Runnable { private int cnt = 0; private ScheduledFuture future = null; private AtomicBoolean hasError = null; - private static final Logger LOG = Logger.getLogger(SourceReader.class); + private static final Logger LOG = LoggerFactory.getLogger(SourceReader.class); public SourceReader( TestMetricsSource source, MetricsSourceAdapter sourceAdapter, @@ -318,7 +319,7 @@ public void run() { } catch (Exception e) { // catch other errors hasError.set(true); - LOG.error(e.getStackTrace()); + LOG.error("Something went wrong.", e); } finally { if (hasError.get()) { future.cancel(false); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java index 8c1339d38d..b1399712e6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java @@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell.ExitCodeException; @@ -41,8 +41,8 @@ public class TestShellBasedUnixGroupsMapping { private static final Logger TESTLOG = LoggerFactory.getLogger(TestShellBasedUnixGroupsMapping.class); - private final GenericTestUtils.LogCapturer shellMappingLog = - GenericTestUtils.LogCapturer.captureLogs( + private final LogCapturer shellMappingLog = + LogCapturer.captureLogs( ShellBasedUnixGroupsMapping.LOG); private class TestGroupUserNotExist diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java index a0ce721ecf..6a6fff89c1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java @@ -19,6 +19,8 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.BeforeClass; import org.junit.Test; @@ -42,7 +44,7 @@ public class TestReloadingX509KeyManager { private static final String BASEDIR = GenericTestUtils.getTempPath( TestReloadingX509TrustManager.class.getSimpleName()); - private final GenericTestUtils.LogCapturer reloaderLog = GenericTestUtils.LogCapturer.captureLogs( + private final LogCapturer reloaderLog = LogCapturer.captureLogs( FileMonitoringTimerTask.LOG); @BeforeClass diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java index 63589592f3..8d2a4c78f5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java @@ -19,7 +19,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import java.util.function.Supplier; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java index b7b86b7aa0..839c51c5e1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java @@ -18,7 +18,7 @@ package org.apache.hadoop.service; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -29,7 +29,7 @@ import java.io.PrintWriter; -import static org.apache.hadoop.test.GenericTestUtils.LogCapturer.captureLogs; +import static org.apache.hadoop.logging.LogCapturer.captureLogs; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.times; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index e54971e491..825fc706f4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -25,7 +25,6 @@ import java.io.InputStreamReader; import java.io.OutputStream; import java.io.PrintStream; -import java.io.StringWriter; import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; @@ -38,7 +37,6 @@ import java.util.Objects; import java.util.Random; import java.util.Set; -import java.util.Enumeration; import java.util.TreeSet; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; @@ -53,17 +51,11 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.log4j.Appender; -import org.apache.log4j.Layout; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.WriterAppender; import org.junit.Assert; import org.junit.Assume; import org.mockito.invocation.InvocationOnMock; @@ -115,51 +107,17 @@ public abstract class GenericTestUtils { public static final String ERROR_INVALID_ARGUMENT = "Total wait time should be greater than check interval time"; - @Deprecated - public static Logger toLog4j(org.slf4j.Logger logger) { - return LogManager.getLogger(logger.getName()); - } - - /** - * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead - */ - @Deprecated - public static void disableLog(Logger logger) { - 
logger.setLevel(Level.OFF); - } - public static void disableLog(org.slf4j.Logger logger) { - disableLog(toLog4j(logger)); - } - - public static void setLogLevel(Logger logger, Level level) { - logger.setLevel(level); - } - - /** - * @deprecated - * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead - */ - @Deprecated - public static void setLogLevel(org.slf4j.Logger logger, Level level) { - setLogLevel(toLog4j(logger), level); + HadoopLoggerUtils.setLogLevel(logger.getName(), "OFF"); } public static void setLogLevel(org.slf4j.Logger logger, org.slf4j.event.Level level) { - setLogLevel(toLog4j(logger), Level.toLevel(level.toString())); + HadoopLoggerUtils.setLogLevel(logger.getName(), level.toString()); } public static void setRootLogLevel(org.slf4j.event.Level level) { - setLogLevel(LogManager.getRootLogger(), Level.toLevel(level.toString())); - } - - public static void setCurrentLoggersLogLevel(org.slf4j.event.Level level) { - for (Enumeration loggers = LogManager.getCurrentLoggers(); - loggers.hasMoreElements();) { - Logger logger = (Logger) loggers.nextElement(); - logger.setLevel(Level.toLevel(level.toString())); - } + HadoopLoggerUtils.setLogLevel("root", level.toString()); } public static org.slf4j.event.Level toLevel(String level) { @@ -471,47 +429,6 @@ public void close() throws Exception { } } - public static class LogCapturer { - private StringWriter sw = new StringWriter(); - private WriterAppender appender; - private Logger logger; - - public static LogCapturer captureLogs(org.slf4j.Logger logger) { - if (logger.getName().equals("root")) { - return new LogCapturer(org.apache.log4j.Logger.getRootLogger()); - } - return new LogCapturer(toLog4j(logger)); - } - - public static LogCapturer captureLogs(Logger logger) { - return new LogCapturer(logger); - } - - private LogCapturer(Logger logger) { - this.logger = logger; - Appender defaultAppender = Logger.getRootLogger().getAppender("stdout"); - if (defaultAppender == null) { - defaultAppender = Logger.getRootLogger().getAppender("console"); - } - final Layout layout = (defaultAppender == null) ? new PatternLayout() : - defaultAppender.getLayout(); - this.appender = new WriterAppender(layout, sw); - logger.addAppender(this.appender); - } - - public String getOutput() { - return sw.toString(); - } - - public void stopCapturing() { - logger.removeAppender(appender); - } - - public void clearOutput() { - sw.getBuffer().setLength(0); - } - } - /** * Mockito answer helper that triggers one latch as soon as the * method is called, then waits on another before continuing. 
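Note for reviewers: the GenericTestUtils hunk above deletes the old log4j-backed LogCapturer; its replacement lives in the hadoop-logging test-jar as org.apache.hadoop.logging.LogCapturer with the same surface (captureLogs/getOutput/clearOutput/stopCapturing, as exercised by the converted tests in this patch). A minimal sketch of the replacement in use, assuming only slf4j on the caller's side; the class name and assertion here are illustrative, not taken from the patch:

import org.apache.hadoop.logging.LogCapturer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CaptureSketch {
  private static final Logger LOG = LoggerFactory.getLogger(CaptureSketch.class);

  public static void main(String[] args) {
    // Attaches a writer-backed appender to this logger; passing the logger
    // named "root" captures everything, as TestConfiguration now does.
    LogCapturer capturer = LogCapturer.captureLogs(LOG);
    try {
      LOG.warn("an attempt to override final parameter: prop; Ignoring.");
      // Assertions run against rendered text instead of LoggingEvent lists.
      if (!capturer.getOutput().contains("final parameter")) {
        throw new AssertionError("expected warning was not captured");
      }
      capturer.clearOutput();
    } finally {
      capturer.stopCapturing(); // detach the appender again
    }
  }
}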
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java index 8489e3d24f..f6f4a448e0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestGenericTestUtils.java @@ -26,6 +26,8 @@ import java.util.function.Supplier; import org.slf4j.event.Level; +import org.apache.hadoop.logging.LogCapturer; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java index 98e182236c..8375864e5f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClassUtil.java @@ -22,8 +22,8 @@ import org.junit.Assert; -import org.apache.log4j.Logger; import org.junit.Test; +import org.slf4j.Logger; public class TestClassUtil { @Test(timeout=10000) @@ -35,6 +35,6 @@ public void testFindContainingJar() { Assert.assertTrue("Containing jar does not exist on file system ", jarFile.exists()); Assert.assertTrue("Incorrect jar file " + containingJar, - jarFile.getName().matches("log4j.*[.]jar")); + jarFile.getName().matches("slf4j-api.*[.]jar")); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java index 1d1ce893a9..ec26af6601 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java @@ -28,7 +28,7 @@ import static org.junit.Assert.*; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.assertj.core.api.Assertions; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java index f43930dd07..fb6221f270 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java @@ -28,10 +28,12 @@ import java.util.Random; import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.util.hash.Hash; -import org.apache.log4j.Logger; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; @@ -113,7 +115,7 @@ public void test() { } interface FilterTesterStrategy { - final Logger logger = Logger.getLogger(FilterTesterStrategy.class); + Logger logger = LoggerFactory.getLogger(FilterTesterStrategy.class); void 
assertWhat(Filter filter, int numInsertions, int hashId, ImmutableSet falsePositives); diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 96588a22b9..8a04c4ebcf 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -53,6 +53,12 @@ hadoop-auth compile + + org.apache.hadoop + hadoop-logging + test + test-jar + org.apache.hadoop.thirdparty hadoop-shaded-guava diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index f4c7fbe0b3..97d854285f 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -49,6 +49,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Time; import org.apache.http.client.utils.URIBuilder; import org.junit.After; @@ -583,8 +584,8 @@ public Void run() throws Exception { @Test public void testStartStopHttpPseudo() throws Exception { // Make sure bogus errors don't get emitted. - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger( + LogCapturer logs = + LogCapturer.captureLogs(LoggerFactory.getLogger( "com.sun.jersey.server.wadl.generators.AbstractWadlGeneratorGrammarGenerator")); try { testStartStop(false, false); diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java index 3d0fd7de64..6e12d946ff 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java @@ -18,23 +18,24 @@ package org.apache.hadoop.crypto.key.kms.server; import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.FilterOutputStream; -import java.io.InputStream; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Paths; import java.util.List; import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.reflect.FieldUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp; -import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.ThreadUtil; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; + import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -67,24 +68,23 @@ public void setOutputStream(OutputStream out) { public final Timeout testTimeout = new Timeout(180000L, TimeUnit.MILLISECONDS); @Before - public void setUp() throws IOException { + public void setUp() throws IOException, URISyntaxException { originalOut = System.err; memOut = new ByteArrayOutputStream(); filterOut = new 
FilterOut(memOut); capturedOut = new PrintStream(filterOut); System.setErr(capturedOut); - InputStream is = - ThreadUtil.getResourceAsStream("log4j-kmsaudit.properties"); - PropertyConfigurator.configure(is); - IOUtils.closeStream(is); + URL url = getClass().getClassLoader().getResource("log4j-kmsaudit.properties"); + File file = Paths.get(url.toURI()).toFile(); + HadoopLoggerUtils.updateLog4jConfiguration(KMSAudit.class, file.getAbsolutePath()); Configuration conf = new Configuration(); this.kmsAudit = new KMSAudit(conf); } @After - public void cleanUp() { + public void cleanUp() throws Exception { System.setErr(originalOut); - LogManager.resetConfiguration(); + HadoopLoggerUtils.resetConfiguration(); kmsAudit.shutdown(); } diff --git a/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml new file mode 100644 index 0000000000..304d1e4515 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-common-project/hadoop-logging/pom.xml b/hadoop-common-project/hadoop-logging/pom.xml new file mode 100644 index 0000000000..20af2bee76 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/pom.xml @@ -0,0 +1,125 @@ + + + + + + hadoop-project + org.apache.hadoop + 3.4.0-SNAPSHOT + ../../hadoop-project + + 4.0.0 + + hadoop-logging + 3.4.0-SNAPSHOT + jar + + Apache Hadoop Logging + Logging Support for Apache Hadoop project + + + UTF-8 + + + + + org.apache.hadoop + hadoop-annotations + provided + + + org.apache.commons + commons-lang3 + + + org.slf4j + slf4j-api + + + junit + junit + test + + + org.slf4j + slf4j-log4j12 + test + + + log4j + log4j + provided + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + prepare-package + + jar + + + + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + prepare-jar + prepare-package + + jar + + + + prepare-test-jar + prepare-package + + test-jar + + + + + + org.apache.rat + apache-rat-plugin + + + dev-support/findbugsExcludeFile.xml + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + + + + \ No newline at end of file diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java new file mode 100644 index 0000000000..b0bd2e31fc --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopInternalLog4jUtils.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.logging; + +import java.io.FileInputStream; +import java.io.Flushable; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.Enumeration; +import java.util.Map; +import java.util.Properties; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.log4j.Appender; +import org.apache.log4j.Level; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.log4j.PropertyConfigurator; + +/** + * Hadoop's internal class that accesses log4j APIs directly. + * <p/>
+ * This class will depend on log4j directly, so callers should not use this class directly to avoid + * introducing log4j dependencies to downstream users. Please call the methods in + * {@link HadoopLoggerUtils}, as they will call the methods here through reflection. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +final class HadoopInternalLog4jUtils { + + private HadoopInternalLog4jUtils() { + } + + static void setLogLevel(String loggerName, String levelName) { + if (loggerName == null) { + throw new IllegalArgumentException("logger name cannot be null"); + } + Logger logger = loggerName.equalsIgnoreCase("root") ? + LogManager.getRootLogger() : + LogManager.getLogger(loggerName); + Level level = Level.toLevel(levelName.toUpperCase()); + if (!level.toString().equalsIgnoreCase(levelName)) { + throw new IllegalArgumentException("Unsupported log level " + levelName); + } + logger.setLevel(level); + } + + static void shutdownLogManager() { + LogManager.shutdown(); + } + + static String getEffectiveLevel(String loggerName) { + Logger logger = loggerName.equalsIgnoreCase("root") ? + LogManager.getRootLogger() : + LogManager.getLogger(loggerName); + return logger.getEffectiveLevel().toString(); + } + + static void resetConfiguration() { + LogManager.resetConfiguration(); + } + + static void updateLog4jConfiguration(Class targetClass, String log4jPath) throws Exception { + Properties customProperties = new Properties(); + try (FileInputStream fs = new FileInputStream(log4jPath); + InputStream is = targetClass.getResourceAsStream("/log4j.properties")) { + customProperties.load(fs); + Properties originalProperties = new Properties(); + originalProperties.load(is); + for (Map.Entry entry : customProperties.entrySet()) { + originalProperties.setProperty(entry.getKey().toString(), entry.getValue().toString()); + } + LogManager.resetConfiguration(); + PropertyConfigurator.configure(originalProperties); + } + } + + static boolean hasAppenders(String logger) { + return Logger.getLogger(logger) + .getAllAppenders() + .hasMoreElements(); + } + + @SuppressWarnings("unchecked") + static void syncLogs() { + // flush standard streams + // + System.out.flush(); + System.err.flush(); + + // flush flushable appenders + // + final Logger rootLogger = Logger.getRootLogger(); + flushAppenders(rootLogger); + final Enumeration allLoggers = rootLogger.getLoggerRepository(). + getCurrentLoggers(); + while (allLoggers.hasMoreElements()) { + final Logger l = allLoggers.nextElement(); + flushAppenders(l); + } + } + + @SuppressWarnings("unchecked") + private static void flushAppenders(Logger l) { + final Enumeration allAppenders = l.getAllAppenders(); + while (allAppenders.hasMoreElements()) { + final Appender a = allAppenders.nextElement(); + if (a instanceof Flushable) { + try { + ((Flushable) a).flush(); + } catch (IOException ioe) { + System.err.println(a + ": Failed to flush!" 
+ + stringifyException(ioe)); + } + } + } + } + + private static String stringifyException(Throwable e) { + StringWriter stringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter(stringWriter); + e.printStackTrace(printWriter); + printWriter.close(); + return stringWriter.toString(); + } + +} diff --git a/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java new file mode 100644 index 0000000000..1d0bea1733 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/HadoopLoggerUtils.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.logging; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A bridge class for operating on logging framework, such as changing log4j log level, etc. + * Will call the methods in {@link HadoopInternalLog4jUtils} to perform operations on log4j level. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public final class HadoopLoggerUtils { + + private static final String INTERNAL_UTILS_CLASS = + "org.apache.hadoop.logging.HadoopInternalLog4jUtils"; + + private HadoopLoggerUtils() { + } + + private static Method getMethod(String methodName, Class... 
args) { + try { + Class clazz = Class.forName(INTERNAL_UTILS_CLASS); + return clazz.getDeclaredMethod(methodName, args); + } catch (ClassNotFoundException | NoSuchMethodException e) { + throw new AssertionError("should not happen", e); + } + } + + private static void throwUnchecked(Throwable throwable) { + if (throwable instanceof RuntimeException) { + throw (RuntimeException) throwable; + } + if (throwable instanceof Error) { + throw (Error) throwable; + } + } + + public static void shutdownLogManager() { + Method method = getMethod("shutdownLogManager"); + try { + method.invoke(null); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static void setLogLevel(String loggerName, String levelName) { + Method method = getMethod("setLogLevel", String.class, String.class); + try { + method.invoke(null, loggerName, levelName); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static String getEffectiveLevel(String loggerName) { + Method method = getMethod("getEffectiveLevel", String.class); + try { + return (String) method.invoke(null, loggerName); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static void resetConfiguration() { + Method method = getMethod("resetConfiguration"); + try { + method.invoke(null); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static void updateLog4jConfiguration(Class targetClass, String log4jPath) { + Method method = getMethod("updateLog4jConfiguration", Class.class, String.class); + try { + method.invoke(null, targetClass, log4jPath); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public static boolean hasAppenders(String logger) { + Method method = getMethod("hasAppenders", String.class); + try { + return (Boolean) method.invoke(null, logger); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + + public synchronized static void syncLogs() { + Method method = getMethod("syncLogs"); + try { + method.invoke(null); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("Failed to execute, should not happen", e.getCause()); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java 
b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java similarity index 98% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java rename to hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java index 276e5b0987..2abfffb474 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java +++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/AsyncRFAAppender.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdfs.util; +package org.apache.hadoop.logging.appenders; import java.io.IOException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java similarity index 93% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java rename to hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java index fffc8a857c..cf7a2bfe0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java +++ b/hadoop-common-project/hadoop-logging/src/main/java/org/apache/hadoop/logging/appenders/Log4jWarningErrorMetricsAppender.java @@ -16,12 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.yarn.util; +package org.apache.hadoop.logging.appenders; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -113,16 +111,13 @@ public Log4jWarningErrorMetricsAppender() { /** * Create an appender to keep track of the errors and warnings logged by the * system. - * - * @param cleanupIntervalSeconds - * the interval at which old messages are purged to prevent the - * message stores from growing unbounded - * @param messageAgeLimitSeconds - * the maximum age of a message in seconds before it is purged from - * the store - * @param maxUniqueMessages - * the maximum number of unique messages of each type we keep before - * we start purging + * + * @param cleanupIntervalSeconds the interval at which old messages are purged to prevent the + * message stores from growing unbounded. + * @param messageAgeLimitSeconds the maximum age of a message in seconds before it is purged from + * the store. + * @param maxUniqueMessages the maximum number of unique messages of each type we keep before + * we start purging. 
*/ public Log4jWarningErrorMetricsAppender(int cleanupIntervalSeconds, long messageAgeLimitSeconds, int maxUniqueMessages) { @@ -143,6 +138,20 @@ public Log4jWarningErrorMetricsAppender(int cleanupIntervalSeconds, this.setThreshold(Level.WARN); } + private static String join(CharSequence separator, String[] strings) { + StringBuilder sb = new StringBuilder(); + boolean first = true; + for (String s : strings) { + if (first) { + first = false; + } else { + sb.append(separator); + } + sb.append(s); + } + return sb.toString(); + } + /** * {@inheritDoc} */ @@ -151,7 +160,7 @@ protected void append(LoggingEvent event) { String message = event.getRenderedMessage(); String[] throwableStr = event.getThrowableStrRep(); if (throwableStr != null) { - message = message + "\n" + StringUtils.join("\n", throwableStr); + message = message + "\n" + join("\n", throwableStr); message = org.apache.commons.lang3.StringUtils.left(message, MAX_MESSAGE_SIZE); } @@ -232,7 +241,7 @@ public boolean requiresLayout() { * getErrorMessagesAndCounts since the message store is purged at regular * intervals to prevent it from growing without bounds, while the store for * the counts is purged less frequently. - * + * * @param cutoffs * list of timestamp cutoffs(in seconds) for which the counts are * desired @@ -248,7 +257,7 @@ public List getErrorCounts(List cutoffs) { * getWarningMessagesAndCounts since the message store is purged at regular * intervals to prevent it from growing without bounds, while the store for * the counts is purged less frequently. - * + * * @param cutoffs * list of timestamp cutoffs(in seconds) for which the counts are * desired @@ -285,7 +294,7 @@ private List getCounts(SortedMap map, * differ from the ones provided by getErrorCounts since the message store is * purged at regular intervals to prevent it from growing without bounds, * while the store for the counts is purged less frequently. - * + * * @param cutoffs * list of timestamp cutoffs(in seconds) for which the counts are * desired @@ -304,7 +313,7 @@ private List getCounts(SortedMap map, * may differ from the ones provided by getWarningCounts since the message * store is purged at regular intervals to prevent it from growing without * bounds, while the store for the counts is purged less frequently. 
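The private join() added above inlines the one piece of org.apache.hadoop.util.StringUtils this appender used, cutting its dependency on hadoop-common. A standalone mirror of the helper, shown only to make its separator semantics concrete (no separator before the first element):

```java
public class JoinDemo {
  // Mirror of the private helper for illustration; not part of the patch.
  static String join(CharSequence separator, String[] strings) {
    StringBuilder sb = new StringBuilder();
    boolean first = true;
    for (String s : strings) {
      if (first) {
        first = false;
      } else {
        sb.append(separator);
      }
      sb.append(s);
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    String[] frames = {"java.io.IOException: boom", "\tat Foo.bar(Foo.java:42)"};
    // Prints the two stack-trace lines joined by a newline, matching what
    // append() prepends to the rendered message above.
    System.out.println(join("\n", frames));
  }
}
```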
- * + * * @param cutoffs * list of timestamp cutoffs(in seconds) for which the counts are * desired @@ -322,7 +331,7 @@ private List> getElementsAndCounts( SortedSet purgeInformation) { if (purgeInformation.size() > maxUniqueMessages) { ErrorAndWarningsCleanup cleanup = new ErrorAndWarningsCleanup(); - long cutoff = Time.now() - (messageAgeLimitSeconds * 1000); + long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000); cutoff = (cutoff / 1000); cleanup.cleanupMessages(map, purgeInformation, cutoff, maxUniqueMessages); } @@ -379,7 +388,7 @@ class ErrorAndWarningsCleanup extends TimerTask { @Override public void run() { - long cutoff = Time.now() - (messageAgeLimitSeconds * 1000); + long cutoff = System.currentTimeMillis() - (messageAgeLimitSeconds * 1000); cutoff = (cutoff / 1000); cleanupMessages(errors, errorsPurgeInformation, cutoff, maxUniqueMessages); cleanupMessages(warnings, warningsPurgeInformation, cutoff, diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java new file mode 100644 index 0000000000..45f5d0ca02 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/LogCapturer.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.logging; + +import java.io.StringWriter; + +import org.apache.log4j.Appender; +import org.apache.log4j.Layout; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; +import org.apache.log4j.WriterAppender; + +public class LogCapturer { + private final StringWriter sw = new StringWriter(); + private final Appender appender; + private final Logger logger; + + public static LogCapturer captureLogs(org.slf4j.Logger logger) { + if (logger.getName().equals("root")) { + return new LogCapturer(Logger.getRootLogger()); + } + return new LogCapturer(LogManager.getLogger(logger.getName())); + } + + private LogCapturer(Logger logger) { + this.logger = logger; + Appender defaultAppender = Logger.getRootLogger().getAppender("stdout"); + if (defaultAppender == null) { + defaultAppender = Logger.getRootLogger().getAppender("console"); + } + final Layout layout = + (defaultAppender == null) ? 
new PatternLayout() : defaultAppender.getLayout(); + this.appender = new WriterAppender(layout, sw); + logger.addAppender(this.appender); + } + + public String getOutput() { + return sw.toString(); + } + + public void stopCapturing() { + logger.removeAppender(appender); + } + + public void clearOutput() { + sw.getBuffer().setLength(0); + } +} diff --git a/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java new file mode 100644 index 0000000000..4bafb5a315 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/test/java/org/apache/hadoop/logging/test/TestSyncLogs.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.logging.test; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.logging.HadoopLoggerUtils; + +public class TestSyncLogs { + + private static final Logger LOG = LoggerFactory.getLogger(TestSyncLogs.class); + + @Test + public void testSyncLogs() { + LOG.info("Testing log sync"); + HadoopLoggerUtils.syncLogs(); + } + +} diff --git a/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties b/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties new file mode 100644 index 0000000000..ff1468cf43 --- /dev/null +++ b/hadoop-common-project/hadoop-logging/src/test/resources/log4j.properties @@ -0,0 +1,18 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
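LogCapturer, shown above, is the test-jar replacement for the ad-hoc WriterAppender wiring that the migrated tests used to do by hand. A sketch of typical test usage, assuming a log4j 1.x backend on the classpath as in these modules (the test class and message are hypothetical):

```java
import org.apache.hadoop.logging.LogCapturer;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TestLogCapturerUsage {
  private static final Logger LOG = LoggerFactory.getLogger(TestLogCapturerUsage.class);

  @Test
  public void testCaptureAndAssert() {
    // Attaches a WriterAppender to the log4j logger of the same name and
    // buffers everything it emits into a StringWriter.
    LogCapturer capturer = LogCapturer.captureLogs(LOG);
    try {
      LOG.warn("disk almost full");
      Assert.assertTrue(capturer.getOutput().contains("disk almost full"));
      capturer.clearOutput();
      Assert.assertTrue(capturer.getOutput().isEmpty());
    } finally {
      // Always detach the appender so later tests see a clean logger.
      capturer.stopCapturing();
    }
  }
}
```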
+# log4j configuration used during build and unit tests + +log4j.rootLogger=debug,stdout +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index c292aebbe3..d2e993343a 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -38,11 +38,6 @@ org.apache.kerby kerb-simplekdc - - org.slf4j - slf4j-log4j12 - compile - junit junit diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index f167a079a9..b0fb88874c 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -38,6 +38,7 @@ hadoop-minikdc hadoop-kms hadoop-registry + hadoop-logging diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index b362e001ea..9a1226ea38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -86,6 +86,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> netty-all test + + org.apache.hadoop + hadoop-logging + test + test-jar + org.mock-server mockserver-netty diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java index 1fe6dcad93..d0b8653426 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java @@ -31,6 +31,7 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Lists; import org.junit.Assert; import org.junit.Test; @@ -61,8 +62,8 @@ public HttpURLConnection configure(HttpURLConnection conn) public void testSSLInitFailure() throws Exception { Configuration conf = new Configuration(); conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo"); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = + LogCapturer.captureLogs( LoggerFactory.getLogger(URLConnectionFactory.class)); URLConnectionFactory.newDefaultURLConnectionFactory(conf); Assert.assertTrue("Expected log for ssl init failure not found!", diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index a5bf5c1c31..b9aae62bd8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -182,6 +182,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> junit-jupiter-params test + + org.apache.hadoop + hadoop-logging + test + test-jar + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java index 0741f1aed4..9f74337d7a 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRefreshFairnessPolicyController.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod; import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIR_HANDLER_COUNT_KEY_PREFIX; import static org.junit.Assert.assertEquals; @@ -48,8 +49,8 @@ public class TestRouterRefreshFairnessPolicyController { private static final Logger LOG = LoggerFactory.getLogger(TestRouterRefreshFairnessPolicyController.class); - private final GenericTestUtils.LogCapturer controllerLog = - GenericTestUtils.LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG); + private final LogCapturer controllerLog = + LogCapturer.captureLogs(AbstractRouterRpcFairnessPolicyController.LOG); private StateStoreDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java index 1f5770b1dd..d4f6827135 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterRpcFairnessPolicyController.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.federation.router.FederationUtil; import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys; -import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Time; import org.junit.Test; import org.slf4j.LoggerFactory; @@ -179,7 +179,7 @@ public void testHandlerAllocationConcurrentConfigured() { private void verifyInstantiationError(Configuration conf, int handlerCount, int totalDedicatedHandlers) { - GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer + LogCapturer logs = LogCapturer .captureLogs(LoggerFactory.getLogger( StaticRouterRpcFairnessPolicyController.class)); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java index 9ee9692aad..bb81eaa070 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; @@ -55,6 +54,7 @@ import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; @@ -322,11 +322,7 @@ private void verifyUrlSchemes(String scheme, Configuration conf, int httpRequest int httpsRequests, int requestsPerService) { // Attach our own log appender so we can verify output - final LogVerificationAppender appender = - new LogVerificationAppender(); - final org.apache.log4j.Logger logger = - org.apache.log4j.Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); GenericTestUtils.setRootLogLevel(Level.DEBUG); // Setup and start the Router @@ -347,8 +343,11 @@ private void verifyUrlSchemes(String scheme, Configuration conf, int httpRequest heartbeatService.getNamenodeStatusReport(); } } - assertEquals(httpsRequests * 2, appender.countLinesWithMessage("JMX URL: https://")); - assertEquals(httpRequests * 2, appender.countLinesWithMessage("JMX URL: http://")); + assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), + "JMX URL: https://")); + assertEquals(2, org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), + "JMX URL: http://")); + logCapturer.stopCapturing(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java index d3d3421619..3db20a6e18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java @@ -135,6 +135,8 @@ import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; import org.junit.AfterClass; @@ -2067,8 +2069,8 @@ private DFSClient getFileDFSClient(final String path) { @Test public void testMkdirsWithCallerContext() throws IOException { - GenericTestUtils.LogCapturer auditlog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditlog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // Current callerContext is null assertNull(CallerContext.getCurrent()); @@ -2094,8 +2096,8 @@ public void testMkdirsWithCallerContext() throws IOException { @Test public void testRealUserPropagationInCallerContext() throws IOException, InterruptedException { - GenericTestUtils.LogCapturer auditlog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditlog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // Current callerContext is null assertNull(CallerContext.getCurrent()); @@ -2139,8 +2141,8 @@ public void testSetBalancerBandwidth() throws Exception { @Test public void testAddClientIpPortToCallerContext() throws IOException { - GenericTestUtils.LogCapturer auditLog = - 
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditLog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // 1. ClientIp and ClientPort are not set on the client. // Set client context. @@ -2174,8 +2176,8 @@ public void testAddClientIpPortToCallerContext() throws IOException { @Test public void testAddClientIdAndCallIdToCallerContext() throws IOException { - GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditLog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // 1. ClientId and ClientCallId are not set on the client. // Set client context. diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java index 336ea39138..caecb697d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java @@ -72,6 +72,8 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.Test; import org.slf4j.event.Level; @@ -276,12 +278,10 @@ public void testProxyRenameFiles() throws IOException, InterruptedException { @Test public void testPreviousBlockNotNull() throws IOException, URISyntaxException { - final GenericTestUtils.LogCapturer stateChangeLog = - GenericTestUtils.LogCapturer.captureLogs(NameNode.stateChangeLog); + final LogCapturer stateChangeLog = LogCapturer.captureLogs(NameNode.stateChangeLog); GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.DEBUG); - final GenericTestUtils.LogCapturer nameNodeLog = - GenericTestUtils.LogCapturer.captureLogs(NameNode.LOG); + final LogCapturer nameNodeLog = LogCapturer.captureLogs(NameNode.LOG); GenericTestUtils.setLogLevel(NameNode.LOG, Level.DEBUG); final FederationRPCMetrics metrics = getRouterContext(). 
@@ -454,8 +454,8 @@ public void testSubclusterDown() throws Exception { @Test public void testCallerContextWithMultiDestinations() throws IOException { - GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditLog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); // set client context CallerContext.setCurrent( diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 8632c567aa..5c2df9acf4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -310,14 +310,4 @@ - - - - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 5f156499ee..a8922cbcff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -164,6 +164,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-minikdc test + + org.apache.hadoop + hadoop-logging + test + test-jar + org.mockito mockito-core diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java index 21c01cebd4..a361a280e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java @@ -31,6 +31,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.metrics2.util.MBeans; /** @@ -111,11 +113,8 @@ private String trimLine(String valueStr) { .substring(0, maxLogLineLength) + "..."); } - // TODO : hadoop-logging module to hide log4j implementation details, this method - // can directly call utility from hadoop-logging. 
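The removed TODO just below is resolved by this hunk: MetricsLoggerTask now asks the bridge whether a logger has appenders instead of reaching into log4j directly. A sketch of the resulting guard pattern (the logger name is hypothetical):

```java
import org.apache.hadoop.logging.HadoopLoggerUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MetricsLogGuardDemo {
  // Hypothetical metrics logger name, mirroring how DataNode.METRICS_LOG_NAME is used.
  private static final Logger METRICS_LOG = LoggerFactory.getLogger("DataNodeMetricsLog");

  public static void main(String[] args) {
    // Skip building the metrics snapshot when nothing would consume it,
    // i.e. when no appender is attached to the logger.
    if (HadoopLoggerUtils.hasAppenders(METRICS_LOG.getName())) {
      METRICS_LOG.info("metric snapshot: ...");
    } else {
      System.err.println("Metrics logging is disabled; no appender configured.");
    }
  }
}
```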
private static boolean hasAppenders(Logger logger) { - return org.apache.log4j.Logger.getLogger(logger.getName()).getAllAppenders() - .hasMoreElements(); + return HadoopLoggerUtils.hasAppenders(logger.getName()); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java index ab301104f2..4e8daf319a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java @@ -32,11 +32,11 @@ import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor; import org.apache.hadoop.hdfs.server.namenode.visitor.INodeCountVisitor.Counts; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.log4j.Level; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -110,13 +110,13 @@ static void setHaConf(String nsId, Configuration conf) { } static void initLogLevels() { - Util.setLogLevel(FSImage.class, Level.TRACE); - Util.setLogLevel(FileJournalManager.class, Level.TRACE); + Util.setLogLevel(FSImage.class, "TRACE"); + Util.setLogLevel(FileJournalManager.class, "TRACE"); - Util.setLogLevel(GSet.class, Level.OFF); - Util.setLogLevel(BlockManager.class, Level.OFF); - Util.setLogLevel(DatanodeManager.class, Level.OFF); - Util.setLogLevel(TopMetrics.class, Level.OFF); + Util.setLogLevel(GSet.class, "OFF"); + Util.setLogLevel(BlockManager.class, "OFF"); + Util.setLogLevel(DatanodeManager.class, "OFF"); + Util.setLogLevel(TopMetrics.class, "OFF"); } static class Util { @@ -127,11 +127,10 @@ static String memoryInfo() { + ", max=" + StringUtils.byteDesc(runtime.maxMemory()); } - static void setLogLevel(Class clazz, Level level) { - final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(clazz); - logger.setLevel(level); + static void setLogLevel(Class clazz, String level) { + HadoopLoggerUtils.setLogLevel(clazz.getName(), level); LOG.info("setLogLevel {} to {}, getEffectiveLevel() = {}", clazz.getName(), level, - logger.getEffectiveLevel()); + HadoopLoggerUtils.getEffectiveLevel(clazz.getName())); } static String toCommaSeparatedNumber(long n) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java deleted file mode 100644 index 10ef47bbbc..0000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.spi.LoggingEvent; -import org.apache.log4j.spi.ThrowableInformation; - -/** - * Used to verify that certain exceptions or messages are present in log output. - */ -public class LogVerificationAppender extends AppenderSkeleton { - private final List log = new ArrayList(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override - public void close() { - } - - public List getLog() { - return new ArrayList(log); - } - - public int countExceptionsWithMessage(final String text) { - int count = 0; - for (LoggingEvent e: getLog()) { - ThrowableInformation t = e.getThrowableInformation(); - if (t != null) { - String m = t.getThrowable().getMessage(); - if (m.contains(text)) { - count++; - } - } - } - return count; - } - - public int countLinesWithMessage(final String text) { - int count = 0; - for (LoggingEvent e: getLog()) { - String msg = e.getRenderedMessage(); - if (msg != null && msg.contains(text)) { - count++; - } - } - return count; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java index b16f0237b1..75ad5bd862 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java @@ -33,7 +33,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; -import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.Test; public class TestDFSRename { @@ -189,8 +190,8 @@ public void testRename2Options() throws Exception { final DistributedFileSystem dfs = cluster.getFileSystem(); Path path = new Path("/test"); dfs.mkdirs(path); - GenericTestUtils.LogCapturer auditLog = - GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); + LogCapturer auditLog = + LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG); dfs.rename(path, new Path("/dir1"), new Rename[] {Rename.OVERWRITE, Rename.TO_TRASH}); String auditOut = auditLog.getOutput(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index 5469ebbb75..80424a388b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -45,9 +45,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageFormat; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import 
org.apache.hadoop.hdfs.server.namenode.IllegalReservedPathException; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.StringUtils; -import org.apache.log4j.Logger; import org.junit.Test; import static org.junit.Assert.*; @@ -317,9 +317,7 @@ public void testUpgradeFromCorruptRel22Image() throws IOException { "imageMD5Digest", "22222222222222222222222222222222"); // Attach our own log appender so we can verify output - final LogVerificationAppender appender = new LogVerificationAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); // Upgrade should now fail try { @@ -331,9 +329,10 @@ public void testUpgradeFromCorruptRel22Image() throws IOException { if (!msg.contains("Failed to load FSImage file")) { throw ioe; } - int md5failures = appender.countExceptionsWithMessage( + int md5failures = org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(), " is corrupt with MD5 checksum of "); assertEquals("Upgrade did not fail with bad MD5", 1, md5failures); + logCapturer.stopCapturing(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java index c57ef941f0..c792386c0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -48,7 +48,7 @@ public static void setup() throws IOException { @Test(timeout = 60000) public void testDfsClient() throws IOException, InterruptedException { - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LoggerFactory + LogCapturer logs = LogCapturer.captureLogs(LoggerFactory .getLogger(DataStreamer.class)); byte[] toWrite = new byte[PACKET_SIZE]; new Random(1).nextBytes(toWrite); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java index f9336fcfdc..4299c11196 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.junit.After; import org.junit.Before; @@ -168,9 +168,9 @@ private void testEncryptedRead(String algorithm, String cipherSuite, FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster(); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( 
LoggerFactory.getLogger(SaslDataTransferServer.class)); - LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs1 = LogCapturer.captureLogs( LoggerFactory.getLogger(DataTransferSaslUtil.class)); try { assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); @@ -239,7 +239,7 @@ public void testClientThatDoesNotSupportEncryption() throws IOException { Mockito.doReturn(false).when(spyClient).shouldEncryptData(); DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(DataNode.class)); try { assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH)); @@ -457,9 +457,9 @@ private void testEncryptedWrite(int numDns) throws IOException { fs = getFileSystem(conf); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(SaslDataTransferServer.class)); - LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs1 = LogCapturer.captureLogs( LoggerFactory.getLogger(DataTransferSaslUtil.class)); try { writeTestDataToFile(fs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java index 3dd0b7eb99..c6561287bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java @@ -54,7 +54,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.After; import org.junit.Assert; import org.junit.Rule; @@ -138,7 +138,7 @@ public void testServerSaslNoClientSasl() throws Exception { HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf); clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, ""); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(DataNode.class)); try { doTest(clientConf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java index 82b8b58769..84b7c8f224 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.PathUtils; import org.junit.After; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index d69051c8d7..5d2a927064 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -56,7 +56,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.Test; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java index d32cde8347..7e926a994f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManagerSafeMode.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.Whitebox; import org.assertj.core.api.Assertions; @@ -235,8 +236,8 @@ public void testCheckSafeMode8() throws Exception { public void testCheckSafeMode9() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, 3000); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG); + LogCapturer logs = + LogCapturer.captureLogs(BlockManagerSafeMode.LOG); BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm, fsn, true, conf); String content = logs.getOutput(); @@ -247,8 +248,8 @@ public void testCheckSafeMode9() throws Exception { public void testCheckSafeMode10(){ Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_RECHECK_INTERVAL_KEY, -1); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(BlockManagerSafeMode.LOG); + LogCapturer logs = + LogCapturer.captureLogs(BlockManagerSafeMode.LOG); BlockManagerSafeMode blockManagerSafeMode = new BlockManagerSafeMode(bm, fsn, true, conf); String content = logs.getOutput(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java index ea7347f9e5..87c83836e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java @@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import 
org.apache.hadoop.logging.LogCapturer; import org.junit.Test; import org.mockito.Mockito; import org.slf4j.LoggerFactory; @@ -575,7 +575,7 @@ public void testPendingReConstructionBlocksForSameDN() throws Exception { new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); cluster.waitActive(); DFSTestUtil.setNameNodeLogLevel(Level.DEBUG); - LogCapturer logs = GenericTestUtils.LogCapturer + LogCapturer logs = LogCapturer .captureLogs(LoggerFactory.getLogger("BlockStateChange")); BlockManager bm = cluster.getNamesystem().getBlockManager(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 20163cc5fa..c4b5f7aa6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -41,6 +40,7 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicLong; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.AddBlockFlag; import org.apache.hadoop.fs.ContentSummary; @@ -49,7 +49,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.TestBlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.Block; @@ -67,16 +66,15 @@ import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.namenode.TestINodeFile; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.net.Node; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import org.slf4j.LoggerFactory; @RunWith(Parameterized.class) public class TestReplicationPolicy extends BaseReplicationPolicyTest { @@ -507,26 +505,26 @@ public void testChooseTargetWithMoreThanAvailableNodes() throws Exception { 2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } - - final LogVerificationAppender appender = new LogVerificationAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); - + + final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); + // try to choose NUM_OF_DATANODES which is more than actually available // nodes. 
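The test migration visible throughout this file follows one recipe: capture the root logger with LogCapturer and count substring occurrences with commons-lang3, replacing the deleted LogVerificationAppender.countLinesWithMessage(). A condensed sketch of that recipe (the logged message is hypothetical):

```java
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.logging.LogCapturer;
import org.slf4j.LoggerFactory;

public class CountLogLinesDemo {
  public static void main(String[] args) {
    // "root" is special-cased by captureLogs() to attach to the root logger,
    // which is what these tests do when the emitting class is not known.
    LogCapturer capturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
    try {
      LoggerFactory.getLogger(CountLogLinesDemo.class).warn("JMX URL: https://host:8481");
      // countMatches stands in for the old countLinesWithMessage().
      int hits = StringUtils.countMatches(capturer.getOutput(), "JMX URL: https://");
      System.out.println("matches: " + hits);
    } finally {
      capturer.stopCapturing();
    }
  }
}
```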
DatanodeStorageInfo[] targets = chooseTarget(dataNodes.length); assertEquals(targets.length, dataNodes.length - 2); - final List<LoggingEvent> log = appender.getLog(); - assertNotNull(log); - assertFalse(log.size() == 0); - final LoggingEvent lastLogEntry = log.get(log.size() - 1); - - assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel())); - // Suppose to place replicas on each node but two data nodes are not - // available for placing replica, so here we expect a short of 2 - assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2")); - + boolean isFound = false; + for (String logLine : logCapturer.getOutput().split("\n")) { + // Supposed to place replicas on each node, but two data nodes are not + // available for placing a replica, so here we expect a shortfall of 2 + if (logLine.contains("WARN") && logLine.contains("in need of 2")) { + isFound = true; + break; + } + } + assertTrue("Could not find the block placement log specific to 2 datanodes not being " + + "available for placing replicas", isFound); + logCapturer.stopCapturing(); resetHeartbeatForStorages(); } @@ -1710,17 +1708,14 @@ public void testMaxLoad() { @Test public void testChosenFailureForStorageType() { - final LogVerificationAppender appender = new LogVerificationAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); - + final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 1, dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null, BLOCK_SIZE, TestBlockStoragePolicy.POLICY_SUITE.getPolicy( HdfsConstants.StoragePolicy.COLD.value()), null); assertEquals(0, targets.length); assertNotEquals(0, - appender.countLinesWithMessage("NO_REQUIRED_STORAGE_TYPE")); + StringUtils.countMatches(logCapturer.getOutput(), "NO_REQUIRED_STORAGE_TYPE")); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java index 73201ba605..13efcf783a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.util.Collections; -import java.util.List; import java.util.Random; import java.util.concurrent.TimeoutException; @@ -39,19 +38,15 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.log4j.Appender; -import org.apache.log4j.AsyncAppender; import org.junit.After; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; -import java.util.function.Supplier; - /** * Test periodic logging of DataNode metrics. 
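The metrics-logger test below swaps the removed PatternMatchingAppender for polling captured output, which works even when the target appender is asynchronous. A minimal sketch of that wait-for pattern (logger name and marker string are hypothetical):

```java
import org.apache.hadoop.logging.LogCapturer;
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.LoggerFactory;

public class WaitForLogDemo {
  public static void main(String[] args) throws Exception {
    LogCapturer capturer = LogCapturer.captureLogs(LoggerFactory.getLogger("MetricsLog"));
    try {
      // Something logs the marker, possibly on another thread or via an
      // async appender; here we log it directly so the demo terminates.
      LoggerFactory.getLogger("MetricsLog").info("FakeMetric count = 1");

      // Poll the captured output until the marker shows up: check every
      // 1s, give up (TimeoutException) after 60s.
      GenericTestUtils.waitFor(() -> capturer.getOutput().contains("FakeMetric"), 1000, 60000);
    } finally {
      capturer.stopCapturing();
    }
  }
}
```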
*/ @@ -128,13 +123,13 @@ public void testDisableMetricsLogger() throws IOException { } @Test + @SuppressWarnings("unchecked") public void testMetricsLoggerIsAsync() throws IOException { startDNForTest(true); assertNotNull(dn); - org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - assertTrue(appenders.get(0) instanceof AsyncAppender); + assertTrue(Collections.list( + org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME).getAllAppenders()) + .get(0) instanceof org.apache.log4j.AsyncAppender); } /** @@ -149,27 +144,15 @@ public void testMetricsLogOutput() throws IOException, InterruptedException, metricsProvider); startDNForTest(true); assertNotNull(dn); - final PatternMatchingAppender appender = - (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME) - .getAppender("PATTERNMATCHERAPPENDER"); - + LogCapturer logCapturer = + LogCapturer.captureLogs(LoggerFactory.getLogger(DataNode.METRICS_LOG_NAME)); // Ensure that the supplied pattern was matched. - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - return appender.isMatched(); - } - }, 1000, 60000); - + GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"), + 1000, 60000); + logCapturer.stopCapturing(); dn.shutdown(); } - private void addAppender(org.apache.log4j.Logger logger, Appender appender) { - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - ((AsyncAppender) appenders.get(0)).addAppender(appender); - } - public interface TestFakeMetricMXBean { int getFakeMetric(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 74c70cec76..82d7a81574 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -27,7 +27,6 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; @@ -77,10 +76,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.util.Time; -import org.apache.log4j.SimpleLayout; -import org.apache.log4j.WriterAppender; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; @@ -414,14 +412,9 @@ public void testRetainBlockOnPersistentStorage() throws Exception { @Test(timeout=600000) public void testScanDirectoryStructureWarn() throws Exception { + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); //add a logger stream to check what has printed to log - ByteArrayOutputStream loggerStream = new ByteArrayOutputStream(); - org.apache.log4j.Logger rootLogger = - org.apache.log4j.Logger.getRootLogger(); GenericTestUtils.setRootLogLevel(Level.INFO); - WriterAppender writerAppender = - new WriterAppender(new SimpleLayout(), loggerStream); - rootLogger.addAppender(writerAppender); 
Configuration conf = getConfiguration(); cluster = new MiniDFSCluster @@ -452,7 +445,7 @@ public void testScanDirectoryStructureWarn() throws Exception { scan(1, 1, 0, 1, 0, 0, 0); //ensure the warn log not appear and missing block log do appear - String logContent = new String(loggerStream.toByteArray()); + String logContent = logCapturer.getOutput(); String missingBlockWarn = "Deleted a metadata file" + " for the deleted block"; String dirStructureWarnLog = " found in invalid directory." + @@ -464,6 +457,7 @@ public void testScanDirectoryStructureWarn() throws Exception { LOG.info("check pass"); } finally { + logCapturer.stopCapturing(); if (scanner != null) { scanner.shutdown(); scanner = null; @@ -526,7 +520,7 @@ public void testRegularBlock() throws Exception { client = cluster.getFileSystem().getClient(); conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1); // log trace - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer. + LogCapturer logCapturer = LogCapturer. captureLogs(NameNode.stateChangeLog); // Add files with 5 blocks createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 5, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java index 8b1a6c0814..c7fc71f537 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetCache.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; import net.jcip.annotations.NotThreadSafe; + +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports; import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.junit.Assert.assertEquals; @@ -51,7 +53,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.Block; @@ -79,10 +80,10 @@ import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator; import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -393,9 +394,7 @@ public void testFilesExceedMaxLockedMemory() throws Exception { } // nth file should hit a capacity exception - final LogVerificationAppender appender = new LogVerificationAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1])); GenericTestUtils.waitFor(new Supplier() { @@ -403,11 +402,12 @@ public void testFilesExceedMaxLockedMemory() throws Exception { public Boolean get() { // check the log 
reported by FsDataSetCache // in the case that cache capacity is exceeded. - int lines = appender.countLinesWithMessage( + int lines = StringUtils.countMatches(logCapturer.getOutput(), "could not reserve more bytes in the cache: "); return lines > 0; } }, 500, 30000); + logCapturer.stopCapturing(); // Also check the metrics for the failure assertTrue("Expected more than 0 failed cache attempts", fsd.getNumBlocksFailedToCache() > 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java index 073bb532dd..8f3ef447a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.hdfs.server.diskbalancer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Preconditions; import java.util.function.Supplier; import org.apache.commons.codec.digest.DigestUtils; @@ -321,7 +322,7 @@ public void testDiskBalancerWithFedClusterWithOneNameServiceEmpty() throws 0); DFSTestUtil.waitReplication(fs, filePath, (short) 1); - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + LogCapturer logCapturer = LogCapturer .captureLogs(DiskBalancer.LOG); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java deleted file mode 100644 index f099dfae73..0000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdfs.server.namenode; - -import java.util.regex.Pattern; - -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.spi.LoggingEvent; - -/** - * An appender that matches logged messages against the given - * regular expression. 
- */ -public class PatternMatchingAppender extends AppenderSkeleton { - private final Pattern pattern; - private volatile boolean matched; - - public PatternMatchingAppender() { - this.pattern = Pattern.compile("^.*FakeMetric.*$"); - this.matched = false; - } - - public boolean isMatched() { - return matched; - } - - @Override - protected void append(LoggingEvent event) { - if (pattern.matcher(event.getMessage().toString()).matches()) { - matched = true; - } - } - - @Override - public void close() { - } - - @Override - public boolean requiresLayout() { - return false; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java index c00649a9db..617f38a63f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java @@ -37,7 +37,7 @@ import org.apache.hadoop.security.authorize.ProxyServers; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Lists; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java index d34d6ca737..fec16c13fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java @@ -41,7 +41,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import java.io.IOException; import java.security.PrivilegedExceptionAction; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java index 0f73669675..953d1ef7c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.List; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; @@ -39,12 +38,9 @@ import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.log4j.Appender; -import org.apache.log4j.AsyncAppender; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.AfterClass; @@ 
-107,6 +103,7 @@ public TestAuditLogs(boolean useAsyncEdits) { UserGroupInformation userGroupInfo; @Before + @SuppressWarnings("unchecked") public void setupCluster() throws Exception { // must configure prior to instantiating the namesystem because it // will reconfigure the logger if async is enabled @@ -122,11 +119,9 @@ public void setupCluster() throws Exception { util.createFiles(fs, fileName); // make sure the appender is what it's supposed to be - Logger logger = org.apache.log4j.Logger.getLogger( - "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit"); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - assertTrue(appenders.get(0) instanceof AsyncAppender); + assertTrue(Collections.list(org.apache.log4j.Logger.getLogger( + "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit").getAllAppenders()) + .get(0) instanceof org.apache.log4j.AsyncAppender); fnames = util.getFileNames(fileName); util.waitReplication(fs, fileName, (short)3); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index d675dcda98..ccc6be33c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -82,7 +82,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ExitUtil.ExitException; @@ -863,7 +863,7 @@ public void testStorageAlreadyLockedErrorMessage() throws Exception { savedSd = sd; } - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(Storage.class)); try { // try to lock the storage that's already locked diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java index 771caefd20..73aee349da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java @@ -49,7 +49,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 17803a0786..c68ad18570 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -83,6 +83,7 @@ import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil; @@ -90,9 +91,6 @@ import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.LogManager; -import org.apache.log4j.spi.LoggingEvent; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -1717,36 +1715,13 @@ public void testResetThreadLocalCachedOps() throws IOException { } } - class TestAppender extends AppenderSkeleton { - private final List log = new ArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override - public void close() { - } - - public List getLog() { - return new ArrayList<>(log); - } - } - /** * * @throws Exception */ @Test public void testReadActivelyUpdatedLog() throws Exception { - final TestAppender appender = new TestAppender(); - LogManager.getRootLogger().addAppender(appender); + final LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); Configuration conf = new HdfsConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); // Set single handler thread, so all transactions hit same thread-local ops. @@ -1794,21 +1769,16 @@ public void testReadActivelyUpdatedLog() throws Exception { rwf.close(); events.poll(); - String pattern = "Caught exception after reading (.*) ops"; - Pattern r = Pattern.compile(pattern); - final List log = appender.getLog(); - for (LoggingEvent event : log) { - Matcher m = r.matcher(event.getRenderedMessage()); - if (m.find()) { + for (String logLine : logCapturer.getOutput().split("\n")) { + if (logLine != null && logLine.contains("Caught exception after reading")) { fail("Should not try to read past latest syned edit log op"); } } - } finally { if (cluster != null) { cluster.shutdown(); } - LogManager.getRootLogger().removeAppender(appender); + logCapturer.stopCapturing(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java index 3b15c2db7a..fb484cd3ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java @@ -26,6 +26,8 @@ import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.Assert; import org.junit.Test; @@ -118,8 +120,8 @@ public void testDumpEdits() throws IOException { op3.setTransactionId(3); buffer.writeOp(op3, fakeLogVersion); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(EditsDoubleBuffer.LOG); + LogCapturer logs = + LogCapturer.captureLogs(EditsDoubleBuffer.LOG); try { buffer.close(); fail(); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index 89193ca663..860e6b0b25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -64,7 +64,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.erasurecode.ECSchema; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.FakeTimer; import org.slf4j.event.Level; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java index f0ae181016..afb049156e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLock.java @@ -25,7 +25,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.MetricsAsserts; import org.apache.hadoop.util.FakeTimer; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java index 9c77f9d92b..08c9240f26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemLockReport.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; + import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -58,7 +60,7 @@ private interface Procedure { private MiniDFSCluster cluster; private FileSystem fs; private UserGroupInformation userGroupInfo; - private GenericTestUtils.LogCapturer logs; + private LogCapturer logs; @Before public void setUp() throws Exception { @@ -76,7 +78,7 @@ public void setUp() throws Exception { userGroupInfo = UserGroupInformation.createUserForTesting("bob", new String[] {"hadoop"}); - logs = GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.LOG); + logs = LogCapturer.captureLogs(FSNamesystem.LOG); GenericTestUtils .setLogLevel(LoggerFactory.getLogger(FSNamesystem.class.getName()), org.slf4j.event.Level.INFO); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index a312b03168..96650a4d5e 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -115,7 +115,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.ToolRunner; import org.junit.After; import org.junit.AfterClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java index 464fdfcd6c..651d4f31c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java @@ -18,15 +18,13 @@ package org.apache.hadoop.hdfs.server.namenode; -import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.log4j.Appender; -import org.apache.log4j.AsyncAppender; import org.junit.Rule; import org.junit.Test; @@ -34,7 +32,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.List; import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; @@ -64,12 +61,12 @@ public void testDisableMetricsLogger() throws IOException { } @Test + @SuppressWarnings("unchecked") public void testMetricsLoggerIsAsync() throws IOException { makeNameNode(true); org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - assertTrue(appenders.get(0) instanceof AsyncAppender); + assertTrue(Collections.list(logger.getAllAppenders()).get(0) + instanceof org.apache.log4j.AsyncAppender); } /** @@ -80,20 +77,14 @@ public void testMetricsLoggerIsAsync() throws IOException { public void testMetricsLogOutput() throws IOException, InterruptedException, TimeoutException { TestFakeMetric metricsProvider = new TestFakeMetric(); - MBeans.register(this.getClass().getSimpleName(), - "DummyMetrics", metricsProvider); + MBeans.register(this.getClass().getSimpleName(), "DummyMetrics", metricsProvider); makeNameNode(true); // Log metrics early and often. - final PatternMatchingAppender appender = - (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(NameNode.METRICS_LOG_NAME) - .getAppender("PATTERNMATCHERAPPENDER"); + LogCapturer logCapturer = + LogCapturer.captureLogs(LoggerFactory.getLogger(NameNode.METRICS_LOG_NAME)); - // Ensure that the supplied pattern was matched. 
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        return appender.isMatched();
-      }
-    }, 1000, 60000);
+    GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains("FakeMetric"),
+        1000, 60000);
+    logCapturer.stopCapturing();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
index 073ee37781..8750154077 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java
@@ -28,7 +28,8 @@
 import org.junit.Test;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+
+import org.apache.hadoop.logging.LogCapturer;
 
 public class TestNameNodeResourcePolicy {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 67c8f3c18f..7ea0b24f2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -52,7 +52,6 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -69,12 +68,12 @@
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Logger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -524,10 +523,8 @@ private void testImageChecksum(boolean compress) throws Exception {
     // Corrupt the md5 files in all the namedirs
     corruptFSImageMD5(true);
 
-    // Attach our own log appender so we can verify output
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final Logger logger = Logger.getRootLogger();
-    logger.addAppender(appender);
+    // Capture the logs so we can verify the output
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
     // Try to start a new cluster
     LOG.info("\n===========================================\n" +
@@ -541,10 +538,13 @@
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
           "Failed to load FSImage file", ioe);
-      int md5failures = appender.countExceptionsWithMessage(
-          " is corrupt with MD5 checksum of ");
+
+      int md5failures =
+          org.apache.commons.lang3.StringUtils.countMatches(logCapturer.getOutput(),
+              " is corrupt with MD5 checksum of ");
       // Two namedirs, so should have seen two failures
       assertEquals(2, md5failures);
+      logCapturer.stopCapturing();
     }
   }
finally { if (cluster != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index 0e83bec11f..7376237a4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -197,7 +197,7 @@ public void testSharedEditsMissingLogs() throws Exception { // Trying to bootstrap standby should now fail since the edit // logs aren't available in the shared dir. - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(BootstrapStandby.class)); try { assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, forceBootstrap(1)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index 168273117b..6fa979d039 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -44,6 +44,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.Whitebox; import org.junit.After; import org.junit.Before; @@ -143,7 +144,7 @@ public void testObserverReadProxyProviderWithDT() throws Exception { () -> (DistributedFileSystem) FileSystem.get(conf)); GenericTestUtils.setLogLevel(ObserverReadProxyProvider.LOG, Level.DEBUG); - GenericTestUtils.LogCapturer logCapture = GenericTestUtils.LogCapturer + LogCapturer logCapture = LogCapturer .captureLogs(ObserverReadProxyProvider.LOG); try { dfs.access(new Path("/"), FsAction.READ); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index 513f60cb1e..3dbadcaaf0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -37,7 +37,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; -import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.server.common.Util; @@ -48,12 +47,12 @@ import 
org.apache.hadoop.io.compress.CompressionOutputStream;
 import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.logging.LogCapturer;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ThreadUtil;
-import org.apache.log4j.spi.LoggingEvent;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -299,39 +298,38 @@ public void testStandbyAndObserverState() throws Exception {
 
   @Test(timeout = 30000)
   public void testCheckpointBeforeNameNodeInitializationIsComplete()
       throws Exception {
-    final LogVerificationAppender appender = new LogVerificationAppender();
-    final org.apache.log4j.Logger logger = org.apache.log4j.Logger
-        .getRootLogger();
-    logger.addAppender(appender);
+    LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root"));
 
-    // Transition 2 to observer
-    cluster.transitionToObserver(2);
-    doEdits(0, 10);
-    // After a rollEditLog, Standby(nn1)'s next checkpoint would be
-    // ahead of observer(nn2).
-    nns[0].getRpcServer().rollEditLog();
+    try {
+      // Transition 2 to observer
+      cluster.transitionToObserver(2);
+      doEdits(0, 10);
+      // After a rollEditLog, Standby(nn1)'s next checkpoint would be
+      // ahead of observer(nn2).
+      nns[0].getRpcServer().rollEditLog();
 
-    NameNode nn2 = nns[2];
-    FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
+      NameNode nn2 = nns[2];
+      FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
 
-    // After standby creating a checkpoint, it will try to push the image to
-    // active and all observer, updating it's own txid to the most recent.
-    HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
-    HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
+      // After the standby creates a checkpoint, it will try to push the image to
+      // the active and all observers, updating its own txid to the most recent.
+      HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
+      HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
 
-    NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
-    cluster.transitionToStandby(2);
-    logger.removeAppender(appender);
+      NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
+      cluster.transitionToStandby(2);
 
-    for (LoggingEvent event : appender.getLog()) {
-      String message = event.getRenderedMessage();
-      if (message.contains("PutImage failed") &&
-          message.contains("FSImage has not been set in the NameNode.")) {
-        //Logs have the expected exception.
-        return;
+      for (String logLine : logCapturer.getOutput().split("\n")) {
+        if (logLine != null && logLine.contains("PutImage failed") && logLine.contains(
+            "FSImage has not been set in the NameNode.")) {
+          //Logs have the expected exception.
+ return; + } } + fail("Expected exception not present in logs."); + } finally { + logCapturer.stopCapturing(); } - fail("Expected exception not present in logs."); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java index 58d72f14d7..3741bbf015 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java @@ -93,7 +93,7 @@ import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.test.LambdaTestUtils; import org.apache.hadoop.util.ExitUtil; import org.junit.After; @@ -1372,7 +1372,7 @@ public void testSPSWhenFileHasExcessRedundancyBlocks() throws Exception { Path filePath = new Path("/zeroSizeFile"); DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0); fs.setReplication(filePath, (short) 3); - LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( + LogCapturer logs = LogCapturer.captureLogs( LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class)); fs.setStoragePolicy(filePath, "COLD"); fs.satisfyStoragePolicy(filePath); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties index 368deef402..b739b25f35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties @@ -22,9 +22,6 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n -# Only to be used for testing -log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender - # # NameNode metrics logging. # The default is to retain two namenode-metrics.log files up to 64MB each. 
@@ -32,10 +29,10 @@ log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.Pat # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref -namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA,PATTERNMATCHERAPPENDER +namenode.metrics.logger=INFO,ASYNCNNMETRICSRFA log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger} log4j.additivity.NameNodeMetricsLog=false -log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender +log4j.appender.ASYNCNNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender log4j.appender.ASYNCNNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n log4j.appender.ASYNCNNMETRICSRFA.maxFileSize=64MB log4j.appender.ASYNCNNMETRICSRFA.fileName=${hadoop.log.dir}/namenode-metrics.log @@ -48,10 +45,10 @@ log4j.appender.ASYNCNNMETRICSRFA.maxBackupIndex=1 # TODO : While migrating to log4j2, replace AsyncRFAAppender with AsyncAppender as # log4j2 properties support wrapping of other appenders to AsyncAppender using appender ref -datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA,PATTERNMATCHERAPPENDER +datanode.metrics.logger=INFO,ASYNCDNMETRICSRFA log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger} log4j.additivity.DataNodeMetricsLog=false -log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender +log4j.appender.ASYNCDNMETRICSRFA=org.apache.hadoop.logging.appenders.AsyncRFAAppender log4j.appender.ASYNCDNMETRICSRFA.conversionPattern=%d{ISO8601} %m%n log4j.appender.ASYNCDNMETRICSRFA.maxFileSize=64MB log4j.appender.ASYNCDNMETRICSRFA.fileName=${hadoop.log.dir}/datanode-metrics.log @@ -72,7 +69,7 @@ hdfs.audit.log.maxfilesize=256MB hdfs.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false -log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.hdfs.util.AsyncRFAAppender +log4j.appender.ASYNCAUDITAPPENDER=org.apache.hadoop.logging.appenders.AsyncRFAAppender log4j.appender.ASYNCAUDITAPPENDER.blocking=false log4j.appender.ASYNCAUDITAPPENDER.bufferSize=256 log4j.appender.ASYNCAUDITAPPENDER.conversionPattern=%m%n diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml index e3b3511c0c..142c1ab31d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml @@ -124,6 +124,12 @@ assertj-core test + + org.apache.hadoop + hadoop-logging + test + test-jar + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 15682eeefc..cb5f3edd05 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -36,9 +36,10 @@ import java.util.Iterator; import java.util.List; import java.util.Map; 
-import java.util.concurrent.CopyOnWriteArrayList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; + +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptFailEvent; import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider; import org.junit.After; @@ -107,12 +108,10 @@ import org.apache.hadoop.yarn.util.ControlledClock; import org.apache.hadoop.yarn.util.SystemClock; import org.apache.hadoop.yarn.util.resource.ResourceUtils; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.junit.Test; import org.mockito.ArgumentCaptor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; @@ -128,29 +127,6 @@ public FileStatus getFileStatus(Path f) throws IOException { } } - private static class TestAppender extends AppenderSkeleton { - - private final List logEvents = new CopyOnWriteArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - public void close() { - } - - @Override - protected void append(LoggingEvent arg0) { - logEvents.add(arg0); - } - - private List getLogEvents() { - return logEvents; - } - } - @BeforeClass public static void setupBeforeClass() { ResourceUtils.resetResourceTypes(new Configuration()); @@ -1724,11 +1700,10 @@ public void testReducerMemoryRequestOverriding() { for (String memoryName : ImmutableList.of( MRJobConfig.RESOURCE_TYPE_NAME_MEMORY, MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) { - TestAppender testAppender = new TestAppender(); - final Logger logger = Logger.getLogger(TaskAttemptImpl.class); + final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class); + LogCapturer logCapturer = LogCapturer.captureLogs(logger); try { TaskAttemptImpl.RESOURCE_REQUEST_CACHE.clear(); - logger.addAppender(testAppender); EventHandler eventHandler = mock(EventHandler.class); Clock clock = SystemClock.getInstance(); JobConf jobConf = new JobConf(); @@ -1741,13 +1716,11 @@ public void testReducerMemoryRequestOverriding() { getResourceInfoFromContainerRequest(taImpl, eventHandler). getMemorySize(); assertEquals(3072, memorySize); - assertTrue(testAppender.getLogEvents().stream() - .anyMatch(e -> e.getLevel() == Level.WARN && ("Configuration " + - "mapreduce.reduce.resource." + memoryName + "=3Gi is " + - "overriding the mapreduce.reduce.memory.mb=2048 configuration") - .equals(e.getMessage()))); + assertTrue(logCapturer.getOutput().contains( + "Configuration " + "mapreduce.reduce.resource." 
+ memoryName + "=3Gi is " + + "overriding the mapreduce.reduce.memory.mb=2048 configuration")); } finally { - logger.removeAppender(testAppender); + logCapturer.stopCapturing(); } } } @@ -1809,10 +1782,9 @@ public void testReducerCpuRequestDefaultMemory() { @Test public void testReducerCpuRequestOverriding() { - TestAppender testAppender = new TestAppender(); - final Logger logger = Logger.getLogger(TaskAttemptImpl.class); + final Logger logger = LoggerFactory.getLogger(TaskAttemptImpl.class); + final LogCapturer logCapturer = LogCapturer.captureLogs(logger); try { - logger.addAppender(testAppender); EventHandler eventHandler = mock(EventHandler.class); Clock clock = SystemClock.getInstance(); JobConf jobConf = new JobConf(); @@ -1825,13 +1797,11 @@ public void testReducerCpuRequestOverriding() { getResourceInfoFromContainerRequest(taImpl, eventHandler). getVirtualCores(); assertEquals(7, vCores); - assertTrue(testAppender.getLogEvents().stream().anyMatch( - e -> e.getLevel() == Level.WARN && ("Configuration " + - "mapreduce.reduce.resource.vcores=7 is overriding the " + - "mapreduce.reduce.cpu.vcores=9 configuration").equals( - e.getMessage()))); + assertTrue(logCapturer.getOutput().contains( + "Configuration " + "mapreduce.reduce.resource.vcores=7 is overriding the " + + "mapreduce.reduce.cpu.vcores=9 configuration")); } finally { - logger.removeAppender(testAppender); + logCapturer.stopCapturing(); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml index 7530428d75..d124c97e9d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml @@ -72,6 +72,12 @@ assertj-core test + + org.apache.hadoop + hadoop-logging + test + test-jar + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java index a0223dedd6..43ab170160 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java @@ -23,12 +23,10 @@ import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; -import java.io.Flushable; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.util.ArrayList; -import java.util.Enumeration; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -44,16 +42,13 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.SecureIOUtils; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.util.ProcessTree; import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.log4j.Appender; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; import 
org.slf4j.LoggerFactory; import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; @@ -276,42 +271,7 @@ public static synchronized void syncLogsShutdown( } // flush & close all appenders - LogManager.shutdown(); - } - - @SuppressWarnings("unchecked") - public static synchronized void syncLogs() { - // flush standard streams - // - System.out.flush(); - System.err.flush(); - - // flush flushable appenders - // - final Logger rootLogger = Logger.getRootLogger(); - flushAppenders(rootLogger); - final Enumeration allLoggers = rootLogger.getLoggerRepository(). - getCurrentLoggers(); - while (allLoggers.hasMoreElements()) { - final Logger l = allLoggers.nextElement(); - flushAppenders(l); - } - } - - @SuppressWarnings("unchecked") - private static void flushAppenders(Logger l) { - final Enumeration allAppenders = l.getAllAppenders(); - while (allAppenders.hasMoreElements()) { - final Appender a = allAppenders.nextElement(); - if (a instanceof Flushable) { - try { - ((Flushable) a).flush(); - } catch (IOException ioe) { - System.err.println(a + ": Failed to flush!" - + StringUtils.stringifyException(ioe)); - } - } - } + HadoopLoggerUtils.shutdownLogManager(); } public static ScheduledExecutorService createLogSyncer() { @@ -336,7 +296,7 @@ public void run() { new Runnable() { @Override public void run() { - TaskLog.syncLogs(); + HadoopLoggerUtils.syncLogs(); } }, 0L, 5L, TimeUnit.SECONDS); return scheduler; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java index e91b4c1e85..f83835f538 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java @@ -28,24 +28,19 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; -import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.LineNumberReader; -import java.io.StringReader; import org.junit.Before; import org.junit.Test; import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.mapred.TaskReport; import org.apache.hadoop.mapreduce.JobStatus.State; import org.apache.hadoop.mapreduce.protocol.ClientProtocol; -import org.apache.log4j.Layout; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.WriterAppender; import org.mockito.stubbing.Answer; +import org.slf4j.LoggerFactory; /** * Test to make sure that command line output for @@ -73,55 +68,53 @@ public void setUp() throws IOException { @Test public void testJobMonitorAndPrint() throws Exception { - JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f, - 0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", - "tmp-queue", "tmp-jobfile", "tmp-url", true); - JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f, - 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname", - "tmp-queue", "tmp-jobfile", "tmp-url", true); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(Job.class)); + try { + JobStatus jobStatus_1 = + new 
JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, State.RUNNING, + JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", + true); + JobStatus jobStatus_2 = + new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, + "tmp-user", "tmp-jobname", "tmp-queue", "tmp-jobfile", "tmp-url", true); - doAnswer((Answer) invocation -> - TaskCompletionEvent.EMPTY_ARRAY).when(job) - .getTaskCompletionEvents(anyInt(), anyInt()); + doAnswer((Answer) invocation -> TaskCompletionEvent.EMPTY_ARRAY).when( + job).getTaskCompletionEvents(anyInt(), anyInt()); - doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class)); - when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2); - // setup the logger to capture all logs - Layout layout = - Logger.getRootLogger().getAppender("stdout").getLayout(); - ByteArrayOutputStream os = new ByteArrayOutputStream(); - WriterAppender appender = new WriterAppender(layout, os); - appender.setThreshold(Level.ALL); - Logger qlogger = Logger.getLogger(Job.class); - qlogger.addAppender(appender); + doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class)); + when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2); - job.monitorAndPrintJob(); + job.monitorAndPrintJob(); - qlogger.removeAppender(appender); - LineNumberReader r = new LineNumberReader(new StringReader(os.toString())); - String line; - boolean foundHundred = false; - boolean foundComplete = false; - boolean foundUber = false; - String uberModeMatch = "uber mode : true"; - String progressMatch = "map 100% reduce 100%"; - String completionMatch = "completed successfully"; - while ((line = r.readLine()) != null) { - if (line.contains(uberModeMatch)) { - foundUber = true; + boolean foundHundred = false; + boolean foundComplete = false; + boolean foundUber = false; + String uberModeMatch = "uber mode : true"; + String progressMatch = "map 100% reduce 100%"; + String completionMatch = "completed successfully"; + for (String logLine : logCapturer.getOutput().split("\n")) { + if (logLine.contains(uberModeMatch)) { + foundUber = true; + } + if (logLine.contains(progressMatch)) { + foundHundred = true; + } + if (logLine.contains(completionMatch)) { + foundComplete = true; + } + if (foundUber && foundHundred && foundComplete) { + break; + } } - foundHundred = line.contains(progressMatch); - if (foundHundred) - break; - } - line = r.readLine(); - foundComplete = line.contains(completionMatch); - assertTrue(foundUber); - assertTrue(foundHundred); - assertTrue(foundComplete); + assertTrue(foundUber); + assertTrue(foundHundred); + assertTrue(foundComplete); - System.out.println("The output of job.toString() is : \n" + job.toString()); - assertTrue(job.toString().contains("Number of maps: 5\n")); - assertTrue(job.toString().contains("Number of reduces: 5\n")); + System.out.println("The output of job.toString() is : \n" + job.toString()); + assertTrue(job.toString().contains("Number of maps: 5\n")); + assertTrue(job.toString().contains("Number of reduces: 5\n")); + } finally { + logCapturer.stopCapturing(); + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml index 17358a37da..632e972d5a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml @@ -128,6 +128,12 @@ assertj-core test + + org.apache.hadoop + hadoop-logging + test + test-jar + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index 0bdc721217..063f185d3d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -34,7 +34,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; @@ -45,7 +44,6 @@ import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; @@ -55,6 +53,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.JobPriority; import org.apache.hadoop.mapreduce.JobStatus.State; @@ -110,13 +109,6 @@ import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider; import org.apache.hadoop.yarn.util.resource.ResourceUtils; -import org.apache.log4j.Appender; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Layout; -import org.apache.log4j.Level; -import org.apache.log4j.SimpleLayout; -import org.apache.log4j.WriterAppender; -import org.apache.log4j.spi.LoggingEvent; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -144,29 +136,6 @@ public class TestYARNRunner { MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS.lastIndexOf("%")); private static final String CUSTOM_RESOURCE_NAME = "a-custom-resource"; - private static class TestAppender extends AppenderSkeleton { - - private final List logEvents = new CopyOnWriteArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - public void close() { - } - - @Override - protected void append(LoggingEvent arg0) { - logEvents.add(arg0); - } - - private List getLogEvents() { - return logEvents; - } - } - private YARNRunner yarnRunner; private ResourceMgrDelegate resourceMgrDelegate; private YarnConfiguration conf; @@ -549,38 +518,48 @@ public void testAMAdminCommandOpts() throws Exception { assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex); } } + @Test(timeout=20000) public void testWarnCommandOpts() throws Exception { - org.apache.log4j.Logger logger = - org.apache.log4j.Logger.getLogger(YARNRunner.class); - - ByteArrayOutputStream bout = new ByteArrayOutputStream(); - Layout layout = new SimpleLayout(); - Appender appender = new WriterAppender(layout, bout); - logger.addAppender(appender); - - JobConf jobConf = new JobConf(); - - jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo"); - 
jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar"); - - YARNRunner yarnRunner = new YARNRunner(jobConf); - - @SuppressWarnings("unused") - ApplicationSubmissionContext submissionContext = - buildSubmitContext(yarnRunner, jobConf); - - String logMsg = bout.toString(); - assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + - "yarn.app.mapreduce.am.admin-command-opts can cause programs to no " + - "longer function if hadoop native libraries are used. These values " + - "should be set as part of the LD_LIBRARY_PATH in the app master JVM " + - "env using yarn.app.mapreduce.am.admin.user.env config settings.")); - assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + - "yarn.app.mapreduce.am.command-opts can cause programs to no longer " + - "function if hadoop native libraries are used. These values should " + - "be set as part of the LD_LIBRARY_PATH in the app master JVM env " + - "using yarn.app.mapreduce.am.env config settings.")); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class)); + try { + JobConf jobConf = new JobConf(); + + jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, + "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo"); + jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar"); + + YARNRunner yarnRunner = new YARNRunner(jobConf); + + @SuppressWarnings("unused") + ApplicationSubmissionContext submissionContext = buildSubmitContext(yarnRunner, jobConf); + + boolean isFoundOne = false; + boolean isFoundTwo = false; + for (String logLine : logCapturer.getOutput().split("\n")) { + if (logLine == null) { + continue; + } + if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in " + + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no " + + "longer function if hadoop native libraries are used. These values " + + "should be set as part of the LD_LIBRARY_PATH in the app master JVM " + + "env using yarn.app.mapreduce.am.admin.user.env config settings.")) { + isFoundOne = true; + } + if (logLine.contains("WARN") && logLine.contains("Usage of -Djava.library.path in " + + "yarn.app.mapreduce.am.command-opts can cause programs to no longer " + + "function if hadoop native libraries are used. 
These values should " + + "be set as part of the LD_LIBRARY_PATH in the app master JVM env " + + "using yarn.app.mapreduce.am.env config settings.")) { + isFoundTwo = true; + } + } + assertTrue(isFoundOne); + assertTrue(isFoundTwo); + } finally { + logCapturer.stopCapturing(); + } } @Test(timeout=20000) @@ -996,10 +975,7 @@ public void testAMRMemoryRequestOverriding() throws Exception { for (String memoryName : ImmutableList.of( MRJobConfig.RESOURCE_TYPE_NAME_MEMORY, MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY)) { - TestAppender testAppender = new TestAppender(); - org.apache.log4j.Logger logger = - org.apache.log4j.Logger.getLogger(YARNRunner.class); - logger.addAppender(testAppender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger(YARNRunner.class)); try { JobConf jobConf = new JobConf(); jobConf.set(MRJobConfig.MR_AM_RESOURCE_PREFIX + memoryName, "3 Gi"); @@ -1017,13 +993,17 @@ public void testAMRMemoryRequestOverriding() throws Exception { long memorySize = resourceRequest.getCapability().getMemorySize(); Assert.assertEquals(3072, memorySize); - assertTrue(testAppender.getLogEvents().stream().anyMatch( - e -> e.getLevel() == Level.WARN && ("Configuration " + - "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is " + - "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + - "configuration").equals(e.getMessage()))); + boolean isLogFound = false; + for (String logLine : logCapturer.getOutput().split("\n")) { + if (logLine != null && logLine.contains("WARN") && logLine.contains( + "Configuration " + "yarn.app.mapreduce.am.resource." + memoryName + "=3Gi is " + + "overriding the yarn.app.mapreduce.am.resource.mb=2048 " + "configuration")) { + isLogFound = true; + } + } + assertTrue("Log line could not be found", isLogFound); } finally { - logger.removeAppender(testAppender); + logCapturer.stopCapturing(); } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java index 338f1172b0..cc93e5629d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestChild.java @@ -29,8 +29,6 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.HadoopTestCase; import org.apache.hadoop.mapred.JobConf; -import org.apache.log4j.Level; -import org.junit.Before; import org.junit.Test; import static org.junit.Assert.assertTrue; @@ -76,12 +74,10 @@ protected void setup(Context context) throws IOException, mapJavaOpts, mapJavaOpts, MAP_OPTS_VAL); } - - Level logLevel = - Level.toLevel(conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, - Level.INFO.toString())); - assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + - logLevel, logLevel, Level.OFF); + + String logLevel = conf.get(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "INFO"); + assertEquals(JobConf.MAPRED_MAP_TASK_LOG_LEVEL + "has value of " + logLevel, logLevel, + "OFF"); } } @@ -108,12 +104,10 @@ protected void setup(Context context) reduceJavaOpts, reduceJavaOpts, REDUCE_OPTS_VAL); } - - Level logLevel = - Level.toLevel(conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, - Level.INFO.toString())); - 
assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + "has value of " + - logLevel, logLevel, Level.OFF); + + String logLevel = conf.get(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "INFO"); + assertEquals(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL + " has value of " + logLevel, "OFF", + logLevel); } } @@ -127,9 +121,9 @@ private Job submitAndValidateJob(JobConf conf, int numMaps, int numReds, conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, MAP_OPTS_VAL); conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, REDUCE_OPTS_VAL); } - - conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, Level.OFF.toString()); - conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, Level.OFF.toString()); + + conf.set(JobConf.MAPRED_MAP_TASK_LOG_LEVEL, "OFF"); + conf.set(JobConf.MAPRED_REDUCE_TASK_LOG_LEVEL, "OFF"); Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, numMaps, numReds); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java index 9e58d460d1..d1fc8c04aa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java @@ -25,6 +25,7 @@ import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.test.LambdaTestUtils; import org.junit.Assert; @@ -50,8 +51,6 @@ import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -64,8 +63,7 @@ public class TestJHSSecurity { @Test public void testDelegationToken() throws Exception { - org.apache.log4j.Logger rootLogger = LogManager.getRootLogger(); - rootLogger.setLevel(Level.DEBUG); + HadoopLoggerUtils.setLogLevel("root", "DEBUG"); final YarnConfiguration conf = new YarnConfiguration(new JobConf()); // Just a random principle diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java index 43d3abe4f8..f653ce7c0c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobs.java @@ -99,7 +99,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.WorkflowPriorityMappingsManager.WorkflowPriorityMapping; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; -import org.apache.log4j.Level; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; 
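The TestJHSSecurity hunk above shows the pattern that recurs throughout this patch: direct org.apache.log4j root-logger manipulation is replaced by a string-based HadoopLoggerUtils facade. A minimal sketch of what such a facade can look like, assuming a log4j 1.x backend; the class name SetLogLevelSketch and its body are illustrative, only the setLogLevel(String, String) entry point is taken from the patch:

    import org.apache.log4j.Level;
    import org.apache.log4j.LogManager;
    import org.apache.log4j.Logger;

    public final class SetLogLevelSketch {

      private SetLogLevelSketch() {
      }

      public static void setLogLevel(String loggerName, String levelName) {
        // "root" addresses the root logger, matching the call sites in this patch.
        Logger logger = "root".equalsIgnoreCase(loggerName)
            ? LogManager.getRootLogger()
            : LogManager.getLogger(loggerName);
        // Level.toLevel maps names such as "DEBUG", "ALL" and "OFF";
        // unknown names fall back to DEBUG.
        logger.setLevel(Level.toLevel(levelName));
      }
    }

Keeping the level as a plain String is what lets callers compile without any log4j import, which is the point of the module split.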
@@ -557,9 +556,9 @@ private void testJobClassloader(boolean useCustomClasses) throws IOException, systemClasses); } sleepConf.set(MRJobConfig.IO_SORT_MB, TEST_IO_SORT_MB); - sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString()); - sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString()); - sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, Level.ALL.toString()); + sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL"); + sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL"); + sleepConf.set(MRJobConfig.REDUCE_LOG_LEVEL, "ALL"); sleepConf.set(MRJobConfig.MAP_JAVA_OPTS, "-verbose:class"); final SleepJob sleepJob = new SleepJob(); sleepJob.setConf(sleepConf); @@ -856,11 +855,11 @@ public void testContainerRollingLog() throws IOException, final SleepJob sleepJob = new SleepJob(); final JobConf sleepConf = new JobConf(mrCluster.getConfig()); - sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString()); + sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, "ALL"); final long userLogKb = 4; sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb); sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3); - sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString()); + sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, "ALL"); final long amLogKb = 7; sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb); sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7); diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index c4dfd2f9d7..3ebab5a30b 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1944,6 +1944,18 @@ <artifactId>log4j-web</artifactId> <version>${log4j2.version}</version> </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <version>${hadoop.version}</version> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <version>${hadoop.version}</version> + <scope>test</scope> + <type>test-jar</type> + </dependency> diff --git a/hadoop-tools/hadoop-azure/pom.xml b/hadoop-tools/hadoop-azure/pom.xml index e8c5fb78ef..373b5a07df 100644 --- a/hadoop-tools/hadoop-azure/pom.xml +++ b/hadoop-tools/hadoop-azure/pom.xml @@ -349,7 +349,12 @@ <artifactId>hamcrest-library</artifactId> <scope>test</scope> </dependency> - + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <scope>test</scope> + <type>test-jar</type> + </dependency> diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java index 1e7330fbd0..2a124c1c99 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java @@ -32,7 +32,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import org.apache.hadoop.logging.LogCapturer; import org.junit.Before; import org.junit.Rule; import org.junit.Test; diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java index 476d7a4f01..6acab8fe2a 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java @@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import 
org.apache.hadoop.logging.LogCapturer; import org.junit.Test; import org.slf4j.Logger; diff --git a/hadoop-tools/hadoop-distcp/pom.xml b/hadoop-tools/hadoop-distcp/pom.xml index 5194e51d81..06c2e192f0 100644 --- a/hadoop-tools/hadoop-distcp/pom.xml +++ b/hadoop-tools/hadoop-distcp/pom.xml @@ -81,6 +81,12 @@ <artifactId>hadoop-hdfs-client</artifactId> <scope>provided</scope> </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <scope>test</scope> + <type>test-jar</type> + </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index aa42cb968d..d54fbaa86f 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCp; import org.apache.hadoop.tools.DistCpConstants; @@ -701,8 +702,8 @@ public void testDistCpWithIterator() throws Exception { GenericTestUtils .createFiles(remoteFS, source, getDepth(), getWidth(), getWidth()); - GenericTestUtils.LogCapturer log = - GenericTestUtils.LogCapturer.captureLogs(SimpleCopyListing.LOG); + LogCapturer log = + LogCapturer.captureLogs(SimpleCopyListing.LOG); String options = "-useiterator -update -delete" + getDefaultCLIOptions(); DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, source.toString(), diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java index 02fd48a071..661573f9d8 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java @@ -27,11 +27,10 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.tools.rumen.datatypes.*; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; /** * A default parser for MapReduce job configuration properties. 
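As the azure and distcp hunks above show, tests that previously reached into GenericTestUtils.LogCapturer now import the relocated org.apache.hadoop.logging.LogCapturer. A hedged usage sketch built only from the methods visible in this patch (captureLogs, getOutput, stopCapturing); the logger name "example" and the log message are illustrative:

    import org.apache.hadoop.logging.LogCapturer;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogCapturerUsageSketch {
      public static void main(String[] args) {
        Logger log = LoggerFactory.getLogger("example");
        LogCapturer capturer = LogCapturer.captureLogs(log);
        try {
          log.warn("disk usage above threshold");
          // getOutput() returns everything captured since captureLogs().
          if (!capturer.getOutput().contains("disk usage above threshold")) {
            throw new AssertionError("expected warning was not captured");
          }
        } finally {
          // Detach the capturing appender so later tests are unaffected.
          capturer.stopCapturing();
        }
      }
    }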
@@ -83,7 +82,7 @@ public class MapReduceJobPropertiesParser implements JobPropertyParser { // turn off the warning w.r.t deprecated mapreduce keys static { - Logger.getLogger(Configuration.class).setLevel(Level.OFF); + HadoopLoggerUtils.setLogLevel(Configuration.class.getName(), "OFF"); } // Accepts a key if there is a corresponding key in the current mapreduce diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 81e888472d..d901513f2c 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -576,16 +576,6 @@ - - - - - - - - - - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index a15c78e426..b41923ef9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -63,6 +63,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; @@ -126,7 +127,6 @@ import org.apache.hadoop.yarn.util.TimelineServiceHelper; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; -import org.apache.log4j.LogManager; import org.apache.hadoop.classification.VisibleForTesting; import com.sun.jersey.api.client.ClientHandlerException; @@ -403,7 +403,7 @@ public static void main(String[] args) { result = appMaster.finish(); } catch (Throwable t) { LOG.error("Error running ApplicationMaster", t); - LogManager.shutdown(); + HadoopLoggerUtils.shutdownLogManager(); ExitUtil.terminate(1, t); } finally { if (appMaster != null) { @@ -529,7 +529,7 @@ public boolean init(String[] args) throws ParseException, IOException { //Check whether customer log4j.properties file exists if (fileExist(log4jPath)) { try { - Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class, + HadoopLoggerUtils.updateLog4jConfiguration(ApplicationMaster.class, log4jPath); } catch (Exception e) { LOG.warn("Can not set up custom log4j properties. 
" + e); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 098f3981cf..dc23682f1a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -52,6 +52,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -451,7 +452,7 @@ public boolean init(String[] args) throws ParseException { if (cliParser.hasOption("log_properties")) { String log4jPath = cliParser.getOptionValue("log_properties"); try { - Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath); + HadoopLoggerUtils.updateLog4jConfiguration(Client.class, log4jPath); } catch (Exception e) { LOG.warn("Can not set up custom log4j properties. " + e); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java deleted file mode 100644 index 0301a6880f..0000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.yarn.applications.distributedshell; - -import java.io.FileInputStream; -import java.io.InputStream; -import java.util.Map.Entry; -import java.util.Properties; - -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; - -public class Log4jPropertyHelper { - - public static void updateLog4jConfiguration(Class<?> targetClass, - String log4jPath) throws Exception { - Properties customProperties = new Properties(); - try ( - FileInputStream fs = new FileInputStream(log4jPath); - InputStream is = targetClass.getResourceAsStream("/log4j.properties")) { - customProperties.load(fs); - Properties originalProperties = new Properties(); - originalProperties.load(is); - for (Entry<Object, Object> entry : customProperties.entrySet()) { - originalProperties.setProperty(entry.getKey().toString(), entry - .getValue().toString()); - } - LogManager.resetConfiguration(); - PropertyConfigurator.configure(originalProperties); - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java index 60c06e9aa7..553465313d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSecureApiServiceClient.java @@ -43,7 +43,6 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.client.util.YarnClientUtils; -import org.apache.log4j.Logger; import org.eclipse.jetty.server.Server; import org.eclipse.jetty.server.ServerConnector; import org.eclipse.jetty.servlet.ServletContextHandler; @@ -52,6 +51,8 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Test Spnego Client Login. 
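The Log4jPropertyHelper deleted above overlaid a user-supplied properties file on the bundled log4j.properties and then reconfigured log4j; per the Client and ApplicationMaster hunks, the same entry point now lives on HadoopLoggerUtils. A call sketch under that assumption (the path is illustrative; the distributed shell passes the value of its -log_properties option):

    import org.apache.hadoop.logging.HadoopLoggerUtils;

    public class UpdateLog4jConfigurationSketch {
      public static void main(String[] args) throws Exception {
        // Overlays /tmp/custom-log4j.properties (hypothetical) on the
        // log4j.properties found on the classpath of the given class.
        HadoopLoggerUtils.updateLog4jConfiguration(
            UpdateLog4jConfigurationSketch.class, "/tmp/custom-log4j.properties");
      }
    }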
@@ -76,8 +77,7 @@ public class TestSecureApiServiceClient extends KerberosSecurityTestcase { private Map<String, String> props; private static Server server; - private static Logger LOG = Logger - .getLogger(TestSecureApiServiceClient.class); + private static Logger LOG = LoggerFactory.getLogger(TestSecureApiServiceClient.class); private ApiServiceClient asc; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java index f8f948dd88..52ae87671a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/component/TestComponent.java @@ -30,7 +30,6 @@ import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent; import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType; -import org.apache.log4j.Logger; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; @@ -49,8 +48,6 @@ */ public class TestComponent { - static final Logger LOG = Logger.getLogger(TestComponent.class); - @Rule public ServiceTestUtils.ServiceFSWatcher rule = new ServiceTestUtils.ServiceFSWatcher(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java index fa5a5870c4..4fc87d95b6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogDeletionService.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController; @@ -40,7 +41,6 @@ import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcase; import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder; import org.apache.hadoop.yarn.logaggregation.testutils.LogAggregationTestcaseBuilder.AppDescriptor; -import org.apache.log4j.Level; import static org.apache.hadoop.yarn.conf.YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT; import static org.apache.hadoop.yarn.logaggregation.LogAggregationTestUtils.enableFileControllers; @@ -67,7 +67,7 @@ public class TestAggregatedLogDeletionService { @BeforeAll public static void beforeClass() { - org.apache.log4j.Logger.getRootLogger().setLevel(Level.DEBUG); + HadoopLoggerUtils.setLogLevel("root", "DEBUG"); } @BeforeEach diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java index 346239f8e1..0fd2841fcd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java @@ -28,6 +28,7 @@ import org.slf4j.Marker; import org.slf4j.MarkerFactory; +import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; import org.apache.log4j.LogManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java index 6b0570a32e..c04fba0a17 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/NavBlock.java @@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.server.webapp.WebPageUtils; -import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender; +import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java index 4128546748..05031adc5c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/ErrorsAndWarningsBlock.java @@ -24,7 +24,7 @@ import org.apache.hadoop.util.GenericsUtil; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.security.AdminACLsManager; -import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender; +import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java index 87d511b172..8e24e8cd6b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NavBlock.java @@ -20,7 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.server.webapp.WebPageUtils; -import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender; +import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java index c849619393..12b6dd7f69 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java @@ -50,11 +50,12 @@ import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext; import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext; import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext; -import org.apache.log4j.Logger; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertEquals; @@ -63,8 +64,7 @@ public class TestContainersMonitorResourceChange { - static final Logger LOG = Logger - .getLogger(TestContainersMonitorResourceChange.class); + static final Logger LOG = LoggerFactory.getLogger(TestContainersMonitorResourceChange.class); private ContainersMonitorImpl containersMonitor; private MockExecutor executor; private Configuration conf; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index 9d096d20c5..7ea8a6209e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -245,6 +245,13 @@ <scope>test</scope> </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-logging</artifactId> + <scope>test</scope> + <type>test-jar</type> + </dependency> + <dependency> <groupId>org.apache.hadoop</groupId> diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java index dc69eba2bb..80cc9fc8fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java @@ -32,7 +32,6 @@ import org.apache.hadoop.yarn.api.records.SchedulingRequest; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; -import org.apache.log4j.Logger; import java.util.Collections; import java.util.HashMap; @@ -42,6 +41,9 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.LongBinaryOperator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * In-memory mapping between applications/container-tags and nodes/racks. * Required by constrained affinity/anti-affinity and cardinality placement. @@ -50,8 +52,7 @@ @InterfaceStability.Unstable public class AllocationTagsManager { - private static final Logger LOG = Logger.getLogger( - AllocationTagsManager.class); + private static final Logger LOG = LoggerFactory.getLogger(AllocationTagsManager.class); private ReentrantReadWriteLock.ReadLock readLock; private ReentrantReadWriteLock.WriteLock writeLock; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java index c17d4f6d7b..15e2d34b00 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NavBlock.java @@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.server.webapp.WebPageUtils; -import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender; +import org.apache.hadoop.logging.appenders.Log4jWarningErrorMetricsAppender; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.LI; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java index 9a85315628..12b017a921 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.LambdaTestUtils; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.util.Time; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -231,8 +232,8 @@ public void testFederationStateStoreServiceInitialHeartbeatDelay() throws Except conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INITIAL_DELAY, 10); conf.set(YarnConfiguration.RM_CLUSTER_ID, subClusterId.getId()); - GenericTestUtils.LogCapturer logCapture = - GenericTestUtils.LogCapturer.captureLogs(FederationStateStoreService.LOG); + LogCapturer logCapture = + LogCapturer.captureLogs(FederationStateStoreService.LOG); final MockRM rm = new MockRM(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java index a1989d5c0c..dc2d18d552 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java @@ -28,17 +28,13 @@ import java.io.File; import java.io.FileReader; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; @@ -83,6 +79,7 @@ import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; +import org.slf4j.LoggerFactory; public class TestSystemMetricsPublisherForV2 { @@ -301,42 +298,15 @@ public void testPublishContainerMetrics() throws Exception { @Test(timeout = 10000) public void testPutEntityWhenNoCollector() throws Exception { // Validating the logs as DrainDispatcher won't throw exception - class TestAppender extends AppenderSkeleton { - private final List<LoggingEvent> log = new ArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override - public void close() { - } - - public List<LoggingEvent> getLog() { - return new ArrayList<>(log); - } - } - - TestAppender appender = new TestAppender(); - final Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); - + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { RMApp app = createRMApp(ApplicationId.newInstance(0, 1)); metricsPublisher.appCreated(app, 
app.getStartTime()); dispatcher.await(); - for (LoggingEvent event : appender.getLog()) { - assertFalse("Dispatcher Crashed", - event.getRenderedMessage().contains("Error in dispatcher thread")); - } + assertFalse("Dispatcher Crashed", + logCapturer.getOutput().contains("Error in dispatcher thread")); } finally { - logger.removeAppender(appender); + logCapturer.stopCapturing(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java index 2e7b01ed50..07630f5461 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java @@ -18,12 +18,11 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework.ProportionalCapacityPreemptionPolicyMockFramework; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; import org.junit.Test; import java.io.IOException; import java.util.Map; @@ -157,7 +156,7 @@ public void testPreemptionToBalanceUsedPlusPendingLessThanGuaranteed() @Test public void testPreemptionToBalanceWithVcoreResource() throws IOException { - Logger.getRootLogger().setLevel(Level.DEBUG); + HadoopLoggerUtils.setLogLevel("root", "DEBUG"); String labelsConfig = "=100:100,true"; // default partition String nodesConfig = "n1="; // only one node String queuesConfig = @@ -195,7 +194,7 @@ public void testPreemptionToBalanceWithVcoreResource() throws IOException { @Test public void testPreemptionToBalanceWithConfiguredTimeout() throws IOException { - Logger.getRootLogger().setLevel(Level.DEBUG); + HadoopLoggerUtils.setLogLevel("root", "DEBUG"); String labelsConfig = "=100:100,true"; // default partition String nodesConfig = "n1="; // only one node String queuesConfig = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java index 024ec86f7d..c6066fd208 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/mockframework/ProportionalCapacityPreemptionPolicyMockFramework.java @@ -16,6 +16,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.mockframework; +import org.apache.hadoop.logging.HadoopLoggerUtils; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicyForNodePartitions; import org.slf4j.Logger; @@ -110,8 +111,7 @@ private void resetResourceInformationMap() { public void setup() { resetResourceInformationMap(); - org.apache.log4j.Logger.getRootLogger().setLevel( - org.apache.log4j.Level.DEBUG); + HadoopLoggerUtils.setLogLevel("root", "DEBUG"); conf = new CapacitySchedulerConfiguration(new Configuration(false)); conf.setLong( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java index 6aaa15f3e1..c5add68f8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/TestMetricsInvariantChecker.java @@ -25,9 +25,10 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; -import org.apache.log4j.Logger; import org.junit.Before; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static junit.framework.TestCase.fail; @@ -37,8 +38,7 @@ * the invariant throws in case the invariants are not respected. 
*/ public class TestMetricsInvariantChecker { - public final static Logger LOG = - Logger.getLogger(TestMetricsInvariantChecker.class); + public final static Logger LOG = LoggerFactory.getLogger(TestMetricsInvariantChecker.class); private MetricsSystem metricsSystem; private MetricsInvariantChecker ic; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java index 38fbcd8415..68bbc94f97 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java @@ -19,6 +19,7 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.logging.LogCapturer; import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; @@ -29,19 +30,13 @@ import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; +import org.slf4j.LoggerFactory; import java.util.Collections; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration.parseResourceConfigValue; import static org.junit.Assert.assertEquals; @@ -54,29 +49,6 @@ public class TestFairSchedulerConfiguration { private static final String A_CUSTOM_RESOURCE = "a-custom-resource"; - private static class TestAppender extends AppenderSkeleton { - - private final List<LoggingEvent> logEvents = new CopyOnWriteArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - public void close() { - } - - @Override - protected void append(LoggingEvent arg0) { - logEvents.add(arg0); - } - - private List<LoggingEvent> getLogEvents() { - return logEvents; - } - } - @Rule public ExpectedException exception = ExpectedException.none(); @@ -751,9 +723,7 @@ private void initResourceTypes() { @Test public void testMemoryIncrementConfiguredViaMultipleProperties() { - TestAppender testAppender = new TestAppender(); - Logger logger = LogManager.getRootLogger(); - logger.addAppender(testAppender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { Configuration conf = new Configuration(); conf.set("yarn.scheduler.increment-allocation-mb", "7"); @@ -763,23 +733,19 @@ public void testMemoryIncrementConfiguredViaMultipleProperties() { FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf); Resource 
increment = fsc.getIncrementAllocation(); Assert.assertEquals(13L, increment.getMemorySize()); - assertTrue("Warning message is not logged when specifying memory " + - "increment via multiple properties", - testAppender.getLogEvents().stream().anyMatch( - e -> e.getLevel() == Level.WARN && ("Configuration " + - "yarn.resource-types.memory-mb.increment-allocation=13 is " + - "overriding the yarn.scheduler.increment-allocation-mb=7 " + - "property").equals(e.getMessage()))); + assertTrue("Warning message is not logged when specifying memory " + + "increment via multiple properties", logCapturer.getOutput().contains("Configuration " + + "yarn.resource-types.memory-mb.increment-allocation=13 is " + + "overriding the yarn.scheduler.increment-allocation-mb=7 " + + "property")); } finally { - logger.removeAppender(testAppender); + logCapturer.stopCapturing(); } } @Test public void testCpuIncrementConfiguredViaMultipleProperties() { - TestAppender testAppender = new TestAppender(); - Logger logger = LogManager.getRootLogger(); - logger.addAppender(testAppender); + LogCapturer logCapturer = LogCapturer.captureLogs(LoggerFactory.getLogger("root")); try { Configuration conf = new Configuration(); conf.set("yarn.scheduler.increment-allocation-vcores", "7"); @@ -789,15 +755,13 @@ public void testCpuIncrementConfiguredViaMultipleProperties() { FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf); Resource increment = fsc.getIncrementAllocation(); Assert.assertEquals(13, increment.getVirtualCores()); - assertTrue("Warning message is not logged when specifying CPU vCores " + - "increment via multiple properties", - testAppender.getLogEvents().stream().anyMatch( - e -> e.getLevel() == Level.WARN && ("Configuration " + - "yarn.resource-types.vcores.increment-allocation=13 is " + - "overriding the yarn.scheduler.increment-allocation-vcores=7 " + - "property").equals(e.getMessage()))); + assertTrue("Warning message is not logged when specifying CPU vCores " + + "increment via multiple properties", logCapturer.getOutput().contains("Configuration " + + "yarn.resource-types.vcores.increment-allocation=13 is " + + "overriding the yarn.scheduler.increment-allocation-vcores=7 " + + "property")); } finally { - logger.removeAppender(testAppender); + logCapturer.stopCapturing(); } } }
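Taken together, the test conversions in this section converge on one assertion pattern: capture the root logger, run the code under test, then substring-match the captured text instead of comparing log4j LoggingEvent levels and messages. A condensed sketch of that pattern (the warning text is illustrative); note the trade-off that a contains() check is less precise than the old Level.WARN equality, which is why some converted tests above also match on the literal "WARN" marker:

    import org.apache.hadoop.logging.LogCapturer;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class OverrideWarningAssertionSketch {
      public static void main(String[] args) {
        // "root" mirrors the LoggerFactory.getLogger("root") call sites in this patch.
        Logger root = LoggerFactory.getLogger("root");
        LogCapturer capturer = LogCapturer.captureLogs(root);
        try {
          root.warn("Configuration a.b.c=13 is overriding the a.b.d=7 property");
          if (!capturer.getOutput().contains("is overriding the")) {
            throw new AssertionError("expected override warning was not logged");
          }
        } finally {
          capturer.stopCapturing();
        }
      }
    }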