diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 5c2df9acf4..8632c567aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -310,4 +310,14 @@
+
+
+
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1ab7edd6ad..e5e21e4307 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -733,12 +733,43 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME = "default";
public static final String DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY = "dfs.namenode.audit.log.token.tracking.id";
public static final boolean DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT = false;
- public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY = "dfs.namenode.audit.log.async";
+ /**
+ * Deprecated. Use log4j properties instead.
+ * Set the system environment variable HDFS_AUDIT_LOGGER, which in turn assigns its value to
+ * "hdfs.audit.logger" in the log4j properties, to determine the log level and appender.
+ */
+ @Deprecated
+ public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY = "dfs.namenode.audit.log.async";
+ @Deprecated
public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT = false;
- public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_KEY = "dfs.namenode.audit.log.async.blocking";
+
+ /**
+ * Deprecated. Use log4j properties instead.
+ * Set value to Async appender "blocking" property as part of log4j properties configuration.
+ *
+ * For example,
+ * log4j.appender.ASYNCAPPENDER=org.apache.log4j.AsyncAppender
+ * log4j.appender.ASYNCAPPENDER.blocking=false
+ */
+ @Deprecated
+ public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_KEY =
+ "dfs.namenode.audit.log.async.blocking";
+ @Deprecated
public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_DEFAULT = true;
- public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_KEY = "dfs.namenode.audit.log.async.buffer.size";
- public static final int DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_DEFAULT = 128;
+
+ /**
+ * Deprecated. Use log4j properties instead.
+ * Set value to Async appender "bufferSize" property as part of log4j properties configuration.
+ *
+ * For example,
+ * log4j.appender.ASYNCAPPENDER=org.apache.log4j.AsyncAppender
+ * log4j.appender.ASYNCAPPENDER.bufferSize=128
+ */
+ @Deprecated
+ public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_KEY =
+ "dfs.namenode.audit.log.async.buffer.size";
+ @Deprecated
+ public static final int DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_DEFAULT = 128;
public static final String DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST = "dfs.namenode.audit.log.debug.cmdlist";
public static final String DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY =
"dfs.namenode.metrics.logger.period.seconds";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
index 66685f6cc1..21c01cebd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
@@ -18,9 +18,7 @@
package org.apache.hadoop.hdfs.server.common;
import java.lang.management.ManagementFactory;
-import java.util.Collections;
import java.util.HashSet;
-import java.util.List;
import java.util.Set;
import javax.management.Attribute;
@@ -34,8 +32,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.log4j.Appender;
-import org.apache.log4j.AsyncAppender;
/**
* MetricsLoggerTask can be used as utility to dump metrics to log.
@@ -56,12 +52,12 @@ public class MetricsLoggerTask implements Runnable {
}
}
- private org.apache.log4j.Logger metricsLog;
+ private Logger metricsLog;
private String nodeName;
private short maxLogLineLength;
public MetricsLoggerTask(String metricsLog, String nodeName, short maxLogLineLength) {
- this.metricsLog = org.apache.log4j.Logger.getLogger(metricsLog);
+ this.metricsLog = LoggerFactory.getLogger(metricsLog);
this.nodeName = nodeName;
this.maxLogLineLength = maxLogLineLength;
}
@@ -115,8 +111,11 @@ private String trimLine(String valueStr) {
.substring(0, maxLogLineLength) + "...");
}
- private static boolean hasAppenders(org.apache.log4j.Logger logger) {
- return logger.getAllAppenders().hasMoreElements();
+ // TODO: once the hadoop-logging module hides log4j implementation details, this method
+ // can directly call a utility from hadoop-logging.
+ private static boolean hasAppenders(Logger logger) {
+ return org.apache.log4j.Logger.getLogger(logger.getName()).getAllAppenders()
+ .hasMoreElements();
}
/**
@@ -138,26 +137,4 @@ private static Set<String> getFilteredAttributes(MBeanInfo mBeanInfo) {
return attributeNames;
}
- /**
- * Make the metrics logger async and add all pre-existing appenders to the
- * async appender.
- */
- public static void makeMetricsLoggerAsync(String metricsLog) {
- org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(metricsLog);
- logger.setAdditivity(false); // Don't pollute actual logs with metrics dump
-
- @SuppressWarnings("unchecked")
- List<Appender> appenders = Collections.list(logger.getAllAppenders());
- // failsafe against trying to async it more than once
- if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
- AsyncAppender asyncAppender = new AsyncAppender();
- // change logger to have an async appender containing all the
- // previously configured appenders
- for (Appender appender : appenders) {
- logger.removeAppender(appender);
- asyncAppender.addAppender(appender);
- }
- logger.addAppender(asyncAppender);
- }
- }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index ce56688598..b781053a76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -4058,8 +4058,6 @@ protected void startMetricsLogger() {
return;
}
- MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
-
// Schedule the periodic logging.
metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a7ee335497..e44a16f029 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -338,10 +338,9 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.Lists;
-import org.apache.log4j.Logger;
-import org.apache.log4j.Appender;
-import org.apache.log4j.AsyncAppender;
import org.eclipse.jetty.util.ajax.JSON;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
@@ -349,8 +348,6 @@
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.slf4j.LoggerFactory;
-
/**
* FSNamesystem is a container of both transient
* and persisted name-space state, and does all the book-keeping
@@ -384,8 +381,7 @@
public class FSNamesystem implements Namesystem, FSNamesystemMBean,
NameNodeMXBean, ReplicatedBlocksMBean, ECBlockGroupsMBean {
- public static final org.slf4j.Logger LOG = LoggerFactory
- .getLogger(FSNamesystem.class.getName());
+ public static final Logger LOG = LoggerFactory.getLogger(FSNamesystem.class);
// The following are private configurations
public static final String DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED =
@@ -488,7 +484,8 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){
* perm=<permissions (optional)>
*
*/
- public static final Logger AUDIT_LOG = Logger.getLogger(FSNamesystem.class.getName() + ".audit");
+ public static final Logger AUDIT_LOG =
+ LoggerFactory.getLogger(FSNamesystem.class.getName() + ".audit");
private final int maxCorruptFileBlocksReturn;
private final boolean isPermissionEnabled;
@@ -858,11 +855,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
throws IOException {
provider = DFSUtil.createKeyProviderCryptoExtension(conf);
LOG.info("KeyProvider: " + provider);
- if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY,
- DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) {
- LOG.info("Enabling async auditlog");
- enableAsyncAuditLog(conf);
- }
+ checkForAsyncLogEnabledByOldConfigs(conf);
auditLogWithRemotePort =
conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_WITH_REMOTE_PORT_KEY,
DFS_NAMENODE_AUDIT_LOG_WITH_REMOTE_PORT_DEFAULT);
@@ -1076,6 +1069,14 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
}
}
+ @SuppressWarnings("deprecation")
+ private static void checkForAsyncLogEnabledByOldConfigs(Configuration conf) {
+ if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) {
+ LOG.warn("Use log4j properties to enable async log for audit logs. {} is deprecated",
+ DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY);
+ }
+ }
+
@VisibleForTesting
public List<AuditLogger> getAuditLoggers() {
return auditLoggers;
@@ -8856,30 +8857,6 @@ public void logAuditMessage(String message) {
}
}
- private static void enableAsyncAuditLog(Configuration conf) {
- Logger logger = AUDIT_LOG;
- @SuppressWarnings("unchecked")
- List<Appender> appenders = Collections.list(logger.getAllAppenders());
- // failsafe against trying to async it more than once
- if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
- AsyncAppender asyncAppender = new AsyncAppender();
- asyncAppender.setBlocking(conf.getBoolean(
- DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_KEY,
- DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_DEFAULT
- ));
- asyncAppender.setBufferSize(conf.getInt(
- DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_KEY,
- DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_DEFAULT
- ));
- // change logger to have an async appender containing all the
- // previously configured appenders
- for (Appender appender : appenders) {
- logger.removeAppender(appender);
- asyncAppender.addAppender(appender);
- }
- logger.addAppender(asyncAppender);
- }
- }
/**
* Return total number of Sync Operations on FSEditLog.
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index ddd9fd8087..ff25eedea0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -946,8 +946,6 @@ protected void startMetricsLogger(Configuration conf) {
return;
}
- MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG_NAME);
-
// Schedule the periodic logging.
metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
new file mode 100644
index 0000000000..276e5b0987
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AsyncRFAAppender.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.util;
+
+import java.io.IOException;
+
+import org.apache.log4j.AsyncAppender;
+import org.apache.log4j.PatternLayout;
+import org.apache.log4j.RollingFileAppender;
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * Until we migrate to log4j2, use this appender for the namenode audit logger as well as
+ * the datanode and namenode metrics loggers, configured through log4j properties, when
+ * async logging backed by an RFA is required.
+ * This appender takes the parameters necessary to supply a RollingFileAppender to an
+ * AsyncAppender. When migrating to log4j2, an RFA appender can be wrapped directly in an
+ * Async appender through log4j2 properties; the same is not possible with log4j1 properties.
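+ *
+ * For example (an illustrative log4j.properties sketch; the appender name ASYNCRFA and the
+ * property values are hypothetical):
+ * log4j.appender.ASYNCRFA=org.apache.hadoop.hdfs.util.AsyncRFAAppender
+ * log4j.appender.ASYNCRFA.conversionPattern=%d{ISO8601} %m%n
+ * log4j.appender.ASYNCRFA.fileName=${hadoop.log.dir}/hdfs-audit.log
+ * log4j.appender.ASYNCRFA.maxFileSize=256MB
+ * log4j.appender.ASYNCRFA.maxBackupIndex=20
+ * log4j.appender.ASYNCRFA.blocking=false
+ * log4j.appender.ASYNCRFA.bufferSize=256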
+ */
+public class AsyncRFAAppender extends AsyncAppender {
+
+ /**
+ * The default maximum file size is 10MB.
+ */
+ private String maxFileSize = String.valueOf(10 * 1024 * 1024);
+
+ /**
+ * There is one backup file by default.
+ */
+ private int maxBackupIndex = 1;
+
+ /**
+ * The name of the log file.
+ */
+ private String fileName = null;
+
+ private String conversionPattern = null;
+
+ /**
+ * Whether the appender blocks when its buffer is full.
+ */
+ private boolean blocking = true;
+
+ /**
+ * Buffer size.
+ */
+ private int bufferSize = DEFAULT_BUFFER_SIZE;
+
+ private RollingFileAppender rollingFileAppender = null;
+
+ private volatile boolean isRollingFileAppenderAssigned = false;
+
+ @Override
+ public void append(LoggingEvent event) {
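+ // Lazily attach the RollingFileAppender on the first append, after the log4j
+ // configurator has applied all of this appender's property setters.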
+ if (rollingFileAppender == null) {
+ appendRFAToAsyncAppender();
+ }
+ super.append(event);
+ }
+
+ private synchronized void appendRFAToAsyncAppender() {
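+ // "synchronized" plus the volatile flag ensure the RollingFileAppender is
+ // created and attached exactly once even under concurrent first appends.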
+ if (!isRollingFileAppenderAssigned) {
+ PatternLayout patternLayout;
+ if (conversionPattern != null) {
+ patternLayout = new PatternLayout(conversionPattern);
+ } else {
+ patternLayout = new PatternLayout();
+ }
+ try {
+ rollingFileAppender = new RollingFileAppender(patternLayout, fileName, true);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ rollingFileAppender.setMaxBackupIndex(maxBackupIndex);
+ rollingFileAppender.setMaxFileSize(maxFileSize);
+ this.addAppender(rollingFileAppender);
+ isRollingFileAppenderAssigned = true;
+ super.setBlocking(blocking);
+ super.setBufferSize(bufferSize);
+ }
+ }
+
+ public String getMaxFileSize() {
+ return maxFileSize;
+ }
+
+ public void setMaxFileSize(String maxFileSize) {
+ this.maxFileSize = maxFileSize;
+ }
+
+ public int getMaxBackupIndex() {
+ return maxBackupIndex;
+ }
+
+ public void setMaxBackupIndex(int maxBackupIndex) {
+ this.maxBackupIndex = maxBackupIndex;
+ }
+
+ public String getFileName() {
+ return fileName;
+ }
+
+ public void setFileName(String fileName) {
+ this.fileName = fileName;
+ }
+
+ public String getConversionPattern() {
+ return conversionPattern;
+ }
+
+ public void setConversionPattern(String conversionPattern) {
+ this.conversionPattern = conversionPattern;
+ }
+
+ public boolean isBlocking() {
+ return blocking;
+ }
+
+ public void setBlocking(boolean blocking) {
+ this.blocking = blocking;
+ }
+
+ public int getBufferSize() {
+ return bufferSize;
+ }
+
+ public void setBufferSize(int bufferSize) {
+ this.bufferSize = bufferSize;
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index 29619cc4e3..73201ba605 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -30,7 +30,6 @@
import java.util.List;
import java.util.Random;
import java.util.concurrent.TimeoutException;
-import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -40,12 +39,11 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Appender;
-import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.AsyncAppender;
-import org.apache.log4j.spi.LoggingEvent;
import org.junit.After;
import org.junit.Assert;
import org.junit.Rule;
@@ -151,9 +149,9 @@ public void testMetricsLogOutput() throws IOException, InterruptedException,
metricsProvider);
startDNForTest(true);
assertNotNull(dn);
- final PatternMatchingAppender appender = new PatternMatchingAppender(
- "^.*FakeMetric.*$");
- addAppender(org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME), appender);
+ final PatternMatchingAppender appender =
+ (PatternMatchingAppender) org.apache.log4j.Logger.getLogger(DataNode.METRICS_LOG_NAME)
+ .getAppender("PATTERNMATCHERAPPENDER");
// Ensure that the supplied pattern was matched.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -186,37 +184,4 @@ public int getFakeMetric() {
}
}
- /**
- * An appender that matches logged messages against the given regular
- * expression.
- */
- public static class PatternMatchingAppender extends AppenderSkeleton {
- private final Pattern pattern;
- private volatile boolean matched;
-
- public PatternMatchingAppender(String pattern) {
- this.pattern = Pattern.compile(pattern);
- this.matched = false;
- }
-
- public boolean isMatched() {
- return matched;
- }
-
- @Override
- protected void append(LoggingEvent event) {
- if (pattern.matcher(event.getMessage().toString()).matches()) {
- matched = true;
- }
- }
-
- @Override
- public void close() {
- }
-
- @Override
- public boolean requiresLayout() {
- return false;
- }
- }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
new file mode 100644
index 0000000000..f099dfae73
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/PatternMatchingAppender.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.regex.Pattern;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * An appender that matches logged messages against the given
+ * regular expression.
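+ *
+ * For example, a test log4j.properties can register it as (illustrative; the name matches
+ * the lookup in TestDataNodeMetricsLogger):
+ * log4j.appender.PATTERNMATCHERAPPENDER=org.apache.hadoop.hdfs.server.namenode.PatternMatchingAppender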
+ */
+public class PatternMatchingAppender extends AppenderSkeleton {
+ private final Pattern pattern;
+ private volatile boolean matched;
+
+ public PatternMatchingAppender() {
+ this.pattern = Pattern.compile("^.*FakeMetric.*$");
+ this.matched = false;
+ }
+
+ public boolean isMatched() {
+ return matched;
+ }
+
+ @Override
+ protected void append(LoggingEvent event) {
+ if (pattern.matcher(event.getMessage().toString()).matches()) {
+ matched = true;
+ }
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
index dad4fa306c..a6eba0ea05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
@@ -26,11 +26,11 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.FSNamesystemAuditLogger;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
+import org.slf4j.event.Level;
import java.net.Inet4Address;
import java.util.Arrays;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 54fcc17cdc..698178e4e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -25,10 +25,10 @@
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
+import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
-import java.util.Enumeration;
import java.util.List;
import java.util.regex.Pattern;
@@ -46,15 +46,10 @@
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import org.apache.log4j.RollingFileAppender;
+
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -68,36 +63,39 @@
*/
@RunWith(Parameterized.class)
public class TestAuditLogs {
- static final String auditLogFile = PathUtils.getTestDirName(TestAuditLogs.class) + "/TestAuditLogs-audit.log";
- final boolean useAsyncLog;
+
+ private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(TestAuditLogs.class);
+
+ private static final File AUDIT_LOG_FILE =
+ new File(System.getProperty("hadoop.log.dir"), "hdfs-audit.log");
+
final boolean useAsyncEdits;
@Parameters
public static Collection