From b8815fe68bddd670174b6f1fa19aff178dec7b59 Mon Sep 17 00:00:00 2001
From: zhengchenyu
Date: Sun, 1 Oct 2023 19:55:32 +0800
Subject: [PATCH] MAPREDUCE-7453. Revert HADOOP-18649. (#6102). Contributed by
 zhengchenyu.

In container-log4j.properties, log4j.appender.{APPENDER}.MaxFileSize is set
to ${yarn.app.container.log.filesize}, but yarn.app.container.log.filesize
defaults to 0. As a result, logs go missing: the log file is rolled over on
every write and only the latest log entries are kept.
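
For illustration only (not part of this change), a minimal standalone log4j 1.x
sketch of that failure mode; the class name and paths below are invented:

    import org.apache.log4j.Logger;
    import org.apache.log4j.PatternLayout;
    import org.apache.log4j.RollingFileAppender;

    public class MaxFileSizeZeroDemo {
      public static void main(String[] args) throws Exception {
        RollingFileAppender rfa = new RollingFileAppender(
            new PatternLayout("%d{ISO8601} %p [%t] %c: %m%n"),
            "target/maxfilesize-zero-demo/syslog", true);
        // Mirrors log4j.appender.CLA.MaxFileSize=${yarn.app.container.log.filesize}
        // when yarn.app.container.log.filesize resolves to 0.
        rfa.setMaxFileSize("0");
        rfa.setMaxBackupIndex(1);

        Logger log = Logger.getLogger("maxfilesize-zero-demo");
        log.setAdditivity(false);
        log.addAppender(rfa);
        for (int i = 0; i < 100; i++) {
          // Every append immediately exceeds the 0-byte limit and triggers a
          // rollover, so no more than the most recent message survives on disk.
          log.info("message " + i);
        }
        rfa.close();
      }
    }
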
---
 .../hadoop/mapreduce/v2/util/MRApps.java      |   5 +-
 .../src/main/resources/mapred-default.xml     |  15 +-
 .../hadoop/yarn/ContainerLogAppender.java     | 128 ++++++++++++++++++
 .../yarn/ContainerRollingLogAppender.java     |  75 ++++++++++
 .../hadoop/yarn/TestContainerLogAppender.java |  48 +++++++
 .../main/resources/container-log4j.properties |  39 +++---
 6 files changed, 286 insertions(+), 24 deletions(-)
 create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java
 create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerRollingLogAppender.java
 create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLogAppender.java

diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 72dd48b09c..a3ccfd72d8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -60,6 +60,8 @@
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.util.ApplicationClassLoader;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.ContainerLogAppender;
+import org.apache.hadoop.yarn.ContainerRollingLogAppender;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.yarn.api.records.LocalResource;
@@ -586,7 +588,8 @@ public static String getChildLogLevel(Configuration conf, boolean isMap) {
 
   /**
    * Add the JVM system properties necessary to configure
-   * {@link org.apache.log4j.RollingFileAppender}.
+   * {@link ContainerLogAppender} or
+   * {@link ContainerRollingLogAppender}.
    *
    * @param task for map/reduce, or null for app master
    * @param vargs the argument list to append to
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index a6d68acda3..9b0d8b563d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -840,8 +840,11 @@
   <name>yarn.app.mapreduce.task.container.log.backups</name>
   <value>0</value>
   <description>Number of backup files for task logs when using
-    RollingFileAppender (RFA). See
-    org.apache.log4j.RollingFileAppender.maxBackupIndex.
+    ContainerRollingLogAppender (CRLA). See
+    org.apache.log4j.RollingFileAppender.maxBackupIndex. By default,
+    ContainerLogAppender (CLA) is used, and container logs are not rolled. CRLA
+    is enabled for tasks when both mapreduce.task.userlog.limit.kb and
+    yarn.app.mapreduce.task.container.log.backups are greater than zero.
   </description>
 </property>
 
@@ -849,8 +852,12 @@
   <name>yarn.app.mapreduce.am.container.log.backups</name>
   <value>0</value>
   <description>Number of backup files for the ApplicationMaster logs when using
-    RollingFileAppender (RFA). See
-    org.apache.log4j.RollingFileAppender.maxBackupIndex.
+    ContainerRollingLogAppender (CRLA). See
+    org.apache.log4j.RollingFileAppender.maxBackupIndex. By default,
+    ContainerLogAppender (CLA) is used, and container logs are not rolled. CRLA
+    is enabled for the ApplicationMaster when both
+    yarn.app.mapreduce.am.container.log.limit.kb and
+    yarn.app.mapreduce.am.container.log.backups are greater than zero.
   </description>
 </property>
 
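
Both descriptions above encode the same rule: rolling (CRLA) is only used when a
positive size limit and a positive backup count are both configured. A hedged
sketch of that check using plain Configuration lookups; the class and method
names are invented and this is not the actual MRApps logic:

    import org.apache.hadoop.conf.Configuration;

    // Illustrative helper, not part of this patch.
    public final class AppenderChoice {
      private AppenderChoice() {
      }

      /** Returns "CRLA" when rolling is enabled for tasks, otherwise "CLA". */
      public static String forTask(Configuration conf) {
        long logSizeKb = conf.getLong("mapreduce.task.userlog.limit.kb", 0);
        int backups = conf.getInt("yarn.app.mapreduce.task.container.log.backups", 0);
        return (logSizeKb > 0 && backups > 0) ? "CRLA" : "CLA";
      }

      /** Returns "CRLA" when rolling is enabled for the ApplicationMaster, otherwise "CLA". */
      public static String forAppMaster(Configuration conf) {
        long logSizeKb = conf.getLong("yarn.app.mapreduce.am.container.log.limit.kb", 0);
        int backups = conf.getInt("yarn.app.mapreduce.am.container.log.backups", 0);
        return (logSizeKb > 0 && backups > 0) ? "CRLA" : "CLA";
      }
    }
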
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java
new file mode 100644
index 0000000000..03a0078167
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn;
+
+import java.io.File;
+import java.io.Flushable;
+import java.util.ArrayDeque;
+import java.util.Deque;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.log4j.FileAppender;
+import org.apache.log4j.spi.LoggingEvent;
+
+/**
+ * A simple log4j-appender for container's logs.
+ */
+@Public
+@Unstable
+public class ContainerLogAppender extends FileAppender
+    implements Flushable {
+
+  private String containerLogDir;
+  private String containerLogFile;
+  private int maxEvents;
+  private Deque<LoggingEvent> eventBuffer;
+  private boolean closed = false;
+
+  @Override
+  public synchronized void activateOptions() {
+    if (maxEvents > 0) {
+      this.eventBuffer = new ArrayDeque<>();
+    }
+    setFile(new File(this.containerLogDir, containerLogFile).toString());
+    setAppend(true);
+    super.activateOptions();
+  }
+
+  @Override
+  public synchronized void append(LoggingEvent event) {
+    if (closed) {
+      return;
+    }
+    if (eventBuffer != null) {
+      if (eventBuffer.size() == maxEvents) {
+        eventBuffer.removeFirst();
+      }
+      eventBuffer.addLast(event);
+    } else {
+      super.append(event);
+    }
+  }
+
+  @Override
+  public void flush() {
+    if (qw != null) {
+      qw.flush();
+    }
+  }
+
+  @Override
+  public synchronized void close() {
+    if (!closed) {
+      closed = true;
+      if (eventBuffer != null) {
+        for (LoggingEvent event : eventBuffer) {
+          super.append(event);
+        }
+        // let garbage collection do its work
+        eventBuffer = null;
+      }
+      super.close();
+    }
+  }
+
+  /**
+   * Getter/Setter methods for log4j.
+   *
+   * @return containerLogDir.
+   */
+  public String getContainerLogDir() {
+    return this.containerLogDir;
+  }
+
+  public void setContainerLogDir(String containerLogDir) {
+    this.containerLogDir = containerLogDir;
+  }
+
+  public String getContainerLogFile() {
+    return containerLogFile;
+  }
+
+  public void setContainerLogFile(String containerLogFile) {
+    this.containerLogFile = containerLogFile;
+  }
+
+  private static final long EVENT_SIZE = 100;
+
+  public long getTotalLogFileSize() {
+    return maxEvents * EVENT_SIZE;
+  }
+
+  /**
+   * Setter so that log4j can configure it from the
+   * configuration(log4j.properties).
+   *
+   * @param logSize log size.
+   */
+  public void setTotalLogFileSize(long logSize) {
+    maxEvents = (int)(logSize / EVENT_SIZE);
+  }
+}
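
When totalLogFileSize is positive, ContainerLogAppender buffers roughly
totalLogFileSize / 100 events in memory and only writes them out when the
appender is closed. A standalone sketch of that behaviour (class name, logger
category and paths are invented for illustration; not part of the patch):

    import org.apache.hadoop.yarn.ContainerLogAppender;
    import org.apache.log4j.Logger;
    import org.apache.log4j.PatternLayout;

    public class ClaBufferDemo {
      public static void main(String[] args) {
        ContainerLogAppender cla = new ContainerLogAppender();
        cla.setName("demoCLA");
        cla.setLayout(new PatternLayout("%d{ISO8601} %p [%t] %c: %m%n"));
        cla.setContainerLogDir("target/cla-buffer-demo/logDir");
        cla.setContainerLogFile("syslog");
        cla.setTotalLogFileSize(1000);   // EVENT_SIZE is 100, so about the last 10 events are kept
        cla.activateOptions();

        Logger log = Logger.getLogger("cla-buffer-demo");
        log.setAdditivity(false);
        log.addAppender(cla);
        for (int i = 0; i < 50; i++) {
          log.info("event " + i);        // buffered in memory, older entries are discarded
        }
        cla.close();                     // only the newest buffered events reach the syslog file
      }
    }
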
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerRollingLogAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerRollingLogAppender.java
new file mode 100644
index 0000000000..acb47d9dd0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerRollingLogAppender.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.log4j.RollingFileAppender;
+
+import java.io.File;
+import java.io.Flushable;
+
+/**
+ * A simple log4j-appender for container's logs.
+ *
+ */
+@Public
+@Unstable
+public class ContainerRollingLogAppender extends RollingFileAppender implements Flushable {
+  private String containerLogDir;
+  private String containerLogFile;
+
+  @Override
+  public void activateOptions() {
+    synchronized (this) {
+      setFile(new File(this.containerLogDir, containerLogFile).toString());
+      setAppend(true);
+      super.activateOptions();
+    }
+  }
+
+  @Override
+  public void flush() {
+    if (qw != null) {
+      qw.flush();
+    }
+  }
+
+  /**
+   * Getter/Setter methods for log4j.
+   *
+   * @return containerLogDir.
+   */
+
+  public String getContainerLogDir() {
+    return this.containerLogDir;
+  }
+
+  public void setContainerLogDir(String containerLogDir) {
+    this.containerLogDir = containerLogDir;
+  }
+
+  public String getContainerLogFile() {
+    return containerLogFile;
+  }
+
+  public void setContainerLogFile(String containerLogFile) {
+    this.containerLogFile = containerLogFile;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLogAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLogAppender.java
new file mode 100644
index 0000000000..26acfd7bad
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLogAppender.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn;
+
+import org.junit.jupiter.api.Test;
+
+import org.apache.log4j.Logger;
+import org.apache.log4j.PatternLayout;
+
+public class TestContainerLogAppender {
+
+  @Test
+  void testAppendInClose() throws Exception {
+    final ContainerLogAppender claAppender = new ContainerLogAppender();
+    claAppender.setName("testCLA");
+    claAppender.setLayout(new PatternLayout("%-5p [%t]: %m%n"));
+    claAppender.setContainerLogDir("target/testAppendInClose/logDir");
+    claAppender.setContainerLogFile("syslog");
+    claAppender.setTotalLogFileSize(1000);
+    claAppender.activateOptions();
+    final Logger claLog = Logger.getLogger("testAppendInClose-catergory");
+    claLog.setAdditivity(false);
+    claLog.addAppender(claAppender);
+    claLog.info(new Object() {
+      public String toString() {
+        claLog.info("message1");
+        return "return message1";
+      }
+    });
+    claAppender.close();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties
index c5371c6d9e..678e3a74c8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties
@@ -26,35 +26,36 @@ log4j.threshold=ALL
 
 #
 #Default values
-yarn.app.container.log.filesize=100MB
-yarn.app.container.log.backups=1
-yarn.app.mapreduce.shuffle.log.backups=1
+yarn.app.container.log.dir=null
+yarn.app.container.log.filesize=100
 
-log4j.appender.CLA=org.apache.log4j.RollingFileAppender
-log4j.appender.CLA.File=${yarn.app.container.log.dir}/${hadoop.root.logfile}
-log4j.appender.CLA.MaxFileSize=${yarn.app.container.log.filesize}
-log4j.appender.CLA.MaxBackupIndex=${yarn.app.container.log.backups}
+log4j.appender.CLA=org.apache.hadoop.yarn.ContainerLogAppender
+log4j.appender.CLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.CLA.containerLogFile=${hadoop.root.logfile}
+log4j.appender.CLA.totalLogFileSize=${yarn.app.container.log.filesize}
 log4j.appender.CLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.CLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n
 
-log4j.appender.CRLA=org.apache.log4j.RollingFileAppender
-log4j.appender.CRLA.File=${yarn.app.container.log.dir}/${hadoop.root.logfile}
-log4j.appender.CRLA.MaxFileSize=${yarn.app.container.log.filesize}
-log4j.appender.CRLA.MaxBackupIndex=${yarn.app.container.log.backups}
+log4j.appender.CRLA=org.apache.hadoop.yarn.ContainerRollingLogAppender
+log4j.appender.CRLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.CRLA.containerLogFile=${hadoop.root.logfile}
+log4j.appender.CRLA.maximumFileSize=${yarn.app.container.log.filesize}
+log4j.appender.CRLA.maxBackupIndex=${yarn.app.container.log.backups}
 log4j.appender.CRLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.CRLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n
 
-log4j.appender.shuffleCLA=org.apache.log4j.RollingFileAppender
-log4j.appender.shuffleCLA.File=${yarn.app.container.log.dir}/${yarn.app.mapreduce.shuffle.logfile}
-log4j.appender.shuffleCLA.MaxFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
-log4j.appender.shuffleCLA.MaxBackupIndex=${yarn.app.mapreduce.shuffle.log.backups}
+log4j.appender.shuffleCLA=org.apache.hadoop.yarn.ContainerLogAppender
+log4j.appender.shuffleCLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.shuffleCLA.containerLogFile=${yarn.app.mapreduce.shuffle.logfile}
+log4j.appender.shuffleCLA.totalLogFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
 log4j.appender.shuffleCLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.shuffleCLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n
 
-log4j.appender.shuffleCRLA=org.apache.log4j.RollingFileAppender
-log4j.appender.shuffleCRLA.File=${yarn.app.container.log.dir}/${yarn.app.mapreduce.shuffle.logfile}
-log4j.appender.shuffleCRLA.MaxFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
-log4j.appender.shuffleCRLA.MaxBackupIndex=${yarn.app.mapreduce.shuffle.log.backups}
+log4j.appender.shuffleCRLA=org.apache.hadoop.yarn.ContainerRollingLogAppender
+log4j.appender.shuffleCRLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.shuffleCRLA.containerLogFile=${yarn.app.mapreduce.shuffle.logfile}
+log4j.appender.shuffleCRLA.maximumFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
+log4j.appender.shuffleCRLA.maxBackupIndex=${yarn.app.mapreduce.shuffle.log.backups}
 log4j.appender.shuffleCRLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.shuffleCRLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n
 
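
To sanity-check the restored wiring outside a container, a hedged standalone
sketch that drives ContainerRollingLogAppender programmatically with the same
knobs the CRLA properties above supply; the class name, paths and sizes are
invented, and setMaxFileSize/setMaxBackupIndex are the setters inherited from
org.apache.log4j.RollingFileAppender:

    import org.apache.hadoop.yarn.ContainerRollingLogAppender;
    import org.apache.log4j.Logger;
    import org.apache.log4j.PatternLayout;

    public class CrlaSmokeTest {
      public static void main(String[] args) {
        ContainerRollingLogAppender crla = new ContainerRollingLogAppender();
        crla.setName("CRLA");
        crla.setLayout(new PatternLayout("%d{ISO8601} %p [%t] %c: %m%n"));
        crla.setContainerLogDir("target/crla-smoke/logDir"); // stands in for ${yarn.app.container.log.dir}
        crla.setContainerLogFile("syslog");                  // stands in for ${hadoop.root.logfile}
        crla.setMaxFileSize("256KB");                        // stands in for ${yarn.app.container.log.filesize}
        crla.setMaxBackupIndex(3);                           // stands in for ${yarn.app.container.log.backups}
        crla.activateOptions();                              // resolves dir/file and opens the log file

        Logger log = Logger.getLogger("crla-smoke");
        log.setAdditivity(false);
        log.addAppender(crla);
        log.info("container rolling log appender restored");
        crla.close();
      }
    }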