MAPREDUCE-7453. Revert HADOOP-18649. (#6102). Contributed by zhengchenyu.

In container-log4j.properties, log4j.appender.{APPENDER}.MaxFileSize is set to ${yarn.app.container.log.filesize}, but yarn.app.container.log.filesize defaults to 0. With a 0-byte size limit the appender rolls over on every event, so only the most recent log line survives and container logs are effectively missing.
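For illustration, a minimal standalone sketch (not part of this patch; class name and paths are made up) of what a 0-byte MaxFileSize does to a plain log4j 1.x RollingFileAppender:

import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;

public class ZeroMaxFileSizeDemo {
  public static void main(String[] args) throws Exception {
    RollingFileAppender rfa = new RollingFileAppender();
    rfa.setName("demoRFA");
    rfa.setLayout(new PatternLayout("%d{ISO8601} %p [%t] %c: %m%n"));
    rfa.setFile("target/zero-filesize-demo/syslog");  // hypothetical path
    rfa.setMaxFileSize("0");  // what MaxFileSize=${yarn.app.container.log.filesize} resolves to
    rfa.setMaxBackupIndex(1);
    rfa.activateOptions();

    Logger log = Logger.getLogger("zero-filesize-demo");
    log.setAdditivity(false);
    log.addAppender(rfa);

    // Every append finds the file at or above the 0-byte limit and rolls it,
    // so after the loop only the most recent message is left in syslog.
    for (int i = 0; i < 10; i++) {
      log.info("message " + i);
    }
    rfa.close();
  }
}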
zhengchenyu 2023-10-01 19:55:32 +08:00 committed by GitHub
parent 5f47f091a2
commit b8815fe68b
6 changed files with 286 additions and 24 deletions


@@ -60,6 +60,8 @@
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.util.ApplicationClassLoader;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.ContainerLogAppender;
+import org.apache.hadoop.yarn.ContainerRollingLogAppender;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.LocalResource;
@@ -586,7 +588,8 @@ public static String getChildLogLevel(Configuration conf, boolean isMap) {
/**
* Add the JVM system properties necessary to configure
-* {@link org.apache.log4j.RollingFileAppender}.
+* {@link ContainerLogAppender} or
+* {@link ContainerRollingLogAppender}.
*
* @param task for map/reduce, or null for app master
* @param vargs the argument list to append to
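For context, a rough sketch of the -D options such a method produces. The class and method names below are placeholders, the property names mirror container-log4j.properties, the numeric values are examples, and the real implementation uses the project's own constants:

import java.util.ArrayList;
import java.util.List;

public class Log4jPropsSketch {
  // Builds the log4j-related JVM options for a container command line.
  static List<String> log4jSystemProperties(long logSizeKb, int backups) {
    List<String> vargs = new ArrayList<>();
    vargs.add("-Dlog4j.configuration=container-log4j.properties");
    vargs.add("-Dyarn.app.container.log.dir=<LOG_DIR>");  // expanded by YARN at launch
    vargs.add("-Dyarn.app.container.log.filesize=" + (logSizeKb * 1024));
    // The rolling appender (CRLA) is selected only when both a size limit and
    // a backup count are configured; otherwise the plain CLA appender is used.
    String appender = (logSizeKb > 0 && backups > 0) ? "CRLA" : "CLA";
    vargs.add("-Dhadoop.root.logger=INFO," + appender);
    if (backups > 0) {
      vargs.add("-Dyarn.app.container.log.backups=" + backups);
    }
    return vargs;
  }
}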


@@ -840,8 +840,11 @@
<name>yarn.app.mapreduce.task.container.log.backups</name>
<value>0</value>
<description>Number of backup files for task logs when using
-RollingFileAppender (RFA). See
-org.apache.log4j.RollingFileAppender.maxBackupIndex.
+ContainerRollingLogAppender (CRLA). See
+org.apache.log4j.RollingFileAppender.maxBackupIndex. By default,
+ContainerLogAppender (CLA) is used, and container logs are not rolled. CRLA
+is enabled for tasks when both mapreduce.task.userlog.limit.kb and
+yarn.app.mapreduce.task.container.log.backups are greater than zero.
</description>
</property>
@@ -849,8 +852,12 @@
<name>yarn.app.mapreduce.am.container.log.backups</name>
<value>0</value>
<description>Number of backup files for the ApplicationMaster logs when using
-RollingFileAppender (RFA). See
-org.apache.log4j.RollingFileAppender.maxBackupIndex.
+ContainerRollingLogAppender (CRLA). See
+org.apache.log4j.RollingFileAppender.maxBackupIndex. By default,
+ContainerLogAppender (CLA) is used, and container logs are not rolled. CRLA
+is enabled for the ApplicationMaster when both
+yarn.app.mapreduce.am.container.log.limit.kb and
+yarn.app.mapreduce.am.container.log.backups are greater than zero.
</description>
</property>
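For illustration, a minimal sketch of job settings that satisfy the "both greater than zero" rule described above; the key names come from these descriptions, the numeric values are examples only:

import org.apache.hadoop.conf.Configuration;

public class EnableRollingContainerLogs {
  public static Configuration configure() {
    Configuration conf = new Configuration();
    // Tasks: limit each task log to ~10 MB and keep 3 rolled backups (CRLA).
    conf.setInt("mapreduce.task.userlog.limit.kb", 10 * 1024);
    conf.setInt("yarn.app.mapreduce.task.container.log.backups", 3);
    // ApplicationMaster: same rule with the AM-specific keys.
    conf.setInt("yarn.app.mapreduce.am.container.log.limit.kb", 10 * 1024);
    conf.setInt("yarn.app.mapreduce.am.container.log.backups", 3);
    return conf;
  }
}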


@@ -0,0 +1,128 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import java.io.File;
import java.io.Flushable;
import java.util.ArrayDeque;
import java.util.Deque;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.log4j.FileAppender;
import org.apache.log4j.spi.LoggingEvent;
/**
* A simple log4j appender for container logs. When a log size limit is
* configured, it keeps only the most recent events in an in-memory buffer
* and writes them to the log file when the appender is closed; otherwise
* events are appended directly to the file.
*/
@Public
@Unstable
public class ContainerLogAppender extends FileAppender
implements Flushable {
private String containerLogDir;
private String containerLogFile;
private int maxEvents;
private Deque<LoggingEvent> eventBuffer;
private boolean closed = false;
@Override
public synchronized void activateOptions() {
if (maxEvents > 0) {
this.eventBuffer = new ArrayDeque<>();
}
setFile(new File(this.containerLogDir, containerLogFile).toString());
setAppend(true);
super.activateOptions();
}
@Override
public synchronized void append(LoggingEvent event) {
if (closed) {
return;
}
if (eventBuffer != null) {
if (eventBuffer.size() == maxEvents) {
eventBuffer.removeFirst();
}
eventBuffer.addLast(event);
} else {
super.append(event);
}
}
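// "qw" is the protected QuietWriter inherited from log4j's WriterAppender;
// flushing it pushes any buffered output through to the underlying log file.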
@Override
public void flush() {
if (qw != null) {
qw.flush();
}
}
@Override
public synchronized void close() {
if (!closed) {
closed = true;
if (eventBuffer != null) {
for (LoggingEvent event : eventBuffer) {
super.append(event);
}
// let garbage collection do its work
eventBuffer = null;
}
super.close();
}
}
/**
* Getter/Setter methods for log4j.
*
* @return containerLogDir.
*/
public String getContainerLogDir() {
return this.containerLogDir;
}
public void setContainerLogDir(String containerLogDir) {
this.containerLogDir = containerLogDir;
}
public String getContainerLogFile() {
return containerLogFile;
}
public void setContainerLogFile(String containerLogFile) {
this.containerLogFile = containerLogFile;
}
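// Assumed rough size of one logging event, in bytes; the configured
// totalLogFileSize is converted into a bounded count of buffered events.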
private static final long EVENT_SIZE = 100;
public long getTotalLogFileSize() {
return maxEvents * EVENT_SIZE;
}
/**
* Setter so that log4j can configure it from the
* configuration(log4j.properties).
*
* @param logSize log size.
*/
public void setTotalLogFileSize(long logSize) {
maxEvents = (int)(logSize / EVENT_SIZE);
}
}


@@ -0,0 +1,75 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.log4j.RollingFileAppender;
import java.io.File;
import java.io.Flushable;
/**
* A simple log4j appender for container logs that rolls the log file using
* {@link RollingFileAppender} semantics.
*/
@Public
@Unstable
public class ContainerRollingLogAppender extends RollingFileAppender implements Flushable {
private String containerLogDir;
private String containerLogFile;
@Override
public void activateOptions() {
synchronized (this) {
setFile(new File(this.containerLogDir, containerLogFile).toString());
setAppend(true);
super.activateOptions();
}
}
@Override
public void flush() {
if (qw != null) {
qw.flush();
}
}
/**
* Getter/Setter methods for log4j.
*
* @return containerLogDir.
*/
public String getContainerLogDir() {
return this.containerLogDir;
}
public void setContainerLogDir(String containerLogDir) {
this.containerLogDir = containerLogDir;
}
public String getContainerLogFile() {
return containerLogFile;
}
public void setContainerLogFile(String containerLogFile) {
this.containerLogFile = containerLogFile;
}
}
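For illustration, a rough programmatic equivalent of the CRLA wiring in container-log4j.properties; the class name, directory, file name, size and backup count below are example values, not part of this patch:

import org.apache.hadoop.yarn.ContainerRollingLogAppender;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;

public class CrlaDemo {
  public static void main(String[] args) {
    ContainerRollingLogAppender crla = new ContainerRollingLogAppender();
    crla.setName("CRLA");
    crla.setLayout(new PatternLayout("%d{ISO8601} %p [%t] %c: %m%n"));
    crla.setContainerLogDir("target/crla-demo/logDir");  // ${yarn.app.container.log.dir}
    crla.setContainerLogFile("syslog");                  // ${hadoop.root.logfile}
    crla.setMaximumFileSize(10L * 1024 * 1024);          // maximumFileSize, 10 MB
    crla.setMaxBackupIndex(3);                           // maxBackupIndex
    crla.activateOptions();

    Logger log = Logger.getLogger("crla-demo");
    log.setAdditivity(false);
    log.addAppender(crla);
    log.info("hello from a rolling container log");
    crla.close();
  }
}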


@@ -0,0 +1,48 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn;
import org.junit.jupiter.api.Test;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
public class TestContainerLogAppender {
@Test
void testAppendInClose() throws Exception {
final ContainerLogAppender claAppender = new ContainerLogAppender();
claAppender.setName("testCLA");
claAppender.setLayout(new PatternLayout("%-5p [%t]: %m%n"));
claAppender.setContainerLogDir("target/testAppendInClose/logDir");
claAppender.setContainerLogFile("syslog");
claAppender.setTotalLogFileSize(1000);
claAppender.activateOptions();
final Logger claLog = Logger.getLogger("testAppendInClose-catergory");
claLog.setAdditivity(false);
claLog.addAppender(claAppender);
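// The toString() below logs again while the buffered event is written out
// during close(); the appender's "closed" flag makes that re-entrant append
// a no-op instead of mutating the buffer that is being drained.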
claLog.info(new Object() {
public String toString() {
claLog.info("message1");
return "return message1";
}
});
claAppender.close();
}
}


@@ -26,35 +26,36 @@ log4j.threshold=ALL
#
#Default values
-yarn.app.container.log.filesize=100MB
-yarn.app.container.log.backups=1
-yarn.app.mapreduce.shuffle.log.backups=1
yarn.app.container.log.dir=null
+yarn.app.container.log.filesize=100
-log4j.appender.CLA=org.apache.log4j.RollingFileAppender
-log4j.appender.CLA.File=${yarn.app.container.log.dir}/${hadoop.root.logfile}
-log4j.appender.CLA.MaxFileSize=${yarn.app.container.log.filesize}
-log4j.appender.CLA.MaxBackupIndex=${yarn.app.container.log.backups}
+log4j.appender.CLA=org.apache.hadoop.yarn.ContainerLogAppender
+log4j.appender.CLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.CLA.containerLogFile=${hadoop.root.logfile}
+log4j.appender.CLA.totalLogFileSize=${yarn.app.container.log.filesize}
log4j.appender.CLA.layout=org.apache.log4j.PatternLayout
log4j.appender.CLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n
-log4j.appender.CRLA=org.apache.log4j.RollingFileAppender
-log4j.appender.CRLA.File=${yarn.app.container.log.dir}/${hadoop.root.logfile}
-log4j.appender.CRLA.MaxFileSize=${yarn.app.container.log.filesize}
-log4j.appender.CRLA.MaxBackupIndex=${yarn.app.container.log.backups}
+log4j.appender.CRLA=org.apache.hadoop.yarn.ContainerRollingLogAppender
+log4j.appender.CRLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.CRLA.containerLogFile=${hadoop.root.logfile}
+log4j.appender.CRLA.maximumFileSize=${yarn.app.container.log.filesize}
+log4j.appender.CRLA.maxBackupIndex=${yarn.app.container.log.backups}
log4j.appender.CRLA.layout=org.apache.log4j.PatternLayout
log4j.appender.CRLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n
-log4j.appender.shuffleCLA=org.apache.log4j.RollingFileAppender
-log4j.appender.shuffleCLA.File=${yarn.app.container.log.dir}/${yarn.app.mapreduce.shuffle.logfile}
-log4j.appender.shuffleCLA.MaxFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
-log4j.appender.shuffleCLA.MaxBackupIndex=${yarn.app.mapreduce.shuffle.log.backups}
+log4j.appender.shuffleCLA=org.apache.hadoop.yarn.ContainerLogAppender
+log4j.appender.shuffleCLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.shuffleCLA.containerLogFile=${yarn.app.mapreduce.shuffle.logfile}
+log4j.appender.shuffleCLA.totalLogFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
log4j.appender.shuffleCLA.layout=org.apache.log4j.PatternLayout
log4j.appender.shuffleCLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n
-log4j.appender.shuffleCRLA=org.apache.log4j.RollingFileAppender
-log4j.appender.shuffleCRLA.File=${yarn.app.container.log.dir}/${yarn.app.mapreduce.shuffle.logfile}
-log4j.appender.shuffleCRLA.MaxFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
-log4j.appender.shuffleCRLA.MaxBackupIndex=${yarn.app.mapreduce.shuffle.log.backups}
+log4j.appender.shuffleCRLA=org.apache.hadoop.yarn.ContainerRollingLogAppender
+log4j.appender.shuffleCRLA.containerLogDir=${yarn.app.container.log.dir}
+log4j.appender.shuffleCRLA.containerLogFile=${yarn.app.mapreduce.shuffle.logfile}
+log4j.appender.shuffleCRLA.maximumFileSize=${yarn.app.mapreduce.shuffle.log.filesize}
+log4j.appender.shuffleCRLA.maxBackupIndex=${yarn.app.mapreduce.shuffle.log.backups}
log4j.appender.shuffleCRLA.layout=org.apache.log4j.PatternLayout
log4j.appender.shuffleCRLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n