YARN-7047. Moving logging APIs over to slf4j in hadoop-yarn-server-nodemanager. Contributed by Yeliang Cang.

This commit is contained in:
Akira Ajisaka 2017-08-22 17:14:12 +09:00
parent ae8fb13b31
commit d5ff57a08f
No known key found for this signature in database
GPG Key ID: C1EDBB9CA400FD50
87 changed files with 361 additions and 321 deletions

View File

@@ -35,11 +35,11 @@
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -68,7 +68,8 @@
* underlying OS. All executor implementations must extend ContainerExecutor.
*/
public abstract class ContainerExecutor implements Configurable {
private static final Log LOG = LogFactory.getLog(ContainerExecutor.class);
private static final Logger LOG =
LoggerFactory.getLogger(ContainerExecutor.class);
protected static final String WILDCARD = "*";
/**

View File

@@ -20,6 +20,8 @@
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataOutputStream;
import java.io.File;
@@ -34,8 +36,6 @@
import java.util.Map;
import org.apache.commons.lang.math.RandomUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
@@ -73,8 +73,8 @@
*/
public class DefaultContainerExecutor extends ContainerExecutor {
private static final Log LOG = LogFactory
.getLog(DefaultContainerExecutor.class);
private static final Logger LOG =
LoggerFactory.getLogger(DefaultContainerExecutor.class);
private static final int WIN_MAX_PATH = 260;
@@ -423,7 +423,7 @@ public void writeLocalWrapperScript(Path launchDst, Path pidFile)
pout = new PrintStream(out, false, "UTF-8");
writeLocalWrapperScript(launchDst, pidFile, pout);
} finally {
IOUtils.cleanup(LOG, pout, out);
IOUtils.cleanupWithLogger(LOG, pout, out);
}
}
@@ -505,7 +505,7 @@ private void writeSessionScript(Path launchDst, Path pidFile)
String exec = Shell.isSetsidAvailable? "exec setsid" : "exec";
pout.printf("%s /bin/bash \"%s\"", exec, launchDst.toUri().getPath());
} finally {
IOUtils.cleanup(LOG, pout, out);
IOUtils.cleanupWithLogger(LOG, pout, out);
}
lfs.setPermission(sessionScriptPath,
ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);

View File

@@ -19,6 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager;
import static java.util.concurrent.TimeUnit.SECONDS;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
@@ -31,8 +33,6 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
@@ -49,7 +49,8 @@
public class DeletionService extends AbstractService {
private static final Log LOG = LogFactory.getLog(DeletionService.class);
private static final Logger LOG =
LoggerFactory.getLogger(DeletionService.class);
private int debugDelay;
private final ContainerExecutor containerExecutor;

View File

@@ -33,11 +33,11 @@
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileContext;
@@ -55,7 +55,8 @@
* Manages a list of local storage directories.
*/
public class DirectoryCollection {
private static final Log LOG = LogFactory.getLog(DirectoryCollection.class);
private static final Logger LOG =
LoggerFactory.getLogger(DirectoryCollection.class);
private final Configuration conf;
private final DiskValidator diskValidator;

View File

@@ -20,8 +20,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
@@ -98,8 +98,8 @@
*/
public class LinuxContainerExecutor extends ContainerExecutor {
private static final Log LOG = LogFactory
.getLog(LinuxContainerExecutor.class);
private static final Logger LOG =
LoggerFactory.getLogger(LinuxContainerExecutor.class);
private String nonsecureLocalUser;
private Pattern nonsecureLocalUserPattern;

View File

@@ -27,9 +27,9 @@
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
@@ -51,7 +51,8 @@
*/
public class LocalDirsHandlerService extends AbstractService {
private static Log LOG = LogFactory.getLog(LocalDirsHandlerService.class);
private static final Logger LOG =
LoggerFactory.getLogger(LocalDirsHandlerService.class);
private static final String diskCapacityExceededErrorMsg = "usable space is below configured utilization percentage/no more usable space";

View File

@@ -18,9 +18,9 @@
package org.apache.hadoop.yarn.server.nodemanager;
import java.net.InetAddress;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -31,7 +31,8 @@
* Audit log format is written as key=value pairs. Tab separated.
*/
public class NMAuditLogger {
private static final Log LOG = LogFactory.getLog(NMAuditLogger.class);
private static final Logger LOG =
LoggerFactory.getLogger(NMAuditLogger.class);
enum Keys {USER, OPERATION, TARGET, RESULT, IP,
DESCRIPTION, APPID, CONTAINERID}

View File

@@ -26,9 +26,9 @@
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -105,7 +105,8 @@ public int getExitCode() {
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 30;
private static final Log LOG = LogFactory.getLog(NodeManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(NodeManager.class);
private static long nmStartupTime = System.currentTimeMillis();
protected final NodeManagerMetrics metrics = NodeManagerMetrics.create();
private JvmPauseMonitor pauseMonitor;
@@ -469,7 +470,7 @@ public void run() {
((NodeStatusUpdaterImpl) nodeStatusUpdater)
.rebootNodeStatusUpdaterAndRegisterWithRM();
} catch (YarnRuntimeException e) {
LOG.fatal("Error while rebooting NodeStatusUpdater.", e);
LOG.error("Error while rebooting NodeStatusUpdater.", e);
shutDown(NodeManagerStatus.EXCEPTION.getExitCode());
}
}
@@ -729,7 +730,7 @@ private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
String message =
"Failing NodeManager start since we're on a "
+ "Unix-based system but bash doesn't seem to be available.";
LOG.fatal(message);
LOG.error(message);
throw new YarnRuntimeException(message);
}
}
@@ -748,7 +749,7 @@ private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
this.init(conf);
this.start();
} catch (Throwable t) {
LOG.fatal("Error starting NodeManager", t);
LOG.error("Error starting NodeManager", t);
System.exit(-1);
}
}

View File

@@ -18,13 +18,13 @@
package org.apache.hadoop.yarn.server.nodemanager;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implementation of the node resource monitor. It periodically tracks the
@@ -34,8 +34,8 @@ public class NodeResourceMonitorImpl extends AbstractService implements
NodeResourceMonitor {
/** Logging infrastructure. */
final static Log LOG = LogFactory
.getLog(NodeResourceMonitorImpl.class);
final static Logger LOG =
LoggerFactory.getLogger(NodeResourceMonitorImpl.class);
/** Interval to monitor the node resource utilization. */
private long monitoringInterval;

View File

@@ -33,9 +33,9 @@
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputByteBuffer;
@@ -99,7 +99,8 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
public static final String YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS =
YarnConfiguration.NM_PREFIX + "duration-to-track-stopped-containers";
private static final Log LOG = LogFactory.getLog(NodeStatusUpdaterImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(NodeStatusUpdaterImpl.class);
private final Object heartbeatMonitor = new Object();
private final Object shutdownMonitor = new Object();
@@ -427,7 +428,7 @@ nodeManagerVersionId, containerReports, getRunningApplications(),
successfullRegistrationMsg.append(nodeLabelsHandler
.verifyRMRegistrationResponseForNodeLabels(regNMResponse));
LOG.info(successfullRegistrationMsg);
LOG.info(successfullRegistrationMsg.toString());
}
private List<ApplicationId> createKeepAliveApplicationList() {

View File

@@ -27,9 +27,9 @@
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
@@ -52,8 +52,8 @@
public class AMRMProxyTokenSecretManager extends
SecretManager<AMRMTokenIdentifier> {
private static final Log LOG = LogFactory
.getLog(AMRMProxyTokenSecretManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(AMRMProxyTokenSecretManager.class);
private int serialNo = new SecureRandom().nextInt();
private MasterKeyData nextMasterKey;

View File

@@ -16,8 +16,6 @@
*/
package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
@@ -25,6 +23,8 @@
import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTaskType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
@@ -34,7 +34,8 @@
*/
public final class NMProtoUtils {
private static final Log LOG = LogFactory.getLog(NMProtoUtils.class);
private static final Logger LOG =
LoggerFactory.getLogger(NMProtoUtils.class);
private NMProtoUtils() { }

View File

@@ -22,9 +22,9 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.service.CompositeService;
@@ -50,7 +50,8 @@
public class NMCollectorService extends CompositeService implements
CollectorNodemanagerProtocol {
private static final Log LOG = LogFactory.getLog(NMCollectorService.class);
private static final Logger LOG =
LoggerFactory.getLogger(NMCollectorService.class);
private final Context context;

View File

@@ -25,9 +25,9 @@
import java.util.Map;
import java.util.Map.Entry;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -51,7 +51,8 @@ public class AuxServices extends AbstractService
static final String STATE_STORE_ROOT_NAME = "nm-aux-services";
private static final Log LOG = LogFactory.getLog(AuxServices.class);
private static final Logger LOG =
LoggerFactory.getLogger(AuxServices.class);
protected final Map<String,AuxiliaryService> serviceMap;
protected final Map<String,ByteBuffer> serviceMetaData;
@@ -161,7 +162,7 @@ public void serviceInit(Configuration conf) throws Exception {
}
s.init(conf);
} catch (RuntimeException e) {
LOG.fatal("Failed to initialize " + sName, e);
LOG.error("Failed to initialize " + sName, e);
throw e;
}
}
@@ -205,7 +206,7 @@ public void serviceStop() throws Exception {
@Override
public void stateChanged(Service service) {
LOG.fatal("Service " + service.getName() + " changed state: " +
LOG.error("Service " + service.getName() + " changed state: " +
service.getServiceState());
stop();
}

View File

@@ -20,8 +20,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -189,7 +189,8 @@ private enum ReInitOp {
*/
private static final int SHUTDOWN_CLEANUP_SLOP_MS = 1000;
private static final Log LOG = LogFactory.getLog(ContainerManagerImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(ContainerManagerImpl.class);
public static final String INVALID_NMTOKEN_MSG = "Invalid NMToken";
static final String INVALID_CONTAINERTOKEN_MSG =

View File

@@ -25,10 +25,10 @@
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.protobuf.ByteString;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
@@ -85,7 +85,8 @@ public class ApplicationImpl implements Application {
private final WriteLock writeLock;
private final Context context;
private static final Log LOG = LogFactory.getLog(ApplicationImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(ApplicationImpl.class);
private LogAggregationContext logAggregationContext;

View File

@@ -33,10 +33,10 @@
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
@@ -174,7 +174,8 @@ private ReInitializationContext createContextForRollback() {
/** The NM-wide configuration - not specific to this container */
private final Configuration daemonConf;
private static final Log LOG = LogFactory.getLog(ContainerImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(ContainerImpl.class);
// whether container has been recovered after a restart

View File

@@ -16,11 +16,11 @@
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashSet;
@@ -34,7 +34,8 @@
*/
public abstract class DeletionTask implements Runnable {
static final Log LOG = LogFactory.getLog(DeletionTask.class);
static final Logger LOG =
LoggerFactory.getLogger(DeletionTask.class);
public static final int INVALID_TASK_ID = -1;

View File

@@ -20,6 +20,8 @@
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataOutputStream;
import java.io.File;
@@ -36,8 +38,6 @@
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
@@ -89,7 +89,8 @@
public class ContainerLaunch implements Callable<Integer> {
private static final Log LOG = LogFactory.getLog(ContainerLaunch.class);
private static final Logger LOG =
LoggerFactory.getLogger(ContainerLaunch.class);
public static final String CONTAINER_SCRIPT =
Shell.appendScriptExtension("launch_container");
@@ -269,7 +270,8 @@ public Integer call() {
creds.writeTokenStorageToStream(tokensOutStream);
// /////////// End of writing out container-tokens
} finally {
IOUtils.cleanup(LOG, containerScriptOutStream, tokensOutStream);
IOUtils.cleanupWithLogger(LOG, containerScriptOutStream,
tokensOutStream);
}
ret = launchContainer(new ContainerStartContext.Builder()
@@ -518,7 +520,7 @@ protected void handleContainerExitCode(int exitCode, Path containerLogDir) {
@SuppressWarnings("unchecked")
protected void handleContainerExitWithFailure(ContainerId containerID,
int ret, Path containerLogDir, StringBuilder diagnosticInfo) {
LOG.warn(diagnosticInfo);
LOG.warn(diagnosticInfo.toString());
String errorFileNamePattern =
conf.get(YarnConfiguration.NM_CONTAINER_STDERR_PATTERN,
@@ -569,7 +571,7 @@ protected void handleContainerExitWithFailure(ContainerId containerID,
} catch (IOException e) {
LOG.error("Failed to get tail of the container's error log file", e);
} finally {
IOUtils.cleanup(LOG, errorFileIS);
IOUtils.cleanupWithLogger(LOG, errorFileIS);
}
this.dispatcher.getEventHandler()

View File

@@ -18,8 +18,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
@@ -38,6 +36,8 @@
import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.List;
@@ -48,7 +48,8 @@
*/
public class ContainerRelaunch extends ContainerLaunch {
private static final Log LOG = LogFactory.getLog(ContainerRelaunch.class);
private static final Logger LOG =
LoggerFactory.getLogger(ContainerRelaunch.class);
public ContainerRelaunch(Context context, Configuration configuration,
Dispatcher dispatcher, ContainerExecutor exec, Application app,

View File

@@ -23,9 +23,9 @@
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
@@ -55,7 +55,8 @@
public class ContainersLauncher extends AbstractService
implements EventHandler<ContainersLauncherEvent> {
private static final Log LOG = LogFactory.getLog(ContainersLauncher.class);
private static final Logger LOG =
LoggerFactory.getLogger(ContainersLauncher.class);
private final Context context;
private final ContainerExecutor exec;

View File

@@ -21,9 +21,9 @@
import java.io.File;
import java.io.InterruptedIOException;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -47,8 +47,8 @@
*/
public class RecoveredContainerLaunch extends ContainerLaunch {
private static final Log LOG = LogFactory.getLog(
RecoveredContainerLaunch.class);
private static final Logger LOG =
LoggerFactory.getLogger(RecoveredContainerLaunch.class);
public RecoveredContainerLaunch(Context context, Configuration configuration,
Dispatcher dispatcher, ContainerExecutor exec, Application app,

View File

@@ -22,8 +22,8 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -46,7 +46,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class PrivilegedOperationExecutor {
private static final Log LOG = LogFactory.getLog(PrivilegedOperationExecutor
private static final Logger LOG =
LoggerFactory.getLogger(PrivilegedOperationExecutor
.class);
private volatile static PrivilegedOperationExecutor instance;

View File

@@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -45,8 +45,8 @@
@InterfaceStability.Unstable
public class CGroupsBlkioResourceHandlerImpl implements DiskResourceHandler {
static final Log LOG = LogFactory
.getLog(CGroupsBlkioResourceHandlerImpl.class);
static final Logger LOG =
LoggerFactory.getLogger(CGroupsBlkioResourceHandlerImpl.class);
private CGroupsHandler cGroupsHandler;
// Arbitrarily choose a weight - all that matters is that all containers

View File

@@ -20,8 +20,8 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -59,7 +59,8 @@
@InterfaceAudience.Private
public class CGroupsCpuResourceHandlerImpl implements CpuResourceHandler {
static final Log LOG = LogFactory.getLog(CGroupsCpuResourceHandlerImpl.class);
static final Logger LOG =
LoggerFactory.getLogger(CGroupsCpuResourceHandlerImpl.class);
private CGroupsHandler cGroupsHandler;
private boolean strictResourceUsageMode = false;

View File

@@ -22,8 +22,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -59,7 +59,8 @@
@InterfaceStability.Unstable
class CGroupsHandlerImpl implements CGroupsHandler {
private static final Log LOG = LogFactory.getLog(CGroupsHandlerImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(CGroupsHandlerImpl.class);
private static final String MTAB_FILE = "/proc/mounts";
private static final String CGROUPS_FSTYPE = "cgroup";
@@ -243,7 +244,7 @@ static Map<String, Set<String>> parseMtab(String mtab)
LOG.warn("Error while reading " + mtab, e);
}
} finally {
IOUtils.cleanup(LOG, in);
IOUtils.cleanupWithLogger(LOG, in);
}
return ret;

View File

@@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -44,8 +44,8 @@
@InterfaceStability.Unstable
public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler {
static final Log LOG = LogFactory.getLog(
CGroupsMemoryResourceHandlerImpl.class);
static final Logger LOG =
LoggerFactory.getLogger(CGroupsMemoryResourceHandlerImpl.class);
private static final CGroupsHandler.CGroupController MEMORY =
CGroupsHandler.CGroupController.MEMORY;
private static final int OPPORTUNISTIC_SWAPPINESS = 100;

View File

@@ -21,8 +21,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -49,7 +49,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ResourceHandlerModule {
static final Log LOG = LogFactory.getLog(ResourceHandlerModule.class);
static final Logger LOG =
LoggerFactory.getLogger(ResourceHandlerModule.class);
private static volatile ResourceHandlerChain resourceHandlerChain;
/**

View File

@@ -20,8 +20,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -31,6 +29,8 @@
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashMap;
@@ -43,8 +43,8 @@
public class TrafficControlBandwidthHandlerImpl
implements OutboundBandwidthResourceHandler {
private static final Log LOG = LogFactory
.getLog(TrafficControlBandwidthHandlerImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(TrafficControlBandwidthHandlerImpl.class);
//In the absence of 'scheduling' support, we'll 'infer' the guaranteed
//outbound bandwidth for each container based on this number. This will
//likely go away once we add support on the RM for this resource type.
@@ -117,7 +117,7 @@ public List<PrivilegedOperation> bootstrap(Configuration configuration)
.append("containerBandwidthMbit soft limit (in mbit/sec) is set to : ")
.append(containerBandwidthMbit);
LOG.info(logLine);
LOG.info(logLine.toString());
trafficController.bootstrap(device, rootBandwidthMbit, yarnBandwidthMbit);
return null;

View File

@@ -20,8 +20,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -29,6 +27,8 @@
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.util.ArrayList;
@@ -46,7 +46,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable class TrafficController {
private static final Log LOG = LogFactory.getLog(TrafficController.class);
private static final Logger LOG =
LoggerFactory.getLogger(TrafficController.class);
private static final int ROOT_QDISC_HANDLE = 42;
private static final int ZERO_CLASS_ID = 0;
private static final int ROOT_CLASS_ID = 1;

View File

@@ -20,8 +20,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -34,6 +32,8 @@
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
@@ -48,8 +48,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
private static final Log LOG =
LogFactory.getLog(DefaultLinuxContainerRuntime.class);
private static final Logger LOG =
LoggerFactory.getLogger(DefaultLinuxContainerRuntime.class);
private final PrivilegedOperationExecutor privilegedOperationExecutor;
private Configuration conf;

View File

@ -20,8 +20,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -30,6 +28,8 @@
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
@ -45,8 +45,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
private static final Log LOG = LogFactory
.getLog(DelegatingLinuxContainerRuntime.class);
private static final Logger LOG =
LoggerFactory.getLogger(DelegatingLinuxContainerRuntime.class);
private DefaultLinuxContainerRuntime defaultLinuxContainerRuntime;
private DockerLinuxContainerRuntime dockerLinuxContainerRuntime;
private JavaSandboxLinuxContainerRuntime javaSandboxLinuxContainerRuntime;

View File

@ -21,8 +21,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -132,8 +132,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
private static final Log LOG = LogFactory.getLog(
DockerLinuxContainerRuntime.class);
private static final Logger LOG =
LoggerFactory.getLogger(DockerLinuxContainerRuntime.class);
// This validates that the image is a proper docker image
public static final String DOCKER_IMAGE_PATTERN =

View File

@ -19,8 +19,6 @@
package org.apache.hadoop.yarn.server.nodemanager.
containermanager.linux.runtime;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -30,7 +28,8 @@
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext;
import org.apache.log4j.Logger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.FilePermission;
import java.io.IOException;
@ -117,8 +116,8 @@
@InterfaceStability.Unstable
public class JavaSandboxLinuxContainerRuntime
extends DefaultLinuxContainerRuntime {
private static final Log LOG =
LogFactory.getLog(DefaultLinuxContainerRuntime.class);
private static final Logger LOG =
LoggerFactory.getLogger(DefaultLinuxContainerRuntime.class);
private Configuration configuration;
private SandboxMode sandboxMode;
@ -254,7 +253,7 @@ public void prepareContainer(ContainerRuntimeContext ctx)
} catch (IOException e) {
throw new ContainerExecutionException(e);
} finally {
IOUtils.cleanup(LOG, policyOutputStream);
IOUtils.cleanupWithLogger(LOG, policyOutputStream);
}
}
}
@ -417,7 +416,7 @@ static final class NMContainerPolicyUtils{
+ SEPARATOR + "-\" {%n" +
" permission " + AllPermission.class.getCanonicalName() + ";%n};%n";
static final Logger LOG =
Logger.getLogger(NMContainerPolicyUtils.class);
LoggerFactory.getLogger(NMContainerPolicyUtils.class);
/**
* Write new policy file to policyOutStream which will include read access

View File

@ -20,13 +20,13 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
@ -38,7 +38,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class DockerClient {
private static final Log LOG = LogFactory.getLog(DockerClient.class);
private static final Logger LOG =
LoggerFactory.getLogger(DockerClient.class);
private static final String TMP_FILE_PREFIX = "docker.";
private static final String TMP_FILE_SUFFIX = ".cmd";
private final String tmpDirPath;

View File

@ -16,13 +16,13 @@
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
@ -30,7 +30,8 @@
* Utility class for executing common docker operations.
*/
public final class DockerCommandExecutor {
private static final Log LOG = LogFactory.getLog(DockerCommandExecutor.class);
private static final Logger LOG =
LoggerFactory.getLogger(DockerCommandExecutor.class);
/**
* Potential states that the docker status can return.

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
import static org.apache.hadoop.util.Shell.getAllShells;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataInputStream;
import java.io.File;
@ -44,8 +46,6 @@
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
@ -86,7 +86,8 @@
public class ContainerLocalizer {
static final Log LOG = LogFactory.getLog(ContainerLocalizer.class);
static final Logger LOG =
LoggerFactory.getLogger(ContainerLocalizer.class);
public static final String FILECACHE = "filecache";
public static final String APPCACHE = "appcache";

View File

@ -26,9 +26,9 @@
import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -58,7 +58,8 @@
class LocalResourcesTrackerImpl implements LocalResourcesTracker {
static final Log LOG = LogFactory.getLog(LocalResourcesTrackerImpl.class);
static final Logger LOG =
LoggerFactory.getLogger(LocalResourcesTrackerImpl.class);
private static final String RANDOM_DIR_REGEX = "-?\\d+";
private static final Pattern RANDOM_DIR_PATTERN = Pattern
.compile(RANDOM_DIR_REGEX);

View File

@ -24,9 +24,9 @@
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.event.Dispatcher;
@ -53,7 +53,8 @@
*/
public class LocalizedResource implements EventHandler<ResourceEvent> {
private static final Log LOG = LogFactory.getLog(LocalizedResource.class);
private static final Logger LOG =
LoggerFactory.getLogger(LocalizedResource.class);
volatile Path localPath;
volatile long size = -1;

View File

@ -19,6 +19,8 @@
import static org.apache.hadoop.fs.CreateFlag.CREATE;
import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataOutputStream;
import java.io.File;
@ -50,8 +52,6 @@
import java.util.concurrent.TimeUnit;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@ -148,7 +148,8 @@
public class ResourceLocalizationService extends CompositeService
implements EventHandler<LocalizationEvent>, LocalizationProtocol {
private static final Log LOG = LogFactory.getLog(ResourceLocalizationService.class);
private static final Logger LOG =
LoggerFactory.getLogger(ResourceLocalizationService.class);
public static final String NM_PRIVATE_DIR = "nmPrivate";
public static final FsPermission NM_PRIVATE_PERM = new FsPermission((short) 0700);
@ -956,7 +957,7 @@ public void run() {
}
}
} catch(Throwable t) {
LOG.fatal("Error: Shutting down", t);
LOG.error("Error: Shutting down", t);
} finally {
LOG.info("Public cache exiting");
threadPool.shutdownNow();

View File

@ -18,11 +18,11 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URISyntaxException;
import java.util.ArrayList;
@ -40,7 +40,8 @@
*/
public class ResourceSet {
private static final Log LOG = LogFactory.getLog(ResourceSet.class);
private static final Logger LOG =
LoggerFactory.getLogger(ResourceSet.class);
// resources by localization state (localized, pending, failed)
private Map<String, Path> localizedResources =

View File

@ -19,9 +19,9 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security;
import java.lang.annotation.Annotation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
@ -32,7 +32,8 @@
public class LocalizerSecurityInfo extends SecurityInfo {
private static final Log LOG = LogFactory.getLog(LocalizerSecurityInfo.class);
private static final Logger LOG =
LoggerFactory.getLogger(LocalizerSecurityInfo.class);
@Override
public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {

View File

@ -19,9 +19,9 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security;
import java.util.Collection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@ -30,8 +30,8 @@
public class LocalizerTokenSelector implements
TokenSelector<LocalizerTokenIdentifier> {
private static final Log LOG = LogFactory
.getLog(LocalizerTokenSelector.class);
private static final Logger LOG =
LoggerFactory.getLogger(LocalizerTokenSelector.class);
@SuppressWarnings("unchecked")
@Override

View File

@ -22,9 +22,9 @@
import java.net.InetSocketAddress;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
@ -50,8 +50,8 @@
*/
public class SharedCacheUploadService extends AbstractService implements
EventHandler<SharedCacheUploadEvent> {
private static final Log LOG =
LogFactory.getLog(SharedCacheUploadService.class);
private static final Logger LOG =
LoggerFactory.getLogger(SharedCacheUploadService.class);
private boolean enabled;
private FileSystem fs;

View File

@ -24,9 +24,9 @@
import java.net.URISyntaxException;
import java.util.concurrent.Callable;
import java.util.concurrent.ThreadLocalRandom;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@ -60,7 +60,8 @@ class SharedCacheUploader implements Callable<Boolean> {
static final FsPermission FILE_PERMISSION =
new FsPermission((short)00555);
private static final Log LOG = LogFactory.getLog(SharedCacheUploader.class);
private static final Logger LOG =
LoggerFactory.getLogger(SharedCacheUploader.class);
private final LocalResource resource;
private final Path localPath;

View File

@ -32,9 +32,9 @@
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
@ -83,8 +83,8 @@
public class AppLogAggregatorImpl implements AppLogAggregator {
private static final Log LOG = LogFactory
.getLog(AppLogAggregatorImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(AppLogAggregatorImpl.class);
private static final int THREAD_SLEEP_TIME = 1000;
// This is temporary solution. The configuration will be deleted once
// we find a more scalable method to only write a single log file per LRS.

View File

@ -26,9 +26,9 @@
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
@ -69,7 +69,8 @@
public class LogAggregationService extends AbstractService implements
LogHandler {
private static final Log LOG = LogFactory.getLog(LogAggregationService.class);
private static final Logger LOG =
LoggerFactory.getLogger(LogAggregationService.class);
private static final long MIN_LOG_ROLLING_INTERVAL = 3600;
// This configuration is for debug and test purpose. By setting
// this configuration as true. We can break the lower bound of

View File

@ -19,8 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -41,8 +41,8 @@
@Private
public class SampleContainerLogAggregationPolicy implements
ContainerLogAggregationPolicy {
private static final Log LOG =
LogFactory.getLog(SampleContainerLogAggregationPolicy.class);
private static final Logger LOG =
LoggerFactory.getLogger(SampleContainerLogAggregationPolicy.class);
static String SAMPLE_RATE = "SR";
public static final float DEFAULT_SAMPLE_RATE = 0.2f;

View File

@ -26,9 +26,9 @@
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.RejectedExecutionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
@ -60,8 +60,8 @@
public class NonAggregatingLogHandler extends AbstractService implements
LogHandler {
private static final Log LOG = LogFactory
.getLog(NonAggregatingLogHandler.class);
private static final Logger LOG =
LoggerFactory.getLogger(NonAggregatingLogHandler.class);
private final Dispatcher dispatcher;
private final DeletionService delService;
private final Map<ApplicationId, String> appOwners;

View File

@ -20,8 +20,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
@ -55,8 +55,8 @@
public class ContainersMonitorImpl extends AbstractService implements
ContainersMonitor {
private final static Log LOG = LogFactory
.getLog(ContainersMonitorImpl.class);
private final static Logger LOG =
LoggerFactory.getLogger(ContainersMonitorImpl.class);
private long monitoringInterval;
private MonitoringThread monitoringThread;

View File

@ -20,9 +20,9 @@
import java.io.IOException;
import java.util.TimerTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -31,8 +31,8 @@
*/
public class ConfigurationNodeLabelsProvider extends AbstractNodeLabelsProvider {
private static final Log LOG = LogFactory
.getLog(ConfigurationNodeLabelsProvider.class);
private static final Logger LOG =
LoggerFactory.getLogger(ConfigurationNodeLabelsProvider.class);
public ConfigurationNodeLabelsProvider() {
super("Configuration Based NodeLabels Provider");

View File

@ -25,9 +25,9 @@
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
@ -132,7 +132,8 @@ public TimerTask createTimerTask() {
*/
private class NodeLabelsScriptRunner extends TimerTask {
private final Log LOG = LogFactory.getLog(NodeLabelsScriptRunner.class);
private final Logger LOG =
LoggerFactory.getLogger(NodeLabelsScriptRunner.class);
public NodeLabelsScriptRunner() {
ArrayList<String> execScript = new ArrayList<String>();

View File

@ -20,6 +20,7 @@
import static org.fusesource.leveldbjni.JniDBFactory.asString;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@ -34,8 +35,6 @@
import java.util.TimerTask;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -69,7 +68,6 @@
import org.fusesource.leveldbjni.internal.NativeDB;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Logger;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
@ -79,8 +77,8 @@
public class NMLeveldbStateStoreService extends NMStateStoreService {
public static final Log LOG =
LogFactory.getLog(NMLeveldbStateStoreService.class);
public static final org.slf4j.Logger LOG =
LoggerFactory.getLogger(NMLeveldbStateStoreService.class);
private static final String DB_NAME = "yarn-nm-state";
private static final String DB_SCHEMA_VERSION_KEY = "nm-schema-version";
@ -1382,8 +1380,9 @@ public void run() {
}
}
private static class LeveldbLogger implements Logger {
private static final Log LOG = LogFactory.getLog(LeveldbLogger.class);
private static class LeveldbLogger implements org.iq80.leveldb.Logger {
private static final org.slf4j.Logger LOG =
LoggerFactory.getLogger(LeveldbLogger.class);
@Override
public void log(String message) {

View File

@ -24,9 +24,9 @@
import java.util.List;
import java.util.Map.Entry;
import java.util.TreeMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.SecretManager;
@ -48,8 +48,8 @@
public class NMContainerTokenSecretManager extends
BaseContainerTokenSecretManager {
private static final Log LOG = LogFactory
.getLog(NMContainerTokenSecretManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(NMContainerTokenSecretManager.class);
private MasterKeyData previousMasterKey;
private final TreeMap<Long, List<ContainerId>> recentlyStartedContainerTracker;

View File

@ -23,9 +23,9 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -45,8 +45,8 @@
public class NMTokenSecretManagerInNM extends BaseNMTokenSecretManager {
private static final Log LOG = LogFactory
.getLog(NMTokenSecretManagerInNM.class);
private static final Logger LOG =
LoggerFactory.getLogger(NMTokenSecretManagerInNM.class);
private MasterKeyData previousMasterKey;

View File

@ -22,9 +22,9 @@
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -66,7 +66,8 @@
*/
public class NMTimelinePublisher extends CompositeService {
private static final Log LOG = LogFactory.getLog(NMTimelinePublisher.class);
private static final Logger LOG =
LoggerFactory.getLogger(NMTimelinePublisher.class);
private Dispatcher dispatcher;

View File

@ -37,11 +37,11 @@
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.IOUtils;
@ -68,8 +68,8 @@
@Deprecated
public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
final static Log LOG = LogFactory
.getLog(CgroupsLCEResourcesHandler.class);
final static Logger LOG =
LoggerFactory.getLogger(CgroupsLCEResourcesHandler.class);
private Configuration conf;
private String cgroupPrefix;
@ -435,7 +435,7 @@ private Map<String, Set<String>> parseMtab() throws IOException {
} catch (IOException e) {
throw new IOException("Error while reading " + getMtabFileName(), e);
} finally {
IOUtils.cleanup(LOG, in);
IOUtils.cleanupWithLogger(LOG, in);
}
return ret;

View File

@ -18,18 +18,18 @@
package org.apache.hadoop.yarn.server.nodemanager.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Deprecated
public class DefaultLCEResourcesHandler implements LCEResourcesHandler {
final static Log LOG = LogFactory
.getLog(DefaultLCEResourcesHandler.class);
final static Logger LOG =
LoggerFactory.getLogger(DefaultLCEResourcesHandler.class);
private Configuration conf;

View File

@ -18,13 +18,13 @@
package org.apache.hadoop.yarn.server.nodemanager.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Helper class to determine hardware related characteristics such as the
@ -34,8 +34,8 @@
@InterfaceStability.Unstable
public class NodeManagerHardwareUtils {
private static final Log LOG = LogFactory
.getLog(NodeManagerHardwareUtils.class);
private static final Logger LOG =
LoggerFactory.getLogger(NodeManagerHardwareUtils.class);
private static boolean isHardwareDetectionEnabled(Configuration conf) {
return conf.getBoolean(

View File

@ -22,9 +22,9 @@
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -35,7 +35,8 @@
*/
public class ProcessIdFileReader {
private static final Log LOG = LogFactory.getLog(ProcessIdFileReader.class);
private static final Logger LOG =
LoggerFactory.getLogger(ProcessIdFileReader.class);
/**
* Get the process id from specified file path.

View File

@ -25,6 +25,8 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
@ -43,8 +45,6 @@
import javax.ws.rs.core.UriInfo;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.http.JettyUtils;
@ -82,7 +82,8 @@
@Singleton
@Path("/ws/v1/node")
public class NMWebServices {
private static final Log LOG = LogFactory.getLog(NMWebServices.class);
private static final Logger LOG =
LoggerFactory.getLogger(NMWebServices.class);
private Context nmContext;
private ResourceView rview;
private WebApp webapp;

View File

@ -18,9 +18,6 @@
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
@ -30,6 +27,8 @@
import com.google.inject.Inject;
import static org.apache.hadoop.util.GenericsUtil.isLog4jLogger;
public class NavBlock extends HtmlBlock implements YarnWebParams {
private Configuration conf;
@ -43,8 +42,7 @@ public NavBlock(Configuration conf) {
protected void render(Block html) {
boolean addErrorsAndWarningsLink = false;
Log log = LogFactory.getLog(NMErrorsAndWarningsPage.class);
if (log instanceof Log4JLogger) {
if (isLog4jLogger(NMErrorsAndWarningsPage.class)) {
Log4jWarningErrorMetricsAppender appender = Log4jWarningErrorMetricsAppender.findAppender();
if (appender != null) {
addErrorsAndWarningsLink = true;

View File

@ -19,9 +19,9 @@
package org.apache.hadoop.yarn.server.nodemanager.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
@ -45,7 +45,8 @@
public class WebServer extends AbstractService {
private static final Log LOG = LogFactory.getLog(WebServer.class);
private static final Logger LOG =
LoggerFactory.getLogger(WebServer.class);
private final Context nmContext;
private final NMWebApp nmWebApp;

View File

@ -19,12 +19,12 @@
package org.apache.hadoop.yarn.server.nodemanager;
import static org.junit.Assert.fail;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
@ -60,8 +60,8 @@
public class DummyContainerManager extends ContainerManagerImpl {
private static final Log LOG = LogFactory
.getLog(DummyContainerManager.class);
private static final Logger LOG =
LoggerFactory.getLogger(DummyContainerManager.class);
public DummyContainerManager(Context context, ContainerExecutor exec,
DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,

View File

@ -19,11 +19,11 @@
package org.apache.hadoop.yarn.server.nodemanager;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@ -46,7 +46,8 @@
* real RM.
*/
public class MockNodeStatusUpdater extends NodeStatusUpdaterImpl {
static final Log LOG = LogFactory.getLog(MockNodeStatusUpdater.class);
static final Logger LOG =
LoggerFactory.getLogger(MockNodeStatusUpdater.class);
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);

View File

@ -20,9 +20,9 @@
import java.io.File;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.FsPermission;
@ -34,8 +34,8 @@
public class TestContainerManagerWithLCE extends TestContainerManager {
private static final Log LOG = LogFactory
.getLog(TestContainerManagerWithLCE.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestContainerManagerWithLCE.class);
public TestContainerManagerWithLCE() throws UnsupportedFileSystemException {
super();

View File

@ -27,6 +27,8 @@
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
@ -40,8 +42,6 @@
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileContext;
@ -137,8 +137,8 @@
* </ol>
*/
public class TestLinuxContainerExecutor {
private static final Log LOG = LogFactory
.getLog(TestLinuxContainerExecutor.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestLinuxContainerExecutor.class);
private static File workSpace;
static {

View File

@ -29,6 +29,8 @@
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileReader;
@ -44,8 +46,6 @@
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
@ -79,8 +79,8 @@
public class TestLinuxContainerExecutorWithMocks {
private static final Log LOG = LogFactory
.getLog(TestLinuxContainerExecutorWithMocks.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestLinuxContainerExecutorWithMocks.class);
private static final String MOCK_EXECUTOR =
"./src/test/resources/mock-container-executor";

View File

@ -22,11 +22,11 @@
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
@ -47,8 +47,8 @@
public class TestNodeHealthService {
private static volatile Log LOG = LogFactory
.getLog(TestNodeHealthService.class);
private static volatile Logger LOG =
LoggerFactory.getLogger(TestNodeHealthService.class);
protected static File testRootDir = new File("target",
TestNodeHealthService.class.getName() + "-localDir").getAbsoluteFile();

View File

@ -22,6 +22,8 @@
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@ -32,8 +34,6 @@
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
@ -82,7 +82,8 @@ public class TestNodeManagerReboot {
private FileContext localFS;
private MyNodeManager nm;
private DeletionService delService;
static final Log LOG = LogFactory.getLog(TestNodeManagerReboot.class);
static final Logger LOG =
LoggerFactory.getLogger(TestNodeManagerReboot.class);
@Before
public void setup() throws UnsupportedFileSystemException {

View File

@ -21,6 +21,8 @@
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@ -36,8 +38,6 @@
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
@ -113,8 +113,8 @@ public class TestNodeManagerResync {
new NodeManagerEvent(NodeManagerEventType.RESYNC);
private final long DUMMY_RM_IDENTIFIER = 1234;
protected static Log LOG = LogFactory
.getLog(TestNodeManagerResync.class);
protected static final Logger LOG =
LoggerFactory.getLogger(TestNodeManagerResync.class);
@Before
public void setup() throws UnsupportedFileSystemException {

View File

@ -21,6 +21,8 @@
import static org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils.newNodeHeartbeatResponse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.File;
@ -45,8 +47,6 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
@ -125,7 +125,8 @@ public class TestNodeStatusUpdater {
DefaultMetricsSystem.setMiniClusterMode(true);
}
static final Log LOG = LogFactory.getLog(TestNodeStatusUpdater.class);
static final Logger LOG =
LoggerFactory.getLogger(TestNodeStatusUpdater.class);
static final File basedir =
new File("target", TestNodeStatusUpdater.class.getName());
static final File nmLocalDir = new File(basedir, "nm0");

View File

@ -33,9 +33,9 @@
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
@ -87,8 +87,8 @@
*
*/
public abstract class BaseAMRMProxyTest {
private static final Log LOG = LogFactory
.getLog(BaseAMRMProxyTest.class);
private static final Logger LOG =
LoggerFactory.getLogger(BaseAMRMProxyTest.class);
// The AMRMProxyService instance that will be used by all the test cases
private MockAMRMProxyService amrmProxyService;

View File

@ -22,9 +22,9 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@ -50,8 +50,8 @@
public class TestAMRMProxyService extends BaseAMRMProxyTest {
private static final Log LOG = LogFactory
.getLog(TestAMRMProxyService.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestAMRMProxyService.class);
private static MockResourceManagerFacade mockRM;

View File

@ -19,6 +19,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager;
import static org.mockito.Mockito.spy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@ -30,8 +32,6 @@
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
@ -114,8 +114,8 @@ public BaseContainerManagerTest() throws UnsupportedFileSystemException {
tmpDir = new File("target", this.getClass().getSimpleName() + "-tmpDir");
}
protected static Log LOG = LogFactory
.getLog(BaseContainerManagerTest.class);
protected static Logger LOG =
LoggerFactory.getLogger(BaseContainerManagerTest.class);
protected static final int HTTP_PORT = 5412;
protected Configuration conf = new YarnConfiguration();

View File

@ -26,6 +26,8 @@
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.collect.Sets;
@ -42,8 +44,6 @@
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@ -73,7 +73,8 @@
import org.junit.Test;
public class TestAuxServices {
private static final Log LOG = LogFactory.getLog(TestAuxServices.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestAuxServices.class);
private static final File TEST_DIR = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),

View File

@ -40,7 +40,6 @@
import java.util.Map;
import com.google.common.base.Supplier;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
@ -110,6 +109,7 @@
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.slf4j.LoggerFactory;
public class TestContainerManager extends BaseContainerManagerTest {
@ -118,7 +118,7 @@ public TestContainerManager() throws UnsupportedFileSystemException {
}
static {
LOG = LogFactory.getLog(TestContainerManager.class);
LOG = LoggerFactory.getLogger(TestContainerManager.class);
}
private boolean delayContainers = false;

View File

@ -20,13 +20,13 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.ArrayList;
@ -34,8 +34,8 @@
import java.util.List;
public class TestPrivilegedOperationExecutor {
private static final Log LOG = LogFactory
.getLog(TestPrivilegedOperationExecutor.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestPrivilegedOperationExecutor.class);
private String localDataDir;
private String customExecutorPath;
private Configuration nullConf = null;

View File

@ -21,8 +21,8 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
@ -58,8 +58,8 @@
* Tests for the CGroups handler implementation.
*/
public class TestCGroupsHandlerImpl {
private static final Log LOG =
LogFactory.getLog(TestCGroupsHandlerImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestCGroupsHandlerImpl.class);
private PrivilegedOperationExecutor privilegedOperationExecutorMock;
private String tmpPath;

View File

@ -20,19 +20,19 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
public class TestResourceHandlerModule {
private static final Log LOG = LogFactory.
getLog(TestResourceHandlerModule.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestResourceHandlerModule.class);
Configuration emptyConf;
Configuration networkEnabledConf;

View File

@ -20,8 +20,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -35,6 +33,8 @@
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.List;
@ -49,8 +49,8 @@
import static org.mockito.Mockito.when;
public class TestTrafficControlBandwidthHandlerImpl {
private static final Log LOG =
LogFactory.getLog(TestTrafficControlBandwidthHandlerImpl.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestTrafficControlBandwidthHandlerImpl.class);
private static final int ROOT_BANDWIDTH_MBIT = 100;
private static final int YARN_BANDWIDTH_MBIT = 70;
private static final int TEST_CLASSID = 100;

View File

@ -20,8 +20,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -33,6 +31,8 @@
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@ -49,7 +49,8 @@
import static org.mockito.Mockito.when;
public class TestTrafficController {
private static final Log LOG = LogFactory.getLog(TestTrafficController.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestTrafficController.class);
private static final int ROOT_BANDWIDTH_MBIT = 100;
private static final int YARN_BANDWIDTH_MBIT = 70;
private static final int CONTAINER_BANDWIDTH_MBIT = 10;

View File

@ -20,8 +20,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
@ -44,6 +42,8 @@
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@ -64,8 +64,8 @@
import static org.mockito.Mockito.*;
public class TestDockerContainerRuntime {
private static final Log LOG = LogFactory
.getLog(TestDockerContainerRuntime.class);
private static final Logger LOG =
LoggerFactory.getLogger(TestDockerContainerRuntime.class);
private Configuration conf;
private PrivilegedOperationExecutor mockExecutor;
private CGroupsHandler mockCGroupsHandler;

View File

@ -37,6 +37,8 @@
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
@ -52,8 +54,6 @@
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@ -94,7 +94,8 @@
public class TestContainerLocalizer {
static final Log LOG = LogFactory.getLog(TestContainerLocalizer.class);
static final Logger LOG =
LoggerFactory.getLogger(TestContainerLocalizer.class);
static final Path basedir =
new Path("target", TestContainerLocalizer.class.getName());
static final FsPermission CACHE_DIR_PERM = new FsPermission((short)0710);
@ -299,7 +300,7 @@ public void run() {
try {
localizerA.runLocalization(nmAddr);
} catch (Exception e) {
LOG.warn(e);
LOG.warn(e.toString());
}
}
};
@ -309,7 +310,7 @@ public void run() {
try {
localizerB.runLocalization(nmAddr);
} catch (Exception e) {
LOG.warn(e);
LOG.warn(e.toString());
}
}
};

View File

@ -60,7 +60,6 @@
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@ -137,6 +136,7 @@
import org.eclipse.jetty.util.MultiException;
import com.google.common.base.Supplier;
import org.slf4j.LoggerFactory;
//@Ignore
public class TestLogAggregationService extends BaseContainerManagerTest {
@ -144,7 +144,7 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
private Map<ApplicationAccessType, String> acls = createAppAcls();
static {
LOG = LogFactory.getLog(TestLogAggregationService.class);
LOG = LoggerFactory.getLogger(TestLogAggregationService.class);
}
private static RecordFactory recordFactory = RecordFactoryProvider

View File

@ -36,7 +36,6 @@
import java.util.regex.Pattern;
import com.google.common.base.Supplier;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
@ -76,6 +75,7 @@
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.LoggerFactory;
public class TestContainersMonitor extends BaseContainerManagerTest {
@ -84,7 +84,7 @@ public TestContainersMonitor() throws UnsupportedFileSystemException {
}
static {
LOG = LogFactory.getLog(TestContainersMonitor.class);
LOG = LoggerFactory.getLogger(TestContainersMonitor.class);
}
@Before
public void setup() throws IOException {

View File

@ -24,7 +24,6 @@
import java.util.HashMap;
import java.util.List;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest;
@ -57,6 +56,7 @@
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.LoggerFactory;
import static org.mockito.Mockito.spy;
@ -70,7 +70,7 @@ public TestContainerSchedulerQueuing() throws UnsupportedFileSystemException {
}
static {
LOG = LogFactory.getLog(TestContainerSchedulerQueuing.class);
LOG = LoggerFactory.getLogger(TestContainerSchedulerQueuing.class);
}
private boolean delayContainers = true;