HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. Contributed by Wenxin He.

Akira Ajisaka 2017-07-18 13:32:37 +09:00
parent 5b007921cd
commit ccaf036662
257 changed files with 1021 additions and 932 deletions

View File

@ -79,8 +79,6 @@
import com.google.common.base.Charsets;
import org.apache.commons.collections.map.UnmodifiableMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@ -98,6 +96,8 @@
import org.apache.hadoop.util.StringUtils;
import org.codehaus.stax2.XMLInputFactory2;
import org.codehaus.stax2.XMLStreamReader2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
@ -192,11 +192,12 @@
@InterfaceStability.Stable
public class Configuration implements Iterable<Map.Entry<String,String>>,
Writable {
private static final Log LOG =
LogFactory.getLog(Configuration.class);
private static final Logger LOG =
LoggerFactory.getLogger(Configuration.class);
private static final Log LOG_DEPRECATION =
LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation");
private static final Logger LOG_DEPRECATION =
LoggerFactory.getLogger(
"org.apache.hadoop.conf.Configuration.deprecation");
private boolean quietmode = true;
@ -2885,10 +2886,10 @@ private Resource loadResource(Properties properties,
}
return null;
} catch (IOException e) {
LOG.fatal("error parsing conf " + name, e);
LOG.error("error parsing conf " + name, e);
throw new RuntimeException(e);
} catch (XMLStreamException e) {
LOG.fatal("error parsing conf " + name, e);
LOG.error("error parsing conf " + name, e);
throw new RuntimeException(e);
}
}
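
For reference, a minimal before/after sketch of the logger-declaration change this commit applies throughout hadoop-common; the class and logger names below are illustrative, not part of the patch:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggingMigrationSketch {
  // Before: commons-logging declaration, as removed by this commit.
  private static final Log OLD_LOG =
      LogFactory.getLog(LoggingMigrationSketch.class);

  // After: slf4j declaration, as introduced by this commit.
  private static final Logger LOG =
      LoggerFactory.getLogger(LoggingMigrationSketch.class);

  // Named loggers (like Configuration's deprecation logger) carry over unchanged.
  private static final Logger LOG_DEPRECATION =
      LoggerFactory.getLogger("org.example.Example.deprecation");
}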

View File

@ -22,9 +22,10 @@
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import org.apache.commons.logging.*;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
@ -41,8 +42,8 @@
public abstract class ReconfigurableBase
extends Configured implements Reconfigurable {
private static final Log LOG =
LogFactory.getLog(ReconfigurableBase.class);
private static final Logger LOG =
LoggerFactory.getLogger(ReconfigurableBase.class);
// Use for testing purpose.
private ReconfigurationUtil reconfigurationUtil = new ReconfigurationUtil();

View File

@ -18,8 +18,6 @@
package org.apache.hadoop.conf;
import org.apache.commons.logging.*;
import org.apache.commons.lang.StringEscapeUtils;
import java.util.Collection;
@ -33,6 +31,8 @@
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A servlet for changing a node's configuration.
@ -45,8 +45,8 @@ public class ReconfigurationServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
private static final Log LOG =
LogFactory.getLog(ReconfigurationServlet.class);
private static final Logger LOG =
LoggerFactory.getLogger(ReconfigurationServlet.class);
// the prefix used to fing the attribute holding the reconfigurable
// for a given request

View File

@ -26,12 +26,12 @@
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY;
@ -42,8 +42,8 @@
*/
@InterfaceAudience.Private
public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec {
private static final Log LOG =
LogFactory.getLog(JceAesCtrCryptoCodec.class.getName());
private static final Logger LOG =
LoggerFactory.getLogger(JceAesCtrCryptoCodec.class.getName());
private Configuration conf;
private String provider;
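
LoggerFactory.getLogger(SomeClass.class) and LoggerFactory.getLogger(SomeClass.class.getName()) resolve to the same named logger, which is why the patch can keep the existing getName() calls and still get identical configuration. An illustrative check (class name is hypothetical):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggerNameCheck {
  public static void main(String[] args) {
    Logger byClass = LoggerFactory.getLogger(LoggerNameCheck.class);
    Logger byName = LoggerFactory.getLogger(LoggerNameCheck.class.getName());
    // Both loggers share the same name, so they are configured identically.
    System.out.println(byClass.getName().equals(byName.getName())); // true
  }
}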

View File

@ -26,22 +26,22 @@
import java.security.SecureRandom;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Preconditions;
import org.apache.hadoop.crypto.random.OsSecureRandom;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implement the AES-CTR crypto codec using JNI into OpenSSL.
*/
@InterfaceAudience.Private
public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec {
private static final Log LOG =
LogFactory.getLog(OpensslAesCtrCryptoCodec.class.getName());
private static final Logger LOG =
LoggerFactory.getLogger(OpensslAesCtrCryptoCodec.class.getName());
private Configuration conf;
private Random random;

View File

@ -26,13 +26,13 @@
import javax.crypto.NoSuchPaddingException;
import javax.crypto.ShortBufferException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.base.Preconditions;
import org.apache.hadoop.util.PerformanceAdvisory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* OpenSSL cipher using JNI.
@ -41,8 +41,8 @@
*/
@InterfaceAudience.Private
public final class OpensslCipher {
private static final Log LOG =
LogFactory.getLog(OpensslCipher.class.getName());
private static final Logger LOG =
LoggerFactory.getLogger(OpensslCipher.class.getName());
public static final int ENCRYPT_MODE = 1;
public static final int DECRYPT_MODE = 0;

View File

@ -19,13 +19,13 @@
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.base.Preconditions;
import org.apache.hadoop.util.PerformanceAdvisory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* OpenSSL secure random using JNI.
@ -44,8 +44,8 @@
@InterfaceAudience.Private
public class OpensslSecureRandom extends Random {
private static final long serialVersionUID = -7828193502768789584L;
private static final Log LOG =
LogFactory.getLog(OpensslSecureRandom.class.getName());
private static final Logger LOG =
LoggerFactory.getLogger(OpensslSecureRandom.class.getName());
/** If native SecureRandom unavailable, use java SecureRandom */
private java.security.SecureRandom fallback = null;

View File

@ -23,12 +23,12 @@
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT;
@ -39,7 +39,8 @@
*/
@InterfaceAudience.Private
public class OsSecureRandom extends Random implements Closeable, Configurable {
public static final Log LOG = LogFactory.getLog(OsSecureRandom.class);
public static final Logger LOG =
LoggerFactory.getLogger(OsSecureRandom.class);
private static final long serialVersionUID = 6391500337172057900L;
@ -112,7 +113,7 @@ synchronized protected int next(int nbits) {
@Override
synchronized public void close() {
if (stream != null) {
IOUtils.cleanup(LOG, stream);
IOUtils.cleanupWithLogger(LOG, stream);
stream = null;
}
}
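
IOUtils.cleanup takes a commons-logging Log, so call sites moving to slf4j switch to the cleanupWithLogger overload seen above. A rough sketch of what such a helper does, under the assumption of this simplified form rather than the actual Hadoop implementation:

import java.io.Closeable;
import java.io.IOException;
import org.slf4j.Logger;

final class CleanupSketch {
  // Close each stream, logging (rather than propagating) any IOException.
  static void cleanupWithLogger(Logger log, Closeable... closeables) {
    for (Closeable c : closeables) {
      if (c == null) {
        continue;
      }
      try {
        c.close();
      } catch (IOException e) {
        if (log != null) {
          log.debug("Exception in closing {}", c, e);
        }
      }
    }
  }
}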

View File

@ -32,8 +32,6 @@
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -52,6 +50,8 @@
import org.apache.hadoop.util.Progressable;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class provides an interface for implementors of a Hadoop file system
@ -66,7 +66,7 @@
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class AbstractFileSystem {
static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
static final Logger LOG = LoggerFactory.getLogger(AbstractFileSystem.class);
/** Recording statistics per a file system class. */
private static final Map<URI, Statistics>

View File

@ -27,14 +27,14 @@
import java.util.Arrays;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Abstract Checksumed Fs.
@ -110,8 +110,8 @@ private int getSumBufferSize(int bytesPerSum, int bufferSize, Path file)
* It verifies that data matches checksums.
*******************************************************/
private static class ChecksumFSInputChecker extends FSInputChecker {
public static final Log LOG
= LogFactory.getLog(FSInputChecker.class);
public static final Logger LOG =
LoggerFactory.getLogger(FSInputChecker.class);
private static final int HEADER_LENGTH = 8;
private ChecksumFs fs;

View File

@ -26,12 +26,12 @@
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A daemon thread that waits for the next file system to renew.
@ -39,8 +39,8 @@
@InterfaceAudience.Private
public class DelegationTokenRenewer
extends Thread {
private static final Log LOG = LogFactory
.getLog(DelegationTokenRenewer.class);
private static final Logger LOG = LoggerFactory
.getLogger(DelegationTokenRenewer.class);
/** The renewable interface used by the renewer. */
public interface Renewable {
@ -243,7 +243,7 @@ public <T extends FileSystem & Renewable> void removeRenewAction(
LOG.error("Interrupted while canceling token for " + fs.getUri()
+ "filesystem");
if (LOG.isDebugEnabled()) {
LOG.debug(ie.getStackTrace());
LOG.debug("Exception in removeRenewAction: ", ie);
}
}
}
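
The old call LOG.debug(ie.getStackTrace()) relied on commons-logging accepting any Object; the slf4j Logger API takes a String message, optionally followed by format arguments and a trailing Throwable, so the call becomes a message plus the exception. A sketch of the two idiomatic forms, with the filesystem argument as a stand-in value:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ThrowableLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ThrowableLoggingSketch.class);

  void handle(InterruptedException ie) {
    // Message plus Throwable: logs the full stack trace at DEBUG.
    LOG.debug("Exception in removeRenewAction: ", ie);
    // Parameterized form: the trailing Throwable is still printed with its stack trace.
    LOG.debug("Interrupted while canceling token for {}", "hypotheticalFs", ie);
  }
}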

View File

@ -22,11 +22,12 @@
import java.io.InputStream;
import java.util.zip.Checksum;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.nio.IntBuffer;
@ -37,8 +38,8 @@
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
abstract public class FSInputChecker extends FSInputStream {
public static final Log LOG
= LogFactory.getLog(FSInputChecker.class);
public static final Logger LOG =
LoggerFactory.getLogger(FSInputChecker.class);
/** The file name from which data is read from */
protected Path file;

View File

@ -35,8 +35,6 @@
import java.util.TreeSet;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -63,6 +61,8 @@
import com.google.common.base.Preconditions;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The FileContext class provides an interface for users of the Hadoop
@ -169,7 +169,7 @@
@InterfaceStability.Stable
public class FileContext {
public static final Log LOG = LogFactory.getLog(FileContext.class);
public static final Logger LOG = LoggerFactory.getLogger(FileContext.class);
/**
* Default permission for directory and symlink
* In previous versions, this default permission was also used to
@ -332,7 +332,7 @@ public AbstractFileSystem run() throws UnsupportedFileSystemException {
}
});
} catch (InterruptedException ex) {
LOG.error(ex);
LOG.error(ex.toString());
throw new IOException("Failed to get the AbstractFileSystem for path: "
+ uri, ex);
}
@ -446,7 +446,7 @@ public static FileContext getFileContext(final URI defaultFsUri,
} catch (UnsupportedFileSystemException ex) {
throw ex;
} catch (IOException ex) {
LOG.error(ex);
LOG.error(ex.toString());
throw new RuntimeException(ex);
}
return getFileContext(defaultAfs, aConf);
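
Because slf4j has no error(Object) overload, the patch logs ex.toString(); where the stack trace is worth keeping, the (String, Throwable) form is the usual alternative. Both options, sketched with an illustrative message:

import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ErrorLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ErrorLoggingSketch.class);

  void report(IOException ex) {
    // What the patch does: message only, no stack trace.
    LOG.error(ex.toString());
    // Alternative: message plus Throwable, which also prints the stack trace.
    LOG.error("Failed to get the AbstractFileSystem", ex);
  }
}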

View File

@ -45,8 +45,6 @@
import org.apache.commons.collections.map.CaseInsensitiveMap;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -57,6 +55,8 @@
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A collection of file-processing util methods
@ -65,7 +65,7 @@
@InterfaceStability.Evolving
public class FileUtil {
private static final Log LOG = LogFactory.getLog(FileUtil.class);
private static final Logger LOG = LoggerFactory.getLogger(FileUtil.class);
/* The error code is defined in winutils to indicate insufficient
* privilege to create symbolic links. This value need to keep in
@ -697,7 +697,7 @@ private static void unTarUsingJava(File inFile, File untarDir,
entry = tis.getNextTarEntry();
}
} finally {
IOUtils.cleanup(LOG, tis, inputStream);
IOUtils.cleanupWithLogger(LOG, tis, inputStream);
}
}
@ -1287,7 +1287,7 @@ public static String[] createJarWithClassPath(String inputClassPath, Path pwd,
bos = new BufferedOutputStream(fos);
jos = new JarOutputStream(bos, jarManifest);
} finally {
IOUtils.cleanup(LOG, jos, bos, fos);
IOUtils.cleanupWithLogger(LOG, jos, bos, fos);
}
String[] jarCp = {classPathJar.getCanonicalPath(),
unexpandedWildcardClasspath.toString()};

View File

@ -24,8 +24,6 @@
import java.util.LinkedList;
import org.apache.commons.lang.WordUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@ -39,12 +37,14 @@
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Provide command line access to a FileSystem. */
@InterfaceAudience.Private
public class FsShell extends Configured implements Tool {
static final Log LOG = LogFactory.getLog(FsShell.class);
static final Logger LOG = LoggerFactory.getLogger(FsShell.class);
private static final int MAX_LINE_WIDTH = 80;

View File

@ -22,7 +22,6 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.ChmodParser;
@ -32,6 +31,7 @@
import org.apache.hadoop.fs.shell.FsCommand;
import org.apache.hadoop.fs.shell.PathData;
import org.apache.hadoop.util.Shell;
import org.slf4j.Logger;
/**
* This class is the home for file permissions related commands.
@ -41,7 +41,7 @@
@InterfaceStability.Unstable
public class FsShellPermissions extends FsCommand {
static Log LOG = FsShell.LOG;
static final Logger LOG = FsShell.LOG;
/**
* Register the permission related commands with the factory

View File

@ -23,18 +23,19 @@
import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class Globber {
public static final Log LOG = LogFactory.getLog(Globber.class.getName());
public static final Logger LOG =
LoggerFactory.getLogger(Globber.class.getName());
private final FileSystem fs;
private final FileContext fc;

View File

@ -17,14 +17,14 @@
*/
package org.apache.hadoop.fs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.FileNotFoundException;
@ -50,7 +50,8 @@
public class HarFileSystem extends FileSystem {
private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
private static final Logger LOG =
LoggerFactory.getLogger(HarFileSystem.class);
public static final String METADATA_CACHE_ENTRIES_KEY = "fs.har.metadatacache.entries";
public static final int METADATA_CACHE_ENTRIES_DEFAULT = 10;
@ -1173,7 +1174,7 @@ private void parseMetaData() throws IOException {
LOG.warn("Encountered exception ", ioe);
throw ioe;
} finally {
IOUtils.cleanup(LOG, lin, in);
IOUtils.cleanupWithLogger(LOG, lin, in);
}
FSDataInputStream aIn = fs.open(archiveIndexPath);
@ -1198,7 +1199,7 @@ private void parseMetaData() throws IOException {
}
}
} finally {
IOUtils.cleanup(LOG, aIn);
IOUtils.cleanupWithLogger(LOG, aIn);
}
}
}

View File

@ -23,7 +23,6 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -31,6 +30,8 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** An implementation of a round-robin scheme for disk allocation for creating
* files. The way it works is that it is kept track what disk was last
@ -245,8 +246,8 @@ int getCurrentDirectoryIndex() {
private static class AllocatorPerContext {
private final Log LOG =
LogFactory.getLog(AllocatorPerContext.class);
private static final Logger LOG =
LoggerFactory.getLogger(AllocatorPerContext.class);
private Random dirIndexRandomizer = new Random();
private String contextCfgItemName;

View File

@ -19,11 +19,12 @@
import java.io.IOException;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Provides a trash facility which supports pluggable Trash policies.
@ -34,8 +35,8 @@
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Trash extends Configured {
private static final org.apache.commons.logging.Log LOG =
LogFactory.getLog(Trash.class);
private static final Logger LOG =
LoggerFactory.getLogger(Trash.class);
private TrashPolicy trashPolicy; // configured trash policy instance

View File

@ -30,8 +30,6 @@
import java.util.Collection;
import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -41,6 +39,8 @@
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Provides a <i>trash</i> feature. Files are moved to a user's trash
* directory, a subdirectory of their home directory named ".Trash". Files are
@ -54,8 +54,8 @@
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class TrashPolicyDefault extends TrashPolicy {
private static final Log LOG =
LogFactory.getLog(TrashPolicyDefault.class);
private static final Logger LOG =
LoggerFactory.getLogger(TrashPolicyDefault.class);
private static final Path CURRENT = new Path("Current");

View File

@ -25,8 +25,6 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.ftp.FTP;
import org.apache.commons.net.ftp.FTPClient;
import org.apache.commons.net.ftp.FTPFile;
@ -45,6 +43,8 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
@ -56,8 +56,8 @@
@InterfaceStability.Stable
public class FTPFileSystem extends FileSystem {
public static final Log LOG = LogFactory
.getLog(FTPFileSystem.class);
public static final Logger LOG = LoggerFactory
.getLogger(FTPFileSystem.class);
public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024;

View File

@ -24,8 +24,6 @@
import java.io.ObjectInputValidation;
import java.io.Serializable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -33,6 +31,8 @@
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A class for file/directory permissions.
@ -41,7 +41,7 @@
@InterfaceStability.Stable
public class FsPermission implements Writable, Serializable,
ObjectInputValidation {
private static final Log LOG = LogFactory.getLog(FsPermission.class);
private static final Logger LOG = LoggerFactory.getLogger(FsPermission.class);
private static final long serialVersionUID = 0x2fe08564;
static final WritableFactory FACTORY = new WritableFactory() {

View File

@ -23,19 +23,20 @@
import java.util.Iterator;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.StringUtils;
import com.jcraft.jsch.ChannelSftp;
import com.jcraft.jsch.JSch;
import com.jcraft.jsch.JSchException;
import com.jcraft.jsch.Session;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** Concurrent/Multiple Connections. */
class SFTPConnectionPool {
public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
public static final Logger LOG =
LoggerFactory.getLogger(SFTPFileSystem.class);
// Maximum number of allowed live connections. This doesn't mean we cannot
// have more live connections. It means that when we have more
// live connections than this threshold, any unused connection will be

View File

@ -26,8 +26,6 @@
import java.util.ArrayList;
import java.util.Vector;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -41,11 +39,14 @@
import com.jcraft.jsch.ChannelSftp.LsEntry;
import com.jcraft.jsch.SftpATTRS;
import com.jcraft.jsch.SftpException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** SFTP FileSystem. */
public class SFTPFileSystem extends FileSystem {
public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class);
public static final Logger LOG =
LoggerFactory.getLogger(SFTPFileSystem.class);
private SFTPConnectionPool connectionPool;
private URI uri;

View File

@ -27,8 +27,6 @@
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -36,6 +34,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An abstract class for the execution of a file system command
@ -59,7 +59,7 @@ abstract public class Command extends Configured {
private int depth = 0;
protected ArrayList<Exception> exceptions = new ArrayList<Exception>();
private static final Log LOG = LogFactory.getLog(Command.class);
private static final Logger LOG = LoggerFactory.getLogger(Command.class);
/** allows stdout to be captured if necessary */
public PrintStream out = System.out;

View File

@ -26,8 +26,6 @@
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -47,6 +45,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
*
@ -141,7 +141,8 @@ public interface ActiveStandbyElectorCallback {
@VisibleForTesting
protected static final String BREADCRUMB_FILENAME = "ActiveBreadCrumb";
public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
public static final Logger LOG =
LoggerFactory.getLogger(ActiveStandbyElector.class);
private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
@ -712,7 +713,7 @@ protected ZooKeeper createZooKeeper() throws IOException {
}
private void fatalError(String errorMessage) {
LOG.fatal(errorMessage);
LOG.error(errorMessage);
reset();
appClient.notifyFatalError(errorMessage);
}
@ -824,10 +825,10 @@ private boolean reEstablishSession() {
createConnection();
success = true;
} catch(IOException e) {
LOG.warn(e);
LOG.warn(e.toString());
sleepFor(5000);
} catch(KeeperException e) {
LOG.warn(e);
LOG.warn(e.toString());
sleepFor(5000);
}
++connectionRetryCount;
@ -866,7 +867,7 @@ public synchronized void terminateConnection() {
try {
tempZk.close();
} catch(InterruptedException e) {
LOG.warn(e);
LOG.warn(e.toString());
}
zkConnectionState = ConnectionState.TERMINATED;
wantToBeInElection = false;
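
slf4j defines no FATAL level, so every LOG.fatal(...) call in this commit becomes LOG.error(...). A marker can preserve the distinction for backends that understand it; a sketch, with the marker name as an assumption rather than anything this patch configures:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
import org.slf4j.MarkerFactory;

class FatalMappingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(FatalMappingSketch.class);
  private static final Marker FATAL = MarkerFactory.getMarker("FATAL");

  void fatalError(String errorMessage) {
    // Straight mapping used by the patch: FATAL severity collapses into ERROR.
    LOG.error(errorMessage);
    // Optional: tag the event so a backend that supports markers can still treat it as fatal.
    LOG.error(FATAL, errorMessage);
  }
}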

View File

@ -19,9 +19,6 @@
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -32,6 +29,8 @@
import org.apache.hadoop.ipc.RPC;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The FailOverController is responsible for electing an active service
@ -43,7 +42,8 @@
@InterfaceStability.Evolving
public class FailoverController {
private static final Log LOG = LogFactory.getLog(FailoverController.class);
private static final Logger LOG =
LoggerFactory.getLogger(FailoverController.class);
private final int gracefulFenceTimeout;
private final int rpcTimeoutToNewActive;
@ -252,7 +252,7 @@ public void failover(HAServiceTarget fromSvc,
} catch (FailoverFailedException ffe) {
msg += ". Failback to " + fromSvc +
" failed (" + ffe.getMessage() + ")";
LOG.fatal(msg);
LOG.error(msg);
}
}
throw new FailoverFailedException(msg, cause);

View File

@ -28,8 +28,6 @@
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@ -43,6 +41,8 @@
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A command-line tool for making calls in the HAServiceProtocol.
@ -62,7 +62,7 @@ public abstract class HAAdmin extends Configured implements Tool {
* operation, which is why it is not documented in the usage below.
*/
private static final String FORCEMANUAL = "forcemanual";
private static final Log LOG = LogFactory.getLog(HAAdmin.class);
private static final Logger LOG = LoggerFactory.getLogger(HAAdmin.class);
private int rpcTimeoutForChecks = -1;
@ -449,7 +449,7 @@ protected int runCmd(String[] argv) throws Exception {
if (cmdLine.hasOption(FORCEMANUAL)) {
if (!confirmForceManual()) {
LOG.fatal("Aborted");
LOG.error("Aborted");
return -1;
}
// Instruct the NNs to honor this request even if they're

View File

@ -23,8 +23,6 @@
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
import org.apache.hadoop.ha.HAServiceProtocol;
@ -35,6 +33,8 @@
import org.apache.hadoop.util.Daemon;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is a daemon which runs in a loop, periodically heartbeating
@ -47,7 +47,7 @@
*/
@InterfaceAudience.Private
public class HealthMonitor {
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
HealthMonitor.class);
private Daemon daemon;
@ -283,7 +283,7 @@ private MonitorDaemon() {
setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
LOG.fatal("Health monitor failed", e);
LOG.error("Health monitor failed", e);
enterState(HealthMonitor.State.HEALTH_MONITOR_FAILED);
}
});

View File

@ -22,8 +22,6 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -31,6 +29,8 @@
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class parses the configured list of fencing methods, and
@ -61,7 +61,7 @@ public class NodeFencer {
private static final Pattern HASH_COMMENT_RE =
Pattern.compile("#.*$");
private static final Log LOG = LogFactory.getLog(NodeFencer.class);
private static final Logger LOG = LoggerFactory.getLogger(NodeFencer.class);
/**
* Standard fencing methods included with Hadoop.

View File

@ -23,8 +23,6 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import com.google.common.annotations.VisibleForTesting;
@ -272,7 +270,7 @@ private int parseConfiggedPort(String portStr)
* Adapter from JSch's logger interface to our log4j
*/
private static class LogAdapter implements com.jcraft.jsch.Logger {
static final Log LOG = LogFactory.getLog(
static final Logger LOG = LoggerFactory.getLogger(
SshFenceByTcpPort.class.getName() + ".jsch");
@Override
@ -285,9 +283,8 @@ public boolean isEnabled(int level) {
case com.jcraft.jsch.Logger.WARN:
return LOG.isWarnEnabled();
case com.jcraft.jsch.Logger.ERROR:
return LOG.isErrorEnabled();
case com.jcraft.jsch.Logger.FATAL:
return LOG.isFatalEnabled();
return LOG.isErrorEnabled();
default:
return false;
}
@ -306,10 +303,8 @@ public void log(int level, String message) {
LOG.warn(message);
break;
case com.jcraft.jsch.Logger.ERROR:
LOG.error(message);
break;
case com.jcraft.jsch.Logger.FATAL:
LOG.fatal(message);
LOG.error(message);
break;
default:
break;
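
With FATAL gone, the JSch log-level adapter folds com.jcraft.jsch.Logger.FATAL into the ERROR branch via a case fall-through. A condensed sketch of the resulting mapping; the adapter below is illustrative, not the full SshFenceByTcpPort inner class:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class JschLogAdapterSketch implements com.jcraft.jsch.Logger {
  private static final Logger LOG =
      LoggerFactory.getLogger(JschLogAdapterSketch.class.getName() + ".jsch");

  @Override
  public boolean isEnabled(int level) {
    switch (level) {
      case com.jcraft.jsch.Logger.DEBUG:
        return LOG.isDebugEnabled();
      case com.jcraft.jsch.Logger.INFO:
        return LOG.isInfoEnabled();
      case com.jcraft.jsch.Logger.WARN:
        return LOG.isWarnEnabled();
      case com.jcraft.jsch.Logger.ERROR:
      case com.jcraft.jsch.Logger.FATAL: // no isFatalEnabled() in slf4j
        return LOG.isErrorEnabled();
      default:
        return false;
    }
  }

  @Override
  public void log(int level, String message) {
    switch (level) {
      case com.jcraft.jsch.Logger.INFO:
        LOG.info(message);
        break;
      case com.jcraft.jsch.Logger.WARN:
        LOG.warn(message);
        break;
      case com.jcraft.jsch.Logger.ERROR:
      case com.jcraft.jsch.Logger.FATAL: // FATAL collapses into ERROR
        LOG.error(message);
        break;
      default:
        LOG.debug(message);
        break;
    }
  }
}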

View File

@ -28,8 +28,6 @@
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@ -56,11 +54,13 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@InterfaceAudience.LimitedPrivate("HDFS")
public abstract class ZKFailoverController {
static final Log LOG = LogFactory.getLog(ZKFailoverController.class);
static final Logger LOG = LoggerFactory.getLogger(ZKFailoverController.class);
public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
@ -162,7 +162,7 @@ public HAServiceTarget getLocalTarget() {
public int run(final String[] args) throws Exception {
if (!localTarget.isAutoFailoverEnabled()) {
LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +
LOG.error("Automatic failover is not enabled for " + localTarget + "." +
" Please ensure that automatic failover is enabled in the " +
"configuration before running the ZK failover controller.");
return ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
@ -184,7 +184,7 @@ public Integer run() {
}
});
} catch (RuntimeException rte) {
LOG.fatal("The failover controller encounters runtime error: " + rte);
LOG.error("The failover controller encounters runtime error: " + rte);
throw (Exception)rte.getCause();
}
}
@ -195,7 +195,7 @@ private int doRun(String[] args)
try {
initZK();
} catch (KeeperException ke) {
LOG.fatal("Unable to start failover controller. Unable to connect "
LOG.error("Unable to start failover controller. Unable to connect "
+ "to ZooKeeper quorum at " + zkQuorum + ". Please check the "
+ "configured value for " + ZK_QUORUM_KEY + " and ensure that "
+ "ZooKeeper is running.");
@ -221,7 +221,7 @@ private int doRun(String[] args)
}
if (!elector.parentZNodeExists()) {
LOG.fatal("Unable to start failover controller. "
LOG.error("Unable to start failover controller. "
+ "Parent znode does not exist.\n"
+ "Run with -formatZK flag to initialize ZooKeeper.");
return ERR_CODE_NO_PARENT_ZNODE;
@ -230,7 +230,7 @@ private int doRun(String[] args)
try {
localTarget.checkFencingConfigured();
} catch (BadFencingConfigurationException e) {
LOG.fatal("Fencing is not configured for " + localTarget + ".\n" +
LOG.error("Fencing is not configured for " + localTarget + ".\n" +
"You must configure a fencing method before using automatic " +
"failover.", e);
return ERR_CODE_NO_FENCER;
@ -376,7 +376,7 @@ private synchronized void mainLoop() throws InterruptedException {
}
private synchronized void fatalError(String err) {
LOG.fatal("Fatal error occurred:" + err);
LOG.error("Fatal error occurred:" + err);
fatalError = err;
notifyAll();
}
@ -395,7 +395,7 @@ private synchronized void becomeActive() throws ServiceFailedException {
} catch (Throwable t) {
String msg = "Couldn't make " + localTarget + " active";
LOG.fatal(msg, t);
LOG.error(msg, t);
recordActiveAttempt(new ActiveAttemptRecord(false, msg + "\n" +
StringUtils.stringifyException(t)));
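
The migrated call sites keep their string concatenation; the idiomatic slf4j follow-up would be parameterized messages, which skip formatting when the level is disabled. A sketch, with the target object as a stand-in:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ParameterizedLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

  void warnNotEnabled(Object localTarget) {
    // Concatenation, as kept by this commit: the string is built even if ERROR is disabled.
    LOG.error("Automatic failover is not enabled for " + localTarget + ".");
    // Parameterized form: the message is assembled only when it is actually logged.
    LOG.error("Automatic failover is not enabled for {}.", localTarget);
  }
}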

View File

@ -19,8 +19,6 @@
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ha.HAServiceProtocol;
@ -42,6 +40,8 @@
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is used on the server side. Calls come across the wire for the
@ -61,7 +61,7 @@ public class HAServiceProtocolServerSideTranslatorPB implements
TransitionToActiveResponseProto.newBuilder().build();
private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP =
TransitionToStandbyResponseProto.newBuilder().build();
private static final Log LOG = LogFactory.getLog(
private static final Logger LOG = LoggerFactory.getLogger(
HAServiceProtocolServerSideTranslatorPB.class);
public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {

View File

@ -53,8 +53,6 @@
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -103,6 +101,8 @@
import org.eclipse.jetty.util.ssl.SslContextFactory;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.webapp.WebAppContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Create a Jetty embedded server to answer http requests. The primary goal is
@ -117,7 +117,7 @@
@InterfaceAudience.Private
@InterfaceStability.Evolving
public final class HttpServer2 implements FilterContainer {
public static final Log LOG = LogFactory.getLog(HttpServer2.class);
public static final Logger LOG = LoggerFactory.getLogger(HttpServer2.class);
public static final String HTTP_SCHEME = "http";
public static final String HTTPS_SCHEME = "https";

View File

@ -29,11 +29,11 @@
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.Filter;
@ -47,7 +47,8 @@
public class StaticUserWebFilter extends FilterInitializer {
static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
private static final Log LOG = LogFactory.getLog(StaticUserWebFilter.class);
private static final Logger LOG =
LoggerFactory.getLogger(StaticUserWebFilter.class);
static class User implements Principal {
private final String name;

View File

@ -22,8 +22,6 @@
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -36,6 +34,8 @@
import org.apache.hadoop.util.bloom.Filter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_KEY;
@ -52,7 +52,7 @@
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BloomMapFile {
private static final Log LOG = LogFactory.getLog(BloomMapFile.class);
private static final Logger LOG = LoggerFactory.getLogger(BloomMapFile.class);
public static final String BLOOM_FILE_NAME = "bloom";
public static final int HASH_COUNT = 5;

View File

@ -22,11 +22,10 @@
import java.security.AccessController;
import java.security.PrivilegedAction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sun.misc.Unsafe;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;
@ -36,7 +35,7 @@
* class to be able to compare arrays that start at non-zero offsets.
*/
abstract class FastByteComparisons {
static final Log LOG = LogFactory.getLog(FastByteComparisons.class);
static final Logger LOG = LoggerFactory.getLogger(FastByteComparisons.class);
/**
* Lexicographically compare two byte arrays.

View File

@ -32,13 +32,12 @@
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@ -49,7 +48,7 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class IOUtils {
public static final Log LOG = LogFactory.getLog(IOUtils.class);
public static final Logger LOG = LoggerFactory.getLogger(IOUtils.class);
/**
* Copies from one stream to another.

View File

@ -23,8 +23,6 @@
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -37,6 +35,8 @@
import org.apache.hadoop.util.Options;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;
@ -60,7 +60,7 @@
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MapFile {
private static final Log LOG = LogFactory.getLog(MapFile.class);
private static final Logger LOG = LoggerFactory.getLogger(MapFile.class);
/** The name of the index file. */
public static final String INDEX_FILE_NAME = "index";
@ -1002,7 +1002,7 @@ public static void main(String[] args) throws Exception {
while (reader.next(key, value)) // copy all entries
writer.append(key, value);
} finally {
IOUtils.cleanup(LOG, writer, reader);
IOUtils.cleanupWithLogger(LOG, writer, reader);
}
}
}

View File

@ -23,8 +23,6 @@
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.nativeio.NativeIO;
@ -33,6 +31,8 @@
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Manages a pool of threads which can issue readahead requests on file descriptors.
@ -40,7 +40,7 @@
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ReadaheadPool {
static final Log LOG = LogFactory.getLog(ReadaheadPool.class);
static final Logger LOG = LoggerFactory.getLogger(ReadaheadPool.class);
private static final int POOL_SIZE = 4;
private static final int MAX_POOL_SIZE = 16;
private static final int CAPACITY = 1024;

View File

@ -25,7 +25,6 @@
import java.security.MessageDigest;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.*;
import org.apache.hadoop.util.Options;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.Options.CreateOpts;
@ -51,6 +50,8 @@
import org.apache.hadoop.util.MergeSort;
import org.apache.hadoop.util.PriorityQueue;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@ -203,7 +204,7 @@
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceFile {
private static final Log LOG = LogFactory.getLog(SequenceFile.class);
private static final Logger LOG = LoggerFactory.getLogger(SequenceFile.class);
private SequenceFile() {} // no public ctor
@ -1923,7 +1924,7 @@ private void initialize(Path filename, FSDataInputStream in,
succeeded = true;
} finally {
if (!succeeded) {
IOUtils.cleanup(LOG, this.in);
IOUtils.cleanupWithLogger(LOG, this.in);
}
}
}

View File

@ -25,9 +25,10 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.commons.logging.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** A WritableComparable for strings that uses the UTF8 encoding.
*
@ -42,7 +43,7 @@
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Stable
public class UTF8 implements WritableComparable<UTF8> {
private static final Log LOG= LogFactory.getLog(UTF8.class);
private static final Logger LOG= LoggerFactory.getLogger(UTF8.class);
private static final DataInputBuffer IBUF = new DataInputBuffer();
private static final ThreadLocal<DataOutputBuffer> OBUF_FACTORY =

View File

@ -23,8 +23,6 @@
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -33,6 +31,8 @@
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A global compressor/decompressor pool used to save and reuse
@ -41,7 +41,7 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CodecPool {
private static final Log LOG = LogFactory.getLog(CodecPool.class);
private static final Logger LOG = LoggerFactory.getLogger(CodecPool.class);
/**
* A global compressor pool used to save the expensive

View File

@ -19,8 +19,6 @@
import java.util.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -28,6 +26,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A factory that will find the correct codec for a given filename.
@ -36,8 +36,8 @@
@InterfaceStability.Evolving
public class CompressionCodecFactory {
public static final Log LOG =
LogFactory.getLog(CompressionCodecFactory.class.getName());
public static final Logger LOG =
LoggerFactory.getLogger(CompressionCodecFactory.class.getName());
private static final ServiceLoader<CompressionCodec> CODEC_PROVIDERS =
ServiceLoader.load(CompressionCodec.class);

View File

@ -22,14 +22,14 @@
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@ -37,7 +37,7 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
private static final Log LOG = LogFactory.getLog(DefaultCodec.class);
private static final Logger LOG = LoggerFactory.getLogger(DefaultCodec.class);
Configuration conf;

View File

@ -24,9 +24,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link Compressor} based on the popular
@ -42,7 +41,8 @@ public class Bzip2Compressor implements Compressor {
static final int DEFAULT_BLOCK_SIZE = 9;
static final int DEFAULT_WORK_FACTOR = 30;
private static final Log LOG = LogFactory.getLog(Bzip2Compressor.class);
private static final Logger LOG =
LoggerFactory.getLogger(Bzip2Compressor.class);
private long stream;
private int blockSize;

View File

@ -23,9 +23,8 @@
import java.nio.ByteBuffer;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link Decompressor} based on the popular
@ -36,7 +35,8 @@
public class Bzip2Decompressor implements Decompressor {
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;
private static final Log LOG = LogFactory.getLog(Bzip2Decompressor.class);
private static final Logger LOG =
LoggerFactory.getLogger(Bzip2Decompressor.class);
private long stream;
private boolean conserveMemory;

View File

@ -18,12 +18,12 @@
package org.apache.hadoop.io.compress.bzip2;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A collection of factories to create the right
@ -31,7 +31,7 @@
*
*/
public class Bzip2Factory {
private static final Log LOG = LogFactory.getLog(Bzip2Factory.class);
private static final Logger LOG = LoggerFactory.getLogger(Bzip2Factory.class);
private static String bzip2LibraryName = "";
private static boolean nativeBzip2Loaded;

View File

@ -22,19 +22,19 @@
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link Compressor} based on the lz4 compression algorithm.
* http://code.google.com/p/lz4/
*/
public class Lz4Compressor implements Compressor {
private static final Log LOG =
LogFactory.getLog(Lz4Compressor.class.getName());
private static final Logger LOG =
LoggerFactory.getLogger(Lz4Compressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;

View File

@ -22,18 +22,18 @@
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link Decompressor} based on the lz4 compression algorithm.
* http://code.google.com/p/lz4/
*/
public class Lz4Decompressor implements Decompressor {
private static final Log LOG =
LogFactory.getLog(Lz4Compressor.class.getName());
private static final Logger LOG =
LoggerFactory.getLogger(Lz4Compressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;

View File

@ -22,19 +22,19 @@
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link Compressor} based on the snappy compression algorithm.
* http://code.google.com/p/snappy/
*/
public class SnappyCompressor implements Compressor {
private static final Log LOG =
LogFactory.getLog(SnappyCompressor.class.getName());
private static final Logger LOG =
LoggerFactory.getLogger(SnappyCompressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;

View File

@ -22,19 +22,19 @@
import java.nio.Buffer;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DirectDecompressor;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link Decompressor} based on the snappy compression algorithm.
* http://code.google.com/p/snappy/
*/
public class SnappyDecompressor implements Decompressor {
private static final Log LOG =
LogFactory.getLog(SnappyDecompressor.class.getName());
private static final Logger LOG =
LoggerFactory.getLogger(SnappyDecompressor.class.getName());
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
private int directBufferSize;

View File

@ -23,9 +23,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A wrapper around java.util.zip.Deflater to make it conform
@ -34,7 +33,8 @@
*/
public class BuiltInZlibDeflater extends Deflater implements Compressor {
private static final Log LOG = LogFactory.getLog(BuiltInZlibDeflater.class);
private static final Logger LOG =
LoggerFactory.getLogger(BuiltInZlibDeflater.class);
public BuiltInZlibDeflater(int level, boolean nowrap) {
super(level, nowrap);

View File

@ -25,9 +25,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link Compressor} based on the popular
@ -37,7 +36,8 @@
*/
public class ZlibCompressor implements Compressor {
private static final Log LOG = LogFactory.getLog(ZlibCompressor.class);
private static final Logger LOG =
LoggerFactory.getLogger(ZlibCompressor.class);
private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64*1024;

View File

@ -18,8 +18,6 @@
package org.apache.hadoop.io.compress.zlib;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
@ -29,6 +27,8 @@
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A collection of factories to create the right
@ -36,8 +36,8 @@
*
*/
public class ZlibFactory {
private static final Log LOG =
LogFactory.getLog(ZlibFactory.class);
private static final Logger LOG =
LoggerFactory.getLogger(ZlibFactory.class);
private static boolean nativeZlibLoaded = false;

View File

@ -18,8 +18,6 @@
package org.apache.hadoop.io.erasurecode;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.codec.ErasureCodec;
@ -31,6 +29,8 @@
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
@ -48,7 +48,7 @@
@InterfaceAudience.Private
public final class CodecUtil {
private static final Log LOG = LogFactory.getLog(CodecUtil.class);
private static final Logger LOG = LoggerFactory.getLogger(CodecUtil.class);
public static final String IO_ERASURECODE_CODEC = "io.erasurecode.codec.";

View File

@ -18,17 +18,17 @@
package org.apache.hadoop.io.erasurecode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Erasure code native libraries (for now, Intel ISA-L) related utilities.
*/
public final class ErasureCodeNative {
private static final Log LOG =
LogFactory.getLog(ErasureCodeNative.class.getName());
private static final Logger LOG =
LoggerFactory.getLogger(ErasureCodeNative.class.getName());
/**
 * The reason why the ISA-L library is not available, or null if it is available.

View File

@ -30,8 +30,6 @@
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -43,6 +41,8 @@
import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarLong;
import org.apache.hadoop.io.file.tfile.Compression.Algorithm;
import org.apache.hadoop.io.file.tfile.Utils.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Block Compressed file, the underlying physical storage layer for TFile.
@ -54,7 +54,7 @@ final class BCFile {
  // the current version of the BCFile impl; increment it (major or minor) once
  // enough changes have been made
static final Version API_VERSION = new Version((short) 1, (short) 0);
static final Log LOG = LogFactory.getLog(BCFile.class);
static final Logger LOG = LoggerFactory.getLogger(BCFile.class);
/**
* Prevent the instantiation of BCFile objects.

View File

@ -24,8 +24,6 @@
import java.io.OutputStream;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
@ -35,6 +33,8 @@
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY;
@ -44,7 +44,7 @@
* Compression related stuff.
*/
final class Compression {
static final Log LOG = LogFactory.getLog(Compression.class);
static final Logger LOG = LoggerFactory.getLogger(Compression.class);
/**
* Prevent the instantiation of class.

View File

@ -29,8 +29,6 @@
import java.util.ArrayList;
import java.util.Comparator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -51,6 +49,8 @@
import org.apache.hadoop.io.file.tfile.CompareUtils.MemcmpRawComparator;
import org.apache.hadoop.io.file.tfile.Utils.Version;
import org.apache.hadoop.io.serializer.JavaSerializationComparator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A TFile is a container of key-value pairs. Both keys and values are type-less
@ -131,7 +131,7 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TFile {
static final Log LOG = LogFactory.getLog(TFile.class);
static final Logger LOG = LoggerFactory.getLogger(TFile.class);
private static final String CHUNK_BUF_SIZE_ATTR = "tfile.io.chunk.size";
private static final String FS_INPUT_BUF_SIZE_ATTR =
@ -335,7 +335,7 @@ public void close() throws IOException {
writerBCF.close();
}
} finally {
IOUtils.cleanup(LOG, blkAppender, writerBCF);
IOUtils.cleanupWithLogger(LOG, blkAppender, writerBCF);
blkAppender = null;
writerBCF = null;
state = State.CLOSED;

View File

@ -25,8 +25,6 @@
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
@ -36,12 +34,14 @@
import org.apache.hadoop.io.file.tfile.BCFile.MetaIndexEntry;
import org.apache.hadoop.io.file.tfile.TFile.TFileIndexEntry;
import org.apache.hadoop.io.file.tfile.Utils.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Dumping the information of a TFile.
*/
class TFileDumper {
static final Log LOG = LogFactory.getLog(TFileDumper.class);
static final Logger LOG = LoggerFactory.getLogger(TFileDumper.class);
private TFileDumper() {
// namespace object not constructable.
@ -290,7 +290,7 @@ static public void dumpInfo(String file, PrintStream out, Configuration conf)
}
}
} finally {
IOUtils.cleanup(LOG, reader, fsdis);
IOUtils.cleanupWithLogger(LOG, reader, fsdis);
}
}
}

View File

@ -40,9 +40,9 @@
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.PerformanceAdvisory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sun.misc.Unsafe;
import com.google.common.annotations.VisibleForTesting;
@ -98,7 +98,7 @@ public static class POSIX {
write. */
public static int SYNC_FILE_RANGE_WAIT_AFTER = 4;
private static final Log LOG = LogFactory.getLog(NativeIO.class);
private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
// Set to true via JNI if possible
public static boolean fadvisePossible = false;
@ -634,7 +634,7 @@ public static boolean access(String path, AccessRight desiredAccess)
}
}
private static final Log LOG = LogFactory.getLog(NativeIO.class);
private static final Logger LOG = LoggerFactory.getLogger(NativeIO.class);
private static boolean nativeLoaded = false;
@ -940,10 +940,10 @@ public static void copyFileUnbuffered(File src, File dst) throws IOException {
position += transferred;
}
} finally {
IOUtils.cleanup(LOG, output);
IOUtils.cleanup(LOG, fos);
IOUtils.cleanup(LOG, input);
IOUtils.cleanup(LOG, fis);
IOUtils.cleanupWithLogger(LOG, output);
IOUtils.cleanupWithLogger(LOG, fos);
IOUtils.cleanupWithLogger(LOG, input);
IOUtils.cleanupWithLogger(LOG, fis);
}
}
}

View File
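Alongside the logger fields, the IOUtils.cleanup calls above are redirected to cleanupWithLogger, whose first parameter is an org.slf4j.Logger instead of a commons-logging Log. A usage sketch modeled on the calls in this diff (the QuietClose class is hypothetical):

import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class QuietClose {
  private static final Logger LOG = LoggerFactory.getLogger(QuietClose.class);

  void closeAll(Closeable output, Closeable input) {
    // Closes each argument in turn, logging (rather than throwing) any failure.
    IOUtils.cleanupWithLogger(LOG, output, input);
  }
}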

@ -22,10 +22,10 @@
import java.io.FileDescriptor;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A factory for creating shared file descriptors inside a given directory.
@ -45,7 +45,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SharedFileDescriptorFactory {
public static final Log LOG = LogFactory.getLog(SharedFileDescriptorFactory.class);
public static final Logger LOG =
LoggerFactory.getLogger(SharedFileDescriptorFactory.class);
private final String prefix;
private final String path;

View File

@ -32,8 +32,6 @@
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
@ -41,6 +39,8 @@
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
@ -49,7 +49,7 @@
*/
public class RetryPolicies {
public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
public static final Logger LOG = LoggerFactory.getLogger(RetryPolicies.class);
/**
* <p>

View File

@ -19,17 +19,17 @@
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
import org.apache.hadoop.ipc.RemoteException;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.ipc.RetriableException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class RetryUtils {
public static final Log LOG = LogFactory.getLog(RetryUtils.class);
public static final Logger LOG = LoggerFactory.getLogger(RetryUtils.class);
/**
* Return the default retry policy set in conf.

View File

@ -21,8 +21,6 @@
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -31,6 +29,8 @@
import org.apache.hadoop.io.serializer.avro.AvroReflectSerialization;
import org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
@ -41,8 +41,8 @@
@InterfaceStability.Evolving
public class SerializationFactory extends Configured {
static final Log LOG =
LogFactory.getLog(SerializationFactory.class.getName());
static final Logger LOG =
LoggerFactory.getLogger(SerializationFactory.class.getName());
private List<Serialization<?>> serializations = new ArrayList<Serialization<?>>();

View File

@ -28,20 +28,21 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Abstracts queue operations for different blocking queues.
*/
public class CallQueueManager<E extends Schedulable>
extends AbstractQueue<E> implements BlockingQueue<E> {
public static final Log LOG = LogFactory.getLog(CallQueueManager.class);
public static final Logger LOG =
LoggerFactory.getLogger(CallQueueManager.class);
// Number of checkpoints for empty queue.
private static final int CHECKPOINT_NUM = 20;
// Interval to check empty queue.

View File

@ -21,8 +21,6 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability;
@ -57,6 +55,8 @@
import org.apache.hadoop.util.concurrent.AsyncGet;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.SocketFactory;
import javax.security.sasl.Sasl;
@ -84,7 +84,7 @@
@InterfaceStability.Evolving
public class Client implements AutoCloseable {
public static final Log LOG = LogFactory.getLog(Client.class);
public static final Logger LOG = LoggerFactory.getLogger(Client.class);
/** A counter for generating call IDs. */
private static final AtomicInteger callIdCounter = new AtomicInteger();

View File

@ -33,11 +33,11 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
import org.apache.hadoop.metrics2.util.MBeans;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A queue with multiple levels for each priority.
@ -50,7 +50,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
public static final String IPC_CALLQUEUE_PRIORITY_LEVELS_KEY =
"faircallqueue.priority-levels";
public static final Log LOG = LogFactory.getLog(FairCallQueue.class);
public static final Logger LOG = LoggerFactory.getLogger(FairCallQueue.class);
/* The queues */
private final ArrayList<BlockingQueue<E>> queues;

View File

@ -21,8 +21,6 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.*;
import com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@ -39,6 +37,8 @@
import org.apache.hadoop.util.concurrent.AsyncGet;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.SocketFactory;
import java.io.IOException;
@ -55,7 +55,8 @@
*/
@InterfaceStability.Evolving
public class ProtobufRpcEngine implements RpcEngine {
public static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
public static final Logger LOG =
LoggerFactory.getLogger(ProtobufRpcEngine.class);
private static final ThreadLocal<AsyncGet<Message, Exception>>
ASYNC_RETURN_MESSAGE = new ThreadLocal<>();

View File

@ -38,8 +38,6 @@
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@ -60,6 +58,8 @@
import org.apache.hadoop.util.Time;
import com.google.protobuf.BlockingService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** A simple RPC mechanism.
*
@ -110,7 +110,7 @@ public Writable call(Server server, String protocol,
Writable rpcRequest, long receiveTime) throws Exception ;
}
static final Log LOG = LogFactory.getLog(RPC.class);
static final Logger LOG = LoggerFactory.getLogger(RPC.class);
/**
* Get all superInterfaces that extend VersionedProtocol

View File

@ -24,9 +24,9 @@
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Used to register custom methods to refresh at runtime.
@ -34,7 +34,8 @@
*/
@InterfaceStability.Unstable
public class RefreshRegistry {
public static final Log LOG = LogFactory.getLog(RefreshRegistry.class);
public static final Logger LOG =
LoggerFactory.getLogger(RefreshRegistry.class);
// Used to hold singleton instance
private static class RegistryHolder {

View File

@ -22,8 +22,6 @@
import java.util.UUID;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
import org.apache.hadoop.util.LightWeightCache;
@ -32,6 +30,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Maintains a cache of non-idempotent requests that have been successfully
@ -44,7 +44,7 @@
*/
@InterfaceAudience.Private
public class RetryCache {
public static final Log LOG = LogFactory.getLog(RetryCache.class);
public static final Logger LOG = LoggerFactory.getLogger(RetryCache.class);
private final RetryCacheMetrics retryCacheMetrics;
private static final int MAX_CAPACITY = 16;

View File

@ -70,8 +70,6 @@
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
@ -125,6 +123,8 @@
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedOutputStream;
import com.google.protobuf.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** An abstract IPC service. IPC calls take a single {@link Writable} as a
* parameter, and return a {@link Writable} as their value. A service runs on
@ -293,9 +293,9 @@ public static RpcInvoker getRpcInvoker(RPC.RpcKind rpcKind) {
}
public static final Log LOG = LogFactory.getLog(Server.class);
public static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+Server.class.getName());
public static final Logger LOG = LoggerFactory.getLogger(Server.class);
public static final Logger AUDITLOG =
LoggerFactory.getLogger("SecurityLogger."+Server.class.getName());
private static final String AUTH_FAILED_FOR = "Auth failed for ";
private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
@ -1113,7 +1113,7 @@ private synchronized void doRunLoop() {
} catch (IOException ex) {
LOG.error("Error in Reader", ex);
} catch (Throwable re) {
LOG.fatal("Bug in read selector!", re);
LOG.error("Bug in read selector!", re);
ExitUtil.terminate(1, "Bug in read selector!");
}
}
@ -2692,7 +2692,7 @@ public void run() {
}
} finally {
CurCall.set(null);
IOUtils.cleanup(LOG, traceScope);
IOUtils.cleanupWithLogger(LOG, traceScope);
}
}
LOG.debug(Thread.currentThread().getName() + ": exiting");
@ -2701,7 +2701,7 @@ public void run() {
}
@VisibleForTesting
void logException(Log logger, Throwable e, Call call) {
void logException(Logger logger, Throwable e, Call call) {
if (exceptionsHandler.isSuppressedLog(e.getClass())) {
return; // Log nothing.
}

View File
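slf4j's Logger interface stops at error(); there is no fatal level, which is why the LOG.fatal calls in this commit become LOG.error. A sketch of the replacement (the FatalToError class is hypothetical; the message is taken from the hunk above):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FatalToError {
  private static final Logger LOG = LoggerFactory.getLogger(FatalToError.class);

  void reportFatal(Throwable t) {
    // before: LOG.fatal("Bug in read selector!", t);
    // after:  error(String, Throwable) is the highest severity slf4j offers
    LOG.error("Bug in read selector!", t);
  }
}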

@ -20,9 +20,9 @@
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Determines which queue to start reading from, occasionally drawing from
@ -43,8 +43,8 @@ public class WeightedRoundRobinMultiplexer implements RpcMultiplexer {
public static final String IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY =
"faircallqueue.multiplexer.weights";
public static final Log LOG =
LogFactory.getLog(WeightedRoundRobinMultiplexer.class);
public static final Logger LOG =
LoggerFactory.getLogger(WeightedRoundRobinMultiplexer.class);
private final int numQueues; // The number of queues under our provisioning

View File

@ -28,8 +28,6 @@
import javax.net.SocketFactory;
import org.apache.commons.logging.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
@ -43,12 +41,14 @@
import org.apache.hadoop.conf.*;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** An RpcEngine implementation for Writable data. */
@InterfaceStability.Evolving
@Deprecated
public class WritableRpcEngine implements RpcEngine {
private static final Log LOG = LogFactory.getLog(RPC.class);
private static final Logger LOG = LoggerFactory.getLogger(RPC.class);
//writableRpcVersion should be updated if there is a change
//in format of the rpc messages.

View File

@ -17,8 +17,6 @@
*/
package org.apache.hadoop.ipc.metrics;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RetryCache;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.annotation.Metric;
@ -26,6 +24,8 @@
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is for maintaining the various RetryCache-related statistics
@ -35,7 +35,7 @@
@Metrics(about="Aggregate RetryCache metrics", context="rpc")
public class RetryCacheMetrics {
static final Log LOG = LogFactory.getLog(RetryCacheMetrics.class);
static final Logger LOG = LoggerFactory.getLogger(RetryCacheMetrics.class);
final MetricsRegistry registry;
final String name;

View File

@ -17,14 +17,14 @@
*/
package org.apache.hadoop.ipc.metrics;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is for maintaining RPC method related statistics
@ -37,7 +37,7 @@ public class RpcDetailedMetrics {
@Metric MutableRatesWithAggregation rates;
@Metric MutableRatesWithAggregation deferredRpcRates;
static final Log LOG = LogFactory.getLog(RpcDetailedMetrics.class);
static final Logger LOG = LoggerFactory.getLogger(RpcDetailedMetrics.class);
final MetricsRegistry registry;
final String name;
@ -45,7 +45,7 @@ public class RpcDetailedMetrics {
name = "RpcDetailedActivityForPort"+ port;
registry = new MetricsRegistry("rpcdetailed")
.tag("port", "RPC port", String.valueOf(port));
LOG.debug(registry.info());
LOG.debug(registry.info().toString());
}
public String name() { return name; }

View File

@ -17,8 +17,6 @@
*/
package org.apache.hadoop.ipc.metrics;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.classification.InterfaceAudience;
@ -31,6 +29,8 @@
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is for maintaining the various RPC statistics
@ -40,7 +40,7 @@
@Metrics(about="Aggregate RPC metrics", context="rpc")
public class RpcMetrics {
static final Log LOG = LogFactory.getLog(RpcMetrics.class);
static final Logger LOG = LoggerFactory.getLogger(RpcMetrics.class);
final Server server;
final MetricsRegistry registry;
final String name;

View File

@ -19,9 +19,9 @@
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.http.HttpServer2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceNotFoundException;
@ -116,7 +116,8 @@
*
*/
public class JMXJsonServlet extends HttpServlet {
private static final Log LOG = LogFactory.getLog(JMXJsonServlet.class);
private static final Logger LOG =
LoggerFactory.getLogger(JMXJsonServlet.class);
static final String ACCESS_CONTROL_ALLOW_METHODS =
"Access-Control-Allow-Methods";
static final String ACCESS_CONTROL_ALLOW_ORIGIN =

View File

@ -106,7 +106,7 @@ MBeanInfo get() {
}
++curRecNo;
}
MetricsSystemImpl.LOG.debug(attrs);
MetricsSystemImpl.LOG.debug(attrs.toString());
MBeanAttributeInfo[] attrsArray = new MBeanAttributeInfo[attrs.size()];
return new MBeanInfo(name, description, attrs.toArray(attrsArray),
null, null, null); // no ops/ctors/notifications

View File

@ -41,18 +41,18 @@
import org.apache.commons.configuration2.builder.fluent.Parameters;
import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler;
import org.apache.commons.configuration2.ex.ConfigurationException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsPlugin;
import org.apache.hadoop.metrics2.filter.GlobFilter;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Metrics configuration for MetricsSystemImpl
*/
class MetricsConfig extends SubsetConfiguration {
static final Log LOG = LogFactory.getLog(MetricsConfig.class);
static final Logger LOG = LoggerFactory.getLogger(MetricsConfig.class);
static final String DEFAULT_FILE_NAME = "hadoop-metrics2.properties";
static final String PREFIX_DEFAULT = "*.";
@ -121,7 +121,7 @@ static MetricsConfig loadFirst(String prefix, String... fileNames) {
LOG.info("loaded properties from "+ fname);
LOG.debug(toString(cf));
MetricsConfig mc = new MetricsConfig(cf, prefix);
LOG.debug(mc);
LOG.debug(mc.toString());
return mc;
} catch (ConfigurationException e) {
// Commons Configuration defines the message text when file not found

View File
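commons-logging's debug(Object) accepted arbitrary objects, but slf4j's message argument is a String, so calls such as LOG.debug(mc) above gain an explicit toString(). A sketch showing both the direct equivalent and slf4j's parameterized form (the DebugMessage class is hypothetical):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DebugMessage {
  private static final Logger LOG = LoggerFactory.getLogger(DebugMessage.class);

  void dump(Object config) {
    // direct equivalent of the old debug(Object) call
    LOG.debug(config.toString());
    // parameterized form defers the toString() until debug logging is enabled
    LOG.debug("loaded config: {}", config);
  }
}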

@ -24,8 +24,6 @@
import static com.google.common.base.Preconditions.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
@ -36,13 +34,16 @@
import org.apache.hadoop.metrics2.MetricsFilter;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An adapter class for metrics sink and associated filters
*/
class MetricsSinkAdapter implements SinkQueue.Consumer<MetricsBuffer> {
private final Log LOG = LogFactory.getLog(MetricsSinkAdapter.class);
private static final Logger LOG =
LoggerFactory.getLogger(MetricsSinkAdapter.class);
private final String name, description, context;
private final MetricsSink sink;
private final MetricsFilter sourceFilter, recordFilter, metricFilter;
@ -207,7 +208,7 @@ void stop() {
stopping = true;
sinkThread.interrupt();
if (sink instanceof Closeable) {
IOUtils.cleanup(LOG, (Closeable)sink);
IOUtils.cleanupWithLogger(LOG, (Closeable)sink);
}
try {
sinkThread.join();

View File

@ -33,8 +33,6 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsFilter;
@ -43,6 +41,8 @@
import static org.apache.hadoop.metrics2.impl.MetricsConfig.*;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.metrics2.util.Contracts.*;
@ -51,7 +51,8 @@
*/
class MetricsSourceAdapter implements DynamicMBean {
private static final Log LOG = LogFactory.getLog(MetricsSourceAdapter.class);
private static final Logger LOG =
LoggerFactory.getLogger(MetricsSourceAdapter.class);
private final String prefix, name;
private final MetricsSource source;

View File

@ -36,8 +36,6 @@
import static com.google.common.base.Preconditions.*;
import org.apache.commons.configuration2.PropertiesConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.math3.util.ArithmeticUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
@ -62,6 +60,8 @@
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A base class for metrics system singletons
@ -70,7 +70,7 @@
@Metrics(context="metricssystem")
public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
static final Log LOG = LogFactory.getLog(MetricsSystemImpl.class);
static final Logger LOG = LoggerFactory.getLogger(MetricsSystemImpl.class);
static final String MS_NAME = "MetricsSystem";
static final String MS_STATS_NAME = MS_NAME +",sub=Stats";
static final String MS_STATS_DESC = "Metrics system metrics";

View File

@ -22,20 +22,21 @@
import static com.google.common.base.Preconditions.*;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.metrics2.util.Contracts.*;
/**
* Metric generated from a method, mostly used by annotation
*/
class MethodMetric extends MutableMetric {
private static final Log LOG = LogFactory.getLog(MethodMetric.class);
private static final Logger LOG = LoggerFactory.getLogger(MethodMetric.class);
private final Object obj;
private final Method method;

View File

@ -24,8 +24,6 @@
import static com.google.common.base.Preconditions.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsException;
@ -34,6 +32,8 @@
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Helper class to build {@link MetricsSource} object from annotations.
@ -49,7 +49,8 @@
*/
@InterfaceAudience.Private
public class MetricsSourceBuilder {
private static final Log LOG = LogFactory.getLog(MetricsSourceBuilder.class);
private static final Logger LOG =
LoggerFactory.getLogger(MetricsSourceBuilder.class);
private final Object source;
private final MutableMetricsFactory factory;

View File

@ -22,19 +22,20 @@
import java.lang.reflect.Method;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class MutableMetricsFactory {
private static final Log LOG = LogFactory.getLog(MutableMetricsFactory.class);
private static final Logger LOG =
LoggerFactory.getLogger(MutableMetricsFactory.class);
MutableMetric newForField(Field field, Metric annotation,
MetricsRegistry registry) {

View File

@ -24,12 +24,11 @@
import static com.google.common.base.Preconditions.*;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Helper class to manage a group of mutable rate metrics
@ -43,7 +42,7 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MutableRates extends MutableMetric {
static final Log LOG = LogFactory.getLog(MutableRates.class);
static final Logger LOG = LoggerFactory.getLogger(MutableRates.class);
private final MetricsRegistry registry;
private final Set<Class<?>> protocolCache = Sets.newHashSet();

View File

@ -27,12 +27,12 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.util.SampleStat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
@ -48,7 +48,8 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MutableRatesWithAggregation extends MutableMetric {
static final Log LOG = LogFactory.getLog(MutableRatesWithAggregation.class);
static final Logger LOG =
LoggerFactory.getLogger(MutableRatesWithAggregation.class);
private final Map<String, MutableRate> globalMetrics =
new ConcurrentHashMap<>();
private final Set<Class<?>> protocolCache = Sets.newHashSet();

View File

@ -19,8 +19,6 @@
package org.apache.hadoop.metrics2.sink;
import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.AbstractMetric;
@ -28,6 +26,8 @@
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
@ -42,7 +42,8 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class GraphiteSink implements MetricsSink, Closeable {
private static final Log LOG = LogFactory.getLog(GraphiteSink.class);
private static final Logger LOG =
LoggerFactory.getLogger(GraphiteSink.class);
private static final String SERVER_HOST_KEY = "server_host";
private static final String SERVER_PORT_KEY = "server_port";
private static final String METRICS_PREFIX = "metrics_prefix";

View File

@ -26,11 +26,11 @@
import java.util.Map;
import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.util.Servers;
import org.apache.hadoop.net.DNS;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This is the base class for Ganglia sink classes using metrics2. A lot of the code
@ -41,7 +41,7 @@
*/
public abstract class AbstractGangliaSink implements MetricsSink {
public final Log LOG = LogFactory.getLog(this.getClass());
public final Logger LOG = LoggerFactory.getLogger(this.getClass());
/*
* Output of "gmetric --help" showing allowable values
@ -127,7 +127,7 @@ public void init(SubsetConfiguration conf) {
conf.getString("dfs.datanode.dns.interface", "default"),
conf.getString("dfs.datanode.dns.nameserver", "default"));
} catch (UnknownHostException uhe) {
LOG.error(uhe);
LOG.error(uhe.toString());
hostName = "UNKNOWN.example.com";
}
}
@ -155,7 +155,7 @@ public void init(SubsetConfiguration conf) {
datagramSocket = new DatagramSocket();
}
} catch (IOException e) {
LOG.error(e);
LOG.error(e.toString());
}
// see if sparseMetrics is supported. Default is false

View File

@ -28,8 +28,6 @@
import org.apache.commons.configuration2.SubsetConfiguration;
import org.apache.commons.configuration2.convert.DefaultListDelimiterHandler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsException;
@ -38,6 +36,8 @@
import org.apache.hadoop.metrics2.impl.MsInfo;
import org.apache.hadoop.metrics2.util.MetricsCache;
import org.apache.hadoop.metrics2.util.MetricsCache.Record;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This code supports Ganglia 3.0
@ -45,7 +45,7 @@
*/
public class GangliaSink30 extends AbstractGangliaSink {
public final Log LOG = LogFactory.getLog(this.getClass());
public final Logger LOG = LoggerFactory.getLogger(this.getClass());
private static final String TAGS_FOR_PREFIX_PROPERTY_PREFIX = "tagsForPrefix.";

View File

@ -18,10 +18,11 @@
package org.apache.hadoop.metrics2.sink.ganglia;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* This code supports Ganglia 3.1
@ -29,7 +30,7 @@
*/
public class GangliaSink31 extends GangliaSink30 {
public final Log LOG = LogFactory.getLog(this.getClass());
public final Logger LOG = LoggerFactory.getLogger(this.getClass());
/**
* The method sends metrics to Ganglia servers. The method has been taken from

View File

@ -25,11 +25,11 @@
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This util class provides a method to register an MBean using
@ -39,7 +39,7 @@
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MBeans {
private static final Log LOG = LogFactory.getLog(MBeans.class);
private static final Logger LOG = LoggerFactory.getLogger(MBeans.class);
private static final String DOMAIN_PREFIX = "Hadoop:";
private static final String SERVICE_PREFIX = "service=";
private static final String NAME_PREFIX = "name=";

View File

@ -19,13 +19,13 @@
package org.apache.hadoop.metrics2.util;
import com.google.common.collect.Maps;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.LinkedHashMap;
@ -39,7 +39,7 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MetricsCache {
static final Log LOG = LogFactory.getLog(MetricsCache.class);
static final Logger LOG = LoggerFactory.getLogger(MetricsCache.class);
static final int MAX_RECS_PER_NAME_DEFAULT = 1000;
private final Map<String, RecordCache> map = Maps.newHashMap();

View File

@ -20,10 +20,10 @@
import com.google.common.net.InetAddresses;
import com.sun.istack.Nullable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.net.NetworkInterface;
@ -52,7 +52,7 @@
@InterfaceStability.Unstable
public class DNS {
private static final Log LOG = LogFactory.getLog(DNS.class);
private static final Logger LOG = LoggerFactory.getLogger(DNS.class);
/**
* The cached hostname -initially null.

View File

@ -44,8 +44,6 @@
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.util.SubnetUtils;
import org.apache.commons.net.util.SubnetUtils.SubnetInfo;
import org.apache.hadoop.classification.InterfaceAudience;
@ -58,11 +56,13 @@
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class NetUtils {
private static final Log LOG = LogFactory.getLog(NetUtils.class);
private static final Logger LOG = LoggerFactory.getLogger(NetUtils.class);
private static Map<String, String> hostToResolved =
new HashMap<String, String>();

View File

@ -21,13 +21,13 @@
import java.util.*;
import java.io.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class implements the {@link DNSToSwitchMapping} interface using a
@ -145,8 +145,8 @@ protected static class RawScriptBasedMapping
extends AbstractDNSToSwitchMapping {
private String scriptName;
private int maxArgs; //max hostnames per call of the script
private static final Log LOG =
LogFactory.getLog(ScriptBasedMapping.class);
private static final Logger LOG =
LoggerFactory.getLogger(ScriptBasedMapping.class);
/**
* Set the configuration and extract the configuration parameters of interest

Some files were not shown because too many files have changed in this diff.