diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index fb753222dc..61568d56ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1872,7 +1872,7 @@ protected Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn) return PBHelperClient.convert( reply.getReadOpChecksumInfo().getChecksum().getType()); } finally { - IOUtilsClient.cleanup(null, pair.in, pair.out); + IOUtilsClient.cleanupWithLogger(LOG, pair.in, pair.out); } } @@ -2933,7 +2933,7 @@ public Peer newConnectedPeer(InetSocketAddress addr, return peer; } finally { if (!success) { - IOUtilsClient.cleanup(LOG, peer); + IOUtilsClient.cleanupWithLogger(LOG, peer); IOUtils.closeSocket(sock); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java index 3fac7c8c10..8f785c7915 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java @@ -649,7 +649,7 @@ public static Peer peerFromSocketAndKey( return peer; } finally { if (!success) { - IOUtilsClient.cleanup(null, peer); + IOUtilsClient.cleanupWithLogger(LOG, peer); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java index 1ddb42ead4..0580ed536d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java @@ -188,7 +188,7 @@ public void put(DatanodeID dnId, Peer peer) { if (peer.isClosed()) return; if (capacity <= 0) { // Cache disabled. 
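
The DFSClient and DFSUtilClient hunks above do two things at once: they move callers onto the renamed helper, and they stop passing null as the logger, so an exception thrown while closing a peer or stream pair is logged against the owning class instead of being silently dropped. A minimal sketch of the pattern the helper implements, following the loop this patch shows later in IOUtilsClient.java (the class name and message text here are illustrative assumptions, not Hadoop source):

    import java.io.Closeable;
    import org.slf4j.Logger;

    final class CleanupSketch {
      // Close each argument, turning close() failures into debug logs.
      static void cleanupWithLogger(Logger log, Closeable... closeables) {
        for (Closeable c : closeables) {
          if (c != null) {
            try {
              c.close();
            } catch (Throwable t) {
              if (log != null) {
                // SLF4J treats a trailing Throwable argument as the stack trace.
                log.debug("Exception in closing {}", c, t);
              }
            }
          }
        }
      }
    }
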
- IOUtilsClient.cleanup(LOG, peer); + IOUtilsClient.cleanupWithLogger(LOG, peer); return; } putInternal(dnId, peer); @@ -221,7 +221,7 @@ private synchronized void evictExpired(long expiryPeriod) { Time.monotonicNow() - entry.getValue().getTime() < expiryPeriod) { break; } - IOUtilsClient.cleanup(LOG, entry.getValue().getPeer()); + IOUtilsClient.cleanupWithLogger(LOG, entry.getValue().getPeer()); iter.remove(); } } @@ -239,7 +239,7 @@ private synchronized void evictOldest() { "capacity: " + capacity); } Entry entry = iter.next(); - IOUtilsClient.cleanup(LOG, entry.getValue().getPeer()); + IOUtilsClient.cleanupWithLogger(LOG, entry.getValue().getPeer()); iter.remove(); } @@ -267,7 +267,7 @@ private void run() throws InterruptedException { @VisibleForTesting synchronized void clear() { for (Value value : multimap.values()) { - IOUtilsClient.cleanup(LOG, value.getPeer()); + IOUtilsClient.cleanupWithLogger(LOG, value.getPeer()); } multimap.clear(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java index ce4318531a..8e592f4a54 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java @@ -549,14 +549,14 @@ public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() { // Handle an I/O error we got when using a cached socket. // These are considered less serious, because the socket may be stale. LOG.debug("{}: closing stale domain peer {}", this, peer, e); - IOUtilsClient.cleanup(LOG, peer); + IOUtilsClient.cleanupWithLogger(LOG, peer); } else { // Handle an I/O error we got when using a newly created socket. // We temporarily disable the domain socket path for a few minutes in // this case, to prevent wasting more time on it. LOG.warn(this + ": I/O error requesting file descriptors. 
" + "Disabling domain socket " + peer.getDomainSocket(), e); - IOUtilsClient.cleanup(LOG, peer); + IOUtilsClient.cleanupWithLogger(LOG, peer); clientContext.getDomainSocketFactory() .disableDomainSocketPath(pathInfo.getPath()); return null; @@ -620,7 +620,7 @@ private ShortCircuitReplicaInfo requestFileDescriptors(DomainPeer peer, return null; } finally { if (replica == null) { - IOUtilsClient.cleanup(DFSClient.LOG, fis[0], fis[1]); + IOUtilsClient.cleanupWithLogger(DFSClient.LOG, fis[0], fis[1]); } } case ERROR_UNSUPPORTED: @@ -692,7 +692,7 @@ private BlockReader getRemoteBlockReaderFromDomain() throws IOException { blockReader = getRemoteBlockReader(peer); return blockReader; } catch (IOException ioe) { - IOUtilsClient.cleanup(LOG, peer); + IOUtilsClient.cleanupWithLogger(LOG, peer); if (isSecurityException(ioe)) { LOG.trace("{}: got security exception while constructing a remote " + " block reader from the unix domain socket at {}", @@ -715,7 +715,7 @@ private BlockReader getRemoteBlockReaderFromDomain() throws IOException { } } finally { if (blockReader == null) { - IOUtilsClient.cleanup(LOG, peer); + IOUtilsClient.cleanupWithLogger(LOG, peer); } } } @@ -766,7 +766,7 @@ private BlockReader getRemoteBlockReaderFromTcp() throws IOException { } } finally { if (blockReader == null) { - IOUtilsClient.cleanup(LOG, peer); + IOUtilsClient.cleanupWithLogger(LOG, peer); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java index e1e38c6e94..e48ace6c22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java @@ -686,7 +686,7 @@ public synchronized long skip(long n) throws IOException { @Override public synchronized void close() throws IOException { - IOUtilsClient.cleanup(LOG, dataIn, checksumIn); + IOUtilsClient.cleanupWithLogger(LOG, dataIn, checksumIn); if (slowReadBuff != null) { bufferPool.returnBuffer(slowReadBuff); slowReadBuff = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java index 9c2d2e0ecb..aa982d0dc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java @@ -880,7 +880,7 @@ public void close() { maxNonMmappedEvictableLifespanMs = 0; maxEvictableMmapedSize = 0; // Close and join cacheCleaner thread. - IOUtilsClient.cleanup(LOG, cacheCleaner); + IOUtilsClient.cleanupWithLogger(LOG, cacheCleaner); // Purge all replicas. 
while (true) { Object eldestKey; @@ -931,7 +931,7 @@ public void close() { LOG.error("Interrupted while waiting for CleanerThreadPool " + "to terminate", e); } - IOUtilsClient.cleanup(LOG, shmManager); + IOUtilsClient.cleanupWithLogger(LOG, shmManager); } @VisibleForTesting // ONLY for testing diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java index fd5dbfc728..14116e2fdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java @@ -244,7 +244,7 @@ void close() { suffix += " munmapped."; } } - IOUtilsClient.cleanup(LOG, dataStream, metaStream); + IOUtilsClient.cleanupWithLogger(LOG, dataStream, metaStream); if (slot != null) { cache.scheduleSlotReleaser(slot); if (LOG.isTraceEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/IOUtilsClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/IOUtilsClient.java index 71596f3835..85e9cee748 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/IOUtilsClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/IOUtilsClient.java @@ -31,7 +31,8 @@ public class IOUtilsClient { * @param log the log to record problems to at debug level. Can be null. * @param closeables the objects to close */ - public static void cleanup(Logger log, java.io.Closeable... closeables) { + public static void cleanupWithLogger(Logger log, + java.io.Closeable... 
closeables) { for (java.io.Closeable c : closeables) { if (c != null) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java index f25797e9f4..6067a5df34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java @@ -1211,11 +1211,11 @@ private void doSingleWrite(final WriteCtx writeCtx) { LOG.info("Clean up open file context for fileId: {}", latestAttr.getFileId()); - cleanupWithLogger(); + cleanup(); } } - synchronized void cleanupWithLogger() { + synchronized void cleanup() { if (!activeState) { LOG.info("Current OpenFileCtx is already inactive, no need to cleanup."); return; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java index cb9e2c8630..5c915d26bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java @@ -139,7 +139,7 @@ boolean put(FileHandle h, OpenFileCtx context) { // Cleanup the old stream outside the lock if (toEvict != null) { - toEvict.cleanupWithLogger(); + toEvict.cleanup(); } return true; } @@ -179,7 +179,7 @@ void scan(long streamTimeout) { // Invoke the cleanup outside the lock for (OpenFileCtx ofc : ctxToRemove) { - ofc.cleanupWithLogger(); + ofc.cleanup(); } } @@ -215,7 +215,7 @@ void cleanAll() { // Invoke the cleanup outside the lock for (OpenFileCtx ofc : cleanedContext) { - ofc.cleanupWithLogger(); + ofc.cleanup(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 23f0478214..6dd366f650 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -64,8 +64,8 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.cli.PosixParser; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -99,7 +99,8 @@ @InterfaceAudience.Private public class DFSUtil { - public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName()); + public static final Logger LOG = + LoggerFactory.getLogger(DFSUtil.class.getName()); private DFSUtil() { /* Hidden constructor */ } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsDtFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsDtFetcher.java index 02aa4b98f3..4fcc319e8c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsDtFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsDtFetcher.java @@ -21,8 +21,8 @@ import java.io.IOException; import java.net.URI; -import 
org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -40,7 +40,8 @@ * compilation units. Resolution of fetcher impl will be done at runtime. */ public class HdfsDtFetcher implements DtFetcher { - private static final Log LOG = LogFactory.getLog(HdfsDtFetcher.class); + private static final Logger LOG = + LoggerFactory.getLogger(HdfsDtFetcher.class); private static final String SERVICE_NAME = HdfsConstants.HDFS_URI_SCHEME; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java index b63d26b85a..bb555ef259 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java @@ -25,8 +25,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo; @@ -73,7 +73,8 @@ @InterfaceAudience.Private public class NameNodeProxies { - private static final Log LOG = LogFactory.getLog(NameNodeProxies.class); + private static final Logger LOG = + LoggerFactory.getLogger(NameNodeProxies.class); /** * Creates the namenode proxy with the passed protocol. This will handle diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SWebHdfsDtFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SWebHdfsDtFetcher.java index 46f9b00769..18dd720f62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SWebHdfsDtFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SWebHdfsDtFetcher.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.io.Text; @@ -28,7 +28,8 @@ * DtFetcher for SWebHdfsFileSystem using the base class HdfsDtFetcher impl. 
*/ public class SWebHdfsDtFetcher extends HdfsDtFetcher { - private static final Log LOG = LogFactory.getLog(SWebHdfsDtFetcher.class); + private static final Logger LOG = + LoggerFactory.getLogger(SWebHdfsDtFetcher.class); private static final String SERVICE_NAME = WebHdfsConstants.SWEBHDFS_SCHEME; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/WebHdfsDtFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/WebHdfsDtFetcher.java index c2bb8522e8..e8ef5d734e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/WebHdfsDtFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/WebHdfsDtFetcher.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.io.Text; @@ -28,7 +28,8 @@ * DtFetcher for WebHdfsFileSystem using the base class HdfsDtFetcher impl. */ public class WebHdfsDtFetcher extends HdfsDtFetcher { - private static final Log LOG = LogFactory.getLog(WebHdfsDtFetcher.class); + private static final Logger LOG = + LoggerFactory.getLogger(WebHdfsDtFetcher.class); private static final String SERVICE_NAME = WebHdfsConstants.WEBHDFS_SCHEME; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java index 5425bd5af5..5d881d0938 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java @@ -21,14 +21,14 @@ import java.io.IOException; import java.net.SocketTimeoutException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.net.unix.DomainSocket; @InterfaceAudience.Private public class DomainPeerServer implements PeerServer { - static final Log LOG = LogFactory.getLog(DomainPeerServer.class); + static final Logger LOG = LoggerFactory.getLogger(DomainPeerServer.class); private final DomainSocket sock; DomainPeerServer(DomainSocket sock) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java index 40d2b330de..9fc6692b4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java @@ -23,8 +23,8 @@ import java.net.SocketTimeoutException; import java.nio.channels.ServerSocketChannel; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources; @@ -32,7 +32,7 @@ @InterfaceAudience.Private public class TcpPeerServer implements PeerServer { - static final Log LOG = LogFactory.getLog(TcpPeerServer.class); + static final Logger LOG = 
LoggerFactory.getLogger(TcpPeerServer.class); private final ServerSocket serverSocket; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java index b52e312943..6302b2ac30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java @@ -23,8 +23,8 @@ import java.util.Map; import java.util.concurrent.TimeoutException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; @@ -46,7 +46,7 @@ * {@link QuorumCall} instances. */ class AsyncLoggerSet { - static final Log LOG = LogFactory.getLog(AsyncLoggerSet.class); + static final Logger LOG = LoggerFactory.getLogger(AsyncLoggerSet.class); private final List loggers; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index bd452923ea..ba2b20a7bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -30,8 +30,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -65,7 +65,7 @@ */ @InterfaceAudience.Private public class QuorumJournalManager implements JournalManager { - static final Log LOG = LogFactory.getLog(QuorumJournalManager.class); + static final Logger LOG = LoggerFactory.getLogger(QuorumJournalManager.class); // Timeouts for which the QJM will wait for each of the following actions. 
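
The import and field swaps in this stretch of the patch are the standard commons-logging to SLF4J migration, and two behavioral details ride along with them. A schematic before/after (this class is illustrative, not part of the patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class MigrationSketch {
      // Before: private static final Log LOG = LogFactory.getLog(MigrationSketch.class);
      private static final Logger LOG =
          LoggerFactory.getLogger(MigrationSketch.class);

      void demo(String journalId, Exception cause) {
        // 1. SLF4J defines no FATAL level, which is why the JournalNode hunk
        //    further down rewrites LOG.fatal(...) as LOG.error(...).
        LOG.error("Error reported on journal {}... exiting", journalId, cause);

        // 2. {} placeholders defer message formatting until the level is
        //    enabled, replacing string concatenation in hot paths.
        LOG.debug("Recovering journal {}", journalId);
      }
    }
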
private final int startSegmentTimeoutMs; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java index e967527385..81b3f8c1a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java @@ -32,8 +32,8 @@ import javax.servlet.http.HttpServletResponse; import org.apache.commons.text.StringEscapeUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -67,7 +67,8 @@ public class GetJournalEditServlet extends HttpServlet { private static final long serialVersionUID = -4635891628211723009L; - private static final Log LOG = LogFactory.getLog(GetJournalEditServlet.class); + private static final Logger LOG = + LoggerFactory.getLogger(GetJournalEditServlet.class); static final String STORAGEINFO_PARAM = "storageInfo"; static final String JOURNAL_ID_PARAM = "jid"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 7e88afa78d..39afabc079 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -32,8 +32,8 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.Range; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException; @@ -79,7 +79,7 @@ * the same JVM. */ public class Journal implements Closeable { - static final Log LOG = LogFactory.getLog(Journal.class); + static final Logger LOG = LoggerFactory.getLogger(Journal.class); // Current writing state @@ -1045,7 +1045,7 @@ private void persistPaxosData(long segmentTxId, public synchronized void doPreUpgrade() throws IOException { // Do not hold file lock on committedTxnId, because the containing // directory will be renamed. It will be reopened lazily on next access. - IOUtils.cleanup(LOG, committedTxnId); + IOUtils.cleanupWithLogger(LOG, committedTxnId); storage.getJournalManager().doPreUpgrade(); } @@ -1087,7 +1087,7 @@ public synchronized void doUpgrade(StorageInfo sInfo) throws IOException { lastWriterEpoch.set(prevLastWriterEpoch.get()); committedTxnId.set(prevCommittedTxnId.get()); } finally { - IOUtils.cleanup(LOG, prevCommittedTxnId); + IOUtils.cleanupWithLogger(LOG, prevCommittedTxnId); } } @@ -1109,7 +1109,7 @@ public Boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, public synchronized void doRollback() throws IOException { // Do not hold file lock on committedTxnId, because the containing // directory will be renamed. It will be reopened lazily on next access. 
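
The Journal call sites here have to change for a compile-time reason, not just naming taste: in the Hadoop 3 line, org.apache.hadoop.io.IOUtils keeps the deprecated cleanup overload typed against the commons-logging Log, and adds an SLF4J-typed cleanupWithLogger, so once Journal.LOG becomes an org.slf4j.Logger the old overload no longer accepts it. A paraphrase of that overload pair (signatures from memory, verify against your Hadoop release; bodies elided):

    import java.io.Closeable;

    final class OverloadSketch {
      @Deprecated
      static void cleanup(org.apache.commons.logging.Log log, Closeable... cs) {
        // close each element, reporting failures through commons-logging
      }

      static void cleanupWithLogger(org.slf4j.Logger log, Closeable... cs) {
        // close each element, reporting failures through SLF4J
      }
    }
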
- IOUtils.cleanup(LOG, committedTxnId); + IOUtils.cleanupWithLogger(LOG, committedTxnId); storage.getJournalManager().doRollback(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index 11a5c0451e..3df69f1448 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -21,8 +21,8 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; @@ -68,7 +68,7 @@ */ @InterfaceAudience.Private public class JournalNode implements Tool, Configurable, JournalNodeMXBean { - public static final Log LOG = LogFactory.getLog(JournalNode.class); + public static final Logger LOG = LoggerFactory.getLogger(JournalNode.class); private Configuration conf; private JournalNodeRpcServer rpcServer; private JournalNodeHttpServer httpServer; @@ -285,7 +285,7 @@ public void stop(int rc) { } for (Journal j : journalsById.values()) { - IOUtils.cleanup(LOG, j); + IOUtils.cleanupWithLogger(LOG, j); } DefaultMetricsSystem.shutdown(); @@ -403,7 +403,7 @@ private void registerJNMXBean() { private class ErrorReporter implements StorageErrorReporter { @Override public void reportErrorOnFile(File f) { - LOG.fatal("Error reported on file " + f + "... exiting", + LOG.error("Error reported on file " + f + "... 
exiting", new Exception()); stop(1); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java index 0f11026b1d..bfa9a222e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java @@ -19,7 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.BlockingService; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -61,7 +61,7 @@ @VisibleForTesting public class JournalNodeRpcServer implements QJournalProtocol, InterQJournalProtocol { - private static final Log LOG = JournalNode.LOG; + private static final Logger LOG = JournalNode.LOG; private static final int HANDLER_COUNT = 5; private final JournalNode jn; private Server server; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java index 3d0cf34e59..21fbbe41d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java @@ -29,8 +29,8 @@ import java.util.Map; import org.apache.commons.lang3.ArrayUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -58,7 +58,8 @@ @InterfaceAudience.Private public class BlockTokenSecretManager extends SecretManager { - public static final Log LOG = LogFactory.getLog(BlockTokenSecretManager.class); + public static final Logger LOG = + LoggerFactory.getLogger(BlockTokenSecretManager.class); public static final Token DUMMY_TOKEN = new Token(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index 3547c96a80..ca0e643459 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -27,8 +27,8 @@ import java.util.List; import java.util.Map.Entry; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection; @@ -62,8 +62,8 @@ public class DelegationTokenSecretManager extends 
AbstractDelegationTokenSecretManager { - private static final Log LOG = LogFactory - .getLog(DelegationTokenSecretManager.class); + private static final Logger LOG = LoggerFactory + .getLogger(DelegationTokenSecretManager.class); private final FSNamesystem namesystem; private final SerializerCompat serializerCompat = new SerializerCompat(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 426c7ab074..a58e391b01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -36,8 +36,8 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -172,7 +172,7 @@ @InterfaceAudience.Private public class Balancer { - static final Log LOG = LogFactory.getLog(Balancer.class); + static final Logger LOG = LoggerFactory.getLogger(Balancer.class); static final Path BALANCER_ID_PATH = new Path("/system/balancer.id"); @@ -724,7 +724,7 @@ static int run(Collection namenodes, final BalancerParameters p, } } finally { for(NameNodeConnector nnc : connectors) { - IOUtils.cleanup(LOG, nnc); + IOUtils.cleanupWithLogger(LOG, nnc); } } return ExitStatus.SUCCESS.getExitCode(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index 060c013e37..8a71417bef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -44,8 +44,8 @@ import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; @@ -88,7 +88,7 @@ /** Dispatching block replica moves between datanodes. 
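
The Balancer hunk above keeps its close loop in a finally block, and the helper's swallow-and-log behavior is what makes that loop safe: every NameNodeConnector gets a close attempt even if an earlier close throws, a guarantee a bare try-with-resources over a dynamically sized collection does not give you. A sketch of the same guarantee written out by hand (class and method names are illustrative):

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.Collection;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class CloseAllSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(CloseAllSketch.class);

      static void closeAll(Collection<? extends Closeable> connectors) {
        for (Closeable c : connectors) {
          try {
            c.close();
          } catch (IOException e) {
            // Log and keep going so one bad connector cannot skip the rest.
            LOG.warn("Failed to close {}", c, e);
          }
        }
      }
    }
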
*/ @InterfaceAudience.Private public class Dispatcher { - static final Log LOG = LogFactory.getLog(Dispatcher.class); + static final Logger LOG = LoggerFactory.getLogger(Dispatcher.class); /** * the period of time to delay the usage of a DataNode after hitting diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java index 2b3c1935e0..114167ca44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java @@ -31,8 +31,8 @@ import java.util.concurrent.atomic.AtomicLong; import com.google.common.base.Preconditions; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -62,7 +62,8 @@ */ @InterfaceAudience.Private public class NameNodeConnector implements Closeable { - private static final Log LOG = LogFactory.getLog(NameNodeConnector.class); + private static final Logger LOG = + LoggerFactory.getLogger(NameNodeConnector.class); public static final int DEFAULT_MAX_IDLE_ITERATIONS = 5; private static boolean write2IdFile = true; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java index 8435b46e7a..8f76e8bbcf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java @@ -25,8 +25,8 @@ import java.util.Random; import com.google.common.base.Preconditions; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -39,8 +39,8 @@ */ public class AvailableSpaceBlockPlacementPolicy extends BlockPlacementPolicyDefault { - private static final Log LOG = LogFactory - .getLog(AvailableSpaceBlockPlacementPolicy.class); + private static final Logger LOG = LoggerFactory + .getLogger(AvailableSpaceBlockPlacementPolicy.class); private static final Random RAND = new Random(); private int balancedPreference = (int) (100 * DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 1131506b6b..430c0d44a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -24,8 +24,8 @@ import 
com.google.common.base.Preconditions; import com.google.common.net.InetAddresses; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -73,7 +73,7 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class DatanodeManager { - static final Log LOG = LogFactory.getLog(DatanodeManager.class); + static final Logger LOG = LoggerFactory.getLogger(DatanodeManager.class); private final Namesystem namesystem; private final BlockManager blockManager; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java index 59f907fe08..b7bf6747b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -50,7 +50,8 @@ * resolutions are only done during the loading time to minimize the latency. */ public class HostFileManager extends HostConfigManager { - private static final Log LOG = LogFactory.getLog(HostFileManager.class); + private static final Logger LOG = + LoggerFactory.getLogger(HostFileManager.class); private Configuration conf; private HostSet includes = new HostSet(); private HostSet excludes = new HostSet(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index 637c679b9f..498a09351c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.common; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; @@ -53,7 +53,7 @@ public class JspHelper { public static final String CURRENT_CONF = "current.conf"; public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME; public static final String NAMENODE_ADDRESS = "nnaddr"; - private static final Log LOG = LogFactory.getLog(JspHelper.class); + private static final Logger LOG = LoggerFactory.getLogger(JspHelper.class); /** Private constructor for preventing creating JspHelper object. 
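
One inconsistency in this patch is harmless: DFSUtil and Util call LoggerFactory.getLogger with Class.getName(), while JspHelper and most other classes pass the Class itself. SLF4J defines the Class overload in terms of the class name, so both forms yield the same logger; a runnable check (the class here is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class LoggerNameSketch {
      public static void main(String[] args) {
        Logger byClass = LoggerFactory.getLogger(LoggerNameSketch.class);
        Logger byName =
            LoggerFactory.getLogger(LoggerNameSketch.class.getName());
        // Prints true: both factory calls resolve to the same logger name.
        System.out.println(byClass.getName().equals(byName.getName()));
      }
    }
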
*/ private JspHelper() {} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java index 40c048c5a3..051e2d2c52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java @@ -32,8 +32,9 @@ import javax.management.ObjectName; import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.log4j.Appender; import org.apache.log4j.AsyncAppender; @@ -43,7 +44,8 @@ */ public class MetricsLoggerTask implements Runnable { - public static final Log LOG = LogFactory.getLog(MetricsLoggerTask.class); + public static final Logger LOG = + LoggerFactory.getLogger(MetricsLoggerTask.class); private static ObjectName objectName = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java index 5dee16a929..4e30e50414 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java @@ -38,8 +38,8 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -57,7 +57,8 @@ @InterfaceAudience.Private public final class Util { - private final static Log LOG = LogFactory.getLog(Util.class.getName()); + private final static Logger LOG = + LoggerFactory.getLogger(Util.class.getName()); public final static String FILE_LENGTH = "File-Length"; public final static String CONTENT_LENGTH = "Content-Length"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 10951e927f..99584d9f3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -39,8 +39,8 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.lang3.time.FastDateFormat; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.AutoCloseableLock; @@ -60,7 +60,8 @@ */ @InterfaceAudience.Private public class DirectoryScanner implements Runnable { - private static final Log LOG = LogFactory.getLog(DirectoryScanner.class); + private static final Logger LOG = + LoggerFactory.getLogger(DirectoryScanner.class); private static final int 
MILLIS_PER_SECOND = 1000; private static final String START_MESSAGE = "Periodic Directory Tree Verification scan" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java index 83ee5f6753..2da3b1e8f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -37,7 +37,8 @@ */ @InterfaceAudience.Private class ProfilingFileIoEvents { - static final Log LOG = LogFactory.getLog(ProfilingFileIoEvents.class); + static final Logger LOG = + LoggerFactory.getLogger(ProfilingFileIoEvents.class); private final boolean isEnabled; private final int sampleRangeMax; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java index ea9e72ce44..3df83cfbad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java @@ -32,8 +32,8 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.IOUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.hdfs.ExtendedBlockId; @@ -81,7 +81,8 @@ * The counterpart of this class on the client is {@link DfsClientShmManager}. 
*/ public class ShortCircuitRegistry { - public static final Log LOG = LogFactory.getLog(ShortCircuitRegistry.class); + public static final Logger LOG = + LoggerFactory.getLogger(ShortCircuitRegistry.class); private static final int SHM_LENGTH = 8192; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java index efe222f6ed..67a66fd0ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java @@ -27,8 +27,8 @@ import java.util.List; import java.util.Random; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; @@ -46,7 +46,8 @@ public class AvailableSpaceVolumeChoosingPolicy implements VolumeChoosingPolicy, Configurable { - private static final Log LOG = LogFactory.getLog(AvailableSpaceVolumeChoosingPolicy.class); + private static final Logger LOG = + LoggerFactory.getLogger(AvailableSpaceVolumeChoosingPolicy.class); private Object[] syncLocks; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java index b9bcf1ff27..2d924c0236 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java @@ -20,8 +20,8 @@ import java.io.IOException; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; @@ -31,7 +31,8 @@ */ public class RoundRobinVolumeChoosingPolicy implements VolumeChoosingPolicy { - public static final Log LOG = LogFactory.getLog(RoundRobinVolumeChoosingPolicy.class); + public static final Logger LOG = + LoggerFactory.getLogger(RoundRobinVolumeChoosingPolicy.class); // curVolumes stores the RR counters of each storage type. 
// The ordinal of storage type in org.apache.hadoop.fs.StorageType diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 3f9de78d1c..2adfb6bb52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -32,8 +32,8 @@ import java.util.Scanner; import java.util.concurrent.atomic.AtomicLong; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CachingGetSpaceUsed; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -71,7 +71,7 @@ * This class is synchronized by {@link FsVolumeImpl}. */ class BlockPoolSlice { - static final Log LOG = LogFactory.getLog(BlockPoolSlice.class); + static final Logger LOG = LoggerFactory.getLogger(BlockPoolSlice.class); private final String bpid; private final FsVolumeImpl volume; // volume to which this BlockPool belongs to @@ -764,7 +764,7 @@ void shutdown(BlockListAsLongs blocksListToPersist) { } if (dfsUsage instanceof CachingGetSpaceUsed) { - IOUtils.cleanup(LOG, ((CachingGetSpaceUsed) dfsUsage)); + IOUtils.cleanupWithLogger(LOG, ((CachingGetSpaceUsed) dfsUsage)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java index 4929b5e94e..81213a033f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java @@ -30,8 +30,8 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil; @@ -59,7 +59,8 @@ * They should be combined. 
 */
 class FsDatasetAsyncDiskService {
-  public static final Log LOG = LogFactory.getLog(FsDatasetAsyncDiskService.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(FsDatasetAsyncDiskService.class);
 
   // ThreadPool core pool size
   private static final int CORE_THREADS_PER_VOLUME = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
index d6969c42e6..a77faf2cec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -45,7 +45,8 @@
  * They should be combined.
  */
 class RamDiskAsyncLazyPersistService {
-  public static final Log LOG = LogFactory.getLog(RamDiskAsyncLazyPersistService.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(RamDiskAsyncLazyPersistService.class);
 
   // ThreadPool core pool size
   private static final int CORE_THREADS_PER_VOLUME = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
index 335ed703ab..07e520117f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
@@ -20,8 +20,8 @@
 
 import com.google.common.base.Preconditions;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -36,7 +36,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public abstract class RamDiskReplicaTracker {
-  static final Log LOG = LogFactory.getLog(RamDiskReplicaTracker.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(RamDiskReplicaTracker.class);
 
   FsDatasetImpl fsDataset;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 4349c26b05..1c7850608a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -41,8 +41,8 @@
 import io.netty.handler.ssl.SslHandler;
 import io.netty.handler.stream.ChunkedWriteHandler;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -87,7 +87,7 @@ public class DatanodeHttpServer implements Closeable {
   private final RestCsrfPreventionFilter restCsrfPreventionFilter;
   private InetSocketAddress httpAddress;
   private InetSocketAddress httpsAddress;
-  static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
+  static final Logger LOG = LoggerFactory.getLogger(DatanodeHttpServer.class);
 
   // HttpServer threads are only used for the web UI and basic servlets, so
   // set them to the minimum possible
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
index 4958bb5920..be29eaf58a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
@@ -30,7 +30,7 @@
 import io.netty.handler.codec.http.HttpResponseStatus;
 import io.netty.util.ReferenceCountUtil;
 
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.http.RestCsrfPreventionFilter;
@@ -46,7 +46,7 @@
 final class RestCsrfPreventionFilterHandler
     extends SimpleChannelInboundHandler<HttpRequest> {
 
-  private static final Log LOG = DatanodeHttpServer.LOG;
+  private static final Logger LOG = DatanodeHttpServer.LOG;
 
   private final RestCsrfPreventionFilter restCsrfPreventionFilter;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java
index 9d659f1f57..9a2e0b71a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java
@@ -34,7 +34,7 @@
 import io.netty.handler.codec.http.HttpRequest;
 import io.netty.handler.codec.http.HttpRequestEncoder;
 import io.netty.handler.codec.http.HttpResponseEncoder;
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
 
 import java.net.InetSocketAddress;
 
@@ -53,7 +53,7 @@ class SimpleHttpProxyHandler extends SimpleChannelInboundHandler<HttpRequest> {
   private String uri;
   private Channel proxiedChannel;
   private final InetSocketAddress host;
-  static final Log LOG = DatanodeHttpServer.LOG;
+  static final Logger LOG = DatanodeHttpServer.LOG;
 
   SimpleHttpProxyHandler(InetSocketAddress host) {
     this.host = host;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java
index fa6f676dd3..366f47f296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java
@@ -13,8 +13,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -43,7 +43,7 @@ public class DataNodeUGIProvider {
   private final ParameterParser params;
   @VisibleForTesting
   static Cache<String, UserGroupInformation> ugiCache;
-  public static final Log LOG = LogFactory.getLog(Client.class);
+  public static final Logger LOG = LoggerFactory.getLogger(Client.class);
 
   DataNodeUGIProvider(ParameterParser params) {
     this.params = params;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
index dce1f02cea..b56b0d09ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
@@ -23,7 +23,7 @@
 import io.netty.buffer.Unpooled;
 import io.netty.handler.codec.http.DefaultFullHttpResponse;
 import io.netty.handler.codec.http.HttpResponseStatus;
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -43,7 +43,7 @@
 import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.APPLICATION_JSON_UTF8;
 
 class ExceptionHandler {
-  static Log LOG = WebHdfsHandler.LOG;
+  private static final Logger LOG = WebHdfsHandler.LOG;
 
   static DefaultFullHttpResponse exceptionCaught(Throwable cause) {
     Exception e = cause instanceof Exception ?
         (Exception) cause : new Exception(cause);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
index b5654ab8bd..3d928ff3ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
@@ -23,7 +23,7 @@
 import io.netty.handler.codec.http.DefaultHttpResponse;
 import io.netty.handler.codec.http.HttpContent;
 import io.netty.handler.codec.http.LastHttpContent;
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.io.IOUtils;
 
@@ -37,7 +37,7 @@ class HdfsWriter extends SimpleChannelInboundHandler<HttpContent> {
   private final DFSClient client;
   private final OutputStream out;
   private final DefaultHttpResponse response;
-  private static final Log LOG = WebHdfsHandler.LOG;
+  private static final Logger LOG = WebHdfsHandler.LOG;
 
   HdfsWriter(DFSClient client, OutputStream out, DefaultHttpResponse response) {
     this.client = client;
@@ -82,8 +82,8 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
   }
 
   private void releaseDfsResources() {
-    IOUtils.cleanup(LOG, out);
-    IOUtils.cleanup(LOG, client);
+    IOUtils.cleanupWithLogger(LOG, out);
+    IOUtils.cleanupWithLogger(LOG, client);
   }
 
   private void releaseDfsResourcesAndThrow() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index 9a4b670f1b..b01e3c983a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -29,8 +29,8 @@
 import io.netty.handler.codec.http.HttpRequest;
 import io.netty.handler.codec.http.QueryStringDecoder;
 import io.netty.handler.stream.ChunkedStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
@@ -87,8 +87,8 @@
 import static org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.HDFS_DELEGATION_KIND;
 
 public class WebHdfsHandler extends SimpleChannelInboundHandler<HttpRequest> {
-  static final Log LOG = LogFactory.getLog(WebHdfsHandler.class);
-  static final Log REQLOG = LogFactory.getLog("datanode.webhdfs");
+  static final Logger LOG = LoggerFactory.getLogger(WebHdfsHandler.class);
+  static final Logger REQLOG = LoggerFactory.getLogger("datanode.webhdfs");
   public static final String WEBHDFS_PREFIX = WebHdfsFileSystem.PATH_PREFIX;
   public static final int WEBHDFS_PREFIX_LENGTH = WEBHDFS_PREFIX.length();
   public static final String APPLICATION_OCTET_STREAM =
@@ -295,7 +295,7 @@ private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
       dfsclient.close();
       dfsclient = null;
     } finally {
-      IOUtils.cleanup(LOG, dfsclient);
+      IOUtils.cleanupWithLogger(LOG, dfsclient);
     }
     final byte[] js =
         JsonUtil.toJsonString(checksum).getBytes(StandardCharsets.UTF_8);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index c7a53e1c8f..80df71195d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -22,8 +22,8 @@
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.commons.cli.*;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -70,7 +70,7 @@
 @InterfaceAudience.Private
 public class Mover {
-  static final Log LOG = LogFactory.getLog(Mover.class);
+  static final Logger LOG = LoggerFactory.getLogger(Mover.class);
 
   private static class StorageMap {
     private final StorageGroupMap<Source> sources
@@ -659,7 +659,7 @@ static int run(Map<URI, List<Path>> namenodes, Configuration conf)
           final ExitStatus r = m.run();
 
           if (r == ExitStatus.SUCCESS) {
-            IOUtils.cleanup(LOG, nnc);
+            IOUtils.cleanupWithLogger(LOG, nnc);
             iter.remove();
           } else if (r != ExitStatus.IN_PROGRESS) {
             if (r == ExitStatus.NO_MOVE_PROGRESS) {
@@ -682,7 +682,7 @@ static int run(Map<URI, List<Path>> namenodes, Configuration conf)
       return ExitStatus.SUCCESS.getExitCode();
     } finally {
       for (NameNodeConnector nnc : connectors) {
-        IOUtils.cleanup(LOG, nnc);
+        IOUtils.cleanupWithLogger(LOG, nnc);
       }
     }
   }
@@ -720,7 +720,7 @@ private static String[] readPathFile(String file) throws IOException {
         }
       }
     } finally {
-      IOUtils.cleanup(LOG, reader);
+      IOUtils.cleanupWithLogger(LOG, reader);
     }
     return list.toArray(new String[list.size()]);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
index a2613d999d..20b1d25434 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
@@ -21,8 +21,6 @@
 
 import javax.annotation.Nonnull;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java
index aeee87dc80..186bc3d727 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java
@@ -19,8 +19,8 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 
@@ -30,7 +30,8 @@
 @InterfaceAudience.Private
 public class CheckpointConf {
-  private static final Log LOG = LogFactory.getLog(CheckpointConf.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CheckpointConf.class);
 
   /** How often to checkpoint regardless of number of txns */
   private final long checkpointPeriod;    // in seconds
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index 318acfbef4..14749d01ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -28,8 +28,8 @@
 import java.net.URL;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -56,8 +56,8 @@
  * (1) time or (2) the size of the edits file.
  */
 class Checkpointer extends Daemon {
-  public static final Log LOG =
-    LogFactory.getLog(Checkpointer.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(Checkpointer.class.getName());
 
   private final BackupNode backupNode;
   volatile boolean shouldRun;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 95f3fee34d..14548e9e0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -22,8 +22,8 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -48,8 +48,8 @@ public class ContentSummaryComputationContext {
   private int sleepNanoSec = 0;
 
   public static final String REPLICATED = "Replicated";
-  public static final Log LOG = LogFactory
-      .getLog(ContentSummaryComputationContext.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(ContentSummaryComputationContext.class);
 
   private FSPermissionChecker pc;
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
index 2fb369c950..e3b8e425ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
@@ -21,8 +21,8 @@
 import javax.servlet.http.HttpServletRequest;
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -34,7 +34,8 @@
 abstract class DfsServlet extends HttpServlet {
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;
 
-  static final Log LOG = LogFactory.getLog(DfsServlet.class.getCanonicalName());
+  static final Logger LOG =
+      LoggerFactory.getLogger(DfsServlet.class.getCanonicalName());
 
   protected UserGroupInformation getUGI(HttpServletRequest request,
                                         Configuration conf) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
index 14d5b5464e..43fc949dc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
@@ -21,8 +21,8 @@
 import java.net.InetSocketAddress;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -43,7 +43,8 @@
  * int, int, byte[])
  */
 class EditLogBackupOutputStream extends EditLogOutputStream {
-  private static final Log LOG = LogFactory.getLog(EditLogFileOutputStream.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(EditLogFileOutputStream.class);
   static final int DEFAULT_BUFFER_SIZE = 256;
 
   private final JournalProtocol backupNode;  // RPC proxy to backup node
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
index 36c2232d3b..95a305e735 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
@@ -29,8 +29,8 @@
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
@@ -71,7 +71,7 @@ static private enum State {
   private FSEditLogOp.Reader reader = null;
   private FSEditLogLoader.PositionTrackingInputStream tracker = null;
   private DataInputStream dataIn = null;
-  static final Log LOG = LogFactory.getLog(EditLogInputStream.class);
+  static final Logger LOG = LoggerFactory.getLogger(EditLogInputStream.class);
 
   /**
    * Open an EditLogInputStream for the given file.
@@ -161,7 +161,7 @@ private void init(boolean verifyLayoutVersion)
       state = State.OPEN;
     } finally {
       if (reader == null) {
-        IOUtils.cleanup(LOG, dataIn, tracker, bin, fStream);
+        IOUtils.cleanupWithLogger(LOG, dataIn, tracker, bin, fStream);
         state = State.CLOSED;
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
index 830814c70e..9f06ce9d5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
@@ -26,8 +26,8 @@
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -42,7 +42,8 @@
 */
 @InterfaceAudience.Private
 public class EditLogFileOutputStream extends EditLogOutputStream {
-  private static final Log LOG = LogFactory.getLog(EditLogFileOutputStream.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(EditLogFileOutputStream.class);
   public static final int MIN_PREALLOCATION_LENGTH = 1024 * 1024;
 
   private File file;
@@ -161,7 +162,7 @@ public void close() throws IOException {
         fp.close();
         fp = null;
       } finally {
-        IOUtils.cleanup(LOG, fc, fp);
+        IOUtils.cleanupWithLogger(LOG, fc, fp);
         doubleBuf = null;
         fc = null;
         fp = null;
@@ -174,7 +175,7 @@ public void abort() throws IOException {
     if (fp == null) {
       return;
     }
-    IOUtils.cleanup(LOG, fp);
+    IOUtils.cleanupWithLogger(LOG, fp);
     fp = null;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
index 4e1dab069b..93f35f76e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
@@ -24,8 +24,8 @@
 import java.util.Arrays;
 
 import org.apache.commons.codec.binary.Hex;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer;
@@ -43,7 +43,8 @@
 */
 @InterfaceAudience.Private
 public class EditsDoubleBuffer {
-  protected static final Log LOG = LogFactory.getLog(EditsDoubleBuffer.class);
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(EditsDoubleBuffer.class);
 
   private TxnBuffer bufCurrent; // current buffer for writing
   private TxnBuffer bufReady; // buffer ready for flushing
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
index 5990c2273f..7f3937993b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java
@@ -26,8 +26,8 @@
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.util.ExitUtil;
@@ -35,7 +35,7 @@
 import com.google.common.base.Preconditions;
 
 class FSEditLogAsync extends FSEditLog implements Runnable {
-  static final Log LOG = LogFactory.getLog(FSEditLog.class);
+  static final Logger LOG = LoggerFactory.getLogger(FSEditLog.class);
 
   // use separate mutex to avoid possible deadlock when stopping the thread.
   private final Object syncThreadLock = new Object();
@@ -203,7 +203,7 @@ public void run() {
 
   private void terminate(Throwable t) {
     String message = "Exception while edit logging: "+t.getMessage();
-    LOG.fatal(message, t);
+    LOG.error(message, t);
     ExitUtil.terminate(1, message);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 82e35bd353..f3b6b84397 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -28,8 +28,8 @@
 import java.util.EnumSet;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem;
@@ -121,7 +121,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class FSEditLogLoader {
-  static final Log LOG = LogFactory.getLog(FSEditLogLoader.class.getName());
+  static final Logger LOG =
+      LoggerFactory.getLogger(FSEditLogLoader.class.getName());
   static final long REPLAY_TRANSACTION_LOG_INTERVAL = 1000; // 1sec
 
   private final FSNamesystem fsNamesys;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 6d107be789..3d347d929b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -37,8 +37,8 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -83,7 +83,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class FSImage implements Closeable {
-  public static final Log LOG = LogFactory.getLog(FSImage.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(FSImage.class.getName());
 
   protected FSEditLog editLog = null;
   private boolean isUpgradeFinalized = false;
@@ -1136,7 +1137,7 @@ public synchronized void saveNamespace(FSNamesystem source, NameNodeFile nnf,
     getStorage().updateNameDirSize();
 
     if (exitAfterSave.get()) {
-      LOG.fatal("NameNode process will exit now... The saved FsImage " +
+      LOG.error("NameNode process will exit now... The saved FsImage " +
          nnf + " is potentially corrupted.");
       ExitUtil.terminate(-1);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 83f9c9386c..ae2a037146 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -37,7 +37,7 @@
 import java.util.Map;
 import java.util.TreeMap;
 
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -181,7 +181,7 @@
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class FSImageFormat {
-  private static final Log LOG = FSImage.LOG;
+  private static final Logger LOG = FSImage.LOG;
 
   // Static-only class
   private FSImageFormat() {}
@@ -231,7 +231,7 @@ public void load(File file, boolean requireSameLayoutVersion)
         loader.load(file);
       }
     } finally {
-      IOUtils.cleanup(LOG, is);
+      IOUtils.cleanupWithLogger(LOG, is);
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index a233d2639b..1571d57af3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -26,8 +26,8 @@
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -104,7 +104,8 @@ public final class FSImageFormatPBINode {
       XAttr.NameSpace.values();
 
-  private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FSImageFormatPBINode.class);
 
   public final static class Loader {
     public static PermissionStatus loadPermission(long id,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
index 64693852a5..1d97ace752 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
@@ -29,8 +29,8 @@
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileUtil;
@@ -51,8 +51,8 @@
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 class FSImagePreTransactionalStorageInspector extends FSImageStorageInspector {
-  private static final Log LOG =
-    LogFactory.getLog(FSImagePreTransactionalStorageInspector.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FSImagePreTransactionalStorageInspector.class);
 
   /* Flag if there is at least one storage dir that doesn't contain the newest
    * fstime */
@@ -136,7 +136,7 @@ static long readCheckpointTime(StorageDirectory sd) throws IOException {
         in.close();
         in = null;
       } finally {
-        IOUtils.cleanup(LOG, in);
+        IOUtils.cleanupWithLogger(LOG, in);
       }
     }
     return timeStamp;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
index a0465a5beb..b04007513c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
@@ -28,8 +28,8 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -39,7 +39,7 @@
 import com.google.common.collect.Lists;
 
 class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
-  public static final Log LOG = LogFactory.getLog(
+  public static final Logger LOG = LoggerFactory.getLogger(
       FSImageTransactionalStorageInspector.class);
 
   private boolean needToSave = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 74c9f10482..1caf7c2009 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -307,9 +307,9 @@
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
+import org.apache.log4j.Logger;
 import org.apache.log4j.Appender;
 import org.apache.log4j.AsyncAppender;
-import org.apache.log4j.Logger;
 import org.eclipse.jetty.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index f70963c361..146869d5b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -24,8 +24,8 @@
 import java.util.Stack;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.permission.AclEntryScope;
@@ -47,7 +47,7 @@
 * Some of the helper methods are guarded by {@link FSNamesystem#readLock()}.
 */
 public class FSPermissionChecker implements AccessControlEnforcer {
-  static final Log LOG = LogFactory.getLog(UserGroupInformation.class);
+  static final Logger LOG = LoggerFactory.getLogger(UserGroupInformation.class);
 
   private static String getPath(byte[][] components, int start, int end) {
     return DFSUtil.byteArray2PathString(components, start, end - start + 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 185ad731b2..d08a64497f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -60,7 +60,8 @@
 */
 @InterfaceAudience.Private
 public class FileJournalManager implements JournalManager {
-  private static final Log LOG = LogFactory.getLog(FileJournalManager.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FileJournalManager.class);
 
   private final Configuration conf;
   private final StorageDirectory sd;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 207d97726b..2123f4ea99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -25,8 +25,8 @@
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Path;
@@ -56,7 +56,7 @@
 */
 @InterfaceAudience.Private
 public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
-  public static final Log LOG = LogFactory.getLog(INode.class);
+  public static final Logger LOG = LoggerFactory.getLogger(INode.class);
 
   /** parent is either an {@link INodeDirectory} or an {@link INodeReference}.*/
   private INode parent = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 50ead610c6..04fb50e351 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -19,8 +19,8 @@
 
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -36,7 +36,7 @@
 * Contains INodes information resolved from a given path.
 */
 public class INodesInPath {
-  public static final Log LOG = LogFactory.getLog(INodesInPath.class);
+  public static final Logger LOG = LoggerFactory.getLogger(INodesInPath.class);
 
   /**
    * @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index 7a26df9ef0..9028b362e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -35,8 +35,8 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.security.SecurityUtil;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -72,7 +72,7 @@
 public class ImageServlet extends HttpServlet {
 
   private static final long serialVersionUID = -7669068179452648952L;
 
-  private static final Log LOG = LogFactory.getLog(ImageServlet.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ImageServlet.class);
 
   public final static String CONTENT_DISPOSITION = "Content-Disposition";
   public final static String HADOOP_IMAGE_EDITS_HEADER = "X-Image-Edits-Name";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
index 868df017ca..7be7073c5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
@@ -30,8 +30,8 @@
 import java.util.SortedSet;
 import java.util.concurrent.CopyOnWriteArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -54,7 +54,7 @@
 @InterfaceAudience.Private
 public class JournalSet implements JournalManager {
 
-  static final Log LOG = LogFactory.getLog(FSEditLog.class);
+  static final Logger LOG = LoggerFactory.getLogger(FSEditLog.class);
 
   // we want local logs to be ordered earlier in the collection, and true
   // is considered larger than false, so reverse the comparator
@@ -387,7 +387,7 @@ private void mapJournalsAndReportErrors(
         if (jas.isRequired()) {
           final String msg = "Error: " + status + " failed for required journal ("
             + jas + ")";
-          LOG.fatal(msg, t);
+          LOG.error(msg, t);
           // If we fail on *any* of the required journals, then we must not
           // continue on any of the other journals. Abort them to ensure that
           // retry behavior doesn't allow them to keep going in any way.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
index 8831b49adb..dad5779b68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
@@ -22,14 +22,15 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Context data for an ongoing NameNode metadata recovery process. */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public final class MetaRecoveryContext {
-  public static final Log LOG = LogFactory.getLog(MetaRecoveryContext.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(MetaRecoveryContext.class.getName());
   public final static int FORCE_NONE = 0;
   public final static int FORCE_FIRST_CHOICE = 1;
   public final static int FORCE_ALL = 2;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
index 2a83541d38..fc54dfcfd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
@@ -28,8 +28,8 @@
 import java.util.List;
 import java.util.TreeSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
@@ -56,7 +56,7 @@ public class NNStorageRetentionManager {
   private final int numCheckpointsToRetain;
   private final long numExtraEditsToRetain;
   private final int maxExtraEditsSegmentsToRetain;
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       NNStorageRetentionManager.class);
   private final NNStorage storage;
   private final StoragePurger purger;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
index b3fff749fa..9cca97ab74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
@@ -27,8 +27,8 @@
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.Collections;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -38,7 +38,8 @@
 
 public abstract class NNUpgradeUtil {
 
-  private static final Log LOG = LogFactory.getLog(NNUpgradeUtil.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NNUpgradeUtil.class);
 
   /**
    * Return true if this storage dir can roll back to the previous storage
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java
index cf4f9a739d..4b8b797551 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java
@@ -20,8 +20,8 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Caches frequently used names to facilitate reuse.
@@ -62,7 +62,7 @@ int get() {
     }
   }
 
-  static final Log LOG = LogFactory.getLog(NameCache.class.getName());
+  static final Logger LOG = LoggerFactory.getLogger(NameCache.class.getName());
 
   /** indicates initialization is in progress */
   private boolean initialized = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
index 7b1dbc6f2a..898f57e4b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
@@ -25,8 +25,8 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
@@ -47,7 +47,8 @@
 */
 @InterfaceAudience.Private
 public class NameNodeResourceChecker {
-  private static final Log LOG = LogFactory.getLog(NameNodeResourceChecker.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NameNodeResourceChecker.class.getName());
 
   // Space (in bytes) reserved per volume.
   private final long duReserved;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 0201ca1161..5d664cbb33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -36,8 +36,8 @@
 import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -112,7 +112,8 @@
 */
 @InterfaceAudience.Private
 public class NamenodeFsck implements DataEncryptionKeyFactory {
-  public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(NameNode.class.getName());
 
   // return string marking fsck status
   public static final String CORRUPT_STATUS = "is CORRUPT";
@@ -358,7 +359,7 @@ public void fsck() {
           blockIdCK(blk);
           sb.append(blk + "\n");
         }
-        LOG.info(sb);
+        LOG.info("{}", sb.toString());
         namenode.getNamesystem().logFsckEvent("/", remoteAddress);
         out.flush();
         return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java
index a73206b31e..6c42c82dc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java
@@ -21,8 +21,8 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.io.IOUtils;
 
@@ -37,7 +37,7 @@
 * different subset of the available edits.
 */
 class RedundantEditLogInputStream extends EditLogInputStream {
-  public static final Log LOG = LogFactory.getLog(
+  public static final Logger LOG = LoggerFactory.getLogger(
       RedundantEditLogInputStream.class.getName());
   private int curIdx;
   private long prevTxId;
@@ -152,7 +152,7 @@ public long getLastTxId() {
 
   @Override
   public void close() throws IOException {
-    IOUtils.cleanup(LOG, streams);
+    IOUtils.cleanupWithLogger(LOG, streams);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 4d7b7473ee..1423b30801 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -38,8 +38,8 @@
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -103,8 +103,8 @@ public class SecondaryNameNode implements Runnable,
   static{
     HdfsConfiguration.init();
   }
-  public static final Log LOG =
-    LogFactory.getLog(SecondaryNameNode.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(SecondaryNameNode.class.getName());
 
   private final long starttime = Time.now();
   private volatile long lastCheckpointTime = 0;
@@ -367,12 +367,12 @@ public void doWork() {
           // Prevent a huge number of edits from being created due to
          // unrecoverable conditions and endless retries.
           if (checkpointImage.getMergeErrorCount() > maxRetries) {
-            LOG.fatal("Merging failed " +
+            LOG.error("Merging failed " +
                checkpointImage.getMergeErrorCount() + " times.");
             terminate(1);
           }
         } catch (Throwable e) {
-          LOG.fatal("Throwable Exception in doCheckpoint", e);
+          LOG.error("Throwable Exception in doCheckpoint", e);
           e.printStackTrace();
           terminate(1, e);
         }
@@ -676,7 +676,7 @@ boolean shouldCheckpointBasedOnCount() throws IOException {
   public static void main(String[] argv) throws Exception {
     CommandLineOpts opts = SecondaryNameNode.parseArgs(argv);
     if (opts == null) {
-      LOG.fatal("Failed to parse options");
+      LOG.error("Failed to parse options");
       terminate(1);
     } else if (opts.shouldPrintHelp()) {
       opts.usage();
@@ -703,7 +703,7 @@ public static void main(String[] argv) throws Exception {
         secondary.join();
       }
     } catch (Throwable e) {
-      LOG.fatal("Failed to start secondary namenode", e);
+      LOG.error("Failed to start secondary namenode", e);
       terminate(1);
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StartupProgressServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StartupProgressServlet.java
index 9fb0c624c6..449a1aa62a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StartupProgressServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StartupProgressServlet.java
@@ -103,7 +103,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
       json.writeEndArray();
       json.writeEndObject();
     } finally {
-      IOUtils.cleanup(LOG, json);
+      IOUtils.cleanupWithLogger(LOG, json);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index 771a43ef6f..14ce00098f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -32,8 +32,8 @@
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
@@ -95,7 +95,8 @@ public static TransferResult getResultForCode(int code){
   @VisibleForTesting
   static int timeout = 0;
 
-  private static final Log LOG = LogFactory.getLog(TransferFsImage.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TransferFsImage.class);
 
   public static void downloadMostRecentImageToDirectory(URL infoServer,
       File dir) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 4d6716f04f..aed90a5300 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -31,8 +31,8 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
@@ -76,7 +76,8 @@
 */
 @InterfaceAudience.Private
 public class BootstrapStandby implements Tool, Configurable {
-  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BootstrapStandby.class);
   private String nsId;
   private String nnId;
   private List<RemoteNameNodeInfo> remoteNNs;
@@ -182,14 +183,14 @@ private int doRun() throws IOException {
     }
 
     if (nsInfo == null) {
-      LOG.fatal(
+      LOG.error(
          "Unable to fetch namespace information from any remote NN. Possible NameNodes: "
              + remoteNNs);
       return ERR_CODE_FAILED_CONNECT;
     }
 
     if (!checkLayoutVersion(nsInfo)) {
-      LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion()
+      LOG.error("Layout version on remote node (" + nsInfo.getLayoutVersion()
          + ") does not match " + "this node's layout version ("
          + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ")");
       return ERR_CODE_INVALID_VERSION;
@@ -382,7 +383,7 @@ private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
          "Please copy these logs into the shared edits storage " +
          "or call saveNamespace on the active node.\n" +
          "Error: " + e.getLocalizedMessage();
-      LOG.fatal(msg, e);
+      LOG.error(msg, e);
       return false;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index b306b8d565..4ba2aa3bf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -36,8 +36,8 @@
 import com.google.common.collect.Iterators;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -73,7 +73,7 @@
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class EditLogTailer {
-  public static final Log LOG = LogFactory.getLog(EditLogTailer.class);
+  public static final Logger LOG = LoggerFactory.getLogger(EditLogTailer.class);
 
   /**
    * StandbyNode will hold namesystem lock to apply at most this many journal
@@ -471,7 +471,7 @@ private void doWork() {
         // interrupter should have already set shouldRun to false
         continue;
       } catch (Throwable t) {
-        LOG.fatal("Unknown error encountered while tailing edits. " +
+        LOG.error("Unknown error encountered while tailing edits. " +
            "Shutting down standby NN.", t);
         terminate(1, t);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index a8ab798299..619d439fba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -56,8 +56,8 @@
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileEncryptionInfo;
@@ -117,7 +117,8 @@
 @Path("")
 @ResourceFilters(ParamFilter.class)
 public class NamenodeWebHdfsMethods {
-  public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(NamenodeWebHdfsMethods.class);
 
   private static final UriFsPathParam ROOT = new UriFsPathParam("");
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index c5571be5b7..aa67e72dd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -40,8 +40,8 @@
 
 import com.google.common.base.Joiner;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
@@ -114,7 +114,7 @@ public class DFSAdmin extends FsShell {
     HdfsConfiguration.init();
   }
 
-  private static final Log LOG = LogFactory.getLog(DFSAdmin.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DFSAdmin.class);
 
   /**
    * An abstract class for the execution of a file system command
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index 4c0ddb2985..71a66d47e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -21,8 +21,8 @@
 import java.util.Arrays;
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAAdmin;
@@ -38,7 +38,7 @@
 */
 public class DFSHAAdmin extends HAAdmin {
 
-  private static final Log LOG = LogFactory.getLog(DFSHAAdmin.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DFSHAAdmin.class);
 
   private String nameserviceId;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index e0a4f70f30..5ae535a2c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -30,8 +30,8 @@
 import java.util.List;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -61,8 +61,8 @@
 @InterfaceAudience.Private
 public class DFSZKFailoverController extends ZKFailoverController {
 
-  private static final Log LOG =
-    LogFactory.getLog(DFSZKFailoverController.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DFSZKFailoverController.class);
   private final AccessControlList adminAcl;
   /* the same as superclass's localTarget, but with the more specfic NN type */
   private final NNHAServiceTarget localNNTarget;
@@ -194,7 +194,7 @@ public static void main(String args[])
     try {
       System.exit(zkfc.run(parser.getRemainingArgs()));
     } catch (Throwable t) {
-      LOG.fatal("DFSZKFailOverController exiting due to earlier exception "
+      LOG.error("DFSZKFailOverController exiting due to earlier exception "
          + t);
       terminate(1, t);
     }
@@ -243,7 +243,7 @@ private void getLocalNNThreadDump() {
         new StringBuilder("-- Local NN thread dump -- \n");
     localNNThreadDumpContent.append(out);
     localNNThreadDumpContent.append("\n -- Local NN thread dump -- ");
-    LOG.info(localNNThreadDumpContent);
+    LOG.info("{}", localNNThreadDumpContent.toString());
     isThreadDumpCaptured = true;
   } catch (IOException e) {
     LOG.warn("Can't get local NN thread dump due to " + e.getMessage());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index c6ea91ca87..7d1e88fde7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -28,8 +28,8 @@
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Options;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,8 +57,8 @@ public class DelegationTokenFetcher {
   private static final String CANCEL = "cancel";
   private static final String HELP = "help";
   private static final String HELP_SHORT = "h";
-  private static final Log LOG = LogFactory
-      .getLog(DelegationTokenFetcher.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(DelegationTokenFetcher.class);
   private static final String PRINT = "print";
   private static final String RENEW = "renew";
   private static final String RENEWER = "renewer";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
index e03e787a98..e71e4d8216 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
@@ -21,8 +21,8 @@
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -43,7 +43,7 @@
 @InterfaceAudience.Private
 public class GetGroups extends GetGroupsBase {
 
-  private static final Log LOG = LogFactory.getLog(GetGroups.class);
+  private static final Logger LOG = LoggerFactory.getLogger(GetGroups.class);
 
   static final String USAGE = "Usage: hdfs groups [username ...]";
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
index d2634b38fe..76f70a1037 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
@@ -19,8 +19,8 @@
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
@@ -40,8 +40,8 @@ class OfflineEditsBinaryLoader implements OfflineEditsLoader {
   private final boolean fixTxIds;
   private final boolean recoveryMode;
   private long nextTxId;
-  public static final Log LOG =
-    LogFactory.getLog(OfflineEditsBinaryLoader.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(OfflineEditsBinaryLoader.class.getName());
 
   /**
    * Constructor
@@ -102,7 +102,7 @@ public void loadEdits() throws IOException {
       }
       visitor.close(null);
     } finally {
-      IOUtils.cleanup(LOG, inputStream);
+      IOUtils.cleanupWithLogger(LOG, inputStream);
    }
  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
index 2c504608eb..ad51b72259 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
@@ -30,8 +30,8 @@
 import io.netty.handler.codec.http.HttpRequest;
 import io.netty.handler.codec.http.HttpResponseStatus;
 import io.netty.handler.codec.http.QueryStringDecoder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.util.StringUtils;
 
@@ -58,7 +58,8 @@
 * Implement the read-only WebHDFS API for fsimage.
*/ class FSImageHandler extends SimpleChannelInboundHandler<HttpRequest> { - public static final Log LOG = LogFactory.getLog(FSImageHandler.class); + public static final Logger LOG = + LoggerFactory.getLogger(FSImageHandler.class); private final FSImageLoader image; private final ChannelGroup activeChannels; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java index 61494f4bcc..380f35aadd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java @@ -34,8 +34,8 @@ import com.google.protobuf.CodedInputStream; import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.AclEntry; @@ -65,7 +65,8 @@ * file status of the namespace of the fsimage. */ class FSImageLoader { - public static final Log LOG = LogFactory.getLog(FSImageHandler.class); + public static final Logger LOG = + LoggerFactory.getLogger(FSImageHandler.class); private final String[] stringTable; // byte representation of inodes, sorted by id diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java index 7d0a4924f1..a97bb72fa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java @@ -54,8 +54,8 @@ import com.google.protobuf.ByteString; import com.google.protobuf.TextFormat; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.AclEntry; @@ -96,8 +96,8 @@ @InterfaceAudience.Private @InterfaceStability.Unstable class OfflineImageReconstructor { - public static final Log LOG = - LogFactory.getLog(OfflineImageReconstructor.class); + public static final Logger LOG = + LoggerFactory.getLogger(OfflineImageReconstructor.class); /** * The output stream. 
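A note on the recurring shape of these hunks: across BootstrapStandby, EditLogTailer, and the offline image viewer classes, the change is mechanical. The commons-logging Log/LogFactory pair becomes SLF4J's Logger/LoggerFactory, and LOG.fatal(...) becomes LOG.error(...) because the SLF4J API defines no FATAL level. A minimal before-and-after sketch, using a hypothetical class that is not part of this patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ExampleDaemon {
      // Before: private static final Log LOG = LogFactory.getLog(ExampleDaemon.class);
      private static final Logger LOG =
          LoggerFactory.getLogger(ExampleDaemon.class);

      void shutDownOnError(Throwable t) {
        // Before: LOG.fatal("Unrecoverable error", t);
        // SLF4J tops out at ERROR, so fatal() maps to error(); the throwable
        // is still passed last so the stack trace is logged.
        LOG.error("Unrecoverable error", t);
      }
    }

Under a log4j backend the converted calls are recorded at ERROR rather than FATAL severity, which is the one observable behavior change in these hunks.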
@@ -1839,7 +1839,7 @@ public static void run(String inputPath, String outputPath) new OfflineImageReconstructor(out, reader); oir.processXml(); } finally { - IOUtils.cleanup(LOG, reader, fis, out, fout); + IOUtils.cleanupWithLogger(LOG, reader, fis, out, fout); } // Write the md5 file MD5FileUtils.saveMD5File(new File(outputPath), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java index 46a9c75c79..dd50ab003c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java @@ -30,8 +30,8 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.cli.PosixParser; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IOUtils; @@ -44,7 +44,8 @@ */ @InterfaceAudience.Private public class OfflineImageViewer { - public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class); + public static final Logger LOG = + LoggerFactory.getLogger(OfflineImageViewer.class); private final static String usage = "Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" @@ -145,7 +146,7 @@ public void go() throws IOException { LOG.error("Failed to load image file."); } } - IOUtils.cleanup(LOG, in, tracker); + IOUtils.cleanupWithLogger(LOG, in, tracker); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java index e4afa99461..34a85a6dce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java @@ -28,8 +28,8 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.cli.PosixParser; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetUtils; @@ -44,7 +44,8 @@ public class OfflineImageViewerPB { private static final String HELP_OPT = "-h"; private static final String HELP_LONGOPT = "--help"; - public static final Log LOG = LogFactory.getLog(OfflineImageViewerPB.class); + public static final Logger LOG = + LoggerFactory.getLogger(OfflineImageViewerPB.class); private final static String usage = "Usage: bin/hdfs oiv [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" + "Offline Image Viewer\n" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java index a50e828e4a..767ecd809e 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java @@ -32,8 +32,8 @@ import io.netty.handler.codec.http.HttpResponseEncoder; import io.netty.handler.codec.string.StringEncoder; import io.netty.util.concurrent.GlobalEventExecutor; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.security.UserGroupInformation; @@ -47,7 +47,8 @@ * namespace. */ public class WebImageViewer implements Closeable { - public static final Log LOG = LogFactory.getLog(WebImageViewer.class); + public static final Logger LOG = + LoggerFactory.getLogger(WebImageViewer.class); private Channel channel; private InetSocketAddress address; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java index a89b8cb07b..33f976a7f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java @@ -23,8 +23,8 @@ import java.io.FilterOutputStream; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.io.nativeio.NativeIOException; @@ -46,7 +46,7 @@ public class AtomicFileOutputStream extends FilterOutputStream { private static final String TMP_EXTENSION = ".tmp"; - private final static Log LOG = LogFactory.getLog( + private final static Logger LOG = LoggerFactory.getLogger( AtomicFileOutputStream.class); private final File origFile; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java index c7bf9a67f9..a190c9ef40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java @@ -25,8 +25,8 @@ import java.util.List; import java.util.NoSuchElementException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A low memory linked hash set implementation, which uses an array for storing @@ -66,7 +66,8 @@ public String toString() { protected static final int MINIMUM_CAPACITY = 16; static final int MAXIMUM_CAPACITY = 1 << 30; - private static final Log LOG = LogFactory.getLog(LightWeightHashSet.class); + private static final Logger LOG = + LoggerFactory.getLogger(LightWeightHashSet.class); /** * An internal array of entries, which are the rows of the hash table. 
The diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java index d87ffbf315..95dcf7181c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java @@ -29,8 +29,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.StringUtils; @@ -42,7 +42,7 @@ * that the Unix "md5sum" utility writes. */ public abstract class MD5FileUtils { - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( MD5FileUtils.class); public static final String MD5_SUFFIX = ".md5"; @@ -84,7 +84,7 @@ private static Matcher readStoredMd5(File md5File) throws IOException { } catch (IOException ioe) { throw new IOException("Error reading md5 file at " + md5File, ioe); } finally { - IOUtils.cleanup(LOG, reader); + IOUtils.cleanupWithLogger(LOG, reader); } Matcher matcher = LINE_REGEX.matcher(md5Line); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java index 67bb2bb9a4..777dd87cfe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java @@ -23,8 +23,8 @@ import java.io.IOException; import java.io.InputStreamReader; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IOUtils; @@ -37,7 +37,7 @@ */ @InterfaceAudience.Private public class PersistentLongFile { - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( PersistentLongFile.class); private final File file; @@ -99,7 +99,7 @@ public static long readFile(File file, long defaultVal) throws IOException { br.close(); br = null; } finally { - IOUtils.cleanup(LOG, br); + IOUtils.cleanupWithLogger(LOG, br); } } return val; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java index 6f1257c5e7..2e0a17efa4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java @@ -27,8 +27,8 @@ import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.web.JsonUtil; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.StandbyException; @@ -42,7 +42,8 @@ /** Handle exceptions. 
*/ @Provider public class ExceptionHandler implements ExceptionMapper<Exception> { - public static final Log LOG = LogFactory.getLog(ExceptionHandler.class); + public static final Logger LOG = + LoggerFactory.getLogger(ExceptionHandler.class); private static Exception toCause(Exception e) { final Throwable t = e.getCause(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java index 28321cb613..2f8dfa5b36 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java @@ -20,8 +20,8 @@ import static org.junit.Assert.assertTrue; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.cli.util.CLICommand; import org.apache.hadoop.cli.util.CLICommandCacheAdmin; import org.apache.hadoop.cli.util.CLICommandTypes; @@ -44,7 +44,8 @@ public class TestCacheAdminCLI extends CLITestHelper { - public static final Log LOG = LogFactory.getLog(TestCacheAdminCLI.class); + public static final Logger LOG = + LoggerFactory.getLogger(TestCacheAdminCLI.class); protected MiniDFSCluster dfsCluster = null; protected FileSystem fs = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java index 417d31ba52..90b4f11a66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java @@ -37,8 +37,8 @@ import org.apache.commons.collections.map.LinkedMap; import org.apache.commons.lang3.SystemUtils; import org.apache.commons.lang3.mutable.MutableBoolean; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.ClientContext; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -78,8 +78,8 @@ * This class tests if EnhancedByteBufferAccess works correctly. 
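The IOUtils.cleanup(Log, Closeable...) call sites in the image viewer and util hunks above move to cleanupWithLogger for the same reason: the old overload takes the deprecated commons-logging Log type. Behavior is otherwise unchanged: each argument is closed if non-null, and any IOException from close() is logged to the supplied SLF4J logger rather than thrown. A sketch of the idiom, with hypothetical file names:

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class QuietCloseExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(QuietCloseExample.class);

      void copy() throws Exception {
        FileInputStream fis = null;
        FileOutputStream fos = null;
        try {
          fis = new FileInputStream("in.dat");
          fos = new FileOutputStream("out.dat");
          IOUtils.copyBytes(fis, fos, 4096);
        } finally {
          // Null-safe: closes whichever streams were opened, logging any
          // close failure to the SLF4J logger instead of rethrowing it.
          IOUtils.cleanupWithLogger(LOG, fos, fis);
        }
      }
    }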
*/ public class TestEnhancedByteBufferAccess { - private static final Log LOG = - LogFactory.getLog(TestEnhancedByteBufferAccess.class.getName()); + private static final Logger LOG = + LoggerFactory.getLogger(TestEnhancedByteBufferAccess.class.getName()); static private TemporarySocketDirectory sockDir; @@ -575,7 +575,7 @@ public void testIndirectFallbackReads() throws Exception { fis = new FileInputStream(testPath); testFallbackImpl(fis, original); } finally { - IOUtils.cleanup(LOG, fos, fis); + IOUtils.cleanupWithLogger(LOG, fos, fis); new File(testPath).delete(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java index 34164f42d8..4ecca5e55a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java @@ -20,12 +20,13 @@ import org.junit.Test; import static org.junit.Assert.*; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.util.NativeCodeLoader; public class TestHdfsNativeCodeLoader { - static final Log LOG = LogFactory.getLog(TestHdfsNativeCodeLoader.class); + static final Logger LOG = + LoggerFactory.getLogger(TestHdfsNativeCodeLoader.class); private static boolean requireTestJni() { String rtj = System.getProperty("require.test.libhadoop"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java index 655d4534cd..ef4c04d0f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.fs; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -33,8 +33,8 @@ import org.mockito.Mockito; public class TestUnbuffer { - private static final Log LOG = - LogFactory.getLog(TestUnbuffer.class.getName()); + private static final Logger LOG = + LoggerFactory.getLogger(TestUnbuffer.class.getName()); @Rule public ExpectedException exception = ExpectedException.none(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java index 5d96b7bb76..1bc6b2c351 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java @@ -29,8 +29,8 @@ import java.util.Random; import com.google.common.collect.Lists; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; @@ -53,7 +53,8 @@ * This class provide utilities for testing of the admin 
operations of nodes. */ public class AdminStatesBaseTest { - public static final Log LOG = LogFactory.getLog(AdminStatesBaseTest.class); + public static final Logger LOG = + LoggerFactory.getLogger(AdminStatesBaseTest.class); static final long seed = 0xDEADBEEFL; static final int blockSize = 8192; static final int fileSize = 16384; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java index 268bdf9df7..f7d90d2b19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java @@ -26,8 +26,8 @@ import java.util.Arrays; import java.util.Random; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; @@ -44,7 +44,7 @@ public class AppendTestUtil { */ static final Long RANDOM_NUMBER_GENERATOR_SEED = null; - static final Log LOG = LogFactory.getLog(AppendTestUtil.class); + static final Logger LOG = LoggerFactory.getLogger(AppendTestUtil.class); private static final Random SEED = new Random(); static { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java index 7f1792fdd0..515a0a0719 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java @@ -24,9 +24,8 @@ import java.io.InputStream; import java.io.OutputStream; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.ChecksumFileSystem; @@ -37,7 +36,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.log4j.Level; +import org.slf4j.event.Level; /** * This class benchmarks the performance of the local file system, raw local @@ -172,10 +171,8 @@ private static void printUsage() { @Override public int run(String[] args) throws IOException { // silence the minidfs cluster - Log hadoopLog = LogFactory.getLog("org"); - if (hadoopLog instanceof Log4JLogger) { - GenericTestUtils.setLogLevel(hadoopLog, Level.WARN); - } + Logger hadoopLog = LoggerFactory.getLogger("org"); + GenericTestUtils.setLogLevel(hadoopLog, Level.WARN); int reps = 1; if (args.length == 1) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index fb4616a98a..3e22b565dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -82,8 +82,8 @@ import com.google.common.collect.Maps; import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import 
org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.BlockLocation; @@ -205,7 +205,7 @@ /** Utilities for HDFS tests */ public class DFSTestUtil { - private static final Log LOG = LogFactory.getLog(DFSTestUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(DFSTestUtil.class); private static final Random gen = new Random(); private static final String[] dirNames = { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java index 9e7b598b9b..a8f7378ca0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java @@ -19,8 +19,8 @@ import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; @@ -41,7 +41,8 @@ * */ public class FileAppendTest4 { - public static final Log LOG = LogFactory.getLog(FileAppendTest4.class); + public static final Logger LOG = + LoggerFactory.getLogger(FileAppendTest4.class); private static final int BYTES_PER_CHECKSUM = 4; private static final int PACKET_SIZE = BYTES_PER_CHECKSUM; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index a2e59515d8..11265b81eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -67,8 +67,8 @@ import com.google.common.base.Supplier; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Multimap; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -141,7 +141,8 @@ public class MiniDFSCluster implements AutoCloseable { private static final String NAMESERVICE_ID_PREFIX = "nameserviceId"; - private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class); + private static final Logger LOG = + LoggerFactory.getLogger(MiniDFSCluster.class); /** System property to set the data dir: {@value} */ public static final String PROP_TEST_BUILD_DATA = GenericTestUtils.SYSPROP_TEST_DATA_DIR; @@ -2004,7 +2005,7 @@ public void shutdown(boolean deleteDfsDir, boolean closeFileSystem) { LOG.info("Shutting down the Mini HDFS Cluster"); if (checkExitOnShutdown) { if (ExitUtil.terminateCalled()) { - LOG.fatal("Test resulted in an unexpected exit", + LOG.error("Test resulted in an unexpected exit", ExitUtil.getFirstExitException()); ExitUtil.resetFirstExitException(); throw new AssertionError("Test resulted in an unexpected exit"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java index 5c011e31ba..8b4e9e5ef7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java @@ -21,8 +21,8 @@ import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -39,7 +39,8 @@ public class MiniDFSClusterWithNodeGroup extends MiniDFSCluster { private static String[] NODE_GROUPS = null; - private static final Log LOG = LogFactory.getLog(MiniDFSClusterWithNodeGroup.class); + private static final Logger LOG = + LoggerFactory.getLogger(MiniDFSClusterWithNodeGroup.class); public MiniDFSClusterWithNodeGroup(Builder builder) throws IOException { super(builder); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java index 7057010663..e0e2c8b402 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.Path; @@ -50,8 +48,7 @@ abstract public class ReadStripedFileWithDecodingHelper { LoggerFactory.getLogger(ReadStripedFileWithDecodingHelper.class); static { - ((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class)) - .getLogger().setLevel(org.apache.log4j.Level.ALL); + GenericTestUtils.setLogLevel(BlockPlacementPolicy.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockManager.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockManager.blockLog, Level.DEBUG); GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.DEBUG); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java index 301f6a7d73..e7d8b38aed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java @@ -21,8 +21,8 @@ import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -39,7 +39,8 @@ * Test abandoning blocks, which clients do on pipeline creation failure. 
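The BenchmarkThroughput and ReadStripedFileWithDecodingHelper hunks show the companion change for log-level tuning in tests: instead of casting the commons-logging wrapper to Log4JLogger and calling the log4j API directly, they go through GenericTestUtils.setLogLevel with the backend-neutral org.slf4j.event.Level, which also removes the instanceof Log4JLogger guard that BenchmarkThroughput needed. Roughly:

    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.event.Level;

    class LogLevelExample {
      static void quietHadoopInternals() {
        // Before: ((Log4JLogger) LogFactory.getLog("org")).getLogger()
        //     .setLevel(org.apache.log4j.Level.WARN);
        Logger hadoopLog = LoggerFactory.getLogger("org");
        GenericTestUtils.setLogLevel(hadoopLog, Level.WARN);
      }
    }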
*/ public class TestAbandonBlock { - public static final Log LOG = LogFactory.getLog(TestAbandonBlock.class); + public static final Logger LOG = + LoggerFactory.getLogger(TestAbandonBlock.class); private static final Configuration CONF = new HdfsConfiguration(); static final String FILE_NAME_PREFIX diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java index 3ff705588a..105836e1b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java @@ -28,8 +28,8 @@ import java.net.URI; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedAction; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; @@ -64,8 +64,8 @@ * values before interpreting them.) */ public class TestAclsEndToEnd { - private static final Log LOG = - LogFactory.getLog(TestAclsEndToEnd.class.getName()); + private static final Logger LOG = + LoggerFactory.getLogger(TestAclsEndToEnd.class.getName()); private static final String TEXT = "The blue zone is for loading and unloading only. " + "Please park in the red zone."; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java index e56f8c7401..def2ab788f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java @@ -34,8 +34,8 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -62,7 +62,8 @@ public class TestAppendSnapshotTruncate { static { GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL); } - private static final Log LOG = LogFactory.getLog(TestAppendSnapshotTruncate.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestAppendSnapshotTruncate.class); private static final int BLOCK_SIZE = 1024; private static final int DATANODE_NUM = 4; private static final short REPLICATION = 3; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java index 6bbe3a10bc..1310f45fba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java @@ -27,8 +27,8 @@ import java.util.concurrent.TimeoutException; import com.google.common.base.Supplier; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.tools.DFSAdmin; @@ -43,7 +43,8 @@ public class TestBalancerBandwidth { final static private Configuration conf = new Configuration(); final static private int NUM_OF_DATANODES = 2; final static private int DEFAULT_BANDWIDTH = 1024*1024; - public static final Log LOG = LogFactory.getLog(TestBalancerBandwidth.class); + public static final Logger LOG = + LoggerFactory.getLogger(TestBalancerBandwidth.class); private static final Charset UTF8 = Charset.forName("UTF-8"); private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final PrintStream outStream = new PrintStream(outContent); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java index 7287b5c8be..e664f99163 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java @@ -22,8 +22,8 @@ import java.io.File; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; @@ -36,7 +36,8 @@ import org.junit.Test; public class TestBlockMissingException { - final static Log LOG = LogFactory.getLog("org.apache.hadoop.hdfs.TestBlockMissing"); + final static Logger LOG = + LoggerFactory.getLogger("org.apache.hadoop.hdfs.TestBlockMissing"); final static int NUM_DATANODES = 3; Configuration conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java index ce2e79b73e..935a639920 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java @@ -24,8 +24,8 @@ import java.util.Random; import java.util.concurrent.TimeoutException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -55,8 +55,8 @@ * replica. 
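Logger declarations in this patch variously pass the class object, Foo.class.getName(), or a string literal; TestBlockMissingException above keeps its historical "org.apache.hadoop.hdfs.TestBlockMissing" category. The forms are interchangeable, since SLF4J's LoggerFactory.getLogger(Class) simply delegates to the class's fully qualified name. For a hypothetical org.example.Demo:

    package org.example;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class Demo {
      // All three produce a logger for the category "org.example.Demo".
      static final Logger A = LoggerFactory.getLogger(Demo.class);
      static final Logger B = LoggerFactory.getLogger(Demo.class.getName());
      static final Logger C = LoggerFactory.getLogger("org.example.Demo");
    }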
*/ public class TestClientReportBadBlock { - private static final Log LOG = LogFactory - .getLog(TestClientReportBadBlock.class); + private static final Logger LOG = LoggerFactory + .getLogger(TestClientReportBadBlock.class); static final long BLOCK_SIZE = 64 * 1024; private static int buffersize; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java index 3e0ad6daf2..85a4d19539 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java @@ -22,8 +22,8 @@ import java.io.IOException; import java.net.InetSocketAddress; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; @@ -35,7 +35,7 @@ * mini-cluster. */ public class TestConnCache { - static final Log LOG = LogFactory.getLog(TestConnCache.class); + static final Logger LOG = LoggerFactory.getLogger(TestConnCache.class); static final int BLOCK_SIZE = 4096; static final int FILE_SIZE = 3 * BLOCK_SIZE; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java index f9d04601e5..64103b4529 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java @@ -36,8 +36,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileContext; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 880e3dfc42..14d0ee0060 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -50,8 +50,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.fs.ChecksumException; @@ -113,8 +113,8 @@ public class TestDFSClientRetries { private static final String ADDRESS = "0.0.0.0"; final static private int PING_INTERVAL = 1000; final static private int MIN_SLEEP_TIME = 1000; - public static final Log LOG = - LogFactory.getLog(TestDFSClientRetries.class.getName()); + public static final Logger LOG = + LoggerFactory.getLogger(TestDFSClientRetries.class.getName()); static private Configuration conf = null; private static class TestServer extends Server { @@ -523,7 +523,7 @@ public Boolean answer(InvocationOnMock invocation) throws Throwable { 
stm.close(); stm = null; } finally { - IOUtils.cleanup(LOG, stm); + IOUtils.cleanupWithLogger(LOG, stm); } // Make sure the mock was actually properly injected. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java index d0df9fecf5..bf9e4a0811 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java @@ -25,8 +25,8 @@ import java.util.Collections; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; @@ -43,7 +43,7 @@ */ public class TestDFSFinalize { - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( "org.apache.hadoop.hdfs.TestDFSFinalize"); private Configuration conf; private int testCounter = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java index 1b462a9a8e..b0b85e75af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FileSystem; @@ -48,7 +48,7 @@ public class TestDFSInotifyEventInputStream { private static final int BLOCK_SIZE = 1024; - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( TestDFSInotifyEventInputStream.class); public static EventBatch waitForNextEvents(DFSInotifyEventInputStream eis) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java index 2705e67e4f..15ce06b69f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java @@ -30,8 +30,8 @@ import java.util.Map; import java.util.Random; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; @@ -51,7 +51,8 @@ /** Unit tests for permission */ public class TestDFSPermission { - public static final Log LOG = LogFactory.getLog(TestDFSPermission.class); + public static final Logger LOG = + LoggerFactory.getLogger(TestDFSPermission.class); final private static Configuration conf = new HdfsConfiguration(); final private static String GROUP1_NAME = "group1"; diff 
--git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 8bc8b0df8e..b2da68ad57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -26,8 +26,8 @@ import java.util.Collections; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @@ -50,7 +50,7 @@ */ public class TestDFSRollback { - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( "org.apache.hadoop.hdfs.TestDFSRollback"); private Configuration conf; private int testCounter = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 1d2042ec3e..5266fe409b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -38,8 +38,8 @@ import com.google.common.collect.Lists; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.log4j.Level; import org.junit.Test; import org.apache.hadoop.conf.Configuration; @@ -87,7 +87,7 @@ * This class tests commands from DFSShell. 
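Several of the converted cleanup sites (PersistentLongFile.readFile earlier, the TestDFSClientRetries hunk above) pair cleanupWithLogger with a close-then-null idiom: the success path closes the stream explicitly, so a close failure still propagates, then nulls the reference so the finally block only acts when an exception skipped the normal close. A sketch under those assumptions:

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;
    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class ReadOnceExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(ReadOnceExample.class);

      static String readFirstLine(String path) throws IOException {
        BufferedReader br = new BufferedReader(new FileReader(path));
        try {
          String line = br.readLine();
          br.close();  // close on the success path; an IOException here still propagates
          br = null;   // mark closed so the finally clause becomes a no-op
          return line;
        } finally {
          // Only acts on a non-null reader, i.e. when an exception bypassed
          // the explicit close; a close error here is logged, not thrown.
          IOUtils.cleanupWithLogger(LOG, br);
        }
      }
    }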
*/ public class TestDFSShell { - private static final Log LOG = LogFactory.getLog(TestDFSShell.class); + private static final Logger LOG = LoggerFactory.getLogger(TestDFSShell.class); private static final AtomicInteger counter = new AtomicInteger(); private final int SUCCESS = 0; private final int ERROR = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java index 0c09edafd2..7e7f4aa83e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStartupVersions.java @@ -24,8 +24,8 @@ import java.io.File; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; @@ -41,7 +41,7 @@ */ public class TestDFSStartupVersions { - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( "org.apache.hadoop.hdfs.TestDFSStartupVersions"); private MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java index cd51631ba8..60839dc17b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java @@ -27,8 +27,8 @@ import java.io.File; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; @@ -44,7 +44,7 @@ */ public class TestDFSStorageStateRecovery { - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( "org.apache.hadoop.hdfs.TestDFSStorageStateRecovery"); private Configuration conf = null; private int testCounter = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java index 48ecf9ae5e..57341714f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.Block; @@ -62,8 +62,8 @@ public class TestDFSStripedInputStream { - public static final Log LOG = - LogFactory.getLog(TestDFSStripedInputStream.class); + public static final Logger LOG = + 
+      LoggerFactory.getLogger(TestDFSStripedInputStream.class);
   private MiniDFSCluster cluster;
   private Configuration conf = new Configuration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStreamWithRandomECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStreamWithRandomECPolicy.java
index 568b018457..4843095252 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStreamWithRandomECPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStreamWithRandomECPolicy.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 /**
@@ -28,14 +28,14 @@ public class TestDFSStripedInputStreamWithRandomECPolicy extends
     TestDFSStripedInputStream {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       TestDFSStripedInputStreamWithRandomECPolicy.class.getName());
   private ErasureCodingPolicy ecPolicy;
   public TestDFSStripedInputStreamWithRandomECPolicy() {
     ecPolicy = StripedFileTestUtil.getRandomNonDefaultECPolicy();
-    LOG.info(ecPolicy);
+    LOG.info("{}", ecPolicy.toString());
   }
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 4b9e876388..865a736b0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -25,8 +25,8 @@
 import java.io.InputStream;
 import java.util.ArrayList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -46,7 +46,7 @@ import org.junit.rules.Timeout;
 public class TestDFSStripedOutputStream {
-  public static final Log LOG = LogFactory.getLog(
+  public static final Logger LOG = LoggerFactory.getLogger(
       TestDFSStripedOutputStream.class);
   static {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureWithRandomECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureWithRandomECPolicy.java
index f6711498cc..cfa7ad5d0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureWithRandomECPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureWithRandomECPolicy.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 /**
@@ -30,12 +30,12 @@ public class TestDFSStripedOutputStreamWithFailureWithRandomECPolicy extends
   private final ECSchema schema;
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       TestDFSStripedOutputStreamWithRandomECPolicy.class.getName());
   public TestDFSStripedOutputStreamWithFailureWithRandomECPolicy() {
     schema = StripedFileTestUtil.getRandomNonDefaultECPolicy().getSchema();
-    LOG.info(schema);
+    LOG.info("{}", schema.toString());
   }
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithRandomECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithRandomECPolicy.java
index 9a783cdabd..eddfda5fca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithRandomECPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithRandomECPolicy.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 /**
@@ -28,14 +28,14 @@ public class TestDFSStripedOutputStreamWithRandomECPolicy extends
     TestDFSStripedOutputStream {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       TestDFSStripedOutputStreamWithRandomECPolicy.class.getName());
   private ErasureCodingPolicy ecPolicy;
   public TestDFSStripedOutputStreamWithRandomECPolicy() {
     ecPolicy = StripedFileTestUtil.getRandomNonDefaultECPolicy();
-    LOG.info(ecPolicy);
+    LOG.info("{}", ecPolicy.toString());
   }
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index 0d9f50258f..1c33cc4b5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -31,8 +31,8 @@
 import java.io.IOException;
 import java.util.regex.Pattern;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -62,7 +62,8 @@ public class TestDFSUpgrade {
   // TODO: Avoid hard-coding expected_txid. The test should be more robust.
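Editorial note: the swap in these hunks is mechanical, but the message calls change shape because org.slf4j.Logger has no Object-message overload the way commons-logging's Log does. A minimal before/after sketch (the class name MyTest is hypothetical, not from the patch):

```java
// Before (commons-logging): any Object can be the message.
//   private static final Log LOG = LogFactory.getLog(MyTest.class);
//   LOG.info(ecPolicy);                 // Object overload
// After (SLF4J): messages are Strings, so objects go through a placeholder.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class MyTest { // hypothetical stand-in for the test classes in this patch
  private static final Logger LOG = LoggerFactory.getLogger(MyTest.class);

  void report(Object ecPolicy) {
    // "{}" formats the argument only if INFO is enabled; passing the object
    // directly (without the explicit toString() the patch uses) would also
    // render a null argument as "null" instead of throwing NPE.
    LOG.info("{}", ecPolicy);
  }
}
```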
   private static final int EXPECTED_TXID = 61;
-  private static final Log LOG = LogFactory.getLog(TestDFSUpgrade.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDFSUpgrade.class.getName());
   private Configuration conf;
   private int testCounter = 0;
   private MiniDFSCluster cluster = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index e42e08cf77..5469ebbb75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -29,8 +29,7 @@
 import java.util.TreeMap;
 import java.util.zip.CRC32;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -65,8 +64,8 @@
  */
 public class TestDFSUpgradeFromImage {
-  private static final Log LOG = LogFactory
-      .getLog(TestDFSUpgradeFromImage.class);
+  private static final org.slf4j.Logger LOG = LoggerFactory
+      .getLogger(TestDFSUpgradeFromImage.class);
   private static final File TEST_ROOT_DIR =
       new File(MiniDFSCluster.getBaseDirectory());
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
index 3351b68373..c57ef941f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataStream.java
@@ -20,7 +20,7 @@
 import java.io.IOException;
 import java.util.Random;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -48,8 +48,8 @@ public static void setup() throws IOException {
   @Test(timeout = 60000)
   public void testDfsClient() throws IOException, InterruptedException {
-    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LogFactory
-        .getLog(DataStreamer.class));
+    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LoggerFactory
+        .getLogger(DataStreamer.class));
     byte[] toWrite = new byte[PACKET_SIZE];
     new Random(1).nextBytes(toWrite);
     final Path path = new Path("/file1");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index 7a2ac1ba3f..b9da5f446f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -33,8 +33,8 @@
 import java.nio.ByteBuffer;
 import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -73,7 +73,7 @@
  */
 public class TestDataTransferProtocol {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       "org.apache.hadoop.hdfs.TestDataTransferProtocol");
   private static final DataChecksum DEFAULT_CHECKSUM =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
index 6421e8b42a..37042dbb17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -56,7 +56,8 @@
  */
 public class TestDatanodeRegistration {
-  public static final Log LOG = LogFactory.getLog(TestDatanodeRegistration.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestDatanodeRegistration.class);
   private static class MonitorDNS extends SecurityManager {
     int lookups = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index fea377f2a4..b4d6fc9950 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -26,8 +26,8 @@
 import java.util.Comparator;
 import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -49,7 +49,7 @@
  * This test ensures the all types of data node report work correctly.
  */
 public class TestDatanodeReport {
-  static final Log LOG = LogFactory.getLog(TestDatanodeReport.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestDatanodeReport.class);
   final static private Configuration conf = new HdfsConfiguration();
   final static private int NUM_OF_DATANODES = 4;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
index c9d831a29a..51a28d294f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java
@@ -19,8 +19,8 @@
 import static org.junit.Assert.assertEquals;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -32,7 +32,7 @@
  * mini-cluster.
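Editorial note: the TestDataStream hunk above depends on LogCapturer having an SLF4J-accepting captureLogs overload. A sketch of the usual capture/assert flow, assuming GenericTestUtils.LogCapturer exposes captureLogs, getOutput, and stopCapturing as in Hadoop's test utilities (the logger is looked up by name here to avoid visibility questions; by-name and by-class lookups are equivalent):

```java
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.slf4j.LoggerFactory;

class CaptureSketch { // illustrative only
  void runWithCapture(Runnable testBody) {
    // Capture everything DataStreamer logs while the test body runs.
    LogCapturer logs = LogCapturer.captureLogs(
        LoggerFactory.getLogger("org.apache.hadoop.hdfs.DataStreamer"));
    try {
      testBody.run();
    } finally {
      logs.stopCapturing();
    }
    // Assertions then inspect the captured text via logs.getOutput().
  }
}
```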
  */
 public class TestDisableConnCache {
-  static final Log LOG = LogFactory.getLog(TestDisableConnCache.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestDisableConnCache.class);
   static final int BLOCK_SIZE = 4096;
   static final int FILE_SIZE = 3 * BLOCK_SIZE;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index 27a5b77bcf..59230edeb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -33,8 +33,8 @@
 import java.util.concurrent.TimeoutException;
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -84,7 +84,8 @@ public static Collection data() {
     return params;
   }
-  private static final Log LOG = LogFactory.getLog(TestEncryptedTransfer.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestEncryptedTransfer.class);
   private static final String PLAIN_TEXT = "this is very secret plain text";
   private static final Path TEST_PATH = new Path("/non-encrypted-file");
@@ -167,9 +168,9 @@ private void testEncryptedRead(String algorithm, String cipherSuite,
     FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
     LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
-        LogFactory.getLog(SaslDataTransferServer.class));
+        LoggerFactory.getLogger(SaslDataTransferServer.class));
     LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
-        LogFactory.getLog(DataTransferSaslUtil.class));
+        LoggerFactory.getLogger(DataTransferSaslUtil.class));
     try {
       assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
       assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
@@ -238,7 +239,7 @@ public void testClientThatDoesNotSupportEncryption() throws IOException {
     DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
     LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
-        LogFactory.getLog(DataNode.class));
+        LoggerFactory.getLogger(DataNode.class));
     try {
       assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
       if (resolverClazz != null &&
@@ -458,9 +459,9 @@ private void testEncryptedWrite(int numDns) throws IOException {
     fs = getFileSystem(conf);
     LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
-        LogFactory.getLog(SaslDataTransferServer.class));
+        LoggerFactory.getLogger(SaslDataTransferServer.class));
     LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
-        LogFactory.getLog(DataTransferSaslUtil.class));
+        LoggerFactory.getLogger(DataTransferSaslUtil.class));
     try {
       writeTestDataToFile(fs);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java
index 8acf4bf286..85e25dd687 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestExternalBlockReader.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs;
 import com.google.common.primitives.Ints;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
@@ -39,8 +39,8 @@ import java.util.UUID;
 public class TestExternalBlockReader {
-  private static final Log LOG =
-      LogFactory.getLog(TestExternalBlockReader.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestExternalBlockReader.class);
   private static long SEED = 1234;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
index 12d92538a9..c26c648fd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -50,7 +50,8 @@
  * a free port and start on it.
  */
 public class TestHDFSServerPorts {
-  public static final Log LOG = LogFactory.getLog(TestHDFSServerPorts.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestHDFSServerPorts.class);
   // reset default 0.0.0.0 addresses in order to avoid IPv6 problem
   static final String THIS_HOST = getFullHostName() + ":0";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
index b81cdb1320..fb28726b63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
@@ -24,8 +24,8 @@
 import java.io.IOException;
 import java.util.UUID;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
@@ -46,7 +46,7 @@
  */
 public class TestHDFSTrash {
-  public static final Log LOG = LogFactory.getLog(TestHDFSTrash.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestHDFSTrash.class);
   private static MiniDFSCluster cluster = null;
   private static FileSystem fs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
index e49cf5838c..a8affa2627 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
@@ -26,8 +26,8 @@
 import java.util.Map;
 import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -50,7 +50,7 @@ public class TestInjectionForSimulatedStorage {
   private final int numBlocks = 4;
   private final int filesize = blockSize*numBlocks;
   private final int numDataNodes = 4;
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       "org.apache.hadoop.hdfs.TestInjectionForSimulatedStorage");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
index ec7a07753c..137571cac4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
@@ -22,8 +22,8 @@
 import java.io.IOException;
 import java.util.Arrays;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -45,7 +45,8 @@ public class TestLargeBlock {
     GenericTestUtils.setLogLevel(TestLargeBlock.LOG, Level.ALL);
   }
   */
-  private static final Log LOG = LogFactory.getLog(TestLargeBlock.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestLargeBlock.class);
   // should we verify the data read back from the file? (slow)
   static final boolean verifyData = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index fa74fadd89..381fe7148b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -31,8 +31,8 @@
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.CreateFlag;
@@ -69,7 +69,7 @@ static int leaseCount(MiniDFSCluster cluster) {
   static final String dirString = "/test/lease";
   final Path dir = new Path(dirString);
-  static final Log LOG = LogFactory.getLog(TestLease.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestLease.class);
   final Configuration conf = new HdfsConfiguration();
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 940e13e651..d65fed2b93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -30,8 +30,8 @@
 import java.util.Map;
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -62,7 +62,8 @@
 public class TestLeaseRecovery2 {
-  public static final Log LOG = LogFactory.getLog(TestLeaseRecovery2.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestLeaseRecovery2.class);
   {
     GenericTestUtils.setLogLevel(DataNode.LOG, Level.TRACE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
index ca2fe92ea2..f0dd5a0bc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -44,8 +44,8 @@
  */
 public class TestMissingBlocksAlert {
-  private static final Log LOG =
-      LogFactory.getLog(TestMissingBlocksAlert.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestMissingBlocksAlert.class);
   @Test
   public void testMissingBlocksAlert()
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
index d54164fc3c..a3f4dbc7b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
@@ -25,8 +25,8 @@
 import java.nio.ByteBuffer;
 import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -46,7 +46,7 @@
 @Ignore
 public class TestParallelReadUtil {
-  static final Log LOG = LogFactory.getLog(TestParallelReadUtil.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestParallelReadUtil.class);
   static BlockReaderTestUtil util = null;
   static DFSClient dfsClient = null;
   static final int FILE_SIZE_K = 256;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index 5804d35fef..7125b0e7c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -24,8 +24,8 @@
 import java.util.List;
 import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -41,7 +41,7 @@ import org.junit.Test;
 public class TestPipelines {
-  public static final Log LOG = LogFactory.getLog(TestPipelines.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestPipelines.class);
   private static final short REPL_FACTOR = 3;
   private static final int RAND_LIMIT = 2000;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
index 756adbe0ef..4c864b384a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.Path;
@@ -38,8 +38,8 @@
  * in the block locations returned by the NameNode).
  */
 public class TestReadStripedFileWithMissingBlocks {
-  public static final Log LOG = LogFactory
-      .getLog(TestReadStripedFileWithMissingBlocks.class);
+  public static final Logger LOG = LoggerFactory
+      .getLogger(TestReadStripedFileWithMissingBlocks.class);
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private DFSClient dfsClient;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 2adddb6156..2abfff7876 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -34,8 +34,8 @@
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CyclicBarrier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -66,7 +66,8 @@ import org.junit.Test;
 public class TestReconstructStripedFile {
-  public static final Log LOG = LogFactory.getLog(TestReconstructStripedFile.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestReconstructStripedFile.class);
   private ErasureCodingPolicy ecPolicy;
   private int dataBlkNum;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
index 9591cb4347..432a29794c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeFailureReplication.java
@@ -21,8 +21,8 @@
 import java.util.Arrays;
 import java.util.concurrent.TimeoutException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
@@ -47,8 +47,8 @@
  * if a replacement could not be found.
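Editorial note: several hunks above (TestDataTransferProtocol, TestInjectionForSimulatedStorage) look loggers up by a String name rather than a class, and the migration is still one-to-one because SLF4J's factory has the same String overload. A small sketch (the class LoggerNaming is illustrative, not from the patch):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggerNaming { // illustrative only
  // By class: the logger name becomes the fully qualified class name.
  static final Logger BY_CLASS = LoggerFactory.getLogger(LoggerNaming.class);

  // By explicit name: same lookup mechanism, which is why historical names
  // such as the one below survive the migration unchanged.
  static final Logger BY_NAME = LoggerFactory.getLogger(
      "org.apache.hadoop.hdfs.TestInjectionForSimulatedStorage");
}
```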
  */
 public class TestReplaceDatanodeFailureReplication {
-  static final Log LOG = LogFactory
-      .getLog(TestReplaceDatanodeFailureReplication.class);
+  static final Logger LOG = LoggerFactory
+      .getLogger(TestReplaceDatanodeFailureReplication.class);
   static final String DIR = "/"
       + TestReplaceDatanodeFailureReplication.class.getSimpleName() + "/";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
index aa5c70faa5..2e455f7d34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
@@ -23,8 +23,8 @@
 import java.util.Arrays;
 import java.util.concurrent.TimeoutException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -45,7 +45,8 @@
  * This class tests that data nodes are correctly replaced on failure.
  */
 public class TestReplaceDatanodeOnFailure {
-  static final Log LOG = LogFactory.getLog(TestReplaceDatanodeOnFailure.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(TestReplaceDatanodeOnFailure.class);
   static final String DIR = "/" + TestReplaceDatanodeOnFailure.class.getSimpleName() + "/";
   static final short REPLICATION = 3;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index b4f0c2b064..7c5a0ced3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -33,8 +33,6 @@
 import java.util.List;
 import java.util.concurrent.TimeoutException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 0545b040f3..a6b26065d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -32,8 +32,8 @@
 import javax.management.ReflectionException;
 import javax.management.openmbean.CompositeDataSupport;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -66,7 +66,8 @@
  * This class tests rolling upgrade.
  */
 public class TestRollingUpgrade {
-  private static final Log LOG = LogFactory.getLog(TestRollingUpgrade.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRollingUpgrade.class);
   public static void runCmd(DFSAdmin dfsadmin, boolean success, String... args) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index f25d28f22c..0fde81ecdf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -29,8 +29,8 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -66,7 +66,7 @@
  * Tests to verify safe mode correctness.
  */
 public class TestSafeMode {
-  public static final Log LOG = LogFactory.getLog(TestSafeMode.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestSafeMode.class);
   private static final Path TEST_PATH = new Path("/test");
   private static final int BLOCK_SIZE = 1024;
   private static final String NEWLINE = System.getProperty("line.separator");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
index 52cf163e26..aa146945d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
@@ -44,8 +44,8 @@
  */
 public class TestUnsetAndChangeDirectoryEcPolicy {
-  public static final Log LOG =
-      LogFactory.getLog(TestUnsetAndChangeDirectoryEcPolicy.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestUnsetAndChangeDirectoryEcPolicy.class);
   private MiniDFSCluster cluster;
   private Configuration conf = new Configuration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java
index 623dafff8b..3a9065a152 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteRead.java
@@ -21,8 +21,8 @@
 import java.io.IOException;
 import java.util.EnumSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -64,7 +64,8 @@ public class TestWriteRead {
   private boolean truncateOption = false;
   private final boolean abortTestOnFailure = true;
-  static private Log LOG = LogFactory.getLog(TestWriteRead.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestWriteRead.class);
   @Before
   public void initJunitModeTest() throws Exception {
@@ -95,7 +96,6 @@ public void shutdown() {
   // Equivalence of @Before for cluster mode testing.
   private void initClusterModeTest() throws IOException {
-    LOG = LogFactory.getLog(TestWriteRead.class);
     LOG.info("initClusterModeTest");
     conf = new Configuration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index 805bcea85b..49f578a823 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -46,7 +45,8 @@ import java.util.Random;
 public class TestWriteReadStripedFile {
-  public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestWriteReadStripedFile.class);
   private final ErasureCodingPolicy ecPolicy =
       SystemErasureCodingPolicies.getByID(
           SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
@@ -63,11 +63,10 @@ public class TestWriteReadStripedFile {
   private Configuration conf = new HdfsConfiguration();
   static {
-    GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
-    ((Log4JLogger)LogFactory.getLog(BlockPlacementPolicy.class))
-        .getLogger().setLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(BlockPlacementPolicy.LOG, Level.TRACE);
   }
   @Rule
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
index c859b71097..76893615f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -34,8 +34,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 public class TestWriteStripedFileWithFailure {
-  public static final Log LOG = LogFactory
-      .getLog(TestWriteStripedFileWithFailure.class);
+  public static final Logger LOG = LoggerFactory
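Editorial note: the TestWriteRead hunk above does more than swap factories; it tightens `static private Log LOG` into `private static final Logger`, which is why the later reassignment in initClusterModeTest() is deleted. A hypothetical skeleton of the resulting shape:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class WriteReadShape { // hypothetical skeleton mirroring TestWriteRead
  // final: assigned exactly once, so the old per-test reassignment
  // (LOG = LogFactory.getLog(...)) would no longer even compile.
  private static final Logger LOG =
      LoggerFactory.getLogger(WriteReadShape.class);

  private void initClusterModeTest() {
    LOG.info("initClusterModeTest");
  }
}
```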
+      .getLogger(TestWriteStripedFileWithFailure.class);
   private MiniDFSCluster cluster;
   private FileSystem fs;
   private Configuration conf = new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
index 26d96b2309..42b1928e46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.net;
 import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -45,8 +45,8 @@
  * DFSNetworkTopology.
  */
 public class TestDFSNetworkTopology {
-  private static final Log LOG =
-      LogFactory.getLog(TestDFSNetworkTopology.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDFSNetworkTopology.class);
   private final static DFSNetworkTopology CLUSTER =
       DFSNetworkTopology.getInstance(new Configuration());
   private DatanodeDescriptor[] dataNodes;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
index e349da2294..3546c89938 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
@@ -18,15 +18,16 @@
 package org.apache.hadoop.hdfs.protocol;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.junit.Test;
 import static org.junit.Assert.fail;
 public class TestLocatedBlock {
-  public static final Log LOG = LogFactory.getLog(TestLocatedBlock.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestLocatedBlock.class);
   @Test(timeout = 10000)
   public void testAddCachedLocWhenEmpty() {
@@ -43,4 +44,4 @@ public void testAddCachedLocWhenEmpty() {
       LOG.info("Expected exception:", e);
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index efbc1d846c..d6612c1c70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -34,7 +34,7 @@
 import java.net.SocketTimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
@@ -139,7 +139,7 @@ public void testServerSaslNoClientSasl() throws Exception {
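Editorial note: the TestWriteReadStripedFile hunk above maps log4j's Level.ALL to SLF4J's Level.TRACE because org.slf4j.event.Level enumerates only ERROR through TRACE; there is no ALL, so TRACE is the most verbose setting available. A minimal sketch of the migrated pattern (LogLevelSetup is illustrative, not from the patch):

```java
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;

class LogLevelSetup { // illustrative only
  static final Logger LOG = LoggerFactory.getLogger(LogLevelSetup.class);

  static {
    // org.slf4j.event.Level has no ALL constant; TRACE is the closest
    // (most verbose) equivalent, which is what the patch substitutes.
    GenericTestUtils.setLogLevel(LOG, Level.TRACE);
  }
}
```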
     clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");
     LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
-        LogFactory.getLog(DataNode.class));
+        LoggerFactory.getLogger(DataNode.class));
     try {
       doTest(clientConf);
       Assert.fail("Should fail if SASL data transfer protection is not " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index f936d75f06..b81b710c00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -29,8 +29,8 @@
 import java.util.concurrent.TimeoutException;
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -92,7 +92,8 @@ private JNInfo(JournalNode node) {
     }
   }
-  private static final Log LOG = LogFactory.getLog(MiniJournalCluster.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MiniJournalCluster.class);
   private final File baseDir;
   private final JNInfo[] nodes;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index f1f74dcb14..6a68bd4331 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -38,7 +38,8 @@ public class MiniQJMHACluster {
   private MiniDFSCluster cluster;
   private MiniJournalCluster journalCluster;
   private final Configuration conf;
-  private static final Log LOG = LogFactory.getLog(MiniQJMHACluster.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MiniQJMHACluster.class);
   public static final String NAMESERVICE = "ns1";
   private static final Random RANDOM = new Random();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
index 18adc4ecff..8e8bb22229 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import static org.junit.Assert.*;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
index 0fc142929f..a8099cd46c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
@@ -24,8 +24,8 @@
 import java.net.URI;
 import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.qjournal.client.AsyncLogger;
@@ -41,7 +41,8 @@
 public class TestEpochsAreUnique {
-  private static final Log LOG = LogFactory.getLog(TestEpochsAreUnique.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestEpochsAreUnique.class);
   private static final String JID = "testEpochsAreUnique-jid";
   private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
       12345, "mycluster", "my-bp", 0L);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
index 40f213e707..946358c7a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
@@ -37,8 +37,8 @@
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
@@ -67,7 +67,7 @@
 public class TestQJMWithFaults {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       TestQJMWithFaults.class);
   private static final String RAND_SEED_PROPERTY =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index 00bec22564..f7c3a27404 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -40,8 +40,8 @@
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeoutException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
@@ -72,7 +72,7 @@
  * For true unit tests, see {@link TestQuorumJournalManagerUnit}.
  */
 public class TestQuorumJournalManager {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       TestQuorumJournalManager.class);
   private MiniJournalCluster cluster;
@@ -108,7 +108,7 @@ public void setup() throws Exception {
   @After
   public void shutdown() throws IOException, InterruptedException,
       TimeoutException {
-    IOUtils.cleanup(LOG, toClose.toArray(new Closeable[0]));
+    IOUtils.cleanupWithLogger(LOG, toClose.toArray(new Closeable[0]));
     // Should not leak clients between tests -- this can cause flaky tests.
     // (See HDFS-4643)
@@ -172,7 +172,7 @@ public void testReaderWhileAnotherWrites() throws Exception {
       verifyEdits(streams, 1, 3);
       assertNull(stream.readOp());
     } finally {
-      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
+      IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
       streams.clear();
     }
@@ -187,7 +187,7 @@ public void testReaderWhileAnotherWrites() throws Exception {
       assertEquals(3, stream.getLastTxId());
       verifyEdits(streams, 1, 3);
     } finally {
-      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
+      IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
       streams.clear();
     }
@@ -205,7 +205,7 @@ public void testReaderWhileAnotherWrites() throws Exception {
       verifyEdits(streams, 1, 6);
     } finally {
-      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
+      IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
       streams.clear();
     }
   }
@@ -234,7 +234,7 @@ public void testOneJNMissingSegments() throws Exception {
       readerQjm.selectInputStreams(streams, 1, false);
       verifyEdits(streams, 1, 9);
     } finally {
-      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
+      IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
       readerQjm.close();
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
index 9e1e3bb9dc..ebd1b15b4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
@@ -38,7 +38,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -56,7 +56,7 @@
  */
 public class TestQuorumJournalManagerUnit {
   static {
-    GenericTestUtils.setLogLevel(QuorumJournalManager.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(QuorumJournalManager.LOG, Level.TRACE);
   }
   private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
       12345, "mycluster", "my-bp", 0L);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
index 79f3598754..9d5af1cfd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
@@ -41,8 +41,8 @@
 import java.io.File;
 import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import static org.junit.Assert.assertTrue;
@@ -57,7 +57,7 @@
  */
 public class TestJournalNodeRespectsBindHostKeys {
-  public static final Log LOG = LogFactory.getLog(
+  public static final Logger LOG = LoggerFactory.getLogger(
       TestJournalNodeRespectsBindHostKeys.class);
   private static final String WILDCARD_ADDRESS = "0.0.0.0";
   private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
index 6112b6a987..e50c397b9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
@@ -28,8 +28,8 @@
 import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -53,7 +53,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -63,7 +63,8 @@ public class TestDelegationToken {
   private MiniDFSCluster cluster;
   private DelegationTokenSecretManager dtSecretManager;
   private Configuration config;
-  private static final Log LOG = LogFactory.getLog(TestDelegationToken.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDelegationToken.class);
   @Before
   public void setUp() throws Exception {
@@ -170,7 +171,7 @@ public void testAddDelegationTokensDFSApi() throws Exception {
   @Test
   public void testDelegationTokenWebHdfsApi() throws Exception {
-    GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.TRACE);
     final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
         + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
     //get file system as JobTracker
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
index 9718bc6fff..d79ec61470 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
@@ -29,8 +29,8 @@
 import java.util.ArrayList;
 import java.util.Enumeration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -67,7 +67,8 @@ public class TestDelegationTokenForProxyUser {
   private static UserGroupInformation ugi;
   private static UserGroupInformation proxyUgi;
-  private static final Log LOG = LogFactory.getLog(TestDoAsEffectiveUser.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDoAsEffectiveUser.class);
   private static void configureSuperUserIPAddresses(Configuration conf,
       String superUserShortName) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index aaddb3654a..a8f424005f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -39,8 +39,8 @@
 import java.util.GregorianCalendar;
 import java.util.Set;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -94,7 +94,8 @@
 /** Unit tests for block tokens */
 public class TestBlockToken {
-  public static final Log LOG = LogFactory.getLog(TestBlockToken.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestBlockToken.class);
   private static final String ADDRESS = "0.0.0.0";
   static {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 35ebe781ff..e640526729 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -74,8 +74,8 @@
 import java.util.concurrent.TimeoutException;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -126,7 +126,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
@@ -136,10 +136,10 @@
  * This class tests if a balancer schedules tasks correctly.
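Editorial note: the TestQuorumJournalManager hunks above (and the TestBalancer hunk below) replace the deprecated IOUtils.cleanup with cleanupWithLogger, whose only difference is accepting an org.slf4j.Logger. A rough behavioral sketch of what that helper does, under the assumption that it mirrors Hadoop's close-quietly-and-log semantics:

```java
import java.io.Closeable;
import java.io.IOException;
import org.slf4j.Logger;

final class CleanupSketch {
  // Rough sketch of IOUtils.cleanupWithLogger's behavior: close every
  // non-null Closeable, never propagate the IOException, and record any
  // close failure on the supplied SLF4J logger instead.
  static void cleanupWithLogger(Logger log, Closeable... closeables) {
    for (Closeable c : closeables) {
      if (c == null) {
        continue;
      }
      try {
        c.close();
      } catch (IOException e) {
        if (log != null) {
          log.debug("Exception in closing {}", c, e);
        }
      }
    }
  }
}
```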
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index c8929d9cf0..cafde33394 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -30,7 +30,7 @@
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -50,7 +50,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -58,10 +58,10 @@
  * Test balancer with multiple NameNodes
  */
 public class TestBalancerWithMultipleNameNodes {
-  static final Log LOG = Balancer.LOG;
+  static final Logger LOG = Balancer.LOG;
   {
-    GenericTestUtils.setLogLevel(LOG, Level.ALL);
-    DFSTestUtil.setNameNodeLogLevel(Level.ALL);
+    GenericTestUtils.setLogLevel(LOG, Level.TRACE);
+    DFSTestUtil.setNameNodeLogLevel(org.apache.log4j.Level.TRACE);
   }
 
@@ -352,7 +352,7 @@ private static void sleep(long ms) {
     try {
       Thread.sleep(ms);
     } catch(InterruptedException e) {
-      LOG.error(e);
+      LOG.error("Interrupted while sleeping", e);
     }
   }
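Note the mixed levels in the initializer above: DFSTestUtil.setNameNodeLogLevel still takes an org.apache.log4j.Level, so that call site keeps a fully qualified log4j constant while the rest of the class uses org.slf4j.event.Level; the two enum types do not convert automatically. If a bridge were wanted, it would look roughly like this (hypothetical helper, not part of this patch):

    import org.slf4j.event.Level;

    public final class LevelBridge {
      // Hypothetical: map an SLF4J event Level onto log4j 1.x.
      static org.apache.log4j.Level toLog4j(Level level) {
        switch (level) {
          case ERROR: return org.apache.log4j.Level.ERROR;
          case WARN:  return org.apache.log4j.Level.WARN;
          case INFO:  return org.apache.log4j.Level.INFO;
          case DEBUG: return org.apache.log4j.Level.DEBUG;
          case TRACE: return org.apache.log4j.Level.TRACE;
          default:    throw new IllegalArgumentException("unknown " + level);
        }
      }
    }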
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
index a6732c7c97..97687619b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
@@ -27,8 +27,8 @@
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
@@ -54,7 +54,7 @@
  * This class tests if a balancer schedules tasks correctly.
  */
 public class TestBalancerWithNodeGroup {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       "org.apache.hadoop.hdfs.TestBalancerWithNodeGroup");
 
   final private static long CAPACITY = 5000L;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java
index 99986e6ac8..c2a5a097ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java
@@ -22,7 +22,6 @@
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -158,4 +157,4 @@ DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
         chosenNodes, false, excludedNodes, BLOCK_SIZE,
         TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
index 2dfa9003b5..fa0dd70a7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
@@ -20,8 +20,8 @@
 import static org.apache.hadoop.hdfs.server.namenode.INodeId.INVALID_INODE_ID;
 import static org.hamcrest.core.Is.is;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
@@ -38,8 +38,8 @@
 
 public class TestBlockInfo {
 
-  private static final Log LOG = LogFactory
-      .getLog("org.apache.hadoop.hdfs.TestBlockInfo");
+  private static final Logger LOG = LoggerFactory
+      .getLogger("org.apache.hadoop.hdfs.TestBlockInfo");
 
   @Test
   public void testIsDeleted() {
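LoggerFactory.getLogger accepts either a Class or a plain String, and the string forms above ("org.apache.hadoop.hdfs.TestBalancerWithNodeGroup", "org.apache.hadoop.hdfs.TestBlockInfo") are kept deliberately: they preserve the logger names that existed under commons-logging, which do not match these classes' actual packages. Switching to the Class overload would silently rename the loggers and break any log4j.properties entries keyed on the old names. Sketch (class name hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerNaming {
      // Named logger: the name is exactly the string passed in.
      static final Logger BY_NAME =
          LoggerFactory.getLogger("org.apache.hadoop.hdfs.TestBlockInfo");
      // Class-based logger: the name is the fully qualified class name,
      // which here would be a different logger than BY_NAME.
      static final Logger BY_CLASS = LoggerFactory.getLogger(LoggerNaming.class);
    }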
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 58ca2e3d59..0097da80c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -21,8 +21,7 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.LinkedListMultimap;
 import com.google.common.collect.Lists;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -75,8 +74,7 @@
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.LightWeightGSet;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
+import org.slf4j.event.Level;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -140,7 +138,8 @@ public class TestBlockManager {
    */
   private static final int NUM_TEST_ITERS = 30;
   private static final int BLOCK_SIZE = 64*1024;
-  private static final Log LOG = LogFactory.getLog(TestBlockManager.class);
+  private static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(TestBlockManager.class);
 
   private FSNamesystem fsn;
   private BlockManager bm;
@@ -1156,7 +1155,8 @@ public void run() {
   // spam the block manager with IBRs to verify queuing is occurring.
   @Test
   public void testAsyncIBR() throws Exception {
-    Logger.getRootLogger().setLevel(Level.WARN);
+    GenericTestUtils.setLogLevel(
+        LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME), Level.WARN);
 
     // will create files with many small blocks.
     final int blkSize = 4*1024;
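Log4j's Logger.getRootLogger() has no direct SLF4J equivalent; instead, the root logger is looked up by its reserved name, org.slf4j.Logger.ROOT_LOGGER_NAME (the string "ROOT"), and the level is applied through the test helper, as the hunk above does. Roughly (class and method names hypothetical):

    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.event.Level;

    public class RootLoggerExample {
      static void quietEverythingBelowWarn() {
        Logger root = LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME);
        GenericTestUtils.setLogLevel(root, Level.WARN);
      }
    }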
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
index 3cc1b026ad..20a5fca7b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockReportRateLimiting.java
@@ -24,8 +24,8 @@
 import com.google.common.base.Joiner;
 import com.google.common.base.Supplier;
 import com.google.common.util.concurrent.Uninterruptibles;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -44,7 +44,8 @@
 import java.util.concurrent.atomic.AtomicReference;
 
 public class TestBlockReportRateLimiting {
-  static final Log LOG = LogFactory.getLog(TestBlockReportRateLimiting.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(TestBlockReportRateLimiting.class);
 
   private static void setFailure(AtomicReference<String> failure,
       String what) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 2bf6045b6f..5e59443f1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -23,8 +23,8 @@
 
 import java.util.ArrayList;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
@@ -44,7 +44,8 @@
 import org.slf4j.event.Level;
 
 public class TestBlocksWithNotEnoughRacks {
-  public static final Log LOG = LogFactory.getLog(TestBlocksWithNotEnoughRacks.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestBlocksWithNotEnoughRacks.class);
   static {
     GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.TRACE);
     GenericTestUtils.setLogLevel(LOG, Level.TRACE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java
index 0643346fee..eda60bd9e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java
@@ -21,8 +21,8 @@
 import java.util.Iterator;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
@@ -31,7 +31,8 @@
 import org.junit.Test;
 
 public class TestCachedBlocksList {
-  public static final Log LOG = LogFactory.getLog(TestCachedBlocksList.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestCachedBlocksList.class);
 
   @Test(timeout=60000)
   public void testSingleList() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index 299df56b2b..9716d4061c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -29,8 +29,8 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -48,7 +48,7 @@
  */
 public class TestCorruptReplicaInfo {
 
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       TestCorruptReplicaInfo.class);
   private final Map<Long, Block> replicaMap = new HashMap<>();
   private final Map<Long, Block> stripedBlocksMap = new HashMap<>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index dd6f40adea..aa7f4d2f7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -34,8 +34,8 @@
 import java.util.Map.Entry;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileUtil;
@@ -75,7 +75,8 @@
 
 public class TestDatanodeManager {
 
-  public static final Log LOG = LogFactory.getLog(TestDatanodeManager.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestDatanodeManager.class);
 
   //The number of times the registration / removal of nodes should happen
   final int NUM_ITERATIONS = 500;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 05b6d3023b..f64767afe2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -22,8 +22,8 @@
 import java.util.ArrayList;
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -69,7 +69,8 @@
 
 public class TestNameNodePrunesMissingStorages {
-  static final Log LOG = LogFactory.getLog(TestNameNodePrunesMissingStorages.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(TestNameNodePrunesMissingStorages.class);
 
   private static void runTest(final String testCaseName,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
index cb2ee9c850..40f54cba34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
@@ -24,8 +24,8 @@
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -52,7 +52,8 @@
 * and then the under replicated block gets replicated to the datanode.
 */
 public class TestRBWBlockInvalidation {
-  private static final Log LOG = LogFactory.getLog(TestRBWBlockInvalidation.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRBWBlockInvalidation.class);
 
   private static NumberReplicas countReplicas(final FSNamesystem namesystem,
       ExtendedBlock block) {
@@ -236,7 +237,7 @@ public void testRWRInvalidation() throws Exception {
           assertEquals("old gs data\n" + "new gs data\n", ret);
         }
       } finally {
-        IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
+        IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
       }
     } finally {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
index 8a0f75ed7f..25b2a02883 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
@@ -28,8 +28,8 @@
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -54,8 +54,8 @@
  * collision handling.
  */
 public class TestSequentialBlockGroupId {
-  private static final Log LOG = LogFactory
-      .getLog("TestSequentialBlockGroupId");
+  private static final Logger LOG = LoggerFactory
+      .getLogger("TestSequentialBlockGroupId");
 
   private final ErasureCodingPolicy ecPolicy =
       StripedFileTestUtil.getDefaultECPolicy();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
index bfda3931c9..89fe8a40ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockId.java
@@ -20,8 +20,8 @@
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,7 +43,8 @@
 * collision handling.
 */
 public class TestSequentialBlockId {
-  private static final Log LOG = LogFactory.getLog("TestSequentialBlockId");
+  private static final Logger LOG =
+      LoggerFactory.getLogger("TestSequentialBlockId");
 
   final int BLOCK_SIZE = 1024;
   final int IO_SIZE = BLOCK_SIZE;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java
index e4f9697f46..e033f18f62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestGetUriFromString.java
@@ -23,8 +23,8 @@
 import java.io.IOException;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.junit.Test;
 
 /**
@@ -32,7 +32,8 @@
  * for Windows and Unix style file paths.
 */
 public class TestGetUriFromString {
-  private static final Log LOG = LogFactory.getLog(TestGetUriFromString.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestGetUriFromString.class);
 
   private static final String RELATIVE_FILE_PATH = "relativeFilePath";
   private static final String ABSOLUTE_PATH_UNIX = "/tmp/file1";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 25eca88ae4..04b99e65f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -22,8 +22,8 @@
 import java.io.File;
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -53,8 +53,8 @@
 * dependencies to {@link MiniDFSCluster}.
 */
 public class DataNodeTestUtils {
-  private static final Log LOG =
-      LogFactory.getLog(DataNodeTestUtils.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DataNodeTestUtils.class);
   private static final String DIR_FAILURE_SUFFIX = ".origin";
 
   public final static String TEST_CLUSTER_ID = "testClusterID";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 4863ca18f9..26a9f378c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -36,8 +36,8 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -85,7 +85,7 @@ public class TestBPOfferService {
   private static final String FAKE_BPID = "fake bpid";
   private static final String FAKE_CLUSTERID = "fake cluster";
-  protected static final Log LOG = LogFactory.getLog(
+  protected static final Logger LOG = LoggerFactory.getLogger(
       TestBPOfferService.class);
   private static final ExtendedBlock FAKE_BLOCK =
       new ExtendedBlock(FAKE_BPID, 12345L);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java
index 38c8a38683..f44075014e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBatchIbr.java
@@ -30,8 +30,8 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -44,7 +44,7 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -53,7 +53,7 @@
 * and the namenode allows closing a file with COMMITTED blocks.
 */
 public class TestBatchIbr {
-  public static final Log LOG = LogFactory.getLog(TestBatchIbr.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestBatchIbr.class);
 
   private static final short NUM_DATANODES = 4;
   private static final int BLOCK_SIZE = 1024;
@@ -66,7 +66,8 @@ public class TestBatchIbr {
 
   static {
     GenericTestUtils.setLogLevel(
-        LogFactory.getLog(IncrementalBlockReportManager.class), Level.ALL);
+        LoggerFactory.getLogger(IncrementalBlockReportManager.class),
+        Level.TRACE);
   }
 
   static HdfsConfiguration newConf(long ibrInterval) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
index 61321e420e..fb65d0e222 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
@@ -22,8 +22,8 @@
 import java.util.Collections;
 import java.util.Comparator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -56,7 +56,8 @@
 * the same DataNode. Excess replicas on the same DN should be ignored by the NN.
 */
 public class TestBlockHasMultipleReplicasOnSameDN {
-  public static final Log LOG = LogFactory.getLog(TestBlockHasMultipleReplicasOnSameDN.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestBlockHasMultipleReplicasOnSameDN.class);
 
   private static final short NUM_DATANODES = 2;
   private static final int BLOCK_SIZE = 1024;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
index 63ae36d936..e061e18242 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolManager.java
@@ -25,8 +25,8 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -40,7 +40,8 @@
 
 public class TestBlockPoolManager {
-  private final Log LOG = LogFactory.getLog(TestBlockPoolManager.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestBlockPoolManager.class);
   private final DataNode mockDN = Mockito.mock(DataNode.class);
   private BlockPoolManager bpm;
   private final StringBuilder log = new StringBuilder();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 07fd4ae58d..80f9fb6b00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -61,8 +61,8 @@
 import java.util.concurrent.atomic.AtomicReference;
 
 import com.google.common.collect.Iterators;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -107,7 +107,7 @@
 import org.apache.hadoop.test.GenericTestUtils.SleepAnswer;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -124,7 +124,8 @@
 * This tests if sync all replicas in block recovery works correctly.
 */
 public class TestBlockRecovery {
-  private static final Log LOG = LogFactory.getLog(TestBlockRecovery.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestBlockRecovery.class);
   private static final String DATA_DIR =
       MiniDFSCluster.getBaseDirectory() + "data";
   private DataNode dn;
@@ -173,8 +174,8 @@ public class TestBlockRecovery {
       11 * cellSize}, {36 * cellSize}}, };
 
   static {
-    GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(LOG, Level.TRACE);
   }
 
   private final long
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index f75b23dd3f..67b41f8061 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -31,8 +31,8 @@
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -63,7 +63,7 @@
 * This class tests if block replacement request to data nodes work correctly.
 */
 public class TestBlockReplacement {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       "org.apache.hadoop.hdfs.TestBlockReplacement");
 
   MiniDFSCluster cluster;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
index 753c3a8d6f..438be89be9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBpServiceActorScheduler.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.datanode.BPServiceActor.Scheduler;
 import org.junit.Rule;
 import org.junit.Test;
@@ -43,7 +43,8 @@
 * using a few different values .
 */
 public class TestBpServiceActorScheduler {
-  protected static final Log LOG = LogFactory.getLog(TestBpServiceActorScheduler.class);
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(TestBpServiceActorScheduler.class);
 
   @Rule
   public Timeout timeout = new Timeout(300000);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
index 7194385090..ce65b6b674 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
@@ -17,8 +17,8 @@
 */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -51,8 +51,8 @@
 * This file tests the erasure coding metrics in DataNode.
 */
 public class TestDataNodeErasureCodingMetrics {
-  public static final Log LOG = LogFactory.
-      getLog(TestDataNodeErasureCodingMetrics.class);
+  public static final Logger LOG = LoggerFactory.
+      getLogger(TestDataNodeErasureCodingMetrics.class);
 
   private final ErasureCodingPolicy ecPolicy =
       StripedFileTestUtil.getDefaultECPolicy();
   private final int dataBlocks = ecPolicy.getNumDataUnits();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
index 4afacd9a7d..6879dc0eab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
@@ -21,8 +21,8 @@
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -39,8 +39,8 @@
 * This class tests various cases where faults are injected to DataNode.
 */
 public class TestDataNodeFaultInjector {
-  private static final Log LOG = LogFactory
-      .getLog(TestDataNodeFaultInjector.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestDataNodeFaultInjector.class);
 
   private static class MetricsDataNodeFaultInjector
       extends DataNodeFaultInjector {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 6530720b36..c19c849319 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -74,8 +74,8 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -98,7 +98,7 @@
 import static org.mockito.Mockito.timeout;
 
 public class TestDataNodeHotSwapVolumes {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       TestDataNodeHotSwapVolumes.class);
   private static final int BLOCK_SIZE = 512;
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
@@ -757,7 +757,7 @@ private static void assertFileLocksReleased(Collection<String> dirs)
       try {
         FsDatasetTestUtil.assertFileLockReleased(dir);
       } catch (IOException e) {
-        LOG.warn(e);
+        LOG.warn("File lock check failed for {}", dir, e);
       }
     }
   }
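The one-argument LOG.warn(e) and LOG.error(e) calls that commons-logging allowed have no SLF4J counterpart, and the tempting rewrite LOG.warn("{}", e) is a trap: Java overload resolution binds it to warn(String, Throwable), so the literal text "{}" becomes the message. The hunks above and in TestBalancerWithMultipleNameNodes therefore pass a real message plus the exception. An illustration of the three behaviors (logger and names hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ThrowableLogging {
      static final Logger LOG = LoggerFactory.getLogger(ThrowableLogging.class);

      static void demo(String dir, Exception e) {
        LOG.warn("{}", (Object) e); // e.toString() as message, no stack trace
        LOG.warn("{}", e);          // warn(String, Throwable): prints "{}" plus stack trace
        LOG.warn("lock check failed for {}", dir, e); // formatted message, stack trace kept
      }
    }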
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
index 07a26cc60e..df8be22206 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeInitStorage.java
@@ -20,8 +20,8 @@
 
 import java.io.*;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -38,7 +38,8 @@
 * FsDataSet initialization.
 */
 public class TestDataNodeInitStorage {
-  public static final Log LOG = LogFactory.getLog(TestDataNodeInitStorage.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeInitStorage.class);
 
   static private class SimulatedFsDatasetVerifier extends SimulatedFSDataset {
     static class Factory extends FsDatasetSpi.Factory<SimulatedFsDatasetVerifier> {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index 3546ad880c..c0d3a4944b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -29,8 +29,8 @@
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
@@ -53,7 +53,8 @@
 */
 public class TestDataNodeMXBean extends SaslDataTransferTestCase {
-  public static final Log LOG = LogFactory.getLog(TestDataNodeMXBean.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeMXBean.class);
 
   @Test
   public void testDataNodeMXBean() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 7b3dea70e5..98ccd8eea6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -35,8 +35,8 @@
 import com.google.common.collect.Lists;
 import net.jcip.annotations.NotThreadSafe;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -65,7 +65,8 @@
 
 @NotThreadSafe
 public class TestDataNodeMetrics {
-  private static final Log LOG = LogFactory.getLog(TestDataNodeMetrics.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeMetrics.class);
 
   @Test
   public void testDataNodeMetrics() throws Exception {
@@ -253,7 +254,7 @@ public void testTimeoutMetric() throws Exception {
       assertTrue("expected to see networkErrors",
           allDnc.indexOf("networkErrors") >= 0);
     } finally {
-      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
+      IOUtils.cleanupWithLogger(LOG, streams.toArray(new Closeable[0]));
       if (cluster != null) {
         cluster.shutdown();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
index 32fda37016..4c443afc19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -33,8 +33,9 @@
 import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileUtil;
@@ -59,7 +60,8 @@
 * Test periodic logging of DataNode metrics.
 */
 public class TestDataNodeMetricsLogger {
-  static final Log LOG = LogFactory.getLog(TestDataNodeMetricsLogger.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeMetricsLogger.class);
 
   private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory()
       + "data";
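TestDataNodeMetricsLogger is the one file here that keeps the commons-logging Log and Log4JLogger imports: only LogFactory is dropped, presumably because the test still unwraps a commons-logging Log to reach the underlying log4j logger and attach an appender for asserting on output. Capturing log output that way looks roughly like this (illustrative pattern only, not this test's code):

    import org.apache.log4j.AppenderSkeleton;
    import org.apache.log4j.spi.LoggingEvent;

    // Collects rendered messages so a test can assert on them.
    class CapturingAppender extends AppenderSkeleton {
      final StringBuilder captured = new StringBuilder();

      @Override
      protected void append(LoggingEvent event) {
        captured.append(event.getRenderedMessage()).append('\n');
      }

      @Override
      public void close() {
      }

      @Override
      public boolean requiresLayout() {
        return false;
      }
    }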
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index 8e1e23673a..4a49477c85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -29,8 +29,8 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -47,8 +47,8 @@
 import org.junit.Test;
 
 public class TestDataNodeMultipleRegistrations {
-  private static final Log LOG =
-      LogFactory.getLog(TestDataNodeMultipleRegistrations.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeMultipleRegistrations.class);
   Configuration conf;
 
   @Before
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
index 1dfd3c31db..4e6f5699ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java
@@ -28,8 +28,8 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -48,7 +48,8 @@
 */
 public class TestDataNodeReconfiguration {
-  private static final Log LOG = LogFactory.getLog(TestBlockRecovery.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestBlockRecovery.class);
   private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory()
       + "data";
   private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
index f08606ec5e..04f6ce1f59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
@@ -27,8 +27,8 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -56,7 +56,8 @@
 * finalize and rollback.
 */
 public class TestDataNodeRollingUpgrade {
-  private static final Log LOG = LogFactory.getLog(TestDataNodeRollingUpgrade.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeRollingUpgrade.class);
 
   private static final short REPL_FACTOR = 1;
   private static final int BLOCK_SIZE = 1024 * 1024;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTcpNoDelay.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTcpNoDelay.java
index 4d9f11981d..ead7baa72c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTcpNoDelay.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTcpNoDelay.java
@@ -21,8 +21,8 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
@@ -55,8 +55,8 @@
 * Checks that used sockets have TCP_NODELAY set when configured.
 */
 public class TestDataNodeTcpNoDelay {
-  private static final Log LOG =
-      LogFactory.getLog(TestDataNodeTcpNoDelay.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeTcpNoDelay.class);
   private static Configuration baseConf;
 
   @BeforeClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index f7c716d871..326c54c8f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -37,8 +37,8 @@
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FileSystem;
@@ -55,7 +55,7 @@
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -68,11 +68,11 @@
 */
 public class TestDataNodeVolumeFailureReporting {
 
-  private static final Log LOG =
-      LogFactory.getLog(TestDataNodeVolumeFailureReporting.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeVolumeFailureReporting.class);
   {
     GenericTestUtils.setLogLevel(TestDataNodeVolumeFailureReporting.LOG,
-        Level.ALL);
+        Level.TRACE);
   }
 
   private FileSystem fs;
@@ -103,7 +103,7 @@ public void setUp() throws Exception {
 
   @After
   public void tearDown() throws Exception {
-    IOUtils.cleanup(LOG, fs);
+    IOUtils.cleanupWithLogger(LOG, fs);
     if (cluster != null) {
       cluster.shutdown();
       cluster = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
index aa9a70728d..d2c9c62eb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java
@@ -26,8 +26,8 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -50,8 +50,8 @@
 * Test class for DataNodeVolumeMetrics.
 */
 public class TestDataNodeVolumeMetrics {
-  private static final Log LOG =
-      LogFactory.getLog(TestDataNodeVolumeMetrics.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDataNodeVolumeMetrics.class);
 
   private static final int BLOCK_SIZE = 1024;
   private static final short REPL = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
index bb1d9eff0e..634968b505 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -31,8 +31,8 @@
 import java.util.concurrent.ThreadLocalRandom;
 
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
@@ -55,7 +55,7 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -68,7 +68,7 @@
 * This tests DatanodeProtocol retry policy
 */
 public class TestDatanodeProtocolRetryPolicy {
-  private static final Log LOG = LogFactory.getLog(
+  private static final Logger LOG = LoggerFactory.getLogger(
       TestDatanodeProtocolRetryPolicy.class);
   private static final String DATA_DIR =
       MiniDFSCluster.getBaseDirectory() + "data";
@@ -84,7 +84,7 @@ public class TestDatanodeProtocolRetryPolicy {
       DFSTestUtil.getLocalDatanodeRegistration();
 
   static {
-    GenericTestUtils.setLogLevel(LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(LOG, Level.TRACE);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
index f2a5d089bc..38eb0545c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
@@ -27,8 +27,8 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
@@ -39,7 +39,8 @@
 import org.junit.Test;
 
 public class TestDatanodeRegister {
-  public static final Log LOG = LogFactory.getLog(TestDatanodeRegister.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestDatanodeRegister.class);
 
   // Invalid address
   private static final InetSocketAddress INVALID_ADDR =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 893fe205d5..312bc86bd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -44,8 +44,8 @@
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.FileSystem;
@@ -83,7 +83,8 @@
 * between blocks on the disk and block in memory.
 */
 public class TestDirectoryScanner {
-  private static final Log LOG = LogFactory.getLog(TestDirectoryScanner.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDirectoryScanner.class);
   private static final Configuration CONF = new HdfsConfiguration();
   private static final int DEFAULT_GEN_STAMP = 9999;
 
@@ -138,7 +139,7 @@ private long truncateBlockFile() throws IOException {
         LOG.info("Truncated block file " + f.getAbsolutePath());
         return b.getBlockId();
       } finally {
-        IOUtils.cleanup(LOG, channel, s);
+        IOUtils.cleanupWithLogger(LOG, channel, s);
       }
     }
   }
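The truncateBlockFile hunk shows why cleanupWithLogger tolerates nulls: in a finally block, some resources may never have been opened. A usage sketch with hypothetical names, assuming the SLF4J-typed cleanupWithLogger shown earlier:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.nio.channels.FileChannel;
    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class TruncateSketch {
      static final Logger LOG = LoggerFactory.getLogger(TruncateSketch.class);

      static void truncate(File f, long newLength) throws Exception {
        FileOutputStream s = null;
        FileChannel channel = null;
        try {
          s = new FileOutputStream(f, true);
          channel = s.getChannel();
          channel.truncate(newLength);
        } finally {
          // Null-safe: if the constructor threw, channel is still null here.
          IOUtils.cleanupWithLogger(LOG, channel, s);
        }
      }
    }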
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
index bf0e3c11bd..d168c2e9c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
@@ -21,8 +21,8 @@
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.*;
@@ -50,7 +50,8 @@
 * {@link DFSConfigKeys#DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY}
 */
 public class TestDnRespectsBlockReportSplitThreshold {
-  public static final Log LOG = LogFactory.getLog(TestStorageReport.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestStorageReport.class);
 
   private static final int BLOCK_SIZE = 1024;
   private static final short REPL_FACTOR = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
index 2dbd5b9bd0..a0c6498331 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
@@ -38,8 +38,7 @@
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -88,8 +87,7 @@
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
+import org.slf4j.event.Level;
 
 import com.google.common.base.Supplier;
 import com.google.common.primitives.Ints;
@@ -98,7 +96,8 @@
 
 @NotThreadSafe
 public class TestFsDatasetCache {
-  private static final Log LOG = LogFactory.getLog(TestFsDatasetCache.class);
+  private static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(TestFsDatasetCache.class);
 
   // Most Linux installs allow a default of 64KB locked memory
   static final long CACHE_CAPACITY = 64 * 1024;
@@ -126,7 +125,8 @@ public class TestFsDatasetCache {
   private static DataNodeFaultInjector oldInjector;
 
   static {
-    LogManager.getLogger(FsDatasetCache.class).setLevel(Level.DEBUG);
+    GenericTestUtils.setLogLevel(
+        LoggerFactory.getLogger(FsDatasetCache.class), Level.DEBUG);
   }
 
   @BeforeClass
@@ -263,7 +263,7 @@ private static long[] getBlockSizes(HdfsBlockLocation[] locs)
         blockChannel = blockInputStream.getChannel();
         sizes[i] = blockChannel.size();
       } finally {
-        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+        IOUtils.cleanupWithLogger(LOG, blockChannel, blockInputStream);
       }
     }
     return sizes;
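The TestFsDatasetCache static block illustrates the other half of the migration: SLF4J deliberately exposes no setLevel, so log4j's LogManager.getLogger(...).setLevel(...) cannot survive as-is, and GenericTestUtils.setLogLevel has to reach through to whatever backend is bound. A rough sketch of what such a helper must do when log4j 1.x is the backend (names illustrative, not Hadoop's actual implementation):

    import org.slf4j.Logger;
    import org.slf4j.event.Level;

    public final class SetLevelSketch {
      static void setLevel(Logger logger, Level level) {
        org.apache.log4j.Logger log4j =
            org.apache.log4j.LogManager.getLogger(logger.getName());
        log4j.setLevel(org.apache.log4j.Level.toLevel(level.toString()));
      }
    }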
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java
index 78b5cf91c0..c556d0d381 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBrVariations.java
@@ -26,8 +26,9 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.UUID;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -45,7 +46,6 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -61,7 +61,8 @@
  * in the future).
  */
 public class TestIncrementalBrVariations {
-  public static final Log LOG = LogFactory.getLog(TestIncrementalBrVariations.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestIncrementalBrVariations.class);
 
   private static final short NUM_DATANODES = 1;
   static final int BLOCK_SIZE = 1024;
@@ -79,13 +80,13 @@ public class TestIncrementalBrVariations {
   private DatanodeRegistration dn0Reg;  // DataNodeRegistration for dn0
 
   static {
-    GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
-    GenericTestUtils.setLogLevel(BlockManager.blockLog, Level.ALL);
-    GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, Level.ALL);
+    GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.TRACE);
+    GenericTestUtils.setLogLevel(BlockManager.blockLog, Level.TRACE);
+    GenericTestUtils.setLogLevel(NameNode.blockStateChangeLog, Level.TRACE);
     GenericTestUtils
-        .setLogLevel(LogFactory.getLog(FSNamesystem.class), Level.ALL);
-    GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(TestIncrementalBrVariations.LOG, Level.ALL);
+        .setLogLevel(LoggerFactory.getLogger(FSNamesystem.class), Level.TRACE);
+    GenericTestUtils.setLogLevel(DataNode.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(TestIncrementalBrVariations.LOG, Level.TRACE);
   }
 
   @Before
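The Level.ALL arguments above become Level.TRACE because org.slf4j.event.Level defines no ALL constant; TRACE is its most verbose level, so the substitution enables everything SLF4J can emit. A small check of that enum:

    import org.slf4j.event.Level;

    public class LevelValues {
      public static void main(String[] args) {
        // Prints ERROR, WARN, INFO, DEBUG, TRACE; there is no ALL or FATAL,
        // which is why log4j Level.ALL maps to Level.TRACE in these tests.
        for (Level level : Level.values()) {
          System.out.println(level + " -> " + level.toInt());
        }
      }
    }
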
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java
index 22cba6d8b1..9d5cde3e74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java
@@ -26,8 +26,8 @@
 import java.net.InetSocketAddress;
 import java.util.Collections;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -61,7 +61,8 @@
  */
 public class TestReadOnlySharedStorage {
 
-  public static final Log LOG = LogFactory.getLog(TestReadOnlySharedStorage.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestReadOnlySharedStorage.class);
 
   private static final short NUM_DATANODES = 3;
   private static final int RO_NODE_INDEX = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
index 5f62ddb084..e2bae62030 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
@@ -20,8 +20,8 @@
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -49,7 +49,8 @@
 import static org.mockito.Matchers.anyLong;
 
 public class TestStorageReport {
-  public static final Log LOG = LogFactory.getLog(TestStorageReport.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestStorageReport.class);
 
   private static final short REPL_FACTOR = 1;
   private static final StorageType storageType = StorageType.SSD; // pick non-default.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
index 708fbaf30a..357f1ec22b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
@@ -20,8 +20,8 @@
 import java.util.Collection;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClientAdapter;
@@ -44,7 +44,8 @@
 
 /** Test transferring RBW between datanodes */
 public class TestTransferRbw {
-  private static final Log LOG = LogFactory.getLog(TestTransferRbw.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestTransferRbw.class);
 
   {
     GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index baaed9f338..c4d1e57e72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -20,8 +20,8 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileExistsException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.DF;
@@ -68,8 +68,8 @@
 @InterfaceStability.Unstable
 @InterfaceAudience.Private
 public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
-  private static final Log LOG =
-      LogFactory.getLog(FsDatasetImplTestUtils.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FsDatasetImplTestUtils.class);
   private final FsDatasetImpl dataset;
 
   private static final DataChecksum DEFAULT_CHECKSUM =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index aae59ddc5e..ece5739f88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -43,8 +43,8 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -89,7 +89,8 @@ public abstract class LazyPersistTestCase {
   private static final String JMX_SERVICE_NAME = "DataNode";
   protected static final int LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC = 3;
   protected static final int LAZY_WRITER_INTERVAL_SEC = 1;
-  protected static final Log LOG = LogFactory.getLog(LazyPersistTestCase.class);
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(LazyPersistTestCase.class);
   protected static final short REPL_FACTOR = 1;
   protected final long osPageSize =
       NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
index 2daca86320..559828bda8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestSpaceReservation.java
@@ -20,8 +20,8 @@
 
 import com.google.common.base.Supplier;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.hamcrest.core.Is.is;
@@ -69,7 +69,7 @@
  * replica being written (RBW) & Replica being copied from another DN.
  */
 public class TestSpaceReservation {
-  static final Log LOG = LogFactory.getLog(TestSpaceReservation.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestSpaceReservation.class);
 
   private static final int DU_REFRESH_INTERVAL_MSEC = 500;
   private static final int STORAGES_PER_DATANODE = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
index 7df03333b4..f6a5d9c469 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java
@@ -22,8 +22,8 @@
 import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -63,7 +63,8 @@
  * Tests diskbalancer with a mock mover.
  */
 public class TestDiskBalancerWithMockMover {
-  static final Log LOG = LogFactory.getLog(TestDiskBalancerWithMockMover.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(TestDiskBalancerWithMockMover.class);
 
   @Rule
   public ExpectedException thrown = ExpectedException.none();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index 0e9784bb4e..1b3faa0eee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -26,8 +26,8 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -61,7 +61,7 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -72,13 +72,13 @@
  * Test the data migration tool (for Archival Storage)
  */
 public class TestStorageMover {
-  static final Log LOG = LogFactory.getLog(TestStorageMover.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestStorageMover.class);
   static {
-    GenericTestUtils.setLogLevel(LogFactory.getLog(BlockPlacementPolicy.class),
-        Level.ALL);
-    GenericTestUtils.setLogLevel(LogFactory.getLog(Dispatcher.class),
-        Level.ALL);
-    GenericTestUtils.setLogLevel(DataTransferProtocol.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(
+        LoggerFactory.getLogger(BlockPlacementPolicy.class), Level.TRACE);
+    GenericTestUtils.setLogLevel(LoggerFactory.getLogger(Dispatcher.class),
+        Level.TRACE);
+    GenericTestUtils.setLogLevel(DataTransferProtocol.LOG, Level.TRACE);
   }
 
   private static final int BLOCK_SIZE = 1024;
@@ -570,7 +570,7 @@ public void testMigrateOpenFileToArchival() throws Exception {
       // make sure the writing can continue
       out.writeBytes("world!");
       ((DFSOutputStream) out.getWrappedStream()).hsync();
-      IOUtils.cleanup(LOG, out);
+      IOUtils.cleanupWithLogger(LOG, out);
 
       lbs = test.dfs.getClient().getLocatedBlocks(
           barFile.toString(), BLOCK_SIZE);
@@ -585,7 +585,7 @@
       byte[] buf = new byte[13];
       // read from offset 1024
       in.readFully(BLOCK_SIZE, buf, 0, buf.length);
-      IOUtils.cleanup(LOG, in);
+      IOUtils.cleanupWithLogger(LOG, in);
       Assert.assertEquals("hello, world!", new String(buf));
     } finally {
       test.shutdownCluster();
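The pattern above replaces the deprecated IOUtils.cleanup(Log, Closeable...) with IOUtils.cleanupWithLogger, which takes an SLF4J Logger and, like its predecessor, closes each argument while logging rather than propagating close() failures. A usage sketch (the file-size helper is hypothetical):

    import java.io.FileInputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.IOUtils;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class CleanupSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(CleanupSketch.class);

      static long sizeOf(String path) throws IOException {
        FileInputStream in = null;
        try {
          in = new FileInputStream(path);
          return in.getChannel().size();
        } finally {
          // Logs any IOException from close() instead of throwing, so the
          // cleanup cannot mask an exception already leaving the try block.
          IOUtils.cleanupWithLogger(LOG, in);
        }
      }
    }
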
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index 9f678cf39b..883e43ca90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -43,8 +43,8 @@
 import java.util.Set;
 
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -75,7 +75,8 @@
  */
 public abstract class FSImageTestUtil {
 
-  public static final Log LOG = LogFactory.getLog(FSImageTestUtil.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(FSImageTestUtil.class);
 
   /**
    * The position in the fsimage header where the txid is
@@ -562,28 +563,15 @@ public static void assertReasonableNameCurrentDir(File curDir)
     assertNotNull(image);
   }
 
-  public static void logStorageContents(Log LOG, NNStorage storage) {
-    LOG.info("current storages and corresponding sizes:");
+  public static void logStorageContents(Logger log, NNStorage storage) {
+    log.info("current storages and corresponding sizes:");
     for (StorageDirectory sd : storage.dirIterable(null)) {
       File curDir = sd.getCurrentDir();
-      LOG.info("In directory " + curDir);
+      log.info("In directory " + curDir);
       File[] files = curDir.listFiles();
       Arrays.sort(files);
       for (File f : files) {
-        LOG.info("  file " + f.getAbsolutePath() + "; len = " + f.length());
-      }
-    }
-  }
-
-  public static void logStorageContents(org.slf4j.Logger LOG, NNStorage storage) {
-    LOG.info("current storages and corresponding sizes:");
-    for (StorageDirectory sd : storage.dirIterable(null)) {
-      File curDir = sd.getCurrentDir();
-      LOG.info("In directory {}", curDir);
-      File[] files = curDir.listFiles();
-      Arrays.sort(files);
-      for (File f : files) {
-        LOG.info("  file {}; len = {}", f.getAbsolutePath(), f.length());
+        log.info("  file " + f.getAbsolutePath() + "; len = " + f.length());
       }
     }
   }
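With every caller now handing in an SLF4J Logger, the commons-logging overload of logStorageContents becomes dead code and a single Logger-based method remains. Note the surviving body logs via string concatenation; the deleted overload's {} placeholders would defer the string building (though not argument evaluation) until the level is known to be enabled, as this sketch shows:

    import java.io.File;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PlaceholderSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(PlaceholderSketch.class);

      static void describe(File f) {
        // Builds the message string even when INFO is disabled:
        LOG.info("  file " + f.getAbsolutePath() + "; len = " + f.length());
        // Formats only when INFO is enabled (arguments still evaluated):
        LOG.info("  file {}; len = {}", f.getAbsolutePath(), f.length());
      }
    }
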
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 3a3c47177a..a9c4578ec1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -28,8 +28,8 @@
 
 import com.google.common.base.Preconditions;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.CreateFlag;
@@ -103,7 +103,8 @@
  * documentation accordingly.
 */
 public class NNThroughputBenchmark implements Tool {
-  private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NNThroughputBenchmark.class);
   private static final int BLOCK_SIZE = 16;
   private static final String GENERAL_OPTIONS_USAGE =
       "[-keepResults] | [-logLevel L] | [-UGCacheRefreshCount G]";
@@ -145,7 +146,7 @@ void close() {
   }
 
   static void setNameNodeLoggingLevel(Level logLevel) {
-    LOG.fatal("Log level = " + logLevel.toString());
+    LOG.error("Log level = " + logLevel.toString());
     // change log level to NameNode logs
    DFSTestUtil.setNameNodeLogLevel(logLevel);
    GenericTestUtils.setLogLevel(LogManager.getLogger(
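LOG.fatal becomes LOG.error above because the SLF4J Logger interface stops at ERROR; FATAL is a log4j severity with no SLF4J equivalent. Sketch of the mapping:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class FatalToError {
      private static final Logger LOG =
          LoggerFactory.getLogger(FatalToError.class);

      static void report(Object logLevel) {
        // Before, via commons-logging/log4j: LOG.fatal("Log level = " + logLevel);
        // ERROR is the highest severity SLF4J offers:
        LOG.error("Log level = " + logLevel.toString());
      }
    }
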
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
index d637af5b49..e7f51ce2e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
@@ -22,8 +22,8 @@
 import java.io.IOException;
 import java.util.Iterator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -45,8 +45,8 @@
 @InterfaceStability.Unstable
 public class OfflineEditsViewerHelper {
 
-  private static final Log LOG =
-      LogFactory.getLog(OfflineEditsViewerHelper.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OfflineEditsViewerHelper.class);
 
   final long blockSize = 512;
   MiniDFSCluster cluster = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 1aa77266f0..13cd16f71a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -24,8 +24,8 @@
 import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.EnumSet;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -46,7 +46,8 @@
  * FSNamesystem.getAdditionalBlock().
 */
 public class TestAddBlockRetry {
-  public static final Log LOG = LogFactory.getLog(TestAddBlockRetry.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestAddBlockRetry.class);
 
   private static final short REPLICATION = 3;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
index 7e3a030b60..da4e71e3c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
@@ -29,8 +29,8 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -53,8 +53,8 @@
 public class TestAllowFormat {
   public static final String NAME_NODE_HOST = "localhost:";
   public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
-  private static final Log LOG =
-      LogFactory.getLog(TestAllowFormat.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestAllowFormat.class.getName());
   private static final File DFS_BASE_DIR = new File(
       PathUtils.getTestDir(TestAllowFormat.class), "dfs");
   private static Configuration config;
   private static MiniDFSCluster cluster = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
index 9a4f98b0d1..3db16e83ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogAtDebug.java
@@ -20,8 +20,8 @@
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Optional;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -43,7 +43,7 @@
  * Test that the HDFS Audit logger respects DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST.
 */
 public class TestAuditLogAtDebug {
-  static final Log LOG = LogFactory.getLog(TestAuditLogAtDebug.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestAuditLogAtDebug.class);
 
   @Rule
   public Timeout timeout = new Timeout(300000);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 267896464f..5ae9a3eb97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -29,8 +29,8 @@
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
@@ -52,7 +52,7 @@
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -62,12 +62,13 @@
 import com.google.common.collect.Lists;
 
 public class TestBackupNode {
-  public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestBackupNode.class);
 
   static {
-    GenericTestUtils.setLogLevel(Checkpointer.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(BackupImage.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(Checkpointer.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(BackupImage.LOG, Level.TRACE);
   }
 
   static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index 551670e1d7..fa1d3d4a47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -42,8 +42,8 @@
 import java.util.List;
 
 import org.apache.commons.lang3.time.DateUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CacheFlag;
@@ -95,7 +95,7 @@
 import com.google.common.base.Supplier;
 
 public class TestCacheDirectives {
-  static final Log LOG = LogFactory.getLog(TestCacheDirectives.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestCacheDirectives.class);
 
   private static final UserGroupInformation unprivilegedUser =
       UserGroupInformation.createRemoteUser("unprivilegedUser");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index d0c0a63913..353b3b381b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -47,8 +47,6 @@
 import org.apache.commons.cli.ParseException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -87,7 +85,7 @@
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -109,7 +107,7 @@ public class TestCheckpoint {
 
   static {
-    GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(FSImage.LOG, Level.TRACE);
   }
 
   static final Logger LOG = LoggerFactory.getLogger(TestCheckpoint.class);
@@ -873,7 +871,7 @@ public void testStorageAlreadyLockedErrorMessage() throws Exception {
     }
 
     LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
-        LogFactory.getLog(Storage.class));
+        LoggerFactory.getLogger(Storage.class));
     try {
       // try to lock the storage that's already locked
       savedSd.lock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
index db8c0293c0..0e648e397e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
@@ -35,8 +35,8 @@
 import java.util.List;
 import java.util.Properties;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -54,7 +54,8 @@
 import org.junit.Test;
 
 public class TestClusterId {
-  private static final Log LOG = LogFactory.getLog(TestClusterId.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestClusterId.class);
   File hdfsDir;
   Configuration config;
 
@@ -489,4 +490,4 @@ public void testNNFormatSuccess() throws Exception {
     config.setBoolean(DFSConfigKeys.DFS_REFORMAT_DISABLED, true);
     DFSTestUtil.formatNameNode(config);
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index df74107569..46b8db1cd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -27,8 +27,8 @@
 import java.io.IOException;
 import java.util.HashSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -64,7 +64,8 @@
 * appropriate exceptions/failure response
 */
 public class TestDeadDatanode {
-  private static final Log LOG = LogFactory.getLog(TestDeadDatanode.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDeadDatanode.class);
   private MiniDFSCluster cluster;
 
   @After
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index ce522217c9..d24bffad37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -54,8 +54,8 @@
 import java.util.regex.Pattern;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
@@ -168,7 +168,7 @@ void fromXml(Stanza st) throws InvalidXmlException {
     }
   }
 
-  static final Log LOG = LogFactory.getLog(TestEditLog.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestEditLog.class);
 
   static final int NUM_DATA_NODES = 0;
 
@@ -1596,7 +1596,7 @@ public void testResetThreadLocalCachedOps() throws IOException {
       fileSys.create(file2).close();
 
       // Restart and assert the above stated expectations.
-      IOUtils.cleanup(LOG, fileSys);
+      IOUtils.cleanupWithLogger(LOG, fileSys);
       cluster.restartNameNode();
       fileSys = cluster.getFileSystem();
       assertFalse(fileSys.getAclStatus(dir1).getEntries().isEmpty());
@@ -1605,7 +1605,7 @@
       assertTrue(fileSys.getAclStatus(dir3).getEntries().isEmpty());
       assertTrue(fileSys.getAclStatus(file2).getEntries().isEmpty());
     } finally {
-      IOUtils.cleanup(LOG, fileSys);
+      IOUtils.cleanupWithLogger(LOG, fileSys);
       if (cluster != null) {
         cluster.shutdown();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
index cd329a617e..3eca80f386 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java
@@ -32,8 +32,8 @@
 import java.net.URL;
 import java.util.EnumMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -47,8 +47,8 @@
 import org.mockito.Mockito;
 
 public class TestEditLogFileInputStream {
-  private static final Log LOG =
-      LogFactory.getLog(TestEditLogFileInputStream.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestEditLogFileInputStream.class);
   private static final byte[] FAKE_LOG_DATA = TestEditLog.HADOOP20_SOME_EDITS;
 
   private final static File TEST_DIR = PathUtils
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
index 10f571c4d0..2858e4e7f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
@@ -32,8 +32,8 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -86,7 +86,8 @@ public TestEditLogRace(boolean useAsyncEditLog) {
   private static final String NAME_DIR =
     MiniDFSCluster.getBaseDirectory() + "name-0-1";
 
-  private static final Log LOG = LogFactory.getLog(TestEditLogRace.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestEditLogRace.class);
 
   // This test creates NUM_THREADS threads and each thread continuously writes
   // transactions
@@ -420,7 +421,7 @@ public void run() {
               true);
           LOG.info("mkdirs complete");
         } catch (Throwable ioe) {
-          LOG.fatal("Got exception", ioe);
+          LOG.error("Got exception", ioe);
           deferredException.set(ioe);
           waitToEnterFlush.countDown();
         }
@@ -522,7 +523,7 @@ public void run() {
           editLog.logSync();
           LOG.info("edit thread: logSync complete");
         } catch (Throwable ioe) {
-          LOG.fatal("Got exception", ioe);
+          LOG.error("Got exception", ioe);
           deferredException.set(ioe);
           sleepingBeforeSync.countDown();
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
index bdd48e873c..45a785be75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirAttrOp.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
@@ -34,7 +34,8 @@
 * Test {@link FSDirAttrOp}.
 */
 public class TestFSDirAttrOp {
-  public static final Log LOG = LogFactory.getLog(TestFSDirAttrOp.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestFSDirAttrOp.class);
 
   private boolean unprotectedSetTimes(long atime, long atime0, long precision,
       long mtime, boolean force) throws QuotaExceededException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
index c35c95a735..d143013fde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
@@ -28,8 +28,8 @@
 import java.util.Random;
 
 import com.google.common.collect.ImmutableList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
@@ -58,7 +58,8 @@
 * Test {@link FSDirectory}, the in-memory namespace tree.
 */
 public class TestFSDirectory {
-  public static final Log LOG = LogFactory.getLog(TestFSDirectory.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestFSDirectory.class);
 
   private static final long seed = 0;
   private static final short REPLICATION = 3;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index c074ae1681..daeeff29c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -62,7 +62,7 @@
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -94,8 +94,8 @@ private static Configuration getConf() {
   }
 
   static {
-    GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(FSImage.LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.TRACE);
   }
 
   private static final File TEST_DIR = PathUtils.getTestDir(TestFSEditLogLoader.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index 58ecc8a760..6f31b58d1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -47,7 +47,7 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Assert;
@@ -60,7 +60,7 @@ public class TestFSImageWithSnapshot {
   {
     SnapshotTestHelper.disableLogs();
-    GenericTestUtils.setLogLevel(INode.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(INode.LOG, Level.TRACE);
   }
 
   static final long seed = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
index 3352fd00cc..f5a112c7ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
@@ -29,7 +29,7 @@
 import java.util.EnumSet;
 import java.util.Random;
 
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CreateFlag;
@@ -43,7 +43,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -52,8 +52,8 @@
 
 public class TestFavoredNodesEndToEnd {
   {
-    GenericTestUtils.setLogLevel(LogFactory.getLog(BlockPlacementPolicy.class),
-        Level.ALL);
+    GenericTestUtils.setLogLevel(
+        LoggerFactory.getLogger(BlockPlacementPolicy.class), Level.TRACE);
   }
 
   private static MiniDFSCluster cluster;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
index 4e4c64bd10..f258caea0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
@@ -34,8 +34,8 @@
 import java.util.List;
 import java.util.PriorityQueue;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -53,7 +53,8 @@
 import com.google.common.collect.ImmutableList;
 
 public class TestFileJournalManager {
-  static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(TestFileJournalManager.class);
 
   private Configuration conf;
 
@@ -107,11 +108,12 @@ static long getNumberOfTransactions(FileJournalManager jm, long fromTxId,
           numTransactions++;
         }
       } finally {
-        IOUtils.cleanup(LOG, elis);
+        IOUtils.cleanupWithLogger(LOG, elis);
       }
     }
   } finally {
-    IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
+    IOUtils.cleanupWithLogger(
+        LOG, allStreams.toArray(new EditLogInputStream[0]));
   }
   return numTransactions;
 }
@@ -420,8 +422,9 @@ private static EditLogInputStream getJournalInputStream(FileJournalManager jm,
         return ret;
       }
     } finally {
-      IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
-      IOUtils.cleanup(LOG, elis);
+      IOUtils.cleanupWithLogger(
+          LOG, allStreams.toArray(new EditLogInputStream[0]));
+      IOUtils.cleanupWithLogger(LOG, elis);
     }
     return null;
   }
@@ -445,7 +448,7 @@ public void testReadFromMiddleOfEditLog() throws CorruptionException,
       FSEditLogOp op = elis.readOp();
       assertEquals("read unexpected op", op.getTransactionId(), 5);
     } finally {
-      IOUtils.cleanup(LOG, elis);
+      IOUtils.cleanupWithLogger(LOG, elis);
     }
   }
 
@@ -475,7 +478,7 @@ public void testExcludeInProgressStreams() throws CorruptionException,
         assertTrue(lastReadOp.getTransactionId() <= 100);
       }
     } finally {
-      IOUtils.cleanup(LOG, elis);
+      IOUtils.cleanupWithLogger(LOG, elis);
     }
   }
 
@@ -502,7 +505,7 @@ public void testDoPreUpgradeIOError() throws IOException {
       }
       jm.doPreUpgrade();
     } finally {
-      IOUtils.cleanup(LOG, jm);
+      IOUtils.cleanupWithLogger(LOG, jm);
       // Restore permissions on storage directory and make sure we can delete.
       FileUtil.setWritable(storageDir, true);
       FileUtil.fullyDelete(storageDir);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 51a94e73f5..f1083875e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -33,8 +33,8 @@
 import java.io.IOException;
 import java.util.concurrent.ThreadLocalRandom;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
@@ -65,17 +65,17 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestFileTruncate {
   static {
-    GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
-    GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.TRACE);
+    GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.TRACE);
   }
-  static final Log LOG = LogFactory.getLog(TestFileTruncate.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestFileTruncate.class);
   static final int BLOCK_SIZE = 4;
   static final short REPLICATION = 3;
   static final int DATANODE_NUM = 3;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index d95c2beae9..0a2b53812f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -60,9 +60,8 @@
 import java.util.regex.Pattern;
 
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -136,8 +135,8 @@
  * A JUnit test for doing fsck.
 */
 public class TestFsck {
-  private static final Log LOG =
-      LogFactory.getLog(TestFsck.class.getName());
+  private static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(TestFsck.class.getName());
 
   static final String AUDITLOG_FILE =
       GenericTestUtils.getTempPath("TestFsck-audit.log");
@@ -171,13 +170,15 @@ static String runFsck(Configuration conf, int expectedErrCode,
       throws Exception {
     ByteArrayOutputStream bStream = new ByteArrayOutputStream();
     PrintStream out = new PrintStream(bStream, true);
-    GenericTestUtils.setLogLevel(FSPermissionChecker.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(
+        FSPermissionChecker.LOG, org.slf4j.event.Level.TRACE);
     int errCode = ToolRunner.run(new DFSck(conf, out), path);
     LOG.info("OUTPUT = " + bStream.toString());
     if (checkErrorCode) {
       assertEquals(expectedErrCode, errCode);
     }
-    GenericTestUtils.setLogLevel(FSPermissionChecker.LOG, Level.INFO);
+    GenericTestUtils.setLogLevel(
+        FSPermissionChecker.LOG, org.slf4j.event.Level.INFO);
     return bStream.toString();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
index 124b301cb9..e414296796 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java
@@ -22,8 +22,8 @@
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -44,7 +44,8 @@
 * Test fsck with multiple NameNodes
 */
 public class TestFsckWithMultipleNameNodes {
-  static final Log LOG = LogFactory.getLog(TestFsckWithMultipleNameNodes.class);
+  static final Logger LOG =
+      LoggerFactory.getLogger(TestFsckWithMultipleNameNodes.class);
   {
     DFSTestUtil.setNameNodeLogLevel(Level.ALL);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index b5e0efe064..4839783c95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -27,8 +27,8 @@
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -51,7 +51,8 @@
 import org.junit.Test;
 
 public class TestHDFSConcat {
-  public static final Log LOG = LogFactory.getLog(TestHDFSConcat.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestHDFSConcat.class);
 
   private static final short REPL_FACTOR = 2;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index 79e7acc260..e86413d8bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -22,8 +22,8 @@
 import java.lang.management.ManagementFactory;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
@@ -50,8 +50,8 @@
 */
 @RunWith(Parameterized.class)
 public class TestHostsFiles {
-  private static final Log LOG =
-      LogFactory.getLog(TestHostsFiles.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestHostsFiles.class.getName());
   private Class hostFileMgrClass;
 
   public TestHostsFiles(Class hostFileMgrClass) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 2246357343..1392f9d9eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -31,8 +31,8 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -82,7 +82,7 @@ public class TestINodeFile {
   static {
     FileSystem.enableSymlinks();
   }
-  public static final Log LOG = LogFactory.getLog(TestINodeFile.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestINodeFile.class);
 
   static final short BLOCKBITS = 48;
   static final long BLKSIZE_MAXVALUE = ~(0xffffL << BLOCKBITS);
@@ -1066,7 +1066,7 @@ public void testDotdotInodePath() throws Exception {
       assertTrue(parentId == status.getFileId());
 
     } finally {
-      IOUtils.cleanup(LOG, client);
+      IOUtils.cleanupWithLogger(LOG, client);
       if (cluster != null) {
         cluster.shutdown();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
index 3027c79739..df36322e9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
@@ -20,8 +20,8 @@
 import java.io.IOException;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -39,7 +39,8 @@
 * deletion completes and handles new requests from other clients
 */
 public class TestLargeDirectoryDelete {
-  private static final Log LOG = LogFactory.getLog(TestLargeDirectoryDelete.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestLargeDirectoryDelete.class);
   private static final Configuration CONF = new HdfsConfiguration();
   private static final int TOTAL_BLOCKS = 10000;
   private MiniDFSCluster mc = null;
@@ -168,7 +169,7 @@ public void run() {
       try {
         execute();
       } catch (Throwable throwable) {
-        LOG.warn(throwable);
+        LOG.warn("{}", throwable);
         setThrown(throwable);
       } finally {
         synchronized (this) {
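The LOG.warn change above is subtle: commons-logging's warn(Object) printed only the throwable's toString(), and LOG.warn("{}", throwable) reproduces that, since a throwable consumed by a {} placeholder is formatted as a value rather than attached as an exception. The message-plus-throwable overload, by contrast, prints the stack trace; a sketch of the difference:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ThrowableWarn {
      private static final Logger LOG =
          LoggerFactory.getLogger(ThrowableWarn.class);

      static void handle(Throwable t) {
        // What the patch does: logs t.toString() only, matching the old
        // commons-logging warn(Object) behavior.
        LOG.warn("{}", t);
        // Alternative (not what the patch does): no placeholder, so the
        // trailing Throwable is logged with its full stack trace.
        LOG.warn("Async task failed", t);
      }
    }
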
java.io.IOException; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -50,7 +50,7 @@ */ public class TestNameEditsConfigs { - private static final Log LOG = LogFactory.getLog(FSEditLog.class); + private static final Logger LOG = LoggerFactory.getLogger(FSEditLog.class); static final long SEED = 0xDEADBEEFL; static final int BLOCK_SIZE = 4096; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java index 9a0e67c379..25642faffb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java @@ -20,8 +20,9 @@ import com.google.common.base.Supplier; import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.metrics2.util.MBeans; @@ -48,7 +49,8 @@ * Test periodic logging of NameNode metrics. */ public class TestNameNodeMetricsLogger { - static final Log LOG = LogFactory.getLog(TestNameNodeMetricsLogger.class); + static final Logger LOG = + LoggerFactory.getLogger(TestNameNodeMetricsLogger.class); @Rule public Timeout timeout = new Timeout(300000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java index 0a1b129771..3265bed80c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java @@ -26,8 +26,8 @@ import static org.junit.Assert.*; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationException; import org.apache.hadoop.fs.Path; @@ -54,8 +54,8 @@ public class TestNameNodeReconfigure { - public static final Log LOG = LogFactory - .getLog(TestNameNodeReconfigure.class); + public static final Logger LOG = LoggerFactory + .getLogger(TestNameNodeReconfigure.class); private MiniDFSCluster cluster; private final int customizedBlockInvalidateLimit = 500; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java index 877f43cde3..26701a59d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java @@ -33,8 +33,8 @@ import 
java.util.Set; import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -85,7 +85,8 @@ private static Configuration getConf() { return conf; } - private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestNameNodeRecovery.class); private static final StartupOption recoverStartOpt = StartupOption.RECOVER; private static final File TEST_DIR = PathUtils.getTestDir(TestNameNodeRecovery.class); @@ -164,7 +165,7 @@ static void runEditLogTest(EditLogTestSetup elts) throws IOException { // We should have read every valid transaction. assertTrue(validTxIds.isEmpty()); } finally { - IOUtils.cleanup(LOG, elfos, elfis); + IOUtils.cleanupWithLogger(LOG, elfos, elfis); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java index 80856469c2..594b07b583 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java @@ -33,8 +33,8 @@ import java.io.File; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.hdfs.DFSConfigKeys.*; import static org.junit.Assert.assertTrue; @@ -52,7 +52,8 @@ * - DFS_NAMENODE_HTTPS_BIND_HOST_KEY */ public class TestNameNodeRespectsBindHostKeys { - public static final Log LOG = LogFactory.getLog(TestNameNodeRespectsBindHostKeys.class); + public static final Logger LOG = + LoggerFactory.getLogger(TestNameNodeRespectsBindHostKeys.class); private static final String WILDCARD_ADDRESS = "0.0.0.0"; private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0"; private static String keystoresDir; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java index f9bfc37374..1b0a671ff0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeStatusMXBean.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.base.Supplier; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -40,7 +40,7 @@ */ public class TestNameNodeStatusMXBean { - public static final Log LOG = LogFactory.getLog( + public static final Logger LOG = LoggerFactory.getLogger( TestNameNodeStatusMXBean.class); @Test(timeout = 120000L) 
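The same migration idiom repeats across the test classes in this patch. Below is a minimal illustrative sketch (the class name is hypothetical, not part of the patch) of the before/after pattern and of the call-site adjustments it forces:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ExampleMigratedTest {
  // Before (commons-logging):
  //   private static final Log LOG = LogFactory.getLog(ExampleMigratedTest.class);
  // After (slf4j):
  private static final Logger LOG =
      LoggerFactory.getLogger(ExampleMigratedTest.class);

  void demo(Throwable t) {
    // org.slf4j.Logger declares no warn(Object) overload, so calls like
    // LOG.warn(throwable) no longer compile and must be rewritten to a
    // parameterized form:
    LOG.warn("{}", t);
    // The warn(String, Throwable) overload also records the stack trace:
    LOG.warn("operation failed", t);
  }
}

Two related substitutions recur for the same reason: org.slf4j.event.Level defines only ERROR, WARN, INFO, DEBUG and TRACE, so former Level.ALL call sites map to Level.TRACE, the finest slf4j level; and IOUtils.cleanup(Log, ...) becomes IOUtils.cleanupWithLogger(Logger, ...), whose first parameter is an slf4j Logger.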
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java index b7f0cfc1f6..4343b0acd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java @@ -25,8 +25,8 @@ import java.util.ArrayList; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -52,7 +52,8 @@ * This tests InterDataNodeProtocol for block handling. */ public class TestNamenodeCapacityReport { - private static final Log LOG = LogFactory.getLog(TestNamenodeCapacityReport.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestNamenodeCapacityReport.class); /** * The following test first creates a file. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java index c0757e6520..a53b319eab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java @@ -21,26 +21,26 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; - import org.apache.commons.logging.Log; - import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.conf.Configuration; - import org.apache.hadoop.fs.ContentSummary; - import org.apache.hadoop.fs.Path; - import org.apache.hadoop.fs.StorageType; - import org.apache.hadoop.hdfs.DFSConfigKeys; - import org.apache.hadoop.hdfs.DFSTestUtil; - import org.apache.hadoop.hdfs.DistributedFileSystem; - import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; - import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; - import org.junit.Before; - import org.junit.Test; +import org.junit.Before; +import org.junit.Test; - import java.io.IOException; +import java.io.IOException; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.allOf; import static org.hamcrest.CoreMatchers.is; @@ -58,7 +58,8 @@ public class TestQuotaByStorageType { private DistributedFileSystem dfs; private FSNamesystem fsn; - protected static final Log 
LOG = LogFactory.getLog(TestQuotaByStorageType.class); + protected static final Logger LOG = + LoggerFactory.getLogger(TestQuotaByStorageType.class); @Before public void setUp() throws Exception { @@ -944,4 +945,4 @@ public void testStorageSpaceQuotaPerQuotaClear() throws IOException { testDirNode.asDirectory().getDirectoryWithQuotaFeature().getQuota() .getTypeSpace(StorageType.SSD)); } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java index 9031b2b7a5..8fa870186c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java @@ -59,7 +59,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.DelayAnswer; import org.apache.hadoop.test.Whitebox; -import org.apache.log4j.Level; +import org.slf4j.event.Level; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; @@ -79,7 +79,7 @@ */ public class TestSaveNamespace { static { - GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL); + GenericTestUtils.setLogLevel(FSImage.LOG, Level.TRACE); } private static final Logger LOG = LoggerFactory.getLogger(TestSaveNamespace.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 24016087da..3e5fe75640 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -38,8 +38,7 @@ import java.util.Iterator; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -88,8 +87,8 @@ public class TestStartup { public static final String NAME_NODE_HOST = "localhost:"; public static final String WILDCARD_HTTP_HOST = "0.0.0.0:"; - private static final Log LOG = - LogFactory.getLog(TestStartup.class.getName()); + private static final org.slf4j.Logger LOG = + LoggerFactory.getLogger(TestStartup.class.getName()); private Configuration config; private File hdfsDir=null; static final long seed = 0xAAAAEEFL; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java index 6f4546db88..aa10291ef0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java @@ -33,8 +33,8 @@ import java.util.Iterator; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.cli.CLITestCmdDFS; import org.apache.hadoop.cli.util.CLICommandDFSAdmin; import 
org.apache.hadoop.cli.util.CommandExecutor; @@ -60,8 +60,8 @@ public class TestStorageRestore { public static final String NAME_NODE_HOST = "localhost:"; public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:"; - private static final Log LOG = - LogFactory.getLog(TestStorageRestore.class.getName()); + private static final Logger LOG = + LoggerFactory.getLogger(TestStorageRestore.class.getName()); private Configuration config; private File hdfsDir=null; static final long seed = 0xAAAAEEFL; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java index 468e47fd18..59afd8e030 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageType; @@ -63,7 +63,7 @@ * This class tests INodeFile with striped feature. */ public class TestStripedINodeFile { - public static final Log LOG = LogFactory.getLog(TestINodeFile.class); + public static final Logger LOG = LoggerFactory.getLogger(TestINodeFile.class); private static final PermissionStatus perm = new PermissionStatus( "userName", null, FsPermission.getDefault()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java index 169bbeef11..bbe29cf134 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java @@ -32,8 +32,8 @@ import com.google.common.base.Function; import com.google.common.base.Joiner; import com.google.common.collect.Iterables; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -55,7 +55,7 @@ * Static utility functions useful for testing HA. 
*/ public abstract class HATestUtil { - private static final Log LOG = LogFactory.getLog(HATestUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(HATestUtil.class); private static final String LOGICAL_HOSTNAME = "ha-nn-uri-%d"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index 48d505c748..defa6e53e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -28,8 +28,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import com.google.common.base.Supplier; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; @@ -51,7 +51,8 @@ import com.google.common.collect.ImmutableList; public class TestBootstrapStandby { - private static final Log LOG = LogFactory.getLog(TestBootstrapStandby.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestBootstrapStandby.class); private static final int maxNNCount = 3; private static final int STARTING_PORT = 20000; @@ -197,14 +198,14 @@ public void testSharedEditsMissingLogs() throws Exception { // Trying to bootstrap standby should now fail since the edit // logs aren't available in the shared dir. LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( - LogFactory.getLog(BootstrapStandby.class)); + LoggerFactory.getLogger(BootstrapStandby.class)); try { assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, forceBootstrap(1)); } finally { logs.stopCapturing(); } - GenericTestUtils.assertMatches(logs.getOutput(), - "FATAL.*Unable to read transaction ids 1-3 from the configured shared"); + assertTrue(logs.getOutput().contains( + "Unable to read transaction ids 1-3 from the configured shared")); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java index 7b0a0e1925..c470cc6f1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSUpgradeWithHA.java @@ -28,8 +28,8 @@ import java.net.URISyntaxException; import java.util.Collection; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; @@ -60,7 +60,8 @@ */ public class TestDFSUpgradeWithHA { - private static final Log LOG = LogFactory.getLog(TestDFSUpgradeWithHA.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestDFSUpgradeWithHA.class); private Configuration conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index b900c8b875..7076ec674b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode.ha; import com.google.common.base.Joiner; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -65,8 +65,8 @@ **/ public class TestDelegationTokensWithHA { private static final Configuration conf = new Configuration(); - private static final Log LOG = - LogFactory.getLog(TestDelegationTokensWithHA.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestDelegationTokensWithHA.class); private static MiniDFSCluster cluster; private static NameNode nn0; private static NameNode nn1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java index c88ac57c27..68b3e2ba87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.net.ServerSocketUtil; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.log4j.Level; +import org.slf4j.event.Level; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; @@ -60,7 +60,7 @@ @RunWith(Parameterized.class) public class TestEditLogTailer { static { - GenericTestUtils.setLogLevel(FSEditLog.LOG, org.slf4j.event.Level.DEBUG); + GenericTestUtils.setLogLevel(FSEditLog.LOG, Level.DEBUG); } @Parameters diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java index 4405c5b7b6..4387a3372b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java @@ -27,8 +27,8 @@ import java.util.Collections; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.HAUtil; @@ -52,8 +52,8 @@ * and startup of the standby node. 
*/ public class TestEditLogsDuringFailover { - private static final Log LOG = - LogFactory.getLog(TestEditLogsDuringFailover.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestEditLogsDuringFailover.class); private static final int NUM_DIRS_IN_LOG = 5; static { @@ -159,7 +159,7 @@ private void testFailoverFinalizesAndReadsInProgress( outs.write(new byte[] { 0x18, 0x00, 0x00, 0x00 } ); LOG.error("editLogFile = " + editLogFile); } finally { - IOUtils.cleanup(LOG, outs); + IOUtils.cleanupWithLogger(LOG, outs); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java index 284e283515..8785993840 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java @@ -33,8 +33,8 @@ import java.util.LinkedList; import java.util.Random; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -66,8 +66,8 @@ @RunWith(Parameterized.class) public class TestFailureToReadEdits { - private static final Log LOG = - LogFactory.getLog(TestFailureToReadEdits.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestFailureToReadEdits.class); private static final String TEST_DIR1 = "/test1"; private static final String TEST_DIR2 = "/test2"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java index 8665e09469..cc8ead1687 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java @@ -32,13 +32,13 @@ import org.apache.hadoop.hdfs.tools.DFSck; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; -import org.apache.log4j.Level; +import org.slf4j.event.Level; import org.junit.Test; public class TestHAFsck { static { - GenericTestUtils.setLogLevel(DFSUtil.LOG, Level.ALL); + GenericTestUtils.setLogLevel(DFSUtil.LOG, Level.TRACE); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java index 432f7df091..8beba74c6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -44,7 +44,8 @@ */ public class TestHAMetrics { - private static final Log LOG = 
LogFactory.getLog(TestHAMetrics.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestHAMetrics.class); @Test(timeout = 300000) public void testHAMetrics() throws Exception { @@ -117,7 +118,7 @@ public void testHAMetrics() throws Exception { newMillisSinceLastLoadedEdits, millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits); } finally { - IOUtils.cleanup(LOG, fs); + IOUtils.cleanupWithLogger(LOG, fs); cluster.shutdown(); } } @@ -170,7 +171,7 @@ public void testHAInodeCount() throws Exception { nn0 = cluster.getNamesystem(0); assertEquals(5, nn0.getFilesTotal()); } finally { - IOUtils.cleanup(LOG, fs); + IOUtils.cleanupWithLogger(LOG, fs); cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java index c81f058366..f9445fa12e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java @@ -30,8 +30,8 @@ import java.util.List; import java.util.Map; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -63,7 +63,7 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.Whitebox; -import org.apache.log4j.Level; +import org.slf4j.event.Level; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -75,7 +75,8 @@ * Tests that exercise safemode in an HA cluster. 
*/ public class TestHASafeMode { - private static final Log LOG = LogFactory.getLog(TestHASafeMode.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestHASafeMode.class); private static final int BLOCK_SIZE = 1024; private NameNode nn0; private NameNode nn1; @@ -83,8 +84,8 @@ public class TestHASafeMode { private MiniDFSCluster cluster; static { - DFSTestUtil.setNameNodeLogLevel(Level.ALL); - GenericTestUtils.setLogLevel(FSImage.LOG, Level.ALL); + DFSTestUtil.setNameNodeLogLevel(org.apache.log4j.Level.TRACE); + GenericTestUtils.setLogLevel(FSImage.LOG, Level.TRACE); } @Before diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java index 6b84f1ee17..59c9695060 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode.ha; import com.google.common.util.concurrent.Uninterruptibles; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -41,7 +41,7 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.test.MultithreadedTestUtil.TestContext; -import org.apache.log4j.Level; +import org.slf4j.event.Level; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; @@ -62,7 +62,7 @@ * and failback between two namenodes. 
*/ public class TestHAStateTransitions { - protected static final Log LOG = LogFactory.getLog( + protected static final Logger LOG = LoggerFactory.getLogger( TestStandbyIsHot.class); private static final Path TEST_DIR = new Path("/test"); private static final Path TEST_FILE_PATH = new Path(TEST_DIR, "foo"); @@ -73,7 +73,7 @@ public class TestHAStateTransitions { RequestSource.REQUEST_BY_USER_FORCED); static { - GenericTestUtils.setLogLevel(EditLogTailer.LOG, Level.ALL); + GenericTestUtils.setLogLevel(EditLogTailer.LOG, Level.TRACE); } /** @@ -420,7 +420,7 @@ private static void testFailoverAfterCrashDuringLogRoll(boolean writeHeader) createEmptyInProgressEditLog(cluster, nn0, writeHeader); cluster.transitionToActive(1); } finally { - IOUtils.cleanup(LOG, fs); + IOUtils.cleanupWithLogger(LOG, fs); cluster.shutdown(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java index 5015f9e183..0705c197f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java @@ -27,8 +27,8 @@ import java.io.IOException; import java.net.URISyntaxException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -50,7 +50,8 @@ public class TestInitializeSharedEdits { - private static final Log LOG = LogFactory.getLog(TestInitializeSharedEdits.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestInitializeSharedEdits.class); private static final Path TEST_PATH = new Path("/test"); private Configuration conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index bcdf511e74..bbcbaaa796 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -36,8 +36,8 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.fs.CacheFlag; @@ -92,7 +92,8 @@ import org.junit.Test; public class TestRetryCacheWithHA { - private static final Log LOG = LogFactory.getLog(TestRetryCacheWithHA.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestRetryCacheWithHA.class); private static final int BlockSize = 1024; private static ErasureCodingPolicy defaultEcPolicy = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java index 
537c18902e..6eda1e3550 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestSeveralNameNodes.java @@ -25,8 +25,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -42,7 +42,8 @@ */ public class TestSeveralNameNodes { - private static final Log LOG = LogFactory.getLog(TestSeveralNameNodes.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestSeveralNameNodes.class); /** ms between failovers between NNs */ private static final int TIME_BETWEEN_FAILOVERS = 1000; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java index 9042f8a866..0e60ee0321 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -39,7 +39,7 @@ * invalidate block, etc. */ public class TestStandbyBlockManagement { - protected static final Log LOG = LogFactory.getLog( + protected static final Logger LOG = LoggerFactory.getLogger( TestStandbyBlockManagement.class); private static final String TEST_FILE_DATA = "hello world"; private static final String TEST_FILE = "/TestStandbyBlockManagement"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java index 5da8178716..2bdada45fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java @@ -25,8 +25,8 @@ import java.net.URI; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -50,8 +50,8 @@ * the standby node. 
*/ public class TestStandbyInProgressTail { - private static final Log LOG = - LogFactory.getLog(TestStandbyInProgressTail.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestStandbyInProgressTail.class); private Configuration conf; private MiniQJMHACluster qjmhaCluster; private MiniDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java index 14c9dc264d..04eae6f23e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java @@ -21,8 +21,8 @@ import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -50,7 +50,7 @@ * has namespace information, but also has the correct block reports, etc. */ public class TestStandbyIsHot { - protected static final Log LOG = LogFactory.getLog( + protected static final Logger LOG = LoggerFactory.getLogger( TestStandbyIsHot.class); private static final String TEST_FILE_DATA = "hello highly available world"; private static final String TEST_FILE = "/testStandbyIsHot"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index 57a1b01ede..64d6c19c7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -49,8 +49,8 @@ import java.util.Random; import com.google.common.collect.ImmutableList; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -88,7 +88,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; -import org.apache.log4j.Level; +import org.slf4j.event.Level; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -108,7 +108,8 @@ public class TestNameNodeMetrics { SystemErasureCodingPolicies.getByID( SystemErasureCodingPolicies.XOR_2_1_POLICY_ID); - public static final Log LOG = LogFactory.getLog(TestNameNodeMetrics.class); + public static final Logger LOG = + LoggerFactory.getLogger(TestNameNodeMetrics.class); // Number of datanodes in the cluster private static final int DATANODE_COUNT = EC_POLICY.getNumDataUnits() + @@ -139,7 +140,7 @@ public class TestNameNodeMetrics { // Enable stale DataNodes checking CONF.setBoolean( DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true); - GenericTestUtils.setLogLevel(LogFactory.getLog(MetricsAsserts.class), + GenericTestUtils.setLogLevel(LoggerFactory.getLogger(MetricsAsserts.class), Level.DEBUG); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java index 65b2f8c3f9..77c9ecc3a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.log4j.Level; +import org.slf4j.event.Level; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -58,7 +58,7 @@ */ public class TestINodeFileUnderConstructionWithSnapshot { { - GenericTestUtils.setLogLevel(INode.LOG, Level.ALL); + GenericTestUtils.setLogLevel(INode.LOG, Level.TRACE); SnapshotTestHelper.disableLogs(); } @@ -307,4 +307,4 @@ public void testLease() throws Exception { HdfsConstants.LEASE_HARDLIMIT_PERIOD); } } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java index 38cd5f434f..04a34160fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java @@ -26,8 +26,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileChecksum; @@ -50,8 +50,8 @@ import org.junit.Test; public class TestOpenFilesWithSnapshot { - private static final Log LOG = - LogFactory.getLog(TestOpenFilesWithSnapshot.class.getName()); + private static final Logger LOG = + LoggerFactory.getLogger(TestOpenFilesWithSnapshot.class.getName()); private final Configuration conf = new Configuration(); MiniDFSCluster cluster = null; DistributedFileSystem fs = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java index bd72dfd802..987fd505af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; @@ -64,7 +64,8 @@ public class TestRenameWithSnapshots { static { SnapshotTestHelper.disableLogs(); } - private static final Log LOG = LogFactory.getLog(TestRenameWithSnapshots.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestRenameWithSnapshots.class); private static final long SEED = 0; private static final short REPL = 3; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java index 1c01eceab2..0f78d98817 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java @@ -59,7 +59,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; -import org.apache.log4j.Level; +import org.slf4j.event.Level; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -73,7 +73,7 @@ */ public class TestSnapshot { { - GenericTestUtils.setLogLevel(INode.LOG, Level.ALL); + GenericTestUtils.setLogLevel(INode.LOG, Level.TRACE); SnapshotTestHelper.disableLogs(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsCreatePermissions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsCreatePermissions.java index 68fc26f99e..dcb6ddc00a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsCreatePermissions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsCreatePermissions.java @@ -20,8 +20,8 @@ import java.net.HttpURLConnection; import java.net.URL; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -39,7 +39,8 @@ * from dfs CLI for specifying files/directories permissions. 
*/ public class TestWebHdfsCreatePermissions { - static final Log LOG = LogFactory.getLog(TestWebHdfsCreatePermissions.class); + static final Logger LOG = + LoggerFactory.getLogger(TestWebHdfsCreatePermissions.class); { DFSTestUtil.setNameNodeLogLevel(Level.ALL); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java index d00ed62a0a..028e18c346 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java @@ -24,8 +24,8 @@ import java.util.Arrays; import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -55,7 +55,8 @@ * Test WebHDFS which provides data locality using HTTP redirection. */ public class TestWebHdfsDataLocality { - static final Log LOG = LogFactory.getLog(TestWebHdfsDataLocality.class); + static final Logger LOG = + LoggerFactory.getLogger(TestWebHdfsDataLocality.class); { DFSTestUtil.setNameNodeLogLevel(Level.ALL); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java index 53878e0716..8b9e7ce710 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java @@ -49,7 +49,6 @@ import java.util.Properties; import java.util.concurrent.TimeoutException; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -1252,7 +1251,7 @@ public void testSPSWhenFileHasExcessRedundancyBlocks() throws Exception { DFSTestUtil.createFile(fs, filePath, 1024, (short) 5, 0); fs.setReplication(filePath, (short) 3); LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( - LogFactory.getLog(BlockStorageMovementAttemptedItems.class)); + LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class)); fs.setStoragePolicy(filePath, "COLD"); fs.satisfyStoragePolicy(filePath); DFSTestUtil.waitExpectedStorageType(filePath.toString(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java index ac29c3c33f..b2da6a2fca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java @@ -37,8 +37,8 @@ import net.jcip.annotations.NotThreadSafe; import org.apache.commons.collections.map.LinkedMap; import org.apache.commons.lang3.mutable.MutableBoolean; -import 
org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; @@ -93,7 +93,8 @@ @NotThreadSafe public class TestShortCircuitCache { - static final Log LOG = LogFactory.getLog(TestShortCircuitCache.class); + static final Logger LOG = + LoggerFactory.getLogger(TestShortCircuitCache.class); private static class TestFileDescriptorPair { final TemporarySocketDirectory dir = new TemporarySocketDirectory(); @@ -126,7 +127,7 @@ public FileInputStream[] getFileInputStreams() { } public void close() throws IOException { - IOUtils.cleanup(LOG, fis); + IOUtils.cleanupWithLogger(LOG, fis); dir.close(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index fcf649cbd4..015c9a45c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -28,8 +28,8 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.text.TextStringBuilder; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationUtil; import org.apache.hadoop.fs.ChecksumException; @@ -99,7 +99,7 @@ * set/clrSpaceQuote are tested in {@link org.apache.hadoop.hdfs.TestQuota}. */ public class TestDFSAdmin { - private static final Log LOG = LogFactory.getLog(TestDFSAdmin.class); + private static final Logger LOG = LoggerFactory.getLogger(TestDFSAdmin.class); private Configuration conf = null; private MiniDFSCluster cluster; private DFSAdmin admin; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java index f1f5793438..528ac4b5df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java @@ -27,8 +27,8 @@ import java.io.IOException; import java.io.PrintStream; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -52,7 +52,8 @@ import com.google.common.base.Joiner; public class TestDFSHAAdmin { - private static final Log LOG = LogFactory.getLog(TestDFSHAAdmin.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestDFSHAAdmin.class); private DFSHAAdmin tool; private final ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java index a21a31d9e0..74b5af16d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java @@ -26,8 +26,8 @@ import java.io.IOException; import java.io.PrintStream; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAAdmin; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; -import org.apache.log4j.Level; +import org.slf4j.event.Level; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -52,10 +52,11 @@ */ public class TestDFSHAAdminMiniCluster { static { - GenericTestUtils.setLogLevel(LogFactory.getLog(HAAdmin.class), - Level.ALL); + GenericTestUtils.setLogLevel(LoggerFactory.getLogger(HAAdmin.class), + Level.TRACE); } - private static final Log LOG = LogFactory.getLog(TestDFSHAAdminMiniCluster.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestDFSHAAdminMiniCluster.class); private MiniDFSCluster cluster; private Configuration conf; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java index 9e1fa79a52..80155691b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java @@ -31,8 +31,8 @@ import java.util.Map; import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; @@ -50,8 +50,8 @@ import com.google.common.collect.ImmutableSet; public class TestOfflineEditsViewer { - private static final Log LOG = LogFactory - .getLog(TestOfflineEditsViewer.class); + private static final Logger LOG = LoggerFactory + .getLogger(TestOfflineEditsViewer.class); private static final String buildDir = PathUtils .getTestDirName(TestOfflineEditsViewer.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index c84237cb83..d36be4f0fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -77,8 +77,8 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.output.NullOutputStream; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; @@ 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index c84237cb83..d36be4f0fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -77,8 +77,8 @@
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.output.NullOutputStream;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -106,7 +106,7 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -124,7 +124,8 @@
 import com.google.common.collect.Maps;
 
 public class TestOfflineImageViewer {
-  private static final Log LOG = LogFactory.getLog(OfflineImageViewerPB.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OfflineImageViewerPB.class);
   private static final int NUM_DIRS = 3;
   private static final int FILES_PER_DIR = 4;
   private static final String TEST_RENEWER = "JobTracker";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
index c66c2deb57..5ecec2db3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
@@ -37,8 +37,8 @@
 import javax.xml.parsers.SAXParser;
 import javax.xml.parsers.SAXParserFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -79,8 +79,8 @@
  */
 public class TestOfflineImageViewerForAcl {
 
-  private static final Log LOG =
-      LogFactory.getLog(TestOfflineImageViewerForAcl.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestOfflineImageViewerForAcl.class);
 
   private static File originalFsimage = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java
index b758c26726..360ed56e1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java
@@ -23,8 +23,8 @@
 import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URL;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -45,8 +45,8 @@
  */
 public class TestOfflineImageViewerForContentSummary {
 
-  private static final Log LOG = LogFactory
-      .getLog(TestOfflineImageViewerForContentSummary.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestOfflineImageViewerForContentSummary.class);
 
   private static File originalFsimage = null;
   private static ContentSummary summaryFromDFS = null;
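Note: most hunks in this patch touch only the imports and the LOG declaration; call sites survive unchanged because the common method shapes exist on both interfaces. A sketch (CallSites is a hypothetical class):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CallSites {  // hypothetical class, for illustration only
  private static final Logger LOG = LoggerFactory.getLogger(CallSites.class);

  void demo(Exception e) {
    // These shapes compile against both commons-logging's Log and SLF4J's
    // Logger, so changing the field type rarely forces call-site edits:
    LOG.info("plain message");
    LOG.warn("message with cause", e);
    if (LOG.isDebugEnabled()) {
      LOG.debug("guarded message");
    }
  }
}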
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
index 6c82101c66..74069b0e48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
@@ -30,8 +30,8 @@
 import java.util.Map;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -53,8 +53,8 @@
  */
 public class TestOfflineImageViewerForXAttr {
 
-  private static final Log LOG = LogFactory
-      .getLog(TestOfflineImageViewerForXAttr.class);
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestOfflineImageViewerForXAttr.class);
 
   private static File originalFsimage = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
index 50af25582a..6507bf32c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
@@ -30,16 +30,16 @@
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.Time;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestLightWeightHashSet{
 
-  private static final Log LOG = LogFactory
-      .getLog("org.apache.hadoop.hdfs.TestLightWeightHashSet");
+  private static final Logger LOG = LoggerFactory
+      .getLogger("org.apache.hadoop.hdfs.TestLightWeightHashSet");
   private final ArrayList<Integer> list = new ArrayList<Integer>();
   private final int NUM = 100;
   private LightWeightHashSet<Integer> set;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
index f923920946..6c55f28637 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
@@ -27,16 +27,16 @@
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.Time;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestLightWeightLinkedSet {
 
-  private static final Log LOG = LogFactory
-      .getLog("org.apache.hadoop.hdfs.TestLightWeightLinkedSet");
+  private static final Logger LOG = LoggerFactory
+      .getLogger("org.apache.hadoop.hdfs.TestLightWeightLinkedSet");
   private final ArrayList<Integer> list = new ArrayList<Integer>();
   private final int NUM = 100;
   private LightWeightLinkedSet<Integer> set;
@@ -432,4 +432,4 @@ public void testResetBookmarkPlacesBookmarkAtHead() {
     it = set.getBookmark();
     assertEquals(it.next(), list.get(0));
   }
-}
\ No newline at end of file
+}
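Note: the two TestLightWeight* classes above look up their logger by string name rather than by class object, and the migration keeps that form. getLogger(Class) is only a convenience for the class's fully qualified name, and the string here ("org.apache.hadoop.hdfs.TestLightWeightHashSet") does not match the actual package (org.apache.hadoop.hdfs.util), so switching to the class form would silently rename the logger. A sketch of the equivalence:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggerNames {  // hypothetical class, for illustration only
  public static void main(String[] args) {
    // Looking a logger up by class and by that class's fully qualified
    // name yields the same logger name, hence the same configuration:
    Logger byClass = LoggerFactory.getLogger(LoggerNames.class);
    Logger byName = LoggerFactory.getLogger(LoggerNames.class.getName());
    System.out.println(byClass.getName().equals(byName.getName())); // true
  }
}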
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
index dd2174e555..e88937a629 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
@@ -44,7 +44,7 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -52,7 +52,7 @@
 public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
   {
-    GenericTestUtils.setLogLevel(ExceptionHandler.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(ExceptionHandler.LOG, Level.TRACE);
   }
 
   private static MiniDFSCluster cluster = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index cbc428a469..bb1c39807c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -51,8 +51,8 @@
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
@@ -111,7 +111,7 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.test.Whitebox;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -130,7 +130,7 @@
 /** Test WebHDFS */
 public class TestWebHDFS {
-  static final Log LOG = LogFactory.getLog(TestWebHDFS.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestWebHDFS.class);
 
   static final Random RANDOM = new Random();
@@ -296,7 +296,7 @@ static void verifyPread(FileSystem fs, Path p, long offset, long length,
   /** Test client retry with namenode restarting. */
   @Test(timeout=300000)
   public void testNamenodeRestart() throws Exception {
-    GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.TRACE);
     final Configuration conf = WebHdfsTestUtil.createConf();
     TestDFSClientRetries.namenodeRestartTest(conf, true);
   }
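Note: the TestWebHdfsTimeouts hunks below replace the deprecated IOUtils.cleanup with IOUtils.cleanupWithLogger, which takes an SLF4J Logger instead of a commons-logging Log. A sketch of what a helper like that does — an illustration of the idiom, not the source of Hadoop's org.apache.hadoop.io.IOUtils:

import java.io.Closeable;
import java.io.IOException;
import org.slf4j.Logger;

public final class QuietCleanup {  // hypothetical class, for illustration only
  // Close every resource, swallow IOExceptions, and report them through the
  // supplied SLF4J logger so a failed close cannot mask a test's outcome.
  static void cleanupWithLogger(Logger log, Closeable... closeables) {
    for (Closeable c : closeables) {
      if (c == null) {
        continue;
      }
      try {
        c.close();
      } catch (IOException e) {
        if (log != null) {
          // Trailing Throwable is logged as the exception, not a placeholder.
          log.debug("Exception in closing {}", c, e);
        }
      }
    }
  }
}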
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 47438217ad..7b44515388 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -36,8 +36,8 @@
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -64,7 +64,8 @@
 @RunWith(Parameterized.class)
 public class TestWebHdfsTimeouts {
 
-  private static final Log LOG = LogFactory.getLog(TestWebHdfsTimeouts.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestWebHdfsTimeouts.class);
 
   private static final int CLIENTS_TO_CONSUME_BACKLOG = 129;
   private static final int CONNECTION_BACKLOG = 1;
@@ -126,8 +127,9 @@ public void setUp() throws Exception {
 
   @After
   public void tearDown() throws Exception {
-    IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()]));
-    IOUtils.cleanup(LOG, fs);
+    IOUtils.cleanupWithLogger(
+        LOG, clients.toArray(new SocketChannel[clients.size()]));
+    IOUtils.cleanupWithLogger(LOG, fs);
     if (serverSocket != null) {
       try {
         serverSocket.close();
@@ -247,7 +249,7 @@ public void testTwoStepWriteConnectTimeout() throws Exception {
       GenericTestUtils.assertExceptionContains(
           fs.getUri().getAuthority() + ": connect timed out", e);
     } finally {
-      IOUtils.cleanup(LOG, os);
+      IOUtils.cleanupWithLogger(LOG, os);
     }
   }
@@ -267,7 +269,7 @@ public void testTwoStepWriteReadTimeout() throws Exception {
     } catch (SocketTimeoutException e) {
      GenericTestUtils.assertExceptionContains("Read timed out", e);
     } finally {
-      IOUtils.cleanup(LOG, os);
+      IOUtils.cleanupWithLogger(LOG, os);
     }
   }
@@ -331,7 +333,7 @@ public void run() {
         fail("unexpected IOException in server thread: " + e);
       } finally {
         // Clean it all up.
-        IOUtils.cleanup(LOG, br, isr, in, out);
+        IOUtils.cleanupWithLogger(LOG, br, isr, in, out);
         IOUtils.closeSocket(clientSocket);
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
index 7bb6db9930..570123d8aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
@@ -20,7 +20,7 @@
 import java.net.InetSocketAddress;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -32,7 +32,7 @@
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
+import org.slf4j.event.Level;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -42,13 +42,13 @@
  * Test WebHDFS with multiple NameNodes
 */
 public class TestWebHdfsWithMultipleNameNodes {
-  static final Log LOG = WebHdfsTestUtil.LOG;
+  static final Logger LOG = WebHdfsTestUtil.LOG;
 
   static private void setLogLevel() {
-    GenericTestUtils.setLogLevel(LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(LOG, Level.TRACE);
+    GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.TRACE);
 
-    DFSTestUtil.setNameNodeLogLevel(Level.ALL);
+    DFSTestUtil.setNameNodeLogLevel(org.apache.log4j.Level.TRACE);
   }
 
   private static final Configuration conf = new HdfsConfiguration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
index 58de14ba91..3dae82a345 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
@@ -25,8 +25,8 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -37,7 +37,8 @@
 import org.junit.Assert;
 
 public class WebHdfsTestUtil {
-  public static final Log LOG = LogFactory.getLog(WebHdfsTestUtil.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(WebHdfsTestUtil.class);
 
   public static Configuration createConf() {
     final Configuration conf = new Configuration();
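Note: the TestWebHdfsWithMultipleNameNodes hunk above marks the boundary of the migration: DFSTestUtil.setNameNodeLogLevel still takes a log4j Level, so that call keeps a fully qualified org.apache.log4j.Level.TRACE while the file's Level import moves to org.slf4j.event.Level. When both Level types appear in one file, only one can be imported; a sketch of the pattern (class and method names hypothetical):

import org.slf4j.event.Level;  // the unqualified Level is now SLF4J's

public class MixedLevels {  // hypothetical class, for illustration only
  static void setLevels() {
    // Helpers migrated to the facade accept org.slf4j.event.Level:
    useSlf4jLevel(Level.TRACE);
    // APIs still bound to log4j keep its Level, fully qualified so the
    // two types do not collide in the import list:
    useLog4jLevel(org.apache.log4j.Level.TRACE);
  }

  static void useSlf4jLevel(Level level) { /* ... */ }
  static void useLog4jLevel(org.apache.log4j.Level level) { /* ... */ }
}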
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
index 6c145a4b6c..9851ede7af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
@@ -25,8 +25,8 @@
 import java.util.EnumSet;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Options;
@@ -42,7 +42,7 @@
 import org.junit.Test;
 
 public class TestParam {
-  public static final Log LOG = LogFactory.getLog(TestParam.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestParam.class);
 
   final Configuration conf = new Configuration();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 388e7f23a6..188476f024 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -29,8 +29,8 @@
 import java.io.IOException;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -48,7 +48,8 @@
 /** Unit tests for permission */
 public class TestPermission {
-  public static final Log LOG = LogFactory.getLog(TestPermission.class);
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestPermission.class);
 
   final private static Path ROOT_PATH = new Path("/data");
   final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
index 7bd29d21dd..ad5b86c837 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
@@ -30,8 +30,8 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
@@ -54,7 +54,8 @@
 public class TestPermissionSymlinks {
 
-  private static final Log LOG = LogFactory.getLog(TestPermissionSymlinks.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestPermissionSymlinks.class);
   private static final Configuration conf = new HdfsConfiguration();
   // Non-super user to run commands with
   private static final UserGroupInformation user = UserGroupInformation