HDFS-7706. Switch BlockManager logging to use slf4j.

Andrew Wang 2015-01-30 11:32:25 -08:00
parent 12e883007c
commit 951b3608a8
10 changed files with 31 additions and 47 deletions
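
For context, the core of the change is replacing the commons-logging Log/LogFactory pair with slf4j's Logger/LoggerFactory on BlockManager and on the classes that reuse its logger. A minimal sketch of the swap, assuming nothing beyond what the hunks below show:

// Before (commons-logging):
//   import org.apache.commons.logging.Log;
//   import org.apache.commons.logging.LogFactory;
//   static final Log LOG = LogFactory.getLog(BlockManager.class);

// After (slf4j):
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class BlockManager {
  static final Logger LOG = LoggerFactory.getLogger(BlockManager.class);
}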

CHANGES.txt

@ -564,6 +564,8 @@ Release 2.7.0 - UNRELEASED
HDFS-7603. The background replication queue initialization may not let
others run (kihwal)
HDFS-7706. Switch BlockManager logging to use slf4j. (wang)
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

BlockManager.java

@ -38,7 +38,6 @@
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@ -89,6 +88,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Keeps information related to the blocks stored in the Hadoop cluster.
@ -96,7 +97,7 @@
@InterfaceAudience.Private
public class BlockManager {
static final Log LOG = LogFactory.getLog(BlockManager.class);
static final Logger LOG = LoggerFactory.getLogger(BlockManager.class);
public static final Log blockLog = NameNode.blockStateChangeLog;
private static final String QUEUE_REASON_CORRUPT_STATE =
@ -3619,7 +3620,8 @@ public void run() {
LOG.info("Stopping ReplicationMonitor for testing.");
break;
}
LOG.fatal("ReplicationMonitor thread received Runtime exception. ", t);
LOG.error("ReplicationMonitor thread received Runtime exception. ",
t);
terminate(1, t);
}
}
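
slf4j has no FATAL level, which is why the ReplicationMonitor call above moves from LOG.fatal to LOG.error. The two-argument error(String, Throwable) overload still records the stack trace; a self-contained sketch of the equivalent call (the class and the simulated exception are illustrative, not code from this commit):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FatalToErrorSketch {
  private static final Logger LOG = LoggerFactory.getLogger(FatalToErrorSketch.class);

  public static void main(String[] args) {
    try {
      throw new RuntimeException("simulated failure");
    } catch (Throwable t) {
      // error(String, Throwable) logs the message plus the full stack trace,
      // matching what LOG.fatal(msg, t) produced under commons-logging
      LOG.error("ReplicationMonitor thread received Runtime exception. ", t);
    }
  }
}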

InvalidateBlocks.java

@ -26,7 +26,6 @@
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
@ -37,6 +36,7 @@
import org.apache.hadoop.hdfs.DFSUtil;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
/**
* Keeps a Collection for every named machine containing blocks
@ -67,7 +67,7 @@ class InvalidateBlocks {
printBlockDeletionTime(BlockManager.LOG);
}
private void printBlockDeletionTime(final Log log) {
private void printBlockDeletionTime(final Logger log) {
log.info(DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY
+ " is set to " + DFSUtil.durationToString(pendingPeriodInMs));
SimpleDateFormat sdf = new SimpleDateFormat("yyyy MMM dd HH:mm:ss");
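
Because org.slf4j.Logger exposes the same plain info(String) call as commons-logging, the concatenating body of printBlockDeletionTime compiles unchanged once the parameter type flips from Log to Logger. A sketch of the two equivalent styles, using hypothetical key/duration arguments in place of the DFSConfigKeys constant and the DFSUtil.durationToString call:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DeletionTimeLogSketch {
  private static final Logger LOG = LoggerFactory.getLogger(DeletionTimeLogSketch.class);

  static void printBlockDeletionTime(Logger log, String key, String duration) {
    // Style kept by this commit: plain concatenation still works against slf4j
    log.info(key + " is set to " + duration);
    // Equivalent slf4j idiom (not applied by this commit): placeholders defer
    // string building until the INFO level is known to be enabled
    log.info("{} is set to {}", key, duration);
  }

  public static void main(String[] args) {
    printBlockDeletionTime(LOG, "some.config.key", "1 hour");
  }
}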

PendingReplicationBlocks.java

@ -28,9 +28,9 @@
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.util.Daemon;
import org.slf4j.Logger;
/***************************************************
* PendingReplicationBlocks does the bookkeeping of all
@ -44,7 +44,7 @@
*
***************************************************/
class PendingReplicationBlocks {
private static final Log LOG = BlockManager.LOG;
private static final Logger LOG = BlockManager.LOG;
private final Map<Block, PendingBlockInfo> pendingReplications;
private final ArrayList<Block> timedOutItems;

DFSTestUtil.java

@ -81,6 +81,7 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.VersionInfo;
import org.apache.log4j.Level;
import org.junit.Assume;
import org.mockito.internal.util.reflection.Whitebox;
@ -1656,4 +1657,10 @@ public static boolean changeReplicaLength(MiniDFSCluster cluster,
LOG.info("failed to change length of block " + blk);
return false;
}
public static void setNameNodeLogLevel(Level level) {
GenericTestUtils.setLogLevel(LogFactory.getLog(FSNamesystem.class), level);
GenericTestUtils.setLogLevel(LogFactory.getLog(BlockManager.class), level);
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, level);
}
}
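
The new DFSTestUtil.setNameNodeLogLevel helper centralizes the three per-logger calls that the HA tests below previously spelled out with Log4JLogger casts. A minimal usage sketch (the test class name is hypothetical; the static-initializer placement matches the updated tests):

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.log4j.Level;

public class SomeNameNodeTest {
  static {
    // Sets the FSNamesystem, BlockManager, and NameNode.stateChangeLog loggers in one call
    DFSTestUtil.setNameNodeLogLevel(Level.ALL);
  }
}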

TestPendingInvalidateBlock.java

@ -19,7 +19,6 @@
import java.text.SimpleDateFormat;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
@ -29,6 +28,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
@ -42,7 +42,7 @@
*/
public class TestPendingInvalidateBlock {
{
((Log4JLogger)BlockManager.LOG).getLogger().setLevel(Level.DEBUG);
GenericTestUtils.setLogLevel(BlockManager.LOG, Level.DEBUG);
}
private static final int BLOCKSIZE = 1024;
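
The initializer change above is the pattern applied across the test files: casting BlockManager.LOG to Log4JLogger and calling getLogger().setLevel() would now fail at runtime with a ClassCastException, since the field holds an slf4j logger rather than a commons-logging Log4JLogger. The tests therefore go through GenericTestUtils.setLogLevel, which the diff shows accepting both the old commons-logging Log objects and the new slf4j logger. A compact before/after sketch using only names that appear in the diff:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;

public class LogLevelSketch {
  static {
    // Before: ((Log4JLogger) BlockManager.LOG).getLogger().setLevel(Level.DEBUG);
    // After: the test utility resolves the underlying logger implementation itself
    GenericTestUtils.setLogLevel(BlockManager.LOG, Level.DEBUG);
  }
}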

TestDNFencing.java

@ -25,9 +25,10 @@
import java.util.List;
import java.util.concurrent.CountDownLatch;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@ -50,7 +51,6 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@ -65,9 +65,6 @@
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import com.google.common.base.Supplier;
import com.google.common.collect.Lists;
public class TestDNFencing {
@ -82,9 +79,7 @@ public class TestDNFencing {
private FileSystem fs;
static {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(BlockManager.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
@Before

TestPipelinesFailover.java

@ -53,6 +53,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.retry.RetryInvocationHandler;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
@ -60,6 +61,7 @@
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Test;
import org.mockito.Mockito;
@ -70,12 +72,9 @@
*/
public class TestPipelinesFailover {
static {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(BlockManager.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(
"org.apache.hadoop.io.retry.RetryInvocationHandler")).getLogger().setLevel(Level.ALL);
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
GenericTestUtils.setLogLevel(LogFactory.getLog(RetryInvocationHandler
.class), Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
protected static final Log LOG = LogFactory.getLog(

TestStandbyBlockManagement.java

@ -17,13 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -31,22 +26,13 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.base.Supplier;
import static org.junit.Assert.assertEquals;
/**
* Makes sure that standby doesn't do the unnecessary block management such as
@ -60,9 +46,7 @@ public class TestStandbyBlockManagement {
private static final Path TEST_FILE_PATH = new Path(TEST_FILE);
static {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(BlockManager.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
@Test(timeout=60000)

TestStandbyIsHot.java

@ -23,7 +23,6 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -35,10 +34,8 @@
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;
@ -60,9 +57,7 @@ public class TestStandbyIsHot {
private static final Path TEST_FILE_PATH = new Path(TEST_FILE);
static {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)LogFactory.getLog(BlockManager.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
@Test(timeout=60000)