HDFS-12650. Use slf4j instead of log4j in LeaseManager. Contributed by Ajay Kumar.

commit 96be795656
parent 6b8122458e
Arpit Agarwal, 2017-10-23 13:20:46 -07:00

4 changed files with 33 additions and 37 deletions
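
Every hunk below makes the same mechanical change: the commons-logging Log/LogFactory pair is replaced by an SLF4J Logger/LoggerFactory, and string-concatenated log messages become parameterized ones. A minimal sketch of the before/after pattern, assuming only slf4j-api (plus some binding) on the classpath; the Example class and its path argument are hypothetical, for illustration only:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Example {
      public static final Logger LOG = LoggerFactory.getLogger(Example.class);

      void demo(String path) {
        // Before (commons-logging): the message string is concatenated
        // eagerly, even when the WARN level is disabled.
        //   LOG.warn("The file " + path + " is not under construction but has lease.");

        // After (SLF4J): {} placeholders defer formatting until the logger
        // has confirmed the level is enabled, which is also why several
        // isDebugEnabled() guards are dropped in the hunks below.
        LOG.warn("The file {} is not under construction but has lease.", path);
      }
    }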

LeaseManager.java

@@ -37,8 +37,6 @@
 import java.util.concurrent.Future;
 
 import com.google.common.collect.Lists;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -50,6 +48,8 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * LeaseManager does the lease housekeeping for writing on files.
@@ -75,7 +75,8 @@
  */
 @InterfaceAudience.Private
 public class LeaseManager {
-  public static final Log LOG = LogFactory.getLog(LeaseManager.class);
+  public static final Logger LOG = LoggerFactory.getLogger(LeaseManager.class
+      .getName());
   private final FSNamesystem fsnamesystem;
   private long softLimit = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
   private long hardLimit = HdfsConstants.LEASE_HARDLIMIT_PERIOD;
@@ -142,8 +143,8 @@ synchronized long getNumUnderConstructionBlocks() {
     for (Long id : getINodeIdWithLeases()) {
       final INodeFile cons = fsnamesystem.getFSDirectory().getInode(id).asFile();
       if (!cons.isUnderConstruction()) {
-        LOG.warn("The file " + cons.getFullPathName()
-            + " is not under construction but has lease.");
+        LOG.warn("The file {} is not under construction but has lease.",
+            cons.getFullPathName());
         continue;
       }
       BlockInfo[] blocks = cons.getBlocks();
@@ -155,7 +156,7 @@ synchronized long getNumUnderConstructionBlocks() {
         numUCBlocks++;
       }
     }
-    LOG.info("Number of blocks under construction: " + numUCBlocks);
+    LOG.info("Number of blocks under construction: {}", numUCBlocks);
     return numUCBlocks;
   }
 
@@ -250,9 +251,8 @@ public List<INodesInPath> call() {
         }
         final long endTimeMs = Time.monotonicNow();
         if ((endTimeMs - startTimeMs) > 1000) {
-          LOG.info("Took " + (endTimeMs - startTimeMs) + " ms to collect "
-              + iipSet.size() + " open files with leases" +
-              ((ancestorDir != null) ?
-                  " under " + ancestorDir.getFullPathName() : "."));
+          LOG.info("Took {} ms to collect {} open files with leases {}",
+              (endTimeMs - startTimeMs), iipSet.size(), ((ancestorDir != null) ?
+              " under " + ancestorDir.getFullPathName() : "."));
         }
         return iipSet;
@@ -287,8 +287,8 @@ public BatchedListEntries<OpenFileEntry> getUnderConstructionFiles(
       final INodeFile inodeFile =
           fsnamesystem.getFSDirectory().getInode(inodeId).asFile();
       if (!inodeFile.isUnderConstruction()) {
-        LOG.warn("The file " + inodeFile.getFullPathName()
-            + " is not under construction but has lease.");
+        LOG.warn("The file {} is not under construction but has lease.",
+            inodeFile.getFullPathName());
         continue;
       }
       openFileEntries.add(new OpenFileEntry(
@@ -348,16 +348,13 @@ synchronized void removeLease(long inodeId) {
   private synchronized void removeLease(Lease lease, long inodeId) {
     leasesById.remove(inodeId);
     if (!lease.removeFile(inodeId)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("inode " + inodeId + " not found in lease.files (=" + lease
-            + ")");
-      }
+      LOG.debug("inode {} not found in lease.files (={})", inodeId, lease);
     }
 
     if (!lease.hasFiles()) {
       leases.remove(lease.holder);
       if (!sortedLeases.remove(lease)) {
-        LOG.error(lease + " not found in sortedLeases");
+        LOG.error("{} not found in sortedLeases", lease);
       }
     }
   }
@@ -370,8 +367,8 @@ synchronized void removeLease(String holder, INodeFile src) {
     if (lease != null) {
      removeLease(lease, src.getId());
    } else {
-      LOG.warn("Removing non-existent lease! holder=" + holder +
-          " src=" + src.getFullPathName());
+      LOG.warn("Removing non-existent lease! holder={} src={}", holder, src
+          .getFullPathName());
    }
  }
 
@@ -513,9 +510,7 @@ public void run() {
           Thread.sleep(fsnamesystem.getLeaseRecheckIntervalMs());
         } catch(InterruptedException ie) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(name + " is interrupted", ie);
-          }
+          LOG.debug("{} is interrupted", name, ie);
         } catch(Throwable e) {
           LOG.warn("Unexpected throwable: ", e);
         }
 
@@ -537,7 +532,7 @@ synchronized boolean checkLeases() {
         sortedLeases.first().expiredHardLimit()
         && !isMaxLockHoldToReleaseLease(start)) {
       Lease leaseToCheck = sortedLeases.first();
-      LOG.info(leaseToCheck + " has expired hard limit");
+      LOG.info("{} has expired hard limit", leaseToCheck);
       final List<Long> removing = new ArrayList<>();
 
       // need to create a copy of the oldest lease files, because
@@ -568,16 +563,16 @@ synchronized boolean checkLeases() {
             completed = fsnamesystem.internalReleaseLease(
                 leaseToCheck, p, iip, newHolder);
           } catch (IOException e) {
-            LOG.warn("Cannot release the path " + p + " in the lease "
-                + leaseToCheck + ". It will be retried.", e);
+            LOG.warn("Cannot release the path {} in the lease {}. It will be "
+                + "retried.", p, leaseToCheck, e);
             continue;
           }
           if (LOG.isDebugEnabled()) {
             if (completed) {
-              LOG.debug("Lease recovery for inode " + id + " is complete. " +
-                  "File closed.");
+              LOG.debug("Lease recovery for inode {} is complete. File closed"
+                  + ".", id);
             } else {
-              LOG.debug("Started block recovery " + p + " lease " + leaseToCheck);
+              LOG.debug("Started block recovery {} lease {}", p, leaseToCheck);
             }
           }
           // If a lease recovery happened, we need to sync later.
@@ -585,13 +580,13 @@ synchronized boolean checkLeases() {
             needSync = true;
           }
         } catch (IOException e) {
-          LOG.warn("Removing lease with an invalid path: " + p + ","
-              + leaseToCheck, e);
+          LOG.warn("Removing lease with an invalid path: {},{}", p,
+              leaseToCheck, e);
           removing.add(id);
         }
         if (isMaxLockHoldToReleaseLease(start)) {
-          LOG.debug("Breaking out of checkLeases after " +
-              fsnamesystem.getMaxLockHoldToReleaseLeaseMs() + "ms.");
+          LOG.debug("Breaking out of checkLeases after {} ms.",
+              fsnamesystem.getMaxLockHoldToReleaseLeaseMs());
           break;
         }
       }
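
One detail worth noting in the checkLeases() hunks above: SLF4J (1.6 and later) treats a trailing Throwable argument that has no matching {} placeholder as an attached exception, so the rewritten LOG.warn still records the stack trace of e. A self-contained sketch of that behavior; the TrailingThrowableDemo class and its path/lease arguments are hypothetical:

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class TrailingThrowableDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(TrailingThrowableDemo.class);

      static void release(String path, String lease) {
        try {
          throw new IOException("recovery failed");
        } catch (IOException e) {
          // Two placeholders, three arguments: path and lease fill the
          // placeholders; e, the trailing Throwable, is logged with its
          // full stack trace.
          LOG.warn("Cannot release the path {} in the lease {}. It will be retried.",
              path, lease, e);
        }
      }

      public static void main(String[] args) {
        release("/tmp/file", "lease-1");
      }
    }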

InterDatanodeProtocol.java

@@ -19,14 +19,13 @@
 package org.apache.hadoop.hdfs.server.protocol;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.security.KerberosInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** An inter-datanode protocol for updating generation stamp
  */
@@ -35,7 +34,7 @@
     clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface InterDatanodeProtocol {
-  public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
+  Logger LOG = LoggerFactory.getLogger(InterDatanodeProtocol.class.getName());
 
   /**
    * Until version 9, this class InterDatanodeProtocol served as both
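
The new declaration also drops the public static final modifiers: fields declared in a Java interface are implicitly public, static, and final, so the shorter form is exactly equivalent. A sketch (the Demo interface is hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public interface Demo {
      // Implicitly public static final inside an interface; equivalent to
      //   public static final Logger LOG = LoggerFactory.getLogger(Demo.class);
      Logger LOG = LoggerFactory.getLogger(Demo.class);
    }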

TestDatanodeDeath.java

@@ -45,7 +45,8 @@ public class TestDatanodeDeath {
     DFSTestUtil.setNameNodeLogLevel(Level.ALL);
     GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
     GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, org.slf4j
+        .event.Level.TRACE);
   }
 
   static final int blockSize = 8192;

TestFileAppend3.java

@@ -58,7 +58,8 @@ public class TestFileAppend3 {
     DFSTestUtil.setNameNodeLogLevel(Level.ALL);
     GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
     GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
-    GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, Level.ALL);
+    GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, org.slf4j
+        .event.Level.TRACE);
   }
 
   static final long BLOCK_SIZE = 64 * 1024;
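
The two test hunks switch the level to TRACE because SLF4J defines only ERROR, WARN, INFO, DEBUG, and TRACE; there is no Level.ALL, and TRACE is the most verbose available. The fully qualified org.slf4j.event.Level is presumably used because these test classes already import the log4j Level for the other setLogLevel calls. With an import instead, the new call would read as below; this assumes Hadoop's org.apache.hadoop.test.GenericTestUtils, and the LogLevelSetup wrapper class is hypothetical:

    import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
    import org.apache.hadoop.test.GenericTestUtils;
    import org.slf4j.event.Level;

    public class LogLevelSetup {
      public static void main(String[] args) {
        // TRACE is SLF4J's most verbose level, the closest
        // equivalent of log4j's Level.ALL.
        GenericTestUtils.setLogLevel(InterDatanodeProtocol.LOG, Level.TRACE);
      }
    }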