HDFS-9817. Use SLF4J in new classes. Contributed by Anu Engineer

Authored by Anu Engineer on 2016-03-04 20:16:13 -08:00; committed by Arpit Agarwal
parent 3df0781aa7
commit 747227e9de
8 changed files with 47 additions and 41 deletions
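Every file in this commit follows the same pattern: the commons-logging Log/LogFactory declaration is replaced with an SLF4J Logger obtained from LoggerFactory. A minimal sketch of that pattern is below; SampleDiskBalancerClass is a hypothetical stand-in for the diskbalancer classes actually touched (DiskBalancer, ConnectorFactory, GreedyPlanner, and so on).

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative stand-in for the classes converted by this commit.
    public class SampleDiskBalancerClass {
      // commons-logging style removed by this commit:
      //   private static final Log LOG = LogFactory.getLog(SampleDiskBalancerClass.class);
      // SLF4J style introduced by this commit:
      private static final Logger LOG =
          LoggerFactory.getLogger(SampleDiskBalancerClass.class);

      public static void main(String[] args) {
        LOG.info("Logger initialised for {}", SampleDiskBalancerClass.class.getSimpleName());
      }
    }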

View File: DiskBalancer.java

@@ -20,8 +20,8 @@
 import com.google.common.base.Preconditions;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -60,7 +60,8 @@
 @InterfaceAudience.Private
 public class DiskBalancer {
-  private static final Log LOG = LogFactory.getLog(DiskBalancer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DiskBalancer
+      .class);
   private final FsDatasetSpi<?> dataset;
   private final String dataNodeUUID;
   private final BlockMover blockMover;

View File: ConnectorFactory.java

@@ -16,8 +16,8 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import java.io.IOException;
@@ -28,7 +28,8 @@
  * Connector factory creates appropriate connector based on the URL.
  */
 public final class ConnectorFactory {
-  static final Log LOG = LogFactory.getLog(ConnectorFactory.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ConnectorFactory.class);
   /**
    * Constructs an appropriate connector based on the URL.
@@ -37,13 +38,13 @@ public final class ConnectorFactory {
    */
   public static ClusterConnector getCluster(URI clusterURI, Configuration
       conf) throws IOException, URISyntaxException {
-    LOG.info("Cluster URI : " + clusterURI);
-    LOG.info("scheme : " + clusterURI.getScheme());
+    LOG.debug("Cluster URI : {}" , clusterURI);
+    LOG.debug("scheme : {}" , clusterURI.getScheme());
     if (clusterURI.getScheme().startsWith("file")) {
-      LOG.info("Creating a JsonNodeConnector");
+      LOG.debug("Creating a JsonNodeConnector");
       return new JsonNodeConnector(clusterURI.toURL());
     } else {
-      LOG.info("Creating NameNode connector");
+      LOG.debug("Creating NameNode connector");
       return new DBNameNodeConnector(clusterURI, conf);
     }
   }
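Besides the logger declaration, the hunk above also switches from string concatenation at INFO level to SLF4J parameterized messages at DEBUG level; with the "{}" placeholder form, argument formatting only happens when the level is actually enabled. A short sketch of the difference (class name and URI value are illustrative):

    import java.net.URI;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ParameterizedLoggingExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(ParameterizedLoggingExample.class);

      public static void main(String[] args) {
        URI clusterURI = URI.create("file:///tmp/diskBalancer.json");
        // Old style: the message string is built even when the level is disabled.
        LOG.info("Cluster URI : " + clusterURI);
        // New style: placeholders are substituted only if DEBUG is enabled.
        LOG.debug("Cluster URI : {}", clusterURI);
        LOG.debug("scheme : {}", clusterURI.getScheme());
      }
    }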

View File: DBNameNodeConnector.java

@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -42,7 +42,8 @@
  * given cluster.
  */
 class DBNameNodeConnector implements ClusterConnector {
-  static final Log LOG = LogFactory.getLog(DBNameNodeConnector.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DBNameNodeConnector.class);
   static final Path DISKBALANCER_ID_PATH = new Path("/system/diskbalancer.id");
   private final URI clusterURI;
   private final NameNodeConnector connector;
@@ -159,4 +160,4 @@ private void getVolumeInfoFromStorageReports(DiskBalancerDataNode node,
}
}
}
}

View File: JsonNodeConnector.java

@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
@@ -33,7 +33,8 @@
  * A connector that understands JSON data cluster models.
  */
 public class JsonNodeConnector implements ClusterConnector {
-  static final Log LOG = LogFactory.getLog(JsonNodeConnector.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JsonNodeConnector.class);
   private final URL clusterURI;
   /**

View File: DiskBalancerCluster.java

@@ -19,8 +19,8 @@
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Planner;
@@ -66,7 +66,8 @@
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class DiskBalancerCluster {
-  static final Log LOG = LogFactory.getLog(DiskBalancerCluster.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerCluster.class);
   private final Set<String> exclusionList;
   private final Set<String> inclusionList;
   private ClusterConnector clusterConnector;
@@ -264,7 +265,7 @@ public void createSnapshot(String snapShotName) throws IOException {
    */
   public void createOutPutDirectory() throws IOException {
     if (Files.exists(Paths.get(this.getOutput()))) {
-      LOG.fatal("An output directory already exists at this location. Path : " +
+      LOG.error("An output directory already exists at this location. Path : " +
           this.getOutput());
       throw new IOException(
           "An output directory already exists at this location. Path : " +
@@ -273,7 +274,7 @@ public void createOutPutDirectory() throws IOException {
     File f = new File(this.getOutput());
     if (!f.mkdirs()) {
-      LOG.fatal("Unable to create the output directory. Path : " + this
+      LOG.error("Unable to create the output directory. Path : " + this
           .getOutput());
       throw new IOException(
           "Unable to create the output directory. Path : " + this.getOutput());

View File: DiskBalancerVolumeSet.java

@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
@@ -40,7 +40,8 @@
  */
 @JsonIgnoreProperties({"sortedQueue", "volumeCount", "idealUsed"})
 public class DiskBalancerVolumeSet {
-  static final Log LOG = LogFactory.getLog(DiskBalancerVolumeSet.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerVolumeSet.class);
   private final int maxDisks = 256;
   @JsonProperty("transient")
@@ -172,7 +173,7 @@ private void skipMisConfiguredVolume(DiskBalancerVolume volume) {
         volume.getStorageType(),
         volume.getUuid());
-    LOG.fatal(errMessage);
+    LOG.error(errMessage);
     volume.setSkip(true);
   }

View File: GreedyPlanner.java

@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.planner;
 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
@@ -42,7 +42,8 @@ public class GreedyPlanner implements Planner {
   public static final long MB = 1024L * 1024L;
   public static final long GB = MB * 1024L;
   public static final long TB = GB * 1024L;
-  static final Log LOG = LogFactory.getLog(GreedyPlanner.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(GreedyPlanner.class);
   private final float threshold;
   /**
@@ -108,13 +109,13 @@ public void balanceVolumeSet(DiskBalancerDataNode node,
       if (!lowVolume.isSkip() && !highVolume.isSkip()) {
         nextStep = computeMove(currentSet, lowVolume, highVolume);
       } else {
-        LOG.debug("Skipping compute move. lowVolume :" + lowVolume.getPath());
-        LOG.debug("Skipping compute move. highVolume :" + highVolume.getPath());
+        LOG.debug("Skipping compute move. lowVolume: {} highVolume: {}",
+            lowVolume.getPath(), highVolume.getPath());
       }
       applyStep(nextStep, currentSet, lowVolume, highVolume);
       if (nextStep != null) {
-        LOG.debug("Step : " + nextStep.toString());
+        LOG.debug("Step : {} ", nextStep.toString());
         plan.addStep(nextStep);
       }
     }
@@ -179,9 +180,8 @@ private Step computeMove(DiskBalancerVolumeSet currentSet,
     // This disk cannot take any more data from any disk.
     // Remove it from our computation matrix.
     if (maxLowVolumeCanReceive <= 0) {
-      LOG.debug(lowVolume.getPath() +
-          " Skipping disk from computation. Maximum data size " +
-          "achieved.");
+      LOG.debug("{} Skipping disk from computation. Maximum data size " +
+          "achieved.", lowVolume.getPath());
       lowVolume.setSkip(true);
     }
@@ -191,9 +191,8 @@ private Step computeMove(DiskBalancerVolumeSet currentSet,
     // This volume cannot give any more data, remove it from the
     // computation matrix
     if (maxHighVolumeCanGive <= 0) {
-      LOG.debug(highVolume.getPath() +
-          " Skipping disk from computation. Minimum data size " +
-          "achieved.");
+      LOG.debug(" {} Skipping disk from computation. Minimum data size " +
+          "achieved.", highVolume.getPath());
       highVolume.setSkip(true);
     }
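The GreedyPlanner hunks above collapse concatenated debug messages into single parameterized calls, which also allows several values in one message without an explicit isDebugEnabled() guard. A short sketch of both points (class name and volume paths illustrative); passing the object itself, rather than calling toString() up front, is what keeps the formatting lazy, whereas the "Step : {}" hunk above still invokes toString() eagerly:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LazyDebugLoggingExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(LazyDebugLoggingExample.class);

      public static void main(String[] args) {
        String lowVolumePath = "/data/disk1";   // illustrative
        String highVolumePath = "/data/disk7";  // illustrative
        // Several placeholders in one message, filled left to right.
        LOG.debug("Skipping compute move. lowVolume: {} highVolume: {}",
            lowVolumePath, highVolumePath);
        // Passing the object defers toString() until DEBUG is known to be enabled.
        Object nextStep = new Object();
        LOG.debug("Step : {}", nextStep);
      }
    }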

View File: PlannerFactory.java

@@ -16,8 +16,8 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer.planner;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
     .DiskBalancerDataNode;
@@ -25,7 +25,8 @@
  * Returns a planner based on the user defined tags.
  */
 public final class PlannerFactory {
-  static final Log LOG = LogFactory.getLog(PlannerFactory.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PlannerFactory.class);
   public static final String GREEDY_PLANNER = "greedyPlanner";