HDFS-10560. DiskBalancer: Reuse ObjectMapper instance to improve the performance. Contributed by Yiqun Lin.

This commit is contained in:
Anu Engineer 2016-08-16 10:20:08 -07:00
parent b427ce12bc
commit b047bc7270
7 changed files with 46 additions and 32 deletions

View File

@ -24,6 +24,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.htrace.fasterxml.jackson.annotation.JsonInclude;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;
import java.io.IOException;
@ -34,6 +35,10 @@
@InterfaceStability.Unstable
@JsonInclude(JsonInclude.Include.NON_DEFAULT)
public class DiskBalancerWorkItem {
private static final ObjectMapper MAPPER = new ObjectMapper();
private static final ObjectReader READER =
new ObjectMapper().reader(DiskBalancerWorkItem.class);
private long startTime;
private long secondsElapsed;
private long bytesToCopy;
@ -74,8 +79,7 @@ public DiskBalancerWorkItem(long bytesToCopy, long bytesCopied) {
*/
public static DiskBalancerWorkItem parseJson(String json) throws IOException {
  Preconditions.checkNotNull(json);
  // Reuse the shared, statically-initialized ObjectReader instead of
  // constructing a new ObjectMapper on every call; ObjectReader is
  // immutable and thread-safe, so one instance serves all callers.
  return READER.readValue(json);
}
/**
@ -169,8 +173,7 @@ public void incBlocksCopied() {
* @throws IOException
*/
public String toJson() throws IOException {
  // Serialize with the shared static MAPPER; building an ObjectMapper
  // per call is expensive, and the mapper is thread-safe once configured.
  return MAPPER.writeValueAsString(this);
}
/**

View File

@ -24,6 +24,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;
import org.codehaus.jackson.map.SerializationConfig;
import static org.codehaus.jackson.map.type.TypeFactory.defaultInstance;
@ -38,6 +39,15 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DiskBalancerWorkStatus {
private static final ObjectMapper MAPPER = new ObjectMapper();
private static final ObjectMapper MAPPER_WITH_INDENT_OUTPUT =
new ObjectMapper().enable(
SerializationConfig.Feature.INDENT_OUTPUT);
private static final ObjectReader READER_WORKSTATUS =
new ObjectMapper().reader(DiskBalancerWorkStatus.class);
private static final ObjectReader READER_WORKENTRY = new ObjectMapper()
.reader(defaultInstance().constructCollectionType(List.class,
DiskBalancerWorkEntry.class));
private final List<DiskBalancerWorkEntry> currentState;
private Result result;
@ -92,10 +102,7 @@ public DiskBalancerWorkStatus(Result result, String planID, String planFile,
this.result = result;
this.planID = planID;
this.planFile = planFile;
ObjectMapper mapper = new ObjectMapper();
this.currentState = mapper.readValue(currentState,
defaultInstance().constructCollectionType(
List.class, DiskBalancerWorkEntry.class));
this.currentState = READER_WORKENTRY.readValue(currentState);
}
@ -141,15 +148,11 @@ public List<DiskBalancerWorkEntry> getCurrentState() {
* @throws IOException
**/
public String currentStateString() throws IOException {
  // Pretty-print the work entries with the pre-configured mapper that has
  // SerializationConfig.Feature.INDENT_OUTPUT enabled at class-init time,
  // avoiding a fresh ObjectMapper (and re-enabling the feature) per call.
  return MAPPER_WITH_INDENT_OUTPUT.writeValueAsString(currentState);
}
public String toJsonString() throws IOException {
  // Serialize this status object via the shared static MAPPER rather than
  // allocating a new ObjectMapper on each invocation.
  return MAPPER.writeValueAsString(this);
}
/**
@ -160,8 +163,7 @@ public String toJsonString() throws IOException {
*/
public static DiskBalancerWorkStatus parseJson(String json) throws
    IOException {
  // Deserialize with the shared ObjectReader bound to
  // DiskBalancerWorkStatus; thread-safe and created once per class load.
  return READER_WORKSTATUS.readValue(json);
}

View File

@ -34,8 +34,8 @@
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.util.Time;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -263,8 +263,7 @@ public String getVolumeNames() throws DiskBalancerException {
for (Map.Entry<String, FsVolumeSpi> entry : volMap.entrySet()) {
pathMap.put(entry.getKey(), entry.getValue().getBasePath());
}
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(pathMap);
return JsonUtil.toJsonString(pathMap);
} catch (DiskBalancerException ex) {
throw ex;
} catch (IOException e) {

View File

@ -44,6 +44,7 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -69,6 +70,8 @@
* Common interface for command handling.
*/
public abstract class Command extends Configured {
private static final ObjectReader READER =
new ObjectMapper().reader(HashMap.class);
static final Logger LOG = LoggerFactory.getLogger(Command.class);
private Map<String, String> validArgs = new HashMap<>();
private URI clusterURI;
@ -441,11 +444,10 @@ protected void populatePathNames(
ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
String volumeNameJson = dnClient.getDiskBalancerSetting(
DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
ObjectMapper mapper = new ObjectMapper();
@SuppressWarnings("unchecked")
Map<String, String> volumeMap =
mapper.readValue(volumeNameJson, HashMap.class);
READER.readValue(volumeNameJson);
for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
for (DiskBalancerVolume vol : set.getVolumes()) {
if (volumeMap.containsKey(vol.getUuid())) {

View File

@ -18,12 +18,14 @@
package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
.DiskBalancerDataNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;
import java.io.File;
import java.net.URL;
@ -35,6 +37,8 @@
public class JsonNodeConnector implements ClusterConnector {
private static final Logger LOG =
LoggerFactory.getLogger(JsonNodeConnector.class);
private static final ObjectReader READER =
new ObjectMapper().reader(DiskBalancerCluster.class);
private final URL clusterURI;
/**
@ -56,9 +60,7 @@ public List<DiskBalancerDataNode> getNodes() throws Exception {
Preconditions.checkNotNull(this.clusterURI);
String dataFilePath = this.clusterURI.getPath();
LOG.info("Reading cluster info from file : " + dataFilePath);
ObjectMapper mapper = new ObjectMapper();
DiskBalancerCluster cluster =
mapper.readValue(new File(dataFilePath), DiskBalancerCluster.class);
DiskBalancerCluster cluster = READER.readValue(new File(dataFilePath));
String message = String.format("Found %d node(s)",
cluster.getNodes().size());
LOG.info(message);

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
import com.google.common.base.Preconditions;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -25,9 +26,11 @@
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Planner;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.PlannerFactory;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.codehaus.jackson.annotate.JsonIgnore;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;
import java.io.File;
import java.io.IOException;
@ -69,6 +72,8 @@ public class DiskBalancerCluster {
private static final Logger LOG =
LoggerFactory.getLogger(DiskBalancerCluster.class);
private static final ObjectReader READER =
new ObjectMapper().reader(DiskBalancerCluster.class);
private final Set<String> exclusionList;
private final Set<String> inclusionList;
private ClusterConnector clusterConnector;
@ -118,8 +123,7 @@ public DiskBalancerCluster(ClusterConnector connector) throws IOException {
* @throws IOException
*/
public static DiskBalancerCluster parseJson(String json) throws IOException {
  // Use the shared static ObjectReader bound to DiskBalancerCluster;
  // avoids the cost of a new ObjectMapper per parse (HDFS-10560).
  return READER.readValue(json);
}
/**
@ -232,8 +236,7 @@ public void setInclusionList(Set<String> includeNodes) {
* @throws IOException
*/
public String toJson() throws IOException {
  // Delegate to Hadoop's JsonUtil, which already maintains a shared
  // mapper internally, instead of allocating an ObjectMapper here.
  return JsonUtil.toJsonString(this);
}
/**

View File

@ -19,9 +19,11 @@
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.codehaus.jackson.annotate.JsonIgnore;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.ObjectReader;
import java.io.IOException;
@ -30,6 +32,9 @@
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public class DiskBalancerVolume {
private static final ObjectReader READER =
new ObjectMapper().reader(DiskBalancerVolume.class);
private String path;
private long capacity;
private String storageType;
@ -58,8 +63,7 @@ public DiskBalancerVolume() {
* @throws IOException
*/
public static DiskBalancerVolume parseJson(String json) throws IOException {
  // Reuse the class-level ObjectReader for DiskBalancerVolume; creating
  // an ObjectMapper for every parse is needlessly expensive.
  return READER.readValue(json);
}
/**
@ -305,8 +309,7 @@ public long computeEffectiveCapacity() {
* @throws IOException
*/
public String toJson() throws IOException {
  // Delegate serialization to JsonUtil's shared mapper rather than
  // instantiating a new ObjectMapper on each call.
  return JsonUtil.toJsonString(this);
}
/**