HDFS-7433. Optimize performance of DatanodeManager's node map. Contributed by Daryn Sharp.

This commit is contained in:
Kihwal Lee 2015-05-08 15:18:29 -05:00
parent 8f7c2364d7
commit 7a7960be41
2 changed files with 9 additions and 10 deletions

View File

@@ -535,6 +535,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
     DFSOutputStream#writeChunk (cmccabe)
 
+    HDFS-7433. Optimize performance of DatanodeManager's node map.
+    (daryn via kihwal)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

View File

@@ -40,7 +40,6 @@
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
@@ -84,8 +83,8 @@ public class DatanodeManager {
    * <p>
    * Mapping: StorageID -> DatanodeDescriptor
    */
-  private final NavigableMap<String, DatanodeDescriptor> datanodeMap
-      = new TreeMap<String, DatanodeDescriptor>();
+  private final Map<String, DatanodeDescriptor> datanodeMap
+      = new HashMap<String, DatanodeDescriptor>();
 
   /** Cluster network topology */
   private final NetworkTopology networktopology;
@@ -405,11 +404,6 @@ public void sortLocatedBlocks(final String targethost,
     }
   }
 
-  CyclicIteration<String, DatanodeDescriptor> getDatanodeCyclicIteration(
-      final String firstkey) {
-    return new CyclicIteration<String, DatanodeDescriptor>(
-        datanodeMap, firstkey);
-  }
 
   /** @return the datanode descriptor for the host. */
   public DatanodeDescriptor getDatanodeByHost(final String host) {
@@ -526,9 +520,10 @@ public DatanodeStorageInfo[] getDatanodeStorageInfos(
   /** Prints information about all datanodes. */
   void datanodeDump(final PrintWriter out) {
     synchronized (datanodeMap) {
+      Map<String,DatanodeDescriptor> sortedDatanodeMap =
+          new TreeMap<String,DatanodeDescriptor>(datanodeMap);
       out.println("Metasave: Number of datanodes: " + datanodeMap.size());
-      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) {
-        DatanodeDescriptor node = it.next();
+      for (DatanodeDescriptor node : sortedDatanodeMap.values()) {
         out.println(node.dumpDatanode());
       }
     }
@@ -1289,6 +1284,7 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
         foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn));
       }
     }
+    Collections.sort(nodes);
 
     if (listDeadNodes) {
       for (InetSocketAddress addr : includedNodes) {