diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index aae56dd98d..33493ce6e1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -984,7 +984,10 @@ private <T extends Node> void sortByDistance(Node reader, T[] nodes, int idx = 0; for (List<T> list: tree.values()) { if (list != null) { - secondarySort.accept(list); + Collections.shuffle(list, r); + if (secondarySort != null) { + secondarySort.accept(list); + } for (T n: list) { nodes[idx] = n; idx++; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index f5cf1d5744..c26dd90988 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -548,8 +548,7 @@ private void sortLocatedBlock(final LocatedBlock lb, String targetHost, } private Consumer<List<DatanodeInfo>> createSecondaryNodeSorter() { - Consumer<List<DatanodeInfo>> secondarySort = - list -> Collections.shuffle(list); + Consumer<List<DatanodeInfo>> secondarySort = null; if (readConsiderLoad) { Comparator<DatanodeInfo> comp = Comparator.comparingInt(DatanodeInfo::getXceiverCount); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java index e8e6b949f1..ef9758aa88 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java @@ -28,11 +28,13 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Random; +import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -600,6 +602,72 @@ public void testGetBlockLocationConsiderLoad() assertEquals(locs[0].getIpAddr(), sortedLocs2[4].getIpAddr()); } + @Test + public void testGetBlockLocationConsiderLoadWithNodesOfSameDistance() + throws IOException, URISyntaxException { + Configuration conf = new Configuration(); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_READ_CONSIDERLOAD_KEY, true); + conf.setBoolean( + DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true); + FSNamesystem fsn = Mockito.mock(FSNamesystem.class); + Mockito.when(fsn.hasWriteLock()).thenReturn(true); + URL shellScript = getClass() + .getResource("/" + Shell.appendScriptExtension("topology-script")); + Path resourcePath = Paths.get(shellScript.toURI()); + FileUtil.setExecutable(resourcePath.toFile(), true); + conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, + resourcePath.toString()); + DatanodeManager dm = mockDatanodeManager(fsn, conf); + + int totalDNs = 5; + // Register 5 datanodes and 2 nodes per rack with different load. + DatanodeInfo[] locs = new DatanodeInfo[totalDNs]; + String[] storageIDs = new String[totalDNs]; + for (int i = 0; i < totalDNs; i++) { + // Register new datanode. 
+ String uuid = "UUID-" + i; + String ip = "IP-" + i / 2 + "-" + i; + DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class); + Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid); + Mockito.when(dr.getIpAddr()).thenReturn(ip); + dm.registerDatanode(dr); + + // Get location and storage information. + locs[i] = dm.getDatanode(uuid); + storageIDs[i] = "storageID-" + i; + + // Set load for datanodes. + locs[i].setXceiverCount(2); + } + + // Set node 0 decommissioned. + locs[0].setDecommissioned(); + + // Create LocatedBlock with above locations. + ExtendedBlock b = new ExtendedBlock("somePoolID", 1234); + LocatedBlock block = new LocatedBlock(b, locs); + List<LocatedBlock> blocks = new ArrayList<>(); + blocks.add(block); + + // Test client not in cluster but same rack with locs[3]. + // Number of iterations to do the test + int numIterations = 100; + + Set<String> ipSet = new HashSet<>(); + final String targetIpNotInCluster = locs[3].getIpAddr() + "-client"; + for (int i = 0; i < numIterations; i++) { + dm.sortLocatedBlocks(targetIpNotInCluster, blocks); + DatanodeInfo[] sortedLocs = block.getLocations(); + assertEquals(totalDNs, sortedLocs.length); + if (!ipSet.contains(sortedLocs[0].getIpAddr())) { + ipSet.add(sortedLocs[0].getIpAddr()); + } + } + + // when the two nodes' distance and weight are same, they are equally close. + assertEquals(2, ipSet.size()); + } + /** * Test whether removing a host from the includes list without adding it to * the excludes list will exclude it from data node reports.