HDFS-14913. Correct the value of available count in DFSNetworkTopology#chooseRandomWithStorageType(). Contributed by Ayush Saxena.
commit 74c2329fc3
parent ff6a492d66
@@ -212,8 +212,7 @@ Node chooseRandomWithStorageType(final String scope,
     }
     if (excludedNodes != null) {
       for (Node excludedNode : excludedNodes) {
-        if (excludeRoot != null
-            && excludedNode.getNetworkLocation().startsWith(excludedScope)) {
+        if (excludeRoot != null && isNodeInScope(excludedNode, excludedScope)) {
           continue;
         }
         if (excludedNode instanceof DatanodeDescriptor) {
@@ -259,6 +258,14 @@ Node chooseRandomWithStorageType(final String scope,
     return chosen;
   }
 
+  private boolean isNodeInScope(Node node, String scope) {
+    if (!scope.endsWith("/")) {
+      scope += "/";
+    }
+    String nodeLocation = node.getNetworkLocation() + "/";
+    return nodeLocation.startsWith(scope);
+  }
+
   /**
    * Choose a random node that has the required storage type, under the given
    * root, with an excluded subtree root (could also just be a leaf node).
@@ -581,4 +581,23 @@ public void testChooseRandomWithStorageTypeTwoTrial() throws Exception {
       assertTrue(dd.getHostName().equals("host7"));
     }
   }
+
+  @Test
+  public void testChooseRandomWithStorageTypeNoAvlblNode() {
+    DFSNetworkTopology dfsCluster =
+        DFSNetworkTopology.getInstance(new Configuration());
+    final String[] racks = {"/default/rack1", "/default/rack10"};
+    final String[] hosts = {"host1", "host2"};
+    final StorageType[] types = {StorageType.DISK, StorageType.DISK};
+    final DatanodeStorageInfo[] storages =
+        DFSTestUtil.createDatanodeStorageInfos(2, racks, hosts, types);
+    DatanodeDescriptor[] dns = DFSTestUtil.toDatanodeDescriptor(storages);
+    dfsCluster.add(dns[0]);
+    dfsCluster.add(dns[1]);
+    HashSet<Node> excluded = new HashSet<>();
+    excluded.add(dns[1]);
+    Node n = dfsCluster.chooseRandomWithStorageType("/default",
+        "/default/rack1", excluded, StorageType.DISK);
+    assertNull("No node should have been selected.", n);
+  }
 }
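The heart of the change is the string comparison: the old code tested the excluded node's network location with a bare startsWith, so a node on /default/rack10 was treated as if it lay inside the excluded scope /default/rack1, which is what threw off the available count this commit corrects. The new isNodeInScope helper normalizes both sides with a trailing "/" before comparing. The standalone sketch below (ScopeCheckDemo and inScope are illustrative names, not part of Hadoop) reproduces just that comparison, using the rack and scope names from the new test:

// Illustrative only: mirrors the prefix check added by this commit.
public class ScopeCheckDemo {

  // Append "/" to both sides so sibling racks that merely share a name
  // prefix (rack1 vs rack10) are not counted as being in the same scope.
  static boolean inScope(String nodeLocation, String scope) {
    if (!scope.endsWith("/")) {
      scope += "/";
    }
    return (nodeLocation + "/").startsWith(scope);
  }

  public static void main(String[] args) {
    String scope = "/default/rack1";

    // Old-style bare prefix match: wrongly claims rack10 is inside rack1.
    System.out.println("/default/rack10".startsWith(scope));    // true

    // Normalized check: only rack1 and its descendants match.
    System.out.println(inScope("/default/rack10", scope));      // false
    System.out.println(inScope("/default/rack1", scope));       // true
    System.out.println(inScope("/default/rack1/host1", scope)); // true
  }
}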