HDFS-11419. DFSTopologyNodeImpl#chooseRandom optimizations. Contributed by Chen Liang.

Authored by Chen Liang on 2017-03-15 12:28:22 -07:00; committed by Arpit Agarwal
parent 221783b35b
commit 615ac09499
2 changed files with 137 additions and 48 deletions

DFSTopologyNodeImpl.java

@@ -27,7 +27,6 @@
 import java.util.EnumMap;
 import java.util.EnumSet;
 import java.util.HashMap;
-import java.util.Map;
 
 /**
  * The HDFS-specific representation of a network topology inner node. The
@@ -76,32 +75,58 @@ public InnerNodeImpl newInnerNode(String path) {
   private final HashMap
       <String, EnumMap<StorageType, Integer>> childrenStorageInfo;
 
+  /**
+   * This map stores storage type counts of the subtree. We can always get this
+   * info by iterate over the childrenStorageInfo variable. But for optimization
+   * purpose, we store this info directly to avoid the iteration.
+   */
+  private final EnumMap<StorageType, Integer> storageTypeCounts;
+
   DFSTopologyNodeImpl(String path) {
     super(path);
     childrenStorageInfo = new HashMap<>();
+    storageTypeCounts = new EnumMap<>(StorageType.class);
   }
 
   DFSTopologyNodeImpl(
       String name, String location, InnerNode parent, int level) {
     super(name, location, parent, level);
     childrenStorageInfo = new HashMap<>();
+    storageTypeCounts = new EnumMap<>(StorageType.class);
   }
 
   public int getSubtreeStorageCount(StorageType type) {
-    int res = 0;
-    for (Map.Entry<String, EnumMap<StorageType, Integer>> entry :
-        childrenStorageInfo.entrySet()) {
-      if (entry.getValue().containsKey(type)) {
-        res += entry.getValue().get(type);
-      }
-    }
-    return res;
+    if (storageTypeCounts.containsKey(type)) {
+      return storageTypeCounts.get(type);
+    } else {
+      return 0;
+    }
   }
 
   int getNumOfChildren() {
     return children.size();
   }
 
+  private void incStorageTypeCount(StorageType type) {
+    // no locking because the caller is synchronized already
+    if (storageTypeCounts.containsKey(type)) {
+      storageTypeCounts.put(type, storageTypeCounts.get(type)+1);
+    } else {
+      storageTypeCounts.put(type, 1);
+    }
+  }
+
+  private void decStorageTypeCount(StorageType type) {
+    // no locking because the caller is synchronized already
+    int current = storageTypeCounts.get(type);
+    current -= 1;
+    if (current == 0) {
+      storageTypeCounts.remove(type);
+    } else {
+      storageTypeCounts.put(type, current);
+    }
+  }
+
   @Override
   public boolean add(Node n) {
     if (!isAncestor(n)) {
@@ -130,14 +155,13 @@ public boolean add(Node n) {
       }
       children.add(n);
       numOfLeaves++;
-      synchronized (childrenStorageInfo) {
-        if (!childrenStorageInfo.containsKey(dnDescriptor.getName())) {
-          childrenStorageInfo.put(
-              dnDescriptor.getName(), new EnumMap<>(StorageType.class));
-        }
-        for (StorageType st : dnDescriptor.getStorageTypes()) {
-          childrenStorageInfo.get(dnDescriptor.getName()).put(st, 1);
-        }
-      }
+      if (!childrenStorageInfo.containsKey(dnDescriptor.getName())) {
+        childrenStorageInfo.put(
+            dnDescriptor.getName(), new EnumMap<>(StorageType.class));
+      }
+      for (StorageType st : dnDescriptor.getStorageTypes()) {
+        childrenStorageInfo.get(dnDescriptor.getName()).put(st, 1);
+        incStorageTypeCount(st);
+      }
       return true;
     } else {
@@ -153,25 +177,26 @@ public boolean add(Node n) {
       // add n to the subtree of the next ancestor node
       if (parentNode.add(n)) {
         numOfLeaves++;
-        synchronized (childrenStorageInfo) {
-          if (!childrenStorageInfo.containsKey(parentNode.getName())) {
-            childrenStorageInfo.put(
-                parentNode.getName(), new EnumMap<>(StorageType.class));
-            for (StorageType st : dnDescriptor.getStorageTypes()) {
-              childrenStorageInfo.get(parentNode.getName()).put(st, 1);
-            }
-          } else {
-            EnumMap<StorageType, Integer> currentCount =
-                childrenStorageInfo.get(parentNode.getName());
-            for (StorageType st : dnDescriptor.getStorageTypes()) {
-              if (currentCount.containsKey(st)) {
-                currentCount.put(st, currentCount.get(st) + 1);
-              } else {
-                currentCount.put(st, 1);
-              }
-            }
-          }
-        }
+        if (!childrenStorageInfo.containsKey(parentNode.getName())) {
+          childrenStorageInfo.put(
+              parentNode.getName(), new EnumMap<>(StorageType.class));
+          for (StorageType st : dnDescriptor.getStorageTypes()) {
+            childrenStorageInfo.get(parentNode.getName()).put(st, 1);
+          }
+        } else {
+          EnumMap<StorageType, Integer> currentCount =
+              childrenStorageInfo.get(parentNode.getName());
+          for (StorageType st : dnDescriptor.getStorageTypes()) {
+            if (currentCount.containsKey(st)) {
+              currentCount.put(st, currentCount.get(st) + 1);
+            } else {
+              currentCount.put(st, 1);
+            }
+          }
+        }
+        for (StorageType st : dnDescriptor.getStorageTypes()) {
+          incStorageTypeCount(st);
+        }
         return true;
       } else {
         return false;
@@ -220,8 +245,9 @@ public boolean remove(Node n) {
       if (children.get(i).getName().equals(n.getName())) {
         children.remove(i);
         childrenMap.remove(n.getName());
-        synchronized (childrenStorageInfo) {
-          childrenStorageInfo.remove(dnDescriptor.getName());
-        }
+        childrenStorageInfo.remove(dnDescriptor.getName());
+        for (StorageType st : dnDescriptor.getStorageTypes()) {
+          decStorageTypeCount(st);
+        }
         numOfLeaves--;
         n.setParent(null);
@@ -242,20 +268,21 @@ public boolean remove(Node n) {
       boolean isRemoved = parentNode.remove(n);
       if (isRemoved) {
         // if the parent node has no children, remove the parent node too
-        synchronized (childrenStorageInfo) {
-          EnumMap<StorageType, Integer> currentCount =
-              childrenStorageInfo.get(parentNode.getName());
-          EnumSet<StorageType> toRemove = EnumSet.noneOf(StorageType.class);
-          for (StorageType st : dnDescriptor.getStorageTypes()) {
-            int newCount = currentCount.get(st) - 1;
-            if (newCount == 0) {
-              toRemove.add(st);
-            }
-            currentCount.put(st, newCount);
-          }
-          for (StorageType st : toRemove) {
-            currentCount.remove(st);
-          }
-        }
+        EnumMap<StorageType, Integer> currentCount =
+            childrenStorageInfo.get(parentNode.getName());
+        EnumSet<StorageType> toRemove = EnumSet.noneOf(StorageType.class);
+        for (StorageType st : dnDescriptor.getStorageTypes()) {
+          int newCount = currentCount.get(st) - 1;
+          if (newCount == 0) {
+            toRemove.add(st);
+          }
+          currentCount.put(st, newCount);
+        }
+        for (StorageType st : toRemove) {
+          currentCount.remove(st);
+        }
+        for (StorageType st : dnDescriptor.getStorageTypes()) {
+          decStorageTypeCount(st);
+        }
         if (parentNode.getNumOfChildren() == 0) {
           for(int i=0; i < children.size(); i++) {
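The heart of this change is a counter cache: getSubtreeStorageCount previously iterated over childrenStorageInfo on every call, while the patched node keeps a running EnumMap total (storageTypeCounts) that add and remove maintain, making the lookup O(1) regardless of fan-out. Below is a minimal, self-contained sketch of the same pattern, assuming nothing from Hadoop: SubtreeStorageCounter and its nested StorageType enum are illustrative stand-ins (the real enum is org.apache.hadoop.fs.StorageType), and Map.merge/computeIfPresent are used as compact equivalents of the patch's explicit containsKey checks.

import java.util.EnumMap;

public class SubtreeStorageCounter {
  // Stand-in for org.apache.hadoop.fs.StorageType; not the real enum.
  enum StorageType { DISK, SSD, ARCHIVE, RAM_DISK }

  private final EnumMap<StorageType, Integer> counts =
      new EnumMap<>(StorageType.class);

  // Mirrors incStorageTypeCount: create-or-increment in one step.
  void inc(StorageType t) {
    counts.merge(t, 1, Integer::sum);
  }

  // Mirrors decStorageTypeCount: drop the entry once it reaches zero.
  void dec(StorageType t) {
    counts.computeIfPresent(t, (k, v) -> v == 1 ? null : v - 1);
  }

  // Mirrors the new getSubtreeStorageCount: O(1), no iteration.
  int get(StorageType t) {
    return counts.getOrDefault(t, 0);
  }

  public static void main(String[] args) {
    SubtreeStorageCounter c = new SubtreeStorageCounter();
    c.inc(StorageType.DISK);
    c.inc(StorageType.DISK);
    c.dec(StorageType.DISK);
    System.out.println(c.get(StorageType.DISK)); // 1
    System.out.println(c.get(StorageType.SSD));  // 0, never inserted
  }
}

Note that the patch also drops the synchronized (childrenStorageInfo) blocks rather than extending them to the new map; per the comments on incStorageTypeCount and decStorageTypeCount, callers already hold the necessary lock, so the inner monitor was redundant.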

TestDFSNetworkTopology.java

@@ -229,7 +229,6 @@ public void testAddAndRemoveTopology() throws Exception {
     assertEquals(1, (int)l1info.get("d2").get(StorageType.DISK));
     assertEquals(2, (int)l1info.get("d3").get(StorageType.SSD));
-
     for (int i = 0; i<4; i++) {
       CLUSTER.remove(newDD[i]);
     }
@@ -446,4 +445,67 @@ public void testNonExistingNode() throws Exception {
         "/l100/d100/r100", null, null, StorageType.DISK);
     assertNull(n);
   }
+
+  /**
+   * Tests getting subtree storage counts, and see whether it is correct when
+   * we update subtree.
+   * @throws Exception
+   */
+  @Test
+  public void testGetSubtreeStorageCount() throws Exception {
+    // add and remove a node to rack /l2/d3/r1. So all the inner nodes /l2,
+    // /l2/d3 and /l2/d3/r1 should be affected. /l2/d3/r3 should still be the
+    // same, only checked as a reference
+    Node l2 = CLUSTER.getNode("/l2");
+    Node l2d3 = CLUSTER.getNode("/l2/d3");
+    Node l2d3r1 = CLUSTER.getNode("/l2/d3/r1");
+    Node l2d3r3 = CLUSTER.getNode("/l2/d3/r3");
+
+    assertTrue(l2 instanceof DFSTopologyNodeImpl);
+    assertTrue(l2d3 instanceof DFSTopologyNodeImpl);
+    assertTrue(l2d3r1 instanceof DFSTopologyNodeImpl);
+    assertTrue(l2d3r3 instanceof DFSTopologyNodeImpl);
+
+    DFSTopologyNodeImpl innerl2 = (DFSTopologyNodeImpl)l2;
+    DFSTopologyNodeImpl innerl2d3 = (DFSTopologyNodeImpl)l2d3;
+    DFSTopologyNodeImpl innerl2d3r1 = (DFSTopologyNodeImpl)l2d3r1;
+    DFSTopologyNodeImpl innerl2d3r3 = (DFSTopologyNodeImpl)l2d3r3;
+
+    assertEquals(4,
+        innerl2.getSubtreeStorageCount(StorageType.DISK));
+    assertEquals(2,
+        innerl2d3.getSubtreeStorageCount(StorageType.DISK));
+    assertEquals(1,
+        innerl2d3r1.getSubtreeStorageCount(StorageType.DISK));
+    assertEquals(1,
+        innerl2d3r3.getSubtreeStorageCount(StorageType.DISK));
+
+    DatanodeStorageInfo storageInfo =
+        DFSTestUtil.createDatanodeStorageInfo("StorageID",
+            "1.2.3.4", "/l2/d3/r1", "newhost");
+    DatanodeDescriptor newNode = storageInfo.getDatanodeDescriptor();
+    CLUSTER.add(newNode);
+
+    // after adding a storage to /l2/d3/r1, ancestor inner node should have
+    // DISK count incremented by 1.
+    assertEquals(5,
+        innerl2.getSubtreeStorageCount(StorageType.DISK));
+    assertEquals(3,
+        innerl2d3.getSubtreeStorageCount(StorageType.DISK));
+    assertEquals(2,
+        innerl2d3r1.getSubtreeStorageCount(StorageType.DISK));
+    assertEquals(1,
+        innerl2d3r3.getSubtreeStorageCount(StorageType.DISK));
+
+    CLUSTER.remove(newNode);
+    assertEquals(4,
+        innerl2.getSubtreeStorageCount(StorageType.DISK));
+    assertEquals(2,
+        innerl2d3.getSubtreeStorageCount(StorageType.DISK));
+    assertEquals(1,
+        innerl2d3r1.getSubtreeStorageCount(StorageType.DISK));
+    assertEquals(1,
+        innerl2d3r3.getSubtreeStorageCount(StorageType.DISK));
+  }
 }
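The new test pins the bookkeeping down end to end: adding one DISK storage under /l2/d3/r1 bumps the cached DISK count on every ancestor (/l2 goes 4 to 5, /l2/d3 goes 2 to 3, /l2/d3/r1 goes 1 to 2) while the unrelated rack /l2/d3/r3 stays at 1, and removing the node restores all counts. A toy model of that ancestor-propagation invariant follows; CountingNode is hypothetical and not part of HDFS (in the real code the propagation falls out of the recursive add/remove calls shown above), and the explicit walk-up loop here stands in for that recursion.

import java.util.EnumMap;

public class SubtreeCountDemo {
  // Stand-in enum, as in the earlier sketch; not the Hadoop StorageType.
  enum StorageType { DISK, SSD }

  // Hypothetical miniature of a topology inner node with a counter cache.
  static class CountingNode {
    final CountingNode parent;  // null at the topology root
    final EnumMap<StorageType, Integer> counts =
        new EnumMap<>(StorageType.class);

    CountingNode(CountingNode parent) {
      this.parent = parent;
    }

    // Adding a storage adjusts the cached count at every ancestor.
    void addStorage(StorageType t) {
      for (CountingNode n = this; n != null; n = n.parent) {
        n.counts.merge(t, 1, Integer::sum);
      }
    }

    // Removal walks the same path, dropping entries that reach zero.
    void removeStorage(StorageType t) {
      for (CountingNode n = this; n != null; n = n.parent) {
        n.counts.computeIfPresent(t, (k, v) -> v == 1 ? null : v - 1);
      }
    }

    int count(StorageType t) {
      return counts.getOrDefault(t, 0);
    }
  }

  public static void main(String[] args) {
    CountingNode l2 = new CountingNode(null);  // plays /l2
    CountingNode d3 = new CountingNode(l2);    // plays /l2/d3
    CountingNode r1 = new CountingNode(d3);    // plays /l2/d3/r1

    r1.addStorage(StorageType.DISK);
    System.out.println(l2.count(StorageType.DISK)); // 1: visible at /l2 too
    r1.removeStorage(StorageType.DISK);
    System.out.println(l2.count(StorageType.DISK)); // 0: restored on removal
  }
}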