HDFS-9279. Decommissioned capacity should not be considered for configured/used capacity. Contributed by Kuhu Shukla.

Kihwal Lee 2015-10-28 11:57:56 -05:00
parent 2d10cb8e00
commit 19a77f5466
3 changed files with 58 additions and 18 deletions

View File: hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -2176,6 +2176,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9302. WebHDFS throws NullPointerException if newLength is not
     provided. (Jagadesh Kiran N via yliu)
 
+    HDFS-9297. Decomissioned capacity should not be considered for
+    configured/used capacity (Contributed by Kuhu Shukla)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

View File: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java

@@ -45,19 +45,20 @@ class DatanodeStats {
   private int expiredHeartbeats = 0;
 
   synchronized void add(final DatanodeDescriptor node) {
-    capacityUsed += node.getDfsUsed();
-    blockPoolUsed += node.getBlockPoolUsed();
     xceiverCount += node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed += node.getDfsUsed();
+      blockPoolUsed += node.getBlockPoolUsed();
       nodesInService++;
       nodesInServiceXceiverCount += node.getXceiverCount();
       capacityTotal += node.getCapacity();
       capacityRemaining += node.getRemaining();
-    } else {
-      capacityTotal += node.getDfsUsed();
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
     }
-    cacheCapacity += node.getCacheCapacity();
-    cacheUsed += node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.addStorage(storageInfo, node);
@@ -69,19 +70,20 @@ synchronized void add(final DatanodeDescriptor node) {
   }
 
   synchronized void subtract(final DatanodeDescriptor node) {
-    capacityUsed -= node.getDfsUsed();
-    blockPoolUsed -= node.getBlockPoolUsed();
     xceiverCount -= node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed -= node.getDfsUsed();
+      blockPoolUsed -= node.getBlockPoolUsed();
       nodesInService--;
       nodesInServiceXceiverCount -= node.getXceiverCount();
       capacityTotal -= node.getCapacity();
       capacityRemaining -= node.getRemaining();
-    } else {
-      capacityTotal -= node.getDfsUsed();
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
     }
-    cacheCapacity -= node.getCacheCapacity();
-    cacheUsed -= node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.subtractStorage(storageInfo, node);
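
Taken together, the two hunks above move used/configured-capacity accounting inside the admin-state check: a node that is decommissioning or decommissioned no longer adds to capacityUsed, blockPoolUsed, or capacityTotal, and only a node that is still decommissioning keeps contributing cache statistics. A minimal sketch of the resulting rule follows; ClusterTotals and NodeInfo are hypothetical stand-ins for DatanodeStats and DatanodeDescriptor, used only to illustrate which counters each node feeds.

    // Illustrative sketch only; not the HDFS classes themselves.
    interface NodeInfo {
      boolean isDecommissionInProgress();
      boolean isDecommissioned();
      long getCapacity();
      long getDfsUsed();
      long getBlockPoolUsed();
      long getRemaining();
      long getCacheCapacity();
      long getCacheUsed();
    }

    class ClusterTotals {
      long capacityTotal, capacityUsed, blockPoolUsed, capacityRemaining;
      long cacheCapacity, cacheUsed;

      void add(NodeInfo node) {
        if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
          // Only live, in-service nodes feed configured/used capacity.
          capacityTotal     += node.getCapacity();
          capacityUsed      += node.getDfsUsed();
          blockPoolUsed     += node.getBlockPoolUsed();
          capacityRemaining += node.getRemaining();
          cacheCapacity     += node.getCacheCapacity();
          cacheUsed         += node.getCacheUsed();
        } else if (!node.isDecommissioned()) {
          // A node still decommissioning contributes cache stats only.
          cacheCapacity += node.getCacheCapacity();
          cacheUsed     += node.getCacheUsed();
        }
        // A fully decommissioned node contributes nothing.
      }
    }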

View File: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java

@@ -62,6 +62,7 @@
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -350,14 +351,13 @@ private void verifyStats(NameNode namenode, FSNamesystem fsn,
     for (int i = 0; i < 10; i++) {
       long[] newStats = namenode.getRpcServer().getStats();
 
-      // For decommissioning nodes, ensure capacity of the DN is no longer
-      // counted. Only used space of the DN is counted in cluster capacity
+      // For decommissioning nodes, ensure capacity of the DN and dfsUsed
+      // is no longer counted towards total
       assertEquals(newStats[0],
-          decommissioning ? info.getDfsUsed() : info.getCapacity());
+          decommissioning ? 0 : info.getCapacity());
 
-      // Ensure cluster used capacity is counted for both normal and
-      // decommissioning nodes
-      assertEquals(newStats[1], info.getDfsUsed());
+      // Ensure cluster used capacity is counted for normal nodes only
+      assertEquals(newStats[1], decommissioning ? 0 : info.getDfsUsed());
 
       // For decommissioning nodes, remaining space from the DN is not counted
       assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
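
The assertions above index into the long[] returned by getStats(); as used by this test, slot 0 is configured capacity, slot 1 is DFS used, and slot 2 is remaining. A hedged illustration of the new expectation, where liveNodes is a hypothetical collection of in-service DatanodeInfo objects introduced only for this example:

      // Hypothetical for illustration: liveNodes = in-service DataNodes only.
      // Decommissioning/decommissioned nodes are deliberately not summed,
      // since their capacity and DFS-used now drop out of slots 0 and 1.
      long expectedCapacity = 0, expectedUsed = 0;
      for (DatanodeInfo dn : liveNodes) {
        expectedCapacity += dn.getCapacity();
        expectedUsed += dn.getDfsUsed();
      }
      long[] stats = namenode.getRpcServer().getStats();
      assertEquals(expectedCapacity, stats[0]);
      assertEquals(expectedUsed, stats[1]);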
@@ -1264,4 +1264,39 @@ public void nodeUsageVerification(int numDatanodes, long[] nodesCapacity,
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testUsedCapacity() throws Exception {
+    int numNamenodes = 1;
+    int numDatanodes = 2;
+    startCluster(numNamenodes,numDatanodes,conf);
+    cluster.waitActive();
+    FSNamesystem ns = cluster.getNamesystem(0);
+    BlockManager blockManager = ns.getBlockManager();
+    DatanodeStatistics datanodeStatistics = blockManager.getDatanodeManager()
+        .getDatanodeStatistics();
+    long initialUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long initialTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long initialBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+
+    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
+        new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
+    namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
+    ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
+    //decommission one node
+    DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
+        AdminStates.DECOMMISSIONED);
+    decommissionedNodes.add(decomNode);
+
+    long newUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long newTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+
+    assertTrue("DfsUsedCapacity should not be the same after a node has " +
+        "been decommissioned!", initialUsedCapacity != newUsedCapacity);
+    assertTrue("TotalCapacity should not be the same after a node has " +
+        "been decommissioned!", initialTotalCapacity != newTotalCapacity);
+    assertTrue("BlockPoolUsed should not be the same after a node has " +
+        "been decommissioned!",initialBlockPoolUsed != newBlockPoolUsed);
+  }
 }
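
For context, the aggregates exercised by the new test are also what clients see through FileSystem#getStatus(), which is served from the same getStats() values. A hedged usage sketch follows; the file system URI argument and the decommission step are placeholders, and the expectation that a decommissioned node's contribution disappears is exactly what this patch introduces.

    // Hedged sketch, not part of the patch: print the cluster totals that
    // this change affects, via the public FileSystem API.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsStatus;

    public class CapacityCheck {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create(args[0]), new Configuration());
        FsStatus status = fs.getStatus();
        // After this patch, a DECOMMISSIONED DataNode no longer contributes
        // to configured capacity or DFS used.
        System.out.println("Configured capacity: " + status.getCapacity());
        System.out.println("DFS used:            " + status.getUsed());
        System.out.println("Remaining:           " + status.getRemaining());
      }
    }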