From 10ab7d595ece59f2d00b406ba8812c6295a4187f Mon Sep 17 00:00:00 2001
From: Vinayakumar B <vinayakumarb@apache.org>
Date: Tue, 22 Sep 2015 12:25:35 +0530
Subject: [PATCH] HDFS-8780. Fetching live/dead datanode list with arg true
 for removeDecommissionNode, returns list with decom node. (Contributed by
 J.Andreina)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 ++
 .../blockmanagement/DatanodeManager.java           | 45 ++++---------
 .../hdfs/server/namenode/FSNamesystem.java         |  8 ++--
 .../apache/hadoop/hdfs/TestDecommission.java       | 43 ++++++++++++++++++
 4 files changed, 58 insertions(+), 41 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 44774a7c8c..af7118a431 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1378,6 +1378,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9063. Correctly handle snapshot path for getContentSummary. (jing9)
 
+    HDFS-8780. Fetching live/dead datanode list with arg true for remove-
+    DecommissionNode, returns list with decom node. (J.Andreina via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 31149372bb..a484fccf9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -788,45 +788,16 @@ private List<String> getNetworkDependencies(DatanodeInfo node)
   }
 
   /**
-   * Remove an already decommissioned data node who is neither in include nor
-   * exclude hosts lists from the the list of live or dead nodes. This is used
-   * to not display an already decommssioned data node to the operators.
-   * The operation procedure of making a already decommissioned data node not
-   * to be displayed is as following:
-   * <ul>
-   * <li>
-   * Host must have been in the include hosts list and the include hosts list
-   * must not be empty.
-   * </li>
-   * <li>
-   * Host is decommissioned by remaining in the include hosts list and added
-   * into the exclude hosts list. Name node is updated with the new
-   * information by issuing dfsadmin -refreshNodes command.
-   * </li>
-   * <li>
-   * Host is removed from both include hosts and exclude hosts lists. Name
-   * node is updated with the new informationby issuing dfsamin -refreshNodes
-   * command.
-   * </li>
-   * </ul>
-   *
-   * @param nodeList
-   *          , array list of live or dead nodes.
+   * Remove decommissioned datanodes from the list of live or dead nodes.
+   * This is used to avoid displaying decommissioned datanodes to operators.
+   * @param nodeList list of live or dead nodes.
    */
-  private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) {
-    // If the include list is empty, any nodes are welcomed and it does not
-    // make sense to exclude any nodes from the cluster. Therefore, no remove.
-    if (!hostFileManager.hasIncludes()) {
-      return;
-    }
-
-    for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
+  private void removeDecomNodeFromList(
+      final List<DatanodeDescriptor> nodeList) {
+    Iterator<DatanodeDescriptor> it = null;
+    for (it = nodeList.iterator(); it.hasNext();) {
       DatanodeDescriptor node = it.next();
-      if ((!hostFileManager.isIncluded(node)) && (!hostFileManager.isExcluded(node))
-          && node.isDecommissioned()) {
-        // Include list is not empty, an existing datanode does not appear
-        // in both include or exclude lists and it has been decommissioned.
-        // Remove it from the node list.
+      if (node.isDecommissioned()) {
         it.remove();
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 75b6be90c3..4a9d13b494 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5073,7 +5073,7 @@ public int getNumDecomDeadDataNodes() {
   @Override // FSNamesystemMBean
   public int getVolumeFailuresTotal() {
     List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
     int volumeFailuresTotal = 0;
     for (DatanodeDescriptor node: live) {
       volumeFailuresTotal += node.getVolumeFailures();
@@ -5084,7 +5084,7 @@ public int getVolumeFailuresTotal() {
   @Override // FSNamesystemMBean
   public long getEstimatedCapacityLostTotal() {
     List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
     long estimatedCapacityLostTotal = 0;
     for (DatanodeDescriptor node: live) {
       VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
@@ -5891,7 +5891,7 @@ public String getLiveNodes() {
     final Map<String, Map<String, Object>> info =
         new HashMap<String, Map<String, Object>>();
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
+    blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
     for (DatanodeDescriptor node : live) {
       ImmutableMap.Builder<String, Object> innerinfo =
           ImmutableMap.<String, Object>builder();
@@ -5939,7 +5939,7 @@ public String getDeadNodes() {
     final Map<String, Map<String, Object>> info =
         new HashMap<String, Map<String, Object>>();
     final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    blockManager.getDatanodeManager().fetchDatanodes(null, dead, true);
+    blockManager.getDatanodeManager().fetchDatanodes(null, dead, false);
     for (DatanodeDescriptor node : dead) {
       Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
           .put("lastContact", getLastContact(node))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 7c30361ee2..c1fdd2527e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -1128,6 +1128,49 @@ private void assertTrackedAndPending(DecommissionManager decomManager,
         decomManager.getNumPendingNodes());
   }
 
+  /**
+   * Fetching the live datanode list with removeDecommissionNode set to
+   * false returns a list that still includes the node in Decommissioned
+   * state, while true returns a list without it.
+   * @throws IOException
+   */
+  @Test
+  public void testCountOnDecommissionedNodeList() throws IOException {
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+    try {
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
+              .numDataNodes(1).build();
+      cluster.waitActive();
+      DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
+      validateCluster(client, 1);
+
+      ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
+          new ArrayList<ArrayList<DatanodeInfo>>(1);
+      namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(1));
+
+      // Move datanode1 to Decommissioned state
+      ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
+      decommissionNode(0, null,
+          decommissionedNode, AdminStates.DECOMMISSIONED);
+
+      FSNamesystem ns = cluster.getNamesystem(0);
+      DatanodeManager datanodeManager =
+          ns.getBlockManager().getDatanodeManager();
+      List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+      // fetchDatanodes with false should return the live decommissioned node
+      datanodeManager.fetchDatanodes(live, null, false);
+      assertTrue(1 == live.size());
+      // fetchDatanodes with true should not return the decommissioned node
+      datanodeManager.fetchDatanodes(live, null, true);
+      assertTrue(0 == live.size());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Decommissioned node should not be considered while calculating node usage
    * @throws InterruptedException
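
Why the fix works: the old removeDecomNodeFromList() (see the DatanodeManager
hunk above) returned early whenever the include hosts list was empty, and
additionally required a node to be absent from both host lists, so
DatanodeManager#fetchDatanodes(live, dead, true) could still hand back
decommissioned datanodes on clusters that do not maintain an include file.
The patched version filters on admin state alone, and the FSNamesystem MBean
getters compensate by passing false so that decommissioned-but-live nodes stay
visible in their reports. Below is a minimal, self-contained sketch of the new
filtering contract; it is illustrative only, and Node is a hypothetical
stand-in for DatanodeDescriptor:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

class RemoveDecomNodeSketch {
  /** Hypothetical stand-in for DatanodeDescriptor. */
  static class Node {
    final String name;
    final boolean decommissioned;

    Node(String name, boolean decommissioned) {
      this.name = name;
      this.decommissioned = decommissioned;
    }

    boolean isDecommissioned() {
      return decommissioned;
    }
  }

  /**
   * Mirrors the patched removeDecomNodeFromList(): drop every node that
   * reports isDecommissioned(), with no include/exclude host-list checks.
   */
  static void removeDecomNodeFromList(List<Node> nodeList) {
    for (Iterator<Node> it = nodeList.iterator(); it.hasNext();) {
      if (it.next().isDecommissioned()) {
        it.remove();
      }
    }
  }

  public static void main(String[] args) {
    List<Node> live = new ArrayList<Node>();
    live.add(new Node("dn1", false));
    live.add(new Node("dn2", true));
    removeDecomNodeFromList(live);
    // Prints 1: the decommissioned dn2 was filtered out.
    System.out.println(live.size());
  }
}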
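
The regression test above pins down the DatanodeManager contract; the
operator-visible effect is on the NameNode's LiveNodes attribute (backed by
the patched getLiveNodes()), which now keeps reporting decommissioned-but-live
datanodes. A self-contained sketch of reading that attribute with the stock
JDK JMX client follows; the host and port in the service URL are illustrative
assumptions, not values from the patch:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class LiveNodesReader {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint: adjust host/port to wherever remote JMX is enabled
    // on the NameNode (e.g. via HADOOP_NAMENODE_OPTS).
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://namenode-host:8004/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      // Standard NameNode MXBean; LiveNodes is a JSON-formatted string and,
      // after HDFS-8780, still lists decommissioned-but-live datanodes.
      ObjectName name =
          new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      String liveNodes = (String) mbs.getAttribute(name, "LiveNodes");
      System.out.println(liveNodes);
    } finally {
      connector.close();
    }
  }
}

The same bean is also exposed over HTTP at the NameNode web UI's /jmx endpoint
(qry=Hadoop:service=NameNode,name=NameNodeInfo), which is often more
convenient for ad-hoc checks.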