HDFS-8780. Fetching live/dead datanode list with arg true for removeDecommissionNode, returns list with decom node. (Contributed by J.Andreina)

This commit is contained in:
Vinayakumar B 2015-09-22 12:25:35 +05:30
parent dfd807afab
commit 10ab7d595e
4 changed files with 58 additions and 41 deletions

View File

@ -1378,6 +1378,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9063. Correctly handle snapshot path for getContentSummary. (jing9) HDFS-9063. Correctly handle snapshot path for getContentSummary. (jing9)
HDFS-8780. Fetching live/dead datanode list with arg true for remove-
DecommissionNode, returns list with decom node. (J.Andreina via vinayakumab)
Release 2.7.2 - UNRELEASED Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -788,45 +788,16 @@ private List<String> getNetworkDependencies(DatanodeInfo node)
} }
/** /**
* Remove an already decommissioned data node who is neither in include nor * Remove decommissioned datanode from the list of live or dead nodes.
* exclude hosts lists from the list of live or dead nodes. This is used * This is used not to display a decommissioned datanode to the operators.
* to not display an already decommissioned data node to the operators. * @param nodeList , array list of live or dead nodes.
* The operation procedure of making a already decommissioned data node not
* to be displayed is as following:
* <ol>
* <li>
* Host must have been in the include hosts list and the include hosts list
* must not be empty.
* </li>
* <li>
* Host is decommissioned by remaining in the include hosts list and added
* into the exclude hosts list. Name node is updated with the new
* information by issuing dfsadmin -refreshNodes command.
* </li>
* <li>
* Host is removed from both include hosts and exclude hosts lists. Name
* node is updated with the new information by issuing dfsadmin -refreshNodes
* command.
* <li>
* </ol>
*
* @param nodeList
* , array list of live or dead nodes.
*/ */
private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) { private void removeDecomNodeFromList(
// If the include list is empty, any nodes are welcomed and it does not final List<DatanodeDescriptor> nodeList) {
// make sense to exclude any nodes from the cluster. Therefore, no remove. Iterator<DatanodeDescriptor> it=null;
if (!hostFileManager.hasIncludes()) { for (it = nodeList.iterator(); it.hasNext();) {
return;
}
for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
DatanodeDescriptor node = it.next(); DatanodeDescriptor node = it.next();
if ((!hostFileManager.isIncluded(node)) && (!hostFileManager.isExcluded(node)) if (node.isDecommissioned()) {
&& node.isDecommissioned()) {
// Include list is not empty, an existing datanode does not appear
// in both include or exclude lists and it has been decommissioned.
// Remove it from the node list.
it.remove(); it.remove();
} }
} }

View File

@ -5073,7 +5073,7 @@ public int getNumDecomDeadDataNodes() {
@Override // FSNamesystemMBean @Override // FSNamesystemMBean
public int getVolumeFailuresTotal() { public int getVolumeFailuresTotal() {
List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
int volumeFailuresTotal = 0; int volumeFailuresTotal = 0;
for (DatanodeDescriptor node: live) { for (DatanodeDescriptor node: live) {
volumeFailuresTotal += node.getVolumeFailures(); volumeFailuresTotal += node.getVolumeFailures();
@ -5084,7 +5084,7 @@ public int getVolumeFailuresTotal() {
@Override // FSNamesystemMBean @Override // FSNamesystemMBean
public long getEstimatedCapacityLostTotal() { public long getEstimatedCapacityLostTotal() {
List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
long estimatedCapacityLostTotal = 0; long estimatedCapacityLostTotal = 0;
for (DatanodeDescriptor node: live) { for (DatanodeDescriptor node: live) {
VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary(); VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
@ -5891,7 +5891,7 @@ public String getLiveNodes() {
final Map<String, Map<String,Object>> info = final Map<String, Map<String,Object>> info =
new HashMap<String, Map<String,Object>>(); new HashMap<String, Map<String,Object>>();
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
blockManager.getDatanodeManager().fetchDatanodes(live, null, true); blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
for (DatanodeDescriptor node : live) { for (DatanodeDescriptor node : live) {
ImmutableMap.Builder<String, Object> innerinfo = ImmutableMap.Builder<String, Object> innerinfo =
ImmutableMap.<String,Object>builder(); ImmutableMap.<String,Object>builder();
@ -5939,7 +5939,7 @@ public String getDeadNodes() {
final Map<String, Map<String, Object>> info = final Map<String, Map<String, Object>> info =
new HashMap<String, Map<String, Object>>(); new HashMap<String, Map<String, Object>>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
blockManager.getDatanodeManager().fetchDatanodes(null, dead, true); blockManager.getDatanodeManager().fetchDatanodes(null, dead, false);
for (DatanodeDescriptor node : dead) { for (DatanodeDescriptor node : dead) {
Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder() Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
.put("lastContact", getLastContact(node)) .put("lastContact", getLastContact(node))

View File

@ -1128,6 +1128,49 @@ private void assertTrackedAndPending(DecommissionManager decomManager,
decomManager.getNumPendingNodes()); decomManager.getNumPendingNodes());
} }
/**
 * Fetching live DataNodes by passing removeDecommissionedNode value as
 * false - returns LiveNodeList including the node in Decommissioned state
 * true  - returns LiveNodeList excluding the node in Decommissioned state
 * @throws IOException if cluster startup or the decommission step fails
 */
@Test
public void testCountOnDecommissionedNodeList() throws IOException {
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
  try {
    cluster =
        new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
            .numDataNodes(1).build();
    cluster.waitActive();
    DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
    validateCluster(client, 1);
    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
        new ArrayList<ArrayList<DatanodeInfo>>(1);
    namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(1));

    // Move the single datanode to Decommissioned state.
    ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
    decommissionNode(0, null,
        decommissionedNode, AdminStates.DECOMMISSIONED);
    FSNamesystem ns = cluster.getNamesystem(0);
    DatanodeManager datanodeManager =
        ns.getBlockManager().getDatanodeManager();
    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();

    // fetchDatanodes with removeDecommissionedNode=false should include the
    // live decommissioned node.
    datanodeManager.fetchDatanodes(live, null, false);
    assertTrue(1 == live.size());

    // fetchDatanodes with removeDecommissionedNode=true should exclude the
    // live decommissioned node.
    datanodeManager.fetchDatanodes(live, null, true);
    assertTrue(0 == live.size());
  } finally {
    // Guard against build() having thrown before 'cluster' was assigned, so
    // shutdown() cannot NPE and mask the original test failure.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
/** /**
* Decommissioned node should not be considered while calculating node usage * Decommissioned node should not be considered while calculating node usage
* @throws InterruptedException * @throws InterruptedException