From 0f2b65c3da44f81be0f1973233e4cc10819c5e7b Mon Sep 17 00:00:00 2001 From: Akira Ajisaka Date: Mon, 18 Feb 2019 15:21:46 +0900 Subject: [PATCH] HADOOP-16116. Fix Spelling Mistakes - DECOMISSIONED. Contributed by BELUGA BEHR. --- .../hadoop-hdfs/src/site/markdown/Federation.md | 2 +- .../org/apache/hadoop/hdfs/TestDecommission.java | 16 ++++++++-------- .../hadoop/hdfs/TestDecommissionWithStriped.java | 16 ++++++++-------- .../blockmanagement/TestDatanodeManager.java | 2 +- .../TestLowRedundancyBlockQueues.java | 4 ++-- .../TestSortLocatedStripedBlock.java | 6 +++--- .../namenode/ITestProvidedImplementation.java | 2 +- .../server/resourcemanager/NodesListManager.java | 6 +++--- .../server/resourcemanager/TestRMRestart.java | 2 +- .../TestResourceTrackerService.java | 7 ++++--- .../tests/unit/models/cluster-metric-test.js | 2 +- 11 files changed, 33 insertions(+), 32 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Federation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Federation.md index be36cc2de5..6d937ade2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Federation.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/Federation.md @@ -210,7 +210,7 @@ For the complete command usage, see [balancer](./HDFSCommands.html#balancer). ### Decommissioning -Decommissioning is similar to prior releases. The nodes that need to be decomissioned are added to the exclude file at all of the Namenodes. Each Namenode decommissions its Block Pool. When all the Namenodes finish decommissioning a Datanode, the Datanode is considered decommissioned. +Decommissioning is similar to prior releases. The nodes that need to be decommissioned are added to the exclude file at all of the Namenodes. Each Namenode decommissions its Block Pool. When all the Namenodes finish decommissioning a Datanode, the Datanode is considered decommissioned. **Step 1**: To distribute an exclude file to all the Namenodes, use the following command: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index bd266ed905..b5ff36c225 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -204,15 +204,15 @@ public void testDecommission2() throws IOException { writeFile(fileSys, file1, replicas); - int deadDecomissioned = ns.getNumDecomDeadDataNodes(); - int liveDecomissioned = ns.getNumDecomLiveDataNodes(); + int deadDecommissioned = ns.getNumDecomDeadDataNodes(); + int liveDecommissioned = ns.getNumDecomLiveDataNodes(); // Decommission one node. Verify that node is decommissioned. 
DatanodeInfo decomNode = takeNodeOutofService(0, null, 0,
decommissionedNodes, AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
- assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
- assertEquals(liveDecomissioned + 1, ns.getNumDecomLiveDataNodes());
+ assertEquals(deadDecommissioned, ns.getNumDecomDeadDataNodes());
+ assertEquals(liveDecommissioned + 1, ns.getNumDecomLiveDataNodes());
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(0);
@@ -378,15 +378,15 @@ private void testDecommission(int numNamenodes, int numDatanodes)
writeFile(fileSys, file1, replicas);
- int deadDecomissioned = ns.getNumDecomDeadDataNodes();
- int liveDecomissioned = ns.getNumDecomLiveDataNodes();
+ int deadDecommissioned = ns.getNumDecomDeadDataNodes();
+ int liveDecommissioned = ns.getNumDecomLiveDataNodes();
// Decommission one node. Verify that node is decommissioned.
DatanodeInfo decomNode = takeNodeOutofService(i, null, 0,
decommissionedNodes, AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
- assertEquals(deadDecomissioned, ns.getNumDecomDeadDataNodes());
- assertEquals(liveDecomissioned + 1, ns.getNumDecomLiveDataNodes());
+ assertEquals(deadDecommissioned, ns.getNumDecomDeadDataNodes());
+ assertEquals(liveDecommissioned + 1, ns.getNumDecomLiveDataNodes());
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(i);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index 7bd85b4989..2b92c5f628 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -243,8 +243,8 @@ public void run() {
}
};
};
- int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
- int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
+ int deadDecommissioned = fsn.getNumDecomDeadDataNodes();
+ int liveDecommissioned = fsn.getNumDecomLiveDataNodes();
decomTh.start();
decomStarted.await(5, TimeUnit.SECONDS);
Thread.sleep(3000); // grace period to trigger decommissioning call
@@ -260,8 +260,8 @@ public void run() {
decomTh.join(20000); // waiting 20secs to finish decommission
LOG.info("Finished decommissioning node:{}", decommisionNodes);
- assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes());
- assertEquals(liveDecomissioned + decommisionNodes.size(),
+ assertEquals(deadDecommissioned, fsn.getNumDecomDeadDataNodes());
+ assertEquals(liveDecommissioned + decommisionNodes.size(),
fsn.getNumDecomLiveDataNodes());
// Ensure decommissioned datanode is not automatically shutdown
@@ -328,8 +328,8 @@ private void testDecommission(int writeBytes, int storageCount,
List<DatanodeInfo> decommisionNodes = getDecommissionDatanode(dfs, ecFile,
writeBytes, decomNodeCount);
- int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
- int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
+ int deadDecommissioned = fsn.getNumDecomDeadDataNodes();
+ int liveDecommissioned = fsn.getNumDecomLiveDataNodes();
List<LocatedBlock> lbs = ((HdfsDataInputStream) dfs.open(ecFile))
.getAllBlocks();
@@ -342,8 +342,8 @@ private void testDecommission(int writeBytes, int storageCount,
// Decommission node. Verify that node is decommissioned.
decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED); - assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes()); - assertEquals(liveDecomissioned + decommisionNodes.size(), + assertEquals(deadDecommissioned, fsn.getNumDecomDeadDataNodes()); + assertEquals(liveDecommissioned + decommisionNodes.size(), fsn.getNumDecomLiveDataNodes()); // Ensure decommissioned datanode is not automatically shutdown diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java index aa7f4d2f7a..600a021bcc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java @@ -408,7 +408,7 @@ public void HelperFunction(String scriptFileName, int providedStorages) storageIDs[i] = "storageID-" + i; } - // set first 2 locations as decomissioned + // set first 2 locations as decommissioned locs[0].setDecommissioned(); locs[1].setDecommissioned(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java index cf40c39993..785f3bec05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java @@ -218,12 +218,12 @@ private void doTestStripedBlockPriorities(int dataBlkNum, int parityBlkNum) private void assertAdded(LowRedundancyBlocks queues, BlockInfo block, int curReplicas, - int decomissionedReplicas, + int decommissionedReplicas, int expectedReplicas) { assertTrue("Failed to add " + block, queues.add(block, curReplicas, 0, - decomissionedReplicas, + decommissionedReplicas, expectedReplicas)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java index 616b4c340d..a017cb960a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSortLocatedStripedBlock.java @@ -452,7 +452,7 @@ private LocatedStripedBlock createEachLocatedBlock(int numDataBlk, locs[index].setDecommissioned(); decommNodeInfo.add(locs[index].toString()); // Removing it from the list to ensure that all the given nodes are - // successfully marked as decomissioned. + // successfully marked as decommissioned. decommnNodeIndices.remove(new Integer(index)); } } @@ -471,7 +471,7 @@ private LocatedStripedBlock createEachLocatedBlock(int numDataBlk, locs[j].setDecommissioned(); decommNodeInfo.add(locs[j].toString()); // Removing it from the list to ensure that all the given nodes are - // successfully marked as decomissioned. + // successfully marked as decommissioned. 
decommnNodeIndices.remove(new Integer(index));
}
}
@@ -493,7 +493,7 @@ private LocatedStripedBlock createEachLocatedBlock(int numDataBlk,
locs[index].setDecommissioned();
decommNodeInfo.add(locs[index].toString());
// Removing it from the list to ensure that all the given nodes are
- // successfully marked as decomissioned.
+ // successfully marked as decommissioned.
decommnNodeIndices.remove(new Integer(blkIndexPos));
}
}
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
index f822cc0123..cf86dd7dcb 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
@@ -1146,7 +1146,7 @@ public void testDatanodeLifeCycle() throws Exception {
DataNode dn1 = cluster.getDataNodes().get(0);
DataNode dn2 = cluster.getDataNodes().get(1);
- // stop the 1st DN while being decomissioned.
+ // stop the 1st DN while being decommissioned.
MiniDFSCluster.DataNodeProperties dn1Properties = cluster.stopDataNode(0);
BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),
dn1.getDatanodeId().getXferAddr());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
index 647dfa333f..a73a842f52 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java
@@ -113,7 +113,7 @@ protected void serviceInit(Configuration conf) throws Exception {
YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
this.hostsReader = createHostsFileReader(this.includesFile,
this.excludesFile);
- setDecomissionedNMs();
+ setDecommissionedNMs();
printConfiguredHosts();
} catch (YarnException ex) {
disableHostsFileReader(ex);
}
@@ -245,7 +245,7 @@ private void refreshHostsReader(
handleExcludeNodeList(graceful, timeout);
}
- private void setDecomissionedNMs() {
+ private void setDecommissionedNMs() {
Set<String> excludeList = hostsReader.getExcludedHosts();
for (final String host : excludeList) {
NodeId nodeId = createUnknownNodeId(host);
@@ -526,7 +526,7 @@ private void disableHostsFileReader(Exception ex) {
conf.get(YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
this.hostsReader = createHostsFileReader(this.includesFile,
this.excludesFile);
- setDecomissionedNMs();
+ setDecommissionedNMs();
} catch (IOException ioe2) { // Should *never* happen
this.hostsReader = null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index be7275667a..7ebfe54ea2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -2019,7 +2019,7 @@ private void assertQueueMetrics(QueueMetrics qm, int appsSubmitted,
}
@Test (timeout = 60000)
- public void testDecomissionedNMsMetricsOnRMRestart() throws Exception {
+ public void testDecommissionedNMsMetricsOnRMRestart() throws Exception {
conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
hostFile.getAbsolutePath());
writeToHostsFile("");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 5b4d36f808..1fd34a0bb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -523,12 +523,13 @@ public void testAddNewExcludePathToConfiguration() throws Exception {
checkDecommissionedNMCount(rm, ++initialMetricCount);
nodeHeartbeat = nm1.nodeHeartbeat(true);
Assert.assertEquals(
- "Node should not have been decomissioned.",
+ "Node should not have been decommissioned.",
NodeAction.NORMAL, nodeHeartbeat.getNodeAction());
nodeHeartbeat = nm2.nodeHeartbeat(true);
- Assert.assertEquals("Node should have been decomissioned but is in state" +
- nodeHeartbeat.getNodeAction(),
+ Assert.assertEquals(
+ "Node should have been decommissioned but is in state" +
+ nodeHeartbeat.getNodeAction(),
NodeAction.SHUTDOWN, nodeHeartbeat.getNodeAction());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/cluster-metric-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/cluster-metric-test.js
index 58a6af41de..63defa488b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/cluster-metric-test.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/cluster-metric-test.js
@@ -73,7 +73,7 @@ test('Testing fields', function(assert) {
assert.deepEqual(model.get('getRunningAppsDataForDonutChart'),
[{label: "Pending", value: 0}, {label: "Running", value: 0}]);
assert.deepEqual(model.get('getNodesDataForDonutChart'),
- [{label: "Active", value: 4}, {label: "Unhealthy", value: 0}, {label: "Decomissioned", value: 0}]);
+ [{label: "Active", value: 4}, {label: "Unhealthy", value: 0}, {label: "Decommissioned", value: 0}]);
assert.deepEqual(model.get('getMemoryDataForDonutChart'),
[{label: "Allocated", value: 0}, {label: "Reserved", value: 0}, {label: "Available", value: 32768}]);
assert.deepEqual(model.get('getVCoreDataForDonutChart'),
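
Note: this patch respells only local variables, comments, assertion messages, and one private method; the FSNamesystem accessors it calls (getNumDecomDeadDataNodes()/getNumDecomLiveDataNodes()) keep their abbreviated "Decom" names. For reference, a minimal standalone sketch of reading those counters against a MiniDFSCluster; the class name and two-node sizing are illustrative assumptions, not part of this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

// Illustrative sketch only: shows the accessors exercised by the renamed
// test locals above; not part of HADOOP-16116.
public class DecommissionCountersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      FSNamesystem ns = cluster.getNamesystem();
      // The patch names its locals deadDecommissioned/liveDecommissioned
      // (two m's, double s); the accessor names themselves are unchanged.
      int deadDecommissioned = ns.getNumDecomDeadDataNodes();
      int liveDecommissioned = ns.getNumDecomLiveDataNodes();
      System.out.println("dead=" + deadDecommissioned
          + ", live=" + liveDecommissioned);
    } finally {
      cluster.shutdown();
    }
  }
}

Because the accessors are untouched, the only non-test code change in the diff is the private setDecommissionedNMs() rename in NodesListManager, so no public API is affected.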