From 51edaacd09d86419f99ca96545a1393db1f43f73 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Tue, 19 Sep 2017 13:48:23 -0700
Subject: [PATCH] HDFS-12445. Correct spellings of choosen to chosen.
 Contributed by hu xiaodong.

---
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java     | 4 ++--
 .../apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java | 2 +-
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 2 +-
 .../apache/hadoop/examples/dancing/DistributedPentomino.java | 4 ++--
 .../java/org/apache/hadoop/examples/dancing/Pentomino.java   | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f33ec63cf5..0545bb2a1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3551,8 +3551,8 @@ private void chooseExcessRedundancyContiguous(
     List<DatanodeStorageInfo> replicasToDelete = replicator
         .chooseReplicasToDelete(nonExcess, nonExcess, replication, excessTypes,
             addedNode, delNodeHint);
-    for (DatanodeStorageInfo choosenReplica : replicasToDelete) {
-      processChosenExcessRedundancy(nonExcess, choosenReplica, storedBlock);
+    for (DatanodeStorageInfo chosenReplica : replicasToDelete) {
+      processChosenExcessRedundancy(nonExcess, chosenReplica, storedBlock);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index b6c13188c2..1860565083 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -178,7 +178,7 @@ public void testDeadNodeAsBlockTarget() throws Exception {
         clientNode, new HashSet<>(), 256 * 1024 * 1024L, null, (byte) 7,
         BlockType.CONTIGUOUS, null, null);
     for (DatanodeStorageInfo datanodeStorageInfo : results) {
-      assertFalse("Dead node should not be choosen", datanodeStorageInfo
+      assertFalse("Dead node should not be chosen", datanodeStorageInfo
           .getDatanodeDescriptor().equals(clientNode));
     }
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
index 537b4d4e33..eef44617ce 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
@@ -368,7 +368,7 @@ public List split(int depth) {
 
   /**
    * Make one move from a prefix
-   * @param goalRow the row that should be choosen
+   * @param goalRow the row that should be chosen
    * @return the row that was found
    */
   private Node advance(int goalRow) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
index 29f1eb2c45..d4fe6dcdb4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
@@ -39,8 +39,8 @@
  * Launch a distributed pentomino solver.
  * It generates a complete list of prefixes of length N with each unique prefix
  * as a separate line. A prefix is a sequence of N integers that denote the
- * index of the row that is choosen for each column in order. Note that the
- * next column is heuristically choosen by the solver, so it is dependant on
+ * index of the row that is chosen for each column in order. Note that the
+ * next column is heuristically chosen by the solver, so it is dependant on
  * the previous choice. That file is given as the input to
  * map/reduce. The output key/value are the move prefix/solution as Text/Text.
  */
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
index 2485728b07..a30d62c33c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Pentomino.java
@@ -411,7 +411,7 @@ public List getSplits(int depth) {
   /**
    * Find all of the solutions that start with the given prefix. The printer
    * is given each solution as it is found.
-   * @param split a list of row indexes that should be choosen for each row
+   * @param split a list of row indexes that should be chosen for each row
    * in order
    * @return the number of solutions found
    */