From 7d7acb004af5095983e99c86deedfc60a0355ff7 Mon Sep 17 00:00:00 2001
From: Ayush Saxena
Date: Fri, 1 Nov 2019 22:41:50 +0530
Subject: [PATCH] HDFS-13736. BlockPlacementPolicyDefault can not choose
 favored nodes when
 'dfs.namenode.block-placement-policy.default.prefer-local-node' set to false.
 Contributed by hu xiaodong.

---
 .../BlockPlacementPolicyDefault.java | 44 +++++++++++++++----
 .../TestReplicationPolicy.java       | 23 ++++++++++
 2 files changed, 58 insertions(+), 9 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index b50d479c78..9a089ce2a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -240,9 +240,10 @@ protected void chooseFavouredNodes(String src, int numOfReplicas,
       DatanodeDescriptor favoredNode = favoredNodes.get(i);
       // Choose a single node which is local to favoredNode.
       // 'results' is updated within chooseLocalNode
-      final DatanodeStorageInfo target =
-          chooseLocalStorage(favoredNode, favoriteAndExcludedNodes, blocksize,
-              maxNodesPerRack, results, avoidStaleNodes, storageTypes, false);
+      final DatanodeStorageInfo target = chooseLocalOrFavoredStorage(
+          favoredNode, true, favoriteAndExcludedNodes, blocksize,
+          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+
       if (target == null) {
         LOG.warn("Could not find a target for file " + src
             + " with favored node " + favoredNode);
@@ -546,16 +547,41 @@ protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
       List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
       EnumMap<StorageType, Integer> storageTypes)
       throws NotEnoughReplicasException {
+    return chooseLocalOrFavoredStorage(localMachine, false,
+        excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes,
+        storageTypes);
+  }
+
+  /**
+   * Choose storage of local or favored node.
+   * @param localOrFavoredNode local or favored node
+   * @param isFavoredNode if target node is favored node
+   * @param excludedNodes datanodes that should not be considered as targets
+   * @param blocksize size of the data to be written
+   * @param maxNodesPerRack max nodes allowed per rack
+   * @param results the target nodes already chosen
+   * @param avoidStaleNodes avoid stale nodes in replica choosing
+   * @param storageTypes storage type to be considered for target
+   * @return storage of local or favored node (not chosen node)
+   * @throws NotEnoughReplicasException
+   */
+  protected DatanodeStorageInfo chooseLocalOrFavoredStorage(
+      Node localOrFavoredNode, boolean isFavoredNode, Set<Node> excludedNodes,
+      long blocksize, int maxNodesPerRack, List<DatanodeStorageInfo> results,
+      boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes)
+      throws NotEnoughReplicasException {
     // if no local machine, randomly choose one node
-    if (localMachine == null) {
+    if (localOrFavoredNode == null) {
       return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
           maxNodesPerRack, results, avoidStaleNodes, storageTypes);
     }
-    if (preferLocalNode && localMachine instanceof DatanodeDescriptor
-        && clusterMap.contains(localMachine)) {
-      DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
+    if ((preferLocalNode || isFavoredNode)
+        && localOrFavoredNode instanceof DatanodeDescriptor
+        && clusterMap.contains(localOrFavoredNode)) {
+      DatanodeDescriptor localDatanode =
+          (DatanodeDescriptor) localOrFavoredNode;
       // otherwise try local machine first
-      if (excludedNodes.add(localMachine) // was not in the excluded list
+      if (excludedNodes.add(localOrFavoredNode) // was not in the excluded list
           && isGoodDatanode(localDatanode, maxNodesPerRack, false,
               results, avoidStaleNodes)) {
         for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
@@ -575,7 +601,7 @@ && isGoodDatanode(localDatanode, maxNodesPerRack, false,
             return localStorage;
           }
         }
-      }
+      }
     }
     return null;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 02d5a3a711..9c3a630f95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1519,6 +1519,29 @@ public void testChooseExcessReplicaApartFromFavoredNodes() throws Exception {
     }
   }
 
+  @Test
+  public void testChooseFromFavoredNodesWhenPreferLocalSetToFalse() {
+    ((BlockPlacementPolicyDefault) replicator).setPreferLocalNode(false);
+    try {
+      DatanodeStorageInfo[] targets;
+      List<DatanodeDescriptor> expectedTargets = new ArrayList<>();
+      expectedTargets.add(dataNodes[0]);
+      expectedTargets.add(dataNodes[2]);
+      List<DatanodeDescriptor> favouredNodes = new ArrayList<>();
+      favouredNodes.add(dataNodes[0]);
+      favouredNodes.add(dataNodes[2]);
+      targets = chooseTarget(2, dataNodes[3], null,
+          favouredNodes);
+      assertEquals(targets.length, 2);
+      for (int i = 0; i < targets.length; i++) {
+        assertTrue("Target should be a part of Expected Targets",
+            expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+      }
+    } finally {
+      ((BlockPlacementPolicyDefault) replicator).setPreferLocalNode(true);
+    }
+  }
+
   private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
       DatanodeDescriptor writer, Set<Node> excludedNodes,
       List<DatanodeDescriptor> favoredNodes) {
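Note (not part of the patch): the behavioral change above is that a favored node is now tried even when dfs.namenode.block-placement-policy.default.prefer-local-node is false, because chooseLocalOrFavoredStorage checks (preferLocalNode || isFavoredNode) where chooseLocalStorage previously checked preferLocalNode alone. Below is a minimal, self-contained Java sketch of only that decision, using hypothetical stand-in names (FavoredNodeSketch, Node, healthy, chooseRandom) instead of the real Hadoop types, and omitting the storage-type and excluded-node handling that the real method keeps.

// Standalone sketch of the dispatch rule introduced by this patch.
// All names here are illustrative stand-ins, not Hadoop classes.
import java.util.Optional;

public class FavoredNodeSketch {

  // Hypothetical stand-in for a datanode descriptor.
  record Node(String name, boolean healthy) { }

  private final boolean preferLocalNode;

  FavoredNodeSketch(boolean preferLocalNode) {
    this.preferLocalNode = preferLocalNode;
  }

  // Mirrors the shape of chooseLocalOrFavoredStorage from the patch.
  Optional<Node> chooseLocalOrFavored(Node localOrFavoredNode, boolean isFavoredNode) {
    if (localOrFavoredNode == null) {
      return chooseRandom();   // no candidate at all: fall back to a random choice
    }
    // Key change: a favored node is considered even when preferLocalNode is false.
    if ((preferLocalNode || isFavoredNode) && localOrFavoredNode.healthy()) {
      return Optional.of(localOrFavoredNode);
    }
    return Optional.empty();   // caller continues with the normal placement steps
  }

  private Optional<Node> chooseRandom() {
    return Optional.empty();   // placeholder for the real random selection
  }

  public static void main(String[] args) {
    FavoredNodeSketch policy = new FavoredNodeSketch(false); // prefer-local-node = false
    Node node = new Node("dn1", true);
    System.out.println(policy.chooseLocalOrFavored(node, true));  // Optional[Node[name=dn1, healthy=true]]
    System.out.println(policy.chooseLocalOrFavored(node, false)); // Optional.empty
  }
}

With preferLocalNode set to false, the favored-node call returns a target while the plain writer-local call does not; this is the asymmetry the new test testChooseFromFavoredNodesWhenPreferLocalSetToFalse asserts against the real placement policy.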