diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f437ad8cad..811ee75420 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -829,6 +829,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7410. Support CreateFlags with append() to support hsync() for
     appending streams (Vinayakumar B via Colin P. McCabe)
 
+    HDFS-7742. Favoring decommissioning node for replication can cause a block
+    to stay underreplicated for long periods (Nathan Roberts via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ad40782b3f..f6e15a337c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1637,7 +1637,8 @@ else if (excessBlocks != null && excessBlocks.contains(block)) {
       // If so, do not select the node as src node
       if ((nodesCorrupt != null) && nodesCorrupt.contains(node))
         continue;
-      if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY 
+      if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY
+          && !node.isDecommissionInProgress()
           && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams)
       {
         continue; // already reached replication limit
@@ -1652,13 +1653,12 @@ else if (excessBlocks != null && excessBlocks.contains(block)) {
       // never use already decommissioned nodes
       if(node.isDecommissioned())
         continue;
-      // we prefer nodes that are in DECOMMISSION_INPROGRESS state
-      if(node.isDecommissionInProgress() || srcNode == null) {
+
+      // We got this far, current node is a reasonable choice
+      if (srcNode == null) {
         srcNode = node;
         continue;
       }
-      if(srcNode.isDecommissionInProgress())
-        continue;
       // switch to a different node randomly
       // this to prevent from deterministically selecting the same node even
       // if the node failed to replicate the block on previous iterations
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 707c7807b8..91abb2a5f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -534,6 +534,48 @@ public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {
         UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
   }
 
+  @Test
+  public void testFavorDecomUntilHardLimit() throws Exception {
+    bm.maxReplicationStreams = 0;
+    bm.replicationStreamsHardLimit = 1;
+
+    long blockId = 42; // arbitrary
+    Block aBlock = new Block(blockId, 0, 0);
+    List<DatanodeDescriptor> origNodes = getNodes(0, 1);
+    // Add the block to the first node.
+    addBlockOnNodes(blockId,origNodes.subList(0,1));
+    origNodes.get(0).startDecommission();
+
+    List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
+    List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
+
+    assertNotNull("Chooses decommissioning source node for a normal replication"
+        + " if all available source nodes have reached their replication"
+        + " limits below the hard limit.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
+
+
+    // Increase the replication count to test replication count > hard limit
+    DatanodeStorageInfo targets[] = { origNodes.get(1).getStorageInfos()[0] };
+    origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
+
+    assertNull("Does not choose a source decommissioning node for a normal"
+        + " replication when all available nodes exceed the hard limit.",
+        bm.chooseSourceDatanode(
+            aBlock,
+            cntNodes,
+            liveNodes,
+            new NumberReplicas(),
+            UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
+  }
+
+
+
   @Test
   public void testSafeModeIBR() throws Exception {
     DatanodeDescriptor node = spy(nodes.get(0));
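
For context, the sketch below is a minimal standalone paraphrase of the source-selection
policy the BlockManager change above implements; the Node class and chooseSource method
are hypothetical stand-ins for DatanodeDescriptor and BlockManager.chooseSourceDatanode,
not code from the patch. It illustrates the new behavior: a decommissioning node is no
longer preferred over other candidates, it is exempt from the soft per-node limit
(maxReplicationStreams) so it can keep serving as a source, but like any other node it
is rejected once it reaches the hard limit (replicationStreamsHardLimit).

// Hypothetical standalone sketch of the post-HDFS-7742 selection policy.
import java.util.List;
import java.util.Random;

class SourceSelectionSketch {
  static final int QUEUE_HIGHEST_PRIORITY = 0; // mirrors UnderReplicatedBlocks

  // Stand-in for DatanodeDescriptor; only the fields the policy consults.
  static class Node {
    boolean decommissioned;
    boolean decommissionInProgress;
    int blocksToBeReplicated;
  }

  static Node chooseSource(List<Node> candidates, int priority,
      int maxReplicationStreams, int replicationStreamsHardLimit) {
    Random rand = new Random();
    Node srcNode = null;
    for (Node node : candidates) {
      // Soft limit: waived for highest-priority blocks and, after this
      // patch, also for decommissioning nodes.
      if (priority != QUEUE_HIGHEST_PRIORITY
          && !node.decommissionInProgress
          && node.blocksToBeReplicated >= maxReplicationStreams) {
        continue; // already reached replication limit
      }
      // Hard limit still applies to every node, decommissioning or not.
      if (node.blocksToBeReplicated >= replicationStreamsHardLimit) {
        continue;
      }
      // Never use already decommissioned nodes.
      if (node.decommissioned) {
        continue;
      }
      // We got this far, current node is a reasonable choice.
      if (srcNode == null) {
        srcNode = node;
        continue;
      }
      // Switch to a different node randomly so the same source is not
      // picked deterministically on every iteration.
      if (rand.nextBoolean()) {
        srcNode = node;
      }
    }
    return srcNode;
  }
}

Under the old preference, a failing decommissioning node was re-chosen as the source on
every pass, which is how a block could stay underreplicated for long periods; with this
change it competes equally with other viable sources, and testFavorDecomUntilHardLimit
verifies both sides of the hard-limit boundary.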