From 118a35bc2eabe3918b4797a1b626e9a39d77754b Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Thu, 8 Oct 2015 14:11:02 -0700 Subject: [PATCH] HDFS-9204. DatanodeDescriptor#PendingReplicationWithoutTargets is wrongly calculated. Contributed by Mingliang Liu. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/blockmanagement/DatanodeDescriptor.java | 8 ++++---- .../hdfs/server/blockmanagement/ReplicationWork.java | 3 +++ .../server/blockmanagement/TestUnderReplicatedBlocks.java | 5 +++++ 4 files changed, 15 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1116f800fb..95351a2298 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -356,6 +356,9 @@ Trunk (Unreleased) HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw) + HDFS-9204. DatanodeDescriptor#PendingReplicationWithoutTargets is wrongly + calculated. (Mingliang Liu via jing9) + BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS HDFS-7347. 
Configurable erasure coding policy for individual files and diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index b258f0600e..fde645ebb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -256,7 +256,7 @@ public CachedBlocksList getPendingUncached() { private boolean disallowed = false; // The number of replication work pending before targets are determined - private int PendingReplicationWithoutTargets = 0; + private int pendingReplicationWithoutTargets = 0; // HB processing can use it to tell if it is the first HB since DN restarted private boolean heartbeatedSinceRegistration = false; @@ -594,11 +594,11 @@ Iterator getBlockIterator(final String storageID) { } void incrementPendingReplicationWithoutTargets() { - PendingReplicationWithoutTargets++; + pendingReplicationWithoutTargets++; } void decrementPendingReplicationWithoutTargets() { - PendingReplicationWithoutTargets--; + pendingReplicationWithoutTargets--; } /** @@ -651,7 +651,7 @@ void addBlocksToBeInvalidated(List blocklist) { * The number of work items that are pending to be replicated */ int getNumberOfBlocksToBeReplicated() { - return PendingReplicationWithoutTargets + replicateBlocks.size(); + return pendingReplicationWithoutTargets + replicateBlocks.size(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java index 8266f45373..8a3900c22a 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java @@ -30,6 +30,9 @@ public ReplicationWork(BlockInfo block, BlockCollection bc, int priority) { super(block, bc, srcNodes, containingNodes, liveReplicaStorages, additionalReplRequired, priority); + assert getSrcNodes().length == 1 : + "There should be exactly 1 source node that has been selected"; + getSrcNodes()[0].incrementPendingReplicationWithoutTargets(); BlockManager.LOG.debug("Creating a ReplicationWork to recover " + block); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java index 27b35f0b56..c0b54b0f62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.junit.Test; +import org.mockito.internal.util.reflection.Whitebox; import java.util.Iterator; @@ -135,6 +136,10 @@ public void testNumberOfBlocksToBeReplicated() throws Exception { assertEquals(NUM_OF_BLOCKS, bm.getUnderReplicatedNotMissingBlocks()); bm.computeDatanodeWork(); + assertTrue("The number of replication work pending before targets are " + + "determined should be non-negative.", + (Integer)Whitebox.getInternalState(secondDn, + "pendingReplicationWithoutTargets") >= 0); assertTrue("The number of blocks to be replicated should be less than " + "or equal to " + bm.replicationStreamsHardLimit,