diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fa3631545d..2de1332878 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -522,6 +522,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-3616. Fix a ConcurrentModificationException bug that BP actor threads
     may not be shutdown properly in DataNode. (Jing Zhao via szetszwo)
 
+    HDFS-4127. Log message is not correct in case of short of replica.
+    (Junping Du via suresh)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 211a574f1d..f976c99615 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -185,7 +185,7 @@ private DatanodeDescriptor chooseTarget(int numOfReplicas,
     if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
       return writer;
     }
-    int totalReplicasExpected = numOfReplicas;
+    int totalReplicasExpected = numOfReplicas + results.size();
 
     int numOfResults = results.size();
     boolean newBlock = (numOfResults==0);
@@ -231,7 +231,8 @@ private DatanodeDescriptor chooseTarget(int numOfReplicas,
           maxNodesPerRack, results, avoidStaleNodes);
     } catch (NotEnoughReplicasException e) {
       LOG.warn("Not able to place enough replicas, still in need of "
-          + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
+          + (totalReplicasExpected - results.size()) + " to reach "
+          + totalReplicasExpected + "\n"
           + e.getMessage());
       if (avoidStaleNodes) {
         // ecxludedNodes now has - initial excludedNodes, any nodes that were
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 7fa2fcbf72..7daccd320e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -19,6 +19,7 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -44,6 +45,10 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -375,7 +380,71 @@ public void testChooseTarget5() throws Exception {
         new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
-    assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
+    assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
+  }
+
+  /**
+   * This testcase tries to choose more targets than there are available
+   * nodes and checks the result.
+   * @throws Exception
+   */
+  @Test
+  public void testChooseTargetWithMoreThanAvaiableNodes() throws Exception {
+    // make data nodes 0 & 1 unqualified for choosing: not enough disk space
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
+    }
+
+    final TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
+    // try to choose NUM_OF_DATANODES targets, which is more than the number
+    // of nodes actually available
+    DatanodeDescriptor[] targets = replicator.chooseTarget(filename,
+        NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
+        BLOCK_SIZE);
+    assertEquals(targets.length, NUM_OF_DATANODES - 2);
+
+    final List<LoggingEvent> log = appender.getLog();
+    assertNotNull(log);
+    assertFalse(log.size() == 0);
+    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
+
+    assertEquals(lastLogEntry.getLevel(), Level.WARN);
+    // Supposed to place replicas on each node, but two data nodes are not
+    // available for placing replicas, so here we expect a shortfall of 2
+    assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
+
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+    }
+  }
+
+  class TestAppender extends AppenderSkeleton {
+    private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    protected void append(final LoggingEvent loggingEvent) {
+      log.add(loggingEvent);
+    }
+
+    @Override
+    public void close() {
+    }
+
+    public List<LoggingEvent> getLog() {
+      return new ArrayList<LoggingEvent>(log);
+    }
   }
 
   private boolean containsWithinRange(DatanodeDescriptor target,
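
For context on the arithmetic the patch corrects: chooseTarget can be invoked with some replicas already chosen (a non-empty results list), in which case the numOfReplicas parameter counts only the additional targets requested. Below is a minimal standalone sketch of the corrected accounting; the class name and placeholder node strings are illustrative and not part of the patch.

import java.util.ArrayList;
import java.util.List;

// Illustrative sketch only: mirrors the accounting in the patched
// BlockPlacementPolicyDefault.chooseTarget(), with strings standing in
// for DatanodeDescriptor targets.
public class ReplicaShortfallSketch {
  public static void main(String[] args) {
    List<String> results = new ArrayList<String>();
    results.add("datanode-0");      // one replica already placed by a caller
    int numOfReplicas = 5;          // additional targets requested

    // The fix: the expected total must include replicas already chosen.
    int totalReplicasExpected = numOfReplicas + results.size(); // 6

    // Suppose only three more targets can be found before the placement
    // policy gives up (NotEnoughReplicasException in the real code).
    results.add("datanode-2");
    results.add("datanode-3");
    results.add("datanode-4");

    // Corrected warning arithmetic: compute the shortfall from the live
    // result list rather than echoing the original numOfReplicas parameter.
    System.out.println("Not able to place enough replicas, still in need of "
        + (totalReplicasExpected - results.size())   // 6 - 4 = 2
        + " to reach " + totalReplicasExpected);
  }
}

Run as-is, this prints "still in need of 2 to reach 6", the same shape of message the new test checks with contains("in need of 2").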