HDFS-4127. Log message is not correct in case of short of replica. Contributed by Junping Du.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1403616 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2012-10-30 05:48:58 +00:00
parent 16e21dfe92
commit 7604a276a7
3 changed files with 76 additions and 3 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -522,6 +522,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-3616. Fix a ConcurrentModificationException bug that BP actor threads
     may not be shutdown properly in DataNode. (Jing Zhao via szetszwo)
 
+    HDFS-4127. Log message is not correct in case of short of replica.
+    (Junping Du via suresh)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -185,7 +185,7 @@ private DatanodeDescriptor chooseTarget(int numOfReplicas,
     if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
       return writer;
     }
-    int totalReplicasExpected = numOfReplicas;
+    int totalReplicasExpected = numOfReplicas + results.size();
     int numOfResults = results.size();
     boolean newBlock = (numOfResults==0);
 
@@ -231,7 +231,8 @@ private DatanodeDescriptor chooseTarget(int numOfReplicas,
           maxNodesPerRack, results, avoidStaleNodes);
     } catch (NotEnoughReplicasException e) {
       LOG.warn("Not able to place enough replicas, still in need of "
-          + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
+          + (totalReplicasExpected - results.size()) + " to reach "
+          + totalReplicasExpected + "\n"
           + e.getMessage());
       if (avoidStaleNodes) {
         // ecxludedNodes now has - initial excludedNodes, any nodes that were
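The gist of the fix: at the catch site, numOfReplicas still holds the originally requested count, while results has grown as targets were chosen, so the number of replicas actually still missing is totalReplicasExpected - results.size(). Below is a minimal standalone sketch of that arithmetic; the class name is hypothetical and the variable names only mirror the patched method. This is illustrative, not the Hadoop implementation.

// Illustrative sketch only: totalReplicasExpected is fixed before any target
// is chosen, while results grows as placement proceeds, so the remaining
// shortfall must be recomputed when placement fails partway through.
import java.util.ArrayList;
import java.util.List;

public class ReplicaShortfallDemo {
  public static void main(String[] args) {
    int numOfReplicas = 5;                           // additional replicas requested
    List<String> results = new ArrayList<String>();  // targets chosen so far

    // the patched line: the total expected includes already-chosen targets
    int totalReplicasExpected = numOfReplicas + results.size();

    // suppose placement finds 3 targets and then runs out of eligible nodes
    results.add("dn3"); results.add("dn4"); results.add("dn5");

    // the old message printed numOfReplicas (5), overstating the shortfall;
    // the corrected message reports what is actually still missing:
    System.out.println("Not able to place enough replicas, still in need of "
        + (totalReplicasExpected - results.size())
        + " to reach " + totalReplicasExpected);
    // prints: ... still in need of 2 to reach 5
  }
}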

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -19,6 +19,7 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 
@@ -44,6 +45,10 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.Time;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -378,6 +383,70 @@ public void testChooseTarget5() throws Exception {
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
   }
 
+  /**
+   * In this testcase, it tries to choose more targets than available nodes
+   * and checks the result.
+   * @throws Exception
+   */
+  @Test
+  public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
+    // make datanodes 0 and 1 unqualified to be chosen: not enough disk space
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0);
+    }
+
+    final TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+
+    // try to choose NUM_OF_DATANODES targets, which is more than the number
+    // of nodes actually available
+    DatanodeDescriptor[] targets = replicator.chooseTarget(filename,
+        NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
+        BLOCK_SIZE);
+    assertEquals(targets.length, NUM_OF_DATANODES - 2);
+
+    final List<LoggingEvent> log = appender.getLog();
+    assertNotNull(log);
+    assertFalse(log.size() == 0);
+    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
+    assertEquals(lastLogEntry.getLevel(), Level.WARN);
+    // a replica is expected on every node, but two datanodes are not
+    // available, so the warning should report a shortfall of 2
+    assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
+
+    // restore the two datanodes' free space for subsequent tests
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
+    }
+  }
+
+  class TestAppender extends AppenderSkeleton {
+    private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    protected void append(final LoggingEvent loggingEvent) {
+      log.add(loggingEvent);
+    }
+
+    @Override
+    public void close() {
+    }
+
+    public List<LoggingEvent> getLog() {
+      return new ArrayList<LoggingEvent>(log);
+    }
+  }
+
   private boolean containsWithinRange(DatanodeDescriptor target,
       DatanodeDescriptor[] nodes, int startIndex, int endIndex) {
     assert startIndex >= 0 && startIndex < nodes.length;
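The new test verifies the warning by attaching a custom log4j appender to the root logger and inspecting the captured events. For readers unfamiliar with that pattern, here is a self-contained sketch; the class name and message text are illustrative (not part of the patch), and unlike the test above it also detaches the appender afterwards, which is good hygiene in shared test JVMs.

// A generic sketch of the log-capture pattern the new test relies on.
import java.util.ArrayList;
import java.util.List;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggingEvent;

public class LogCaptureDemo {
  /** Collects every event logged while this appender is attached. */
  static class CapturingAppender extends AppenderSkeleton {
    final List<LoggingEvent> events = new ArrayList<LoggingEvent>();
    @Override protected void append(LoggingEvent event) { events.add(event); }
    @Override public void close() { }
    @Override public boolean requiresLayout() { return false; }
  }

  public static void main(String[] args) {
    CapturingAppender appender = new CapturingAppender();
    Logger root = Logger.getRootLogger();
    root.addAppender(appender);
    try {
      // stand-in for the warning BlockPlacementPolicyDefault emits
      Logger.getLogger(LogCaptureDemo.class)
          .warn("Not able to place enough replicas, still in need of 2 to reach 5");
    } finally {
      root.removeAppender(appender);  // don't leak the appender into later tests
    }
    LoggingEvent last = appender.events.get(appender.events.size() - 1);
    System.out.println(last.getLevel() == Level.WARN);                         // true
    System.out.println(((String) last.getMessage()).contains("in need of 2")); // true
  }
}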