HDFS-5672. TestHASafeMode#testSafeBlockTracking fails in trunk. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1581994 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2014-03-26 18:36:08 +00:00
parent 3d207f1db2
commit fe8c3dc2b8
3 changed files with 33 additions and 10 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -706,6 +706,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-6115. Call flush() for every append on block scan verification log.
     (Vinayakumar B via szetszwo)
 
+    HDFS-5672. TestHASafeMode#testSafeBlockTracking fails in trunk. (jing9)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1874,8 +1874,9 @@ private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage,
     int headIndex = 0; //currently the delimiter is in the head of the list
     int curIndex;
 
-    if (newReport == null)
-      newReport = new BlockListAsLongs();
+    if (newReport == null) {
+      newReport = new BlockListAsLongs();
+    }
     // scan the report and process newly reported blocks
     BlockReportIterator itBR = newReport.getBlockReportIterator();
     while(itBR.hasNext()) {
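For readers outside the HDFS codebase, the guard above is the usual defensive-default pattern: a null block report is replaced by an empty one so the scan loop below can run unconditionally. A minimal self-contained sketch of the same idea, with illustrative names rather than the real BlockListAsLongs API:

import java.util.Collections;
import java.util.List;

class ReportScanSketch {
    // Count entries in a possibly-null report; null ("no report")
    // is treated exactly like an empty report.
    static int countBlocks(List<Long> newReport) {
        if (newReport == null) {
            newReport = Collections.emptyList();
        }
        int count = 0;
        for (Long id : newReport) {
            count++;
        }
        return count;
    }
}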
@@ -1968,9 +1969,11 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn,
 
     // Ignore replicas already scheduled to be removed from the DN
     if(invalidateBlocks.contains(dn.getDatanodeUuid(), block)) {
-      /* TODO: following assertion is incorrect, see HDFS-2668
-      assert storedBlock.findDatanode(dn) < 0 : "Block " + block
-        + " in recentInvalidatesSet should not appear in DN " + dn; */
+      /*
+       * TODO: following assertion is incorrect, see HDFS-2668 assert
+       * storedBlock.findDatanode(dn) < 0 : "Block " + block +
+       * " in recentInvalidatesSet should not appear in DN " + dn;
+       */
       return storedBlock;
     }
 
@@ -1990,8 +1993,8 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn,
     }
 
     if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
-      toUC.add(new StatefulBlockInfo(
-          (BlockInfoUnderConstruction)storedBlock, block, reportedState));
+      toUC.add(new StatefulBlockInfo((BlockInfoUnderConstruction) storedBlock,
+          new Block(block), reportedState));
       return storedBlock;
     }
 
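The substantive change in this hunk is new Block(block): the reported block is copied before being queued rather than stored by reference. One reading, consistent with the bug this commit fixes, is the classic aliasing hazard with an iterator that reuses one mutable instance. A self-contained sketch (FakeBlock is a stand-in, not the real HDFS Block class):

import java.util.ArrayList;
import java.util.List;

public class CopyOnQueueSketch {
    // Stand-in for a mutable block record.
    static class FakeBlock {
        long id;
        FakeBlock(long id) { this.id = id; }
        FakeBlock(FakeBlock other) { this.id = other.id; } // copy constructor
    }

    public static void main(String[] args) {
        FakeBlock scratch = new FakeBlock(0);      // one instance reused per iteration
        List<FakeBlock> queued = new ArrayList<>();
        for (long id = 1; id <= 3; id++) {
            scratch.id = id;                       // the "iterator" mutates in place
            queued.add(new FakeBlock(scratch));    // queue a copy, not the alias
        }
        for (FakeBlock b : queued) {
            System.out.println(b.id);              // prints 1, 2, 3; aliases would print 3, 3, 3
        }
    }
}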
@@ -2875,7 +2878,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID,
       // The DataNode is reporting an unknown storage. Usually the NN learns
       // about new storages from heartbeats but during NN restart we may
       // receive a block report or incremental report before the heartbeat.
       // We must handle this for protocol compatibility. This issue was
-      // uncovered by HDFS-6904.
+      // uncovered by HDFS-6094.
       node.updateStorage(srdb.getStorage());
     }
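The comment describes a compatibility case: after a NameNode restart, a report can arrive before the heartbeat that would normally announce a DataNode storage, so an unknown storage is registered on first sight instead of failing the report. A rough sketch of that register-on-first-use pattern, with illustrative names (not HDFS classes):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class StorageRegistrySketch {
    static class StorageInfo {
        final String id;
        StorageInfo(String id) { this.id = id; }
    }

    private final ConcurrentMap<String, StorageInfo> byId = new ConcurrentHashMap<>();

    // Reports may race ahead of heartbeats, so an unseen storage is
    // registered lazily; computeIfAbsent makes that atomic per key.
    StorageInfo getOrRegister(String storageId) {
        return byId.computeIfAbsent(storageId, StorageInfo::new);
    }
}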

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java

@@ -592,7 +592,16 @@ public void testBlocksRemovedWhileInSafeModeEditsArriveFirst() throws Exception
     // below 0.
     assertSafeMode(nn1, 0, 0, 3, 0);
   }
 
+  @Test
+  public void testSafeBlockTracking() throws Exception {
+    testSafeBlockTracking(false);
+  }
+
+  @Test
+  public void testSafeBlockTracking2() throws Exception {
+    testSafeBlockTracking(true);
+  }
 
   /**
    * Test that the number of safe blocks is accounted correctly even when
@@ -600,9 +609,15 @@ public void testBlocksRemovedWhileInSafeModeEditsArriveFirst() throws Exception
    * If a FINALIZED report arrives at the SBN before the block is marked
    * COMPLETE, then when we get the OP_CLOSE we need to count it as "safe"
    * at that point. This is a regression test for HDFS-2742.
+   *
+   * @param noFirstBlockReport If this is set to true, we shut down NN1 before
+   * closing the writing streams. In this way, when NN1 restarts, all DNs will
+   * first send it incremental block reports before the first full block
+   * report, and NN1 will not treat the full block report as the first block
+   * report in BlockManager#processReport.
    */
-  @Test
-  public void testSafeBlockTracking() throws Exception {
+  private void testSafeBlockTracking(boolean noFirstBlockReport)
+      throws Exception {
     banner("Starting with NN0 active and NN1 standby, creating some " +
         "UC blocks plus some other blocks to force safemode");
     DFSTestUtil.createFile(fs, new Path("/other-blocks"), 10*BLOCK_SIZE, (short) 3, 1L);
@@ -619,6 +634,9 @@ public void testSafeBlockTracking() throws Exception {
       // the namespace during startup and enter safemode.
       nn0.getRpcServer().rollEditLog();
     } finally {
+      if (noFirstBlockReport) {
+        cluster.shutdownNameNode(1);
+      }
       for (FSDataOutputStream stm : stms) {
         IOUtils.closeStream(stm);
       }
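The two thin @Test wrappers added earlier each delegate one report ordering to this shared helper. Assuming the suite uses JUnit 4 (as the bare @Test annotations suggest), the same coverage could also be expressed with the Parameterized runner; a sketch with the helper body elided:

import java.util.Arrays;
import java.util.Collection;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class SafeBlockTrackingParamSketch {
    @Parameters
    public static Collection<Object[]> orderings() {
        // false: full block report arrives first; true: incremental reports first
        return Arrays.asList(new Object[][] { { false }, { true } });
    }

    private final boolean noFirstBlockReport;

    public SafeBlockTrackingParamSketch(boolean noFirstBlockReport) {
        this.noFirstBlockReport = noFirstBlockReport;
    }

    @Test
    public void testSafeBlockTracking() throws Exception {
        // would invoke the shared helper with this.noFirstBlockReport
    }
}

The committed version keeps two named tests instead, which leaves each ordering individually addressable from the test runner.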