HDFS-2973. Re-enable NO_ACK optimization for block deletion. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1292611 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Todd Lipcon 2012-02-23 01:25:14 +00:00
parent 7527e943e6
commit 90a14f89e1
3 changed files with 18 additions and 10 deletions

View File

@@ -224,3 +224,5 @@ HDFS-2974. MiniDFSCluster does not delete standby NN name dirs during format. (a
HDFS-2929. Stress test and fixes for block synchronization (todd)
HDFS-2972. Small optimization building incremental block report (todd)
HDFS-2973. Re-enable NO_ACK optimization for block deletion. (todd)

View File

@@ -61,6 +61,7 @@
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -2672,10 +2673,10 @@ public int getTotalBlocks() {
public void removeBlock(Block block) {
assert namesystem.hasWriteLock();
// TODO(HA): the following causes some problems for HA:
// the SBN doesn't get block deletions until the next
// BR...
// block.setNumBytes(BlockCommand.NO_ACK);
// No need to ACK blocks that are being removed entirely
// from the namespace, since the removal of the associated
// file already removes them from the block map below.
block.setNumBytes(BlockCommand.NO_ACK);
addToInvalidates(block);
corruptReplicas.removeFromCorruptReplicasMap(block);
blocksMap.removeBlock(block);

View File

@@ -311,8 +311,9 @@ public void testBlocksRemovedWhileInSafeMode() throws Exception {
// It will initially have all of the blocks necessary.
assertSafeMode(nn1, 10, 10);
// Delete those blocks while the SBN is in safe mode - this
// should reduce it back below the threshold
// Delete those blocks while the SBN is in safe mode.
// This doesn't affect the SBN, since deletions are not
// ACKed when due to block removals.
banner("Removing the blocks without rolling the edit log");
fs.delete(new Path("/test"), true);
BlockManagerTestUtil.computeAllPendingWork(
@@ -323,8 +324,10 @@ public void testBlocksRemovedWhileInSafeMode() throws Exception {
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
assertSafeMode(nn1, 0, 10);
assertSafeMode(nn1, 10, 10);
// When we catch up to active namespace, it will restore back
// to 0 blocks.
banner("Waiting for standby to catch up to active namespace");
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
@@ -372,8 +375,9 @@ public void testAppendWhileInSafeMode() throws Exception {
IOUtils.closeStream(stm);
}
// Delete those blocks while the SBN is in safe mode - this
// should reduce it back below the threshold
// Delete those blocks while the SBN is in safe mode.
// This will not ACK the deletions to the SBN, so it won't
// notice until we roll the edit log.
banner("Removing the blocks without rolling the edit log");
fs.delete(new Path("/test"), true);
BlockManagerTestUtil.computeAllPendingWork(
@@ -384,8 +388,9 @@ public void testAppendWhileInSafeMode() throws Exception {
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
assertSafeMode(nn1, 0, 4);
assertSafeMode(nn1, 4, 4);
// When we roll the edit log, the deletions will go through.
banner("Waiting for standby to catch up to active namespace");
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);