From f9427f1760cce7e0befc3e066cebd0912652a411 Mon Sep 17 00:00:00 2001
From: Tsz-Wo Nicholas Sze
Date: Thu, 7 May 2015 11:36:35 -0700
Subject: [PATCH] HDFS-7980. Incremental BlockReport will dramatically slow
 down namenode startup. Contributed by Walter Su

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |   3 +
 .../server/blockmanagement/BlockManager.java     |   8 +-
 .../blockmanagement/TestBlockManager.java        | 108 ++++++++++++++++++
 3 files changed, 115 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17faebf7b2..74456dbb45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -710,6 +710,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8305: HDFS INotify: the destination field of RenameOp should always
     end with the file name (cmccabe)
 
+    HDFS-7980. Incremental BlockReport will dramatically slow down namenode
+    startup. (Walter Su via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 53ffe0bc35..87cb63ca51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1810,7 +1810,7 @@ public boolean processReport(final DatanodeID nodeID,
       return !node.hasStaleStorages();
     }
 
-    if (storageInfo.numBlocks() == 0) {
+    if (storageInfo.getBlockReportCount() == 0) {
       // The first block report can be processed a lot more efficiently than
       // ordinary block reports. This shortens restart times.
       processFirstBlockReport(storageInfo, newReport);
@@ -2064,7 +2064,7 @@ private void processFirstBlockReport(
       final BlockListAsLongs report) throws IOException {
     if (report == null) return;
     assert (namesystem.hasWriteLock());
-    assert (storageInfo.numBlocks() == 0);
+    assert (storageInfo.getBlockReportCount() == 0);
 
     for (BlockReportReplica iblk : report) {
       ReplicaState reportedState = iblk.getState();
@@ -2476,14 +2476,14 @@ private void addStoredBlockImmediate(BlockInfoContiguous storedBlock,
     }
 
     // just add it
-    storageInfo.addBlock(storedBlock);
+    AddBlockResult result = storageInfo.addBlock(storedBlock);
 
     // Now check for completion of blocks and safe block count
     int numCurrentReplica = countLiveNodes(storedBlock);
     if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
         && numCurrentReplica >= minReplication) {
       completeBlock(storedBlock.getBlockCollection(), storedBlock, false);
-    } else if (storedBlock.isComplete()) {
+    } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that.
       // In the case that the block just became complete above, completeBlock()
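
Note (illustration only, not part of the patch): the BlockManager change above hinges on the difference between DatanodeStorageInfo.numBlocks() and DatanodeStorageInfo.getBlockReportCount(). An incremental block report (IBR) processed before the first full block report already adds blocks to the storage, so the old numBlocks() == 0 check sent the first full report down the slow per-block path during startup; getBlockReportCount() counts only processed full reports, so the fast processFirstBlockReport() path is preserved. The sketch below restates the two patched decisions in isolation; FirstFullReportLogic is a hypothetical helper assumed to sit next to BlockManager in org.apache.hadoop.hdfs.server.blockmanagement, while the types and methods it calls are the ones touched by this patch.

    package org.apache.hadoop.hdfs.server.blockmanagement;

    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;

    // Hypothetical helper, used only to illustrate the patched checks.
    class FirstFullReportLogic {

      // Old check: storageInfo.numBlocks() == 0. Blocks added by an earlier IBR
      // make numBlocks() non-zero, so the first full report would skip
      // processFirstBlockReport() and be processed block-by-block at startup.
      static boolean isFirstFullBlockReport(DatanodeStorageInfo storageInfo) {
        // New check: only full block reports bump getBlockReportCount(), so an
        // IBR received first no longer disables the fast startup path.
        return storageInfo.getBlockReportCount() == 0;
      }

      // Companion change in addStoredBlockImmediate(): count the replica toward
      // the safe-block count only if this call actually added it, so a replica
      // already registered by the earlier IBR is not counted twice.
      static boolean countsTowardSafeBlocks(BlockInfoContiguous storedBlock,
          AddBlockResult result) {
        return storedBlock.isComplete() && result == AddBlockResult.ADDED;
      }
    }

The second guard matters because addStoredBlockImmediate() is reached from processFirstBlockReport() while the namenode is still in startup safe mode, which is the state the new test below exercises.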
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 9ce16f219a..1e09e19ebb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -47,10 +47,14 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Assert;
@@ -646,6 +650,110 @@ public void testSafeModeIBRAfterIncremental() throws Exception {
         BlockListAsLongs.EMPTY, null, false);
     assertEquals(1, ds.getBlockReportCount());
   }
+
+  /**
+   * Test that when the NN starts and is in safe mode, it can receive an
+   * incremental block report before the first full block report.
+   */
+  @Test
+  public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
+    // pretend to be in safemode
+    doReturn(true).when(fsn).isInStartupSafeMode();
+
+    DatanodeDescriptor node = nodes.get(0);
+    DatanodeStorageInfo ds = node.getStorageInfos()[0];
+    node.isAlive = true;
+    DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");
+
+    // register new node
+    bm.getDatanodeManager().registerDatanode(nodeReg);
+    bm.getDatanodeManager().addDatanode(node);
+    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
+    assertEquals(0, ds.getBlockReportCount());
+    // Build an incremental report
+    List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
+    // Build a full report
+    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
+
+    // blk_42 is finalized.
+    long receivedBlockId = 42;  // arbitrary
+    BlockInfoContiguous receivedBlock = addBlockToBM(receivedBlockId);
+    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock),
+        ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
+    builder.add(new FinalizedReplica(receivedBlock, null, null));
+
+    // blk_43 is under construction.
+    long receivingBlockId = 43;
+    BlockInfoContiguous receivingBlock = addUcBlockToBM(receivingBlockId);
+    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock),
+        ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
+    builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));
+
+    // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
+    long receivingReceivedBlockId = 44;
+    BlockInfoContiguous receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
+    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
+        ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
+    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock),
+        ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
+    builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));
+
+    // blk_45 is not in full BR, because it's deleted.
+    long ReceivedDeletedBlockId = 45;
+    rdbiList.add(new ReceivedDeletedBlockInfo(
+        new Block(ReceivedDeletedBlockId),
+        ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
+    rdbiList.add(new ReceivedDeletedBlockInfo(
+        new Block(ReceivedDeletedBlockId),
+        ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));
+
+    // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
+    long existedBlockId = 46;
+    BlockInfoContiguous existedBlock = addBlockToBM(existedBlockId);
+    builder.add(new FinalizedReplica(existedBlock, null, null));
+
+    // process IBR and full BR
+    StorageReceivedDeletedBlocks srdb =
+        new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()),
+            rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
+    bm.processIncrementalBlockReport(node, srdb);
+    // Make sure it's the first full report
+    assertEquals(0, ds.getBlockReportCount());
+    bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
+        builder.build(), null, false);
+    assertEquals(1, ds.getBlockReportCount());
+
+    // verify the storage info is correct
+    assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo
+        (ds) >= 0);
+    assertTrue(((BlockInfoContiguousUnderConstruction) bm.
+        getStoredBlock(new Block(receivingBlockId))).getNumExpectedLocations() > 0);
+    assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId))
+        .findStorageInfo(ds) >= 0);
+    assertNull(bm.getStoredBlock(new Block(ReceivedDeletedBlockId)));
+    assertTrue(bm.getStoredBlock(new Block(existedBlock)).findStorageInfo
+        (ds) >= 0);
+  }
+
+  private BlockInfoContiguous addBlockToBM(long blkId) {
+    Block block = new Block(blkId);
+    BlockInfoContiguous blockInfo =
+        new BlockInfoContiguous(block, (short) 3);
+    BlockCollection bc = Mockito.mock(BlockCollection.class);
+    Mockito.doReturn((short) 3).when(bc).getBlockReplication();
+    bm.blocksMap.addBlockCollection(blockInfo, bc);
+    return blockInfo;
+  }
+
+  private BlockInfoContiguous addUcBlockToBM(long blkId) {
+    Block block = new Block(blkId);
+    BlockInfoContiguousUnderConstruction blockInfo =
+        new BlockInfoContiguousUnderConstruction(block, (short) 3);
+    BlockCollection bc = Mockito.mock(BlockCollection.class);
+    Mockito.doReturn((short) 3).when(bc).getBlockReplication();
+    bm.blocksMap.addBlockCollection(blockInfo, bc);
+    return blockInfo;
+  }
 
   /**
    * Tests that a namenode doesn't choose a datanode with full disks to