diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0b1da8bccc..7f0f17e7b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1287,7 +1287,14 @@ public LocatedBlock convertLastBlockToUnderConstruction(
     neededReconstruction.remove(lastBlock, replicas.liveReplicas(),
         replicas.readOnlyReplicas(), replicas.outOfServiceReplicas(),
         getExpectedRedundancyNum(lastBlock));
-    pendingReconstruction.remove(lastBlock);
+    PendingBlockInfo remove = pendingReconstruction.remove(lastBlock);
+    if (remove != null) {
+      List<DatanodeStorageInfo> locations = remove.getTargets();
+      DatanodeStorageInfo[] removedBlockTargets =
+          new DatanodeStorageInfo[locations.size()];
+      locations.toArray(removedBlockTargets);
+      DatanodeStorageInfo.decrementBlocksScheduled(removedBlockTargets);
+    }
 
     // remove this block from the list of pending blocks to be deleted.
     for (DatanodeStorageInfo storage : targets) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
index 95f4e2c597..95d6825d29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
@@ -202,4 +202,60 @@ public void testScheduledBlocksCounterDecrementOnDeletedBlock()
     }
   }
 
+  /**
+   * Test the BlocksScheduled counter when truncating a file.
+   * @throws Exception
+   */
+  @Test
+  public void testBlocksScheduledCounterOnTruncate() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster.waitActive();
+    BlockManager bm = cluster.getNamesystem().getBlockManager();
+    try {
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // 1. stop a datanode
+      cluster.stopDataNode(0);
+
+      // 2. create a file
+      Path filePath = new Path("/tmp");
+      DFSTestUtil.createFile(dfs, filePath, 1024, (short) 3, 0L);
+
+      DatanodeManager datanodeManager =
+          cluster.getNamesystem().getBlockManager().getDatanodeManager();
+      ArrayList<DatanodeDescriptor> dnList =
+          new ArrayList<DatanodeDescriptor>();
+      datanodeManager.fetchDatanodes(dnList, dnList, false);
+
+      // 3. restart the stopped datanode
+      cluster.restartDataNode(0);
+
+      // 4. disable the heartbeats
+      for (DataNode dn : cluster.getDataNodes()) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+      }
+
+      cluster.getNamesystem().writeLock();
+      try {
+        BlockManagerTestUtil.computeAllPendingWork(bm);
+        BlockManagerTestUtil.updateState(bm);
+        assertEquals(1L, bm.getPendingReconstructionBlocksCount());
+      } finally {
+        cluster.getNamesystem().writeUnlock();
+      }
+
+      // 5. truncate the file whose block exists in pending reconstruction
+      dfs.truncate(filePath, 10);
+      int blocksScheduled = 0;
+      for (DatanodeDescriptor descriptor : dnList) {
+        if (descriptor.getBlocksScheduled() != 0) {
+          blocksScheduled += descriptor.getBlocksScheduled();
+        }
+      }
+      assertEquals(0, blocksScheduled);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
\ No newline at end of file
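
Reviewer note on the BlockManager hunk: the NameNode keeps a per-storage count
of blocks it has scheduled to be written there, and block placement subtracts
that scheduled work from the datanode's reported remaining space. Before this
change, removing a block from pendingReconstruction during truncate discarded
the PendingBlockInfo without decrementing the counters of its recorded
targets, leaving each target's scheduled count inflated. The sketch below is
a minimal, self-contained model of that bookkeeping pattern; the class and
method names are hypothetical illustrations, not the Hadoop API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Hypothetical model of the scheduled-blocks bookkeeping, for review
 * context only. String targets stand in for DatanodeStorageInfo and the
 * remembered list stands in for PendingBlockInfo#getTargets().
 */
public class ScheduledBlocksSketch {
  /** Per-target count of blocks scheduled to be written there. */
  private final Map<String, Integer> blocksScheduled = new HashMap<>();

  /** Hand reconstruction of one block to the given targets. */
  List<String> schedule(List<String> targets) {
    for (String t : targets) {
      blocksScheduled.merge(t, 1, Integer::sum);
    }
    return new ArrayList<>(targets); // remembered by the pending entry
  }

  /**
   * Cancel a pending reconstruction (what truncate triggers). The
   * decrement loop is the step the patch adds; without it each target's
   * count stays inflated after the pending entry is dropped.
   */
  void cancel(List<String> rememberedTargets) {
    for (String t : rememberedTargets) {
      blocksScheduled.merge(t, -1, Integer::sum);
    }
  }

  public static void main(String[] args) {
    ScheduledBlocksSketch s = new ScheduledBlocksSketch();
    List<String> pending = s.schedule(List.of("dn0"));
    s.cancel(pending); // the fix: decrement on cancellation
    System.out.println(s.blocksScheduled.get("dn0")); // 0, no leaked count
  }
}

The new test drives exactly this path: with one datanode down, the last block
ends up in pendingReconstruction; the truncate then removes it, and the final
assertion checks that no DatanodeDescriptor is left reporting scheduled
blocks.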