HDFS-15351. Blocks scheduled count was wrong on truncate. Contributed by hemanthboyina.
(cherry picked from commit 719b53a79d)
parent 02d37c7d22
commit 887948d127
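
Truncating a file whose last block was queued for reconstruction removed the block from pendingReconstruction without decrementing the BlocksScheduled counter on the chosen target datanodes, so the counter stayed inflated after the truncate. The change captures the PendingBlockInfo returned by pendingReconstruction.remove() and decrements the scheduled-block count for each recorded target, mirroring what block deletion already does; the new test below drives that path end to end.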
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1280,7 +1280,14 @@ public LocatedBlock convertLastBlockToUnderConstruction(
     neededReconstruction.remove(lastBlock, replicas.liveReplicas(),
         replicas.readOnlyReplicas(),
         replicas.outOfServiceReplicas(), getExpectedRedundancyNum(lastBlock));
-    pendingReconstruction.remove(lastBlock);
+    PendingBlockInfo remove = pendingReconstruction.remove(lastBlock);
+    if (remove != null) {
+      List<DatanodeStorageInfo> locations = remove.getTargets();
+      DatanodeStorageInfo[] removedBlockTargets =
+          new DatanodeStorageInfo[locations.size()];
+      locations.toArray(removedBlockTargets);
+      DatanodeStorageInfo.decrementBlocksScheduled(removedBlockTargets);
+    }
 
     // remove this block from the list of pending blocks to be deleted.
     for (DatanodeStorageInfo storage : targets) {
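
The bookkeeping contract behind this fix can be shown with a toy counter. The sketch below is invented for illustration (ScheduledCounterModel and its methods are not Hadoop's DatanodeStorageInfo API): every increment of a datanode's scheduled-block count must be paired with a decrement when the work completes or is cancelled.

import java.util.HashMap;
import java.util.Map;

// Toy model of the invariant the patch restores. Before the fix,
// cancelling pending reconstruction on truncate skipped the
// unschedule step, leaving the counter permanently inflated.
class ScheduledCounterModel {
  private final Map<String, Integer> blocksScheduled = new HashMap<>();

  // Called when reconstruction work is handed to a target datanode.
  void schedule(String datanode) {
    blocksScheduled.merge(datanode, 1, Integer::sum);
  }

  // Must be called when that work finishes or is dropped,
  // e.g. because the block was truncated away.
  void unschedule(String datanode) {
    blocksScheduled.merge(datanode, -1, Integer::sum);
  }

  int getBlocksScheduled(String datanode) {
    return blocksScheduled.getOrDefault(datanode, 0);
  }
}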
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
@@ -202,4 +202,60 @@ public void testScheduledBlocksCounterDecrementOnDeletedBlock()
     }
   }
 
+  /**
+   * Test Block Scheduled counter on truncating a file.
+   * @throws Exception
+   */
+  @Test
+  public void testBlocksScheduledCounterOnTruncate() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster.waitActive();
+    BlockManager bm = cluster.getNamesystem().getBlockManager();
+    try {
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // 1. stop a datanode
+      cluster.stopDataNode(0);
+
+      // 2. create a file
+      Path filePath = new Path("/tmp");
+      DFSTestUtil.createFile(dfs, filePath, 1024, (short) 3, 0L);
+
+      DatanodeManager datanodeManager =
+          cluster.getNamesystem().getBlockManager().getDatanodeManager();
+      ArrayList<DatanodeDescriptor> dnList =
+          new ArrayList<DatanodeDescriptor>();
+      datanodeManager.fetchDatanodes(dnList, dnList, false);
+
+      // 3. restart the stopped datanode
+      cluster.restartDataNode(0);
+
+      // 4. disable the heartbeats
+      for (DataNode dn : cluster.getDataNodes()) {
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+      }
+
+      cluster.getNamesystem().writeLock();
+      try {
+        BlockManagerTestUtil.computeAllPendingWork(bm);
+        BlockManagerTestUtil.updateState(bm);
+        assertEquals(1L, bm.getPendingReconstructionBlocksCount());
+      } finally {
+        cluster.getNamesystem().writeUnlock();
+      }
+
+      // 5.truncate the file whose block exists in pending reconstruction
+      dfs.truncate(filePath, 10);
+      int blocksScheduled = 0;
+      for (DatanodeDescriptor descriptor : dnList) {
+        if (descriptor.getBlocksScheduled() != 0) {
+          blocksScheduled += descriptor.getBlocksScheduled();
+        }
+      }
+      assertEquals(0, blocksScheduled);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
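
In short, the test stops a datanode before writing a replication-3 file so its block is under-replicated, restarts the node with heartbeats disabled so the computed reconstruction work stays pending, then truncates the file and asserts that no datanode is left with a non-zero BlocksScheduled count.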