HDFS-17456. Fix incorrect dfsUsed statistics on the DataNode when appending to a file. (#6713). Contributed by fuchaohong.

Reviewed-by: ZanderXu <zanderxu@apache.org>
Signed-off-by: He Xiaoqiao <hexiaoqiao@apache.org>
fuchaohong authored 2024-04-30 12:22:53 +08:00, committed by GitHub
parent ddb805951e
commit 0c9e0b4398
2 changed files with 44 additions and 0 deletions


@@ -1291,7 +1291,9 @@ public ReplicaInPipeline append(String bpid, ReplicaInfo replicaInfo,
     // rename meta file to rbw directory
     // rename block file to rbw directory
+    long oldReplicaLength = replicaInfo.getNumBytes() + replicaInfo.getMetadataLength();
     newReplicaInfo.moveReplicaFrom(replicaInfo, newBlkFile);
+    getBlockPoolSlice(bpid).decDfsUsed(oldReplicaLength);
     reserveSpaceForReplica(bytesReserved);
     return newReplicaInfo;
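
For context, here is a minimal standalone sketch (not Hadoop code; the class name and byte values are illustrative assumptions) of the dfsUsed accounting the hunk above corrects: the finalized replica's block and meta bytes are already counted in dfsUsed, so when append moves the replica into the rbw directory the old length must be subtracted, otherwise those bytes are counted a second time when the appended replica is finalized.

// Minimal sketch of the dfsUsed accounting around append (illustrative values only).
public class DfsUsedAppendSketch {
  public static void main(String[] args) {
    long oldBlockLen = 100, oldMetaLen = 7;   // finalized replica, already counted in dfsUsed
    long newBlockLen = 200, newMetaLen = 11;  // replica after append and re-finalization
    long dfsUsed = oldBlockLen + oldMetaLen;  // 107 bytes counted before the append

    // Without the fix: nothing is subtracted when the replica moves to rbw,
    // and finalize adds the full new length, so the old 107 bytes are double counted.
    long withoutFix = dfsUsed + (newBlockLen + newMetaLen);                           // 318

    // With the fix: decDfsUsed(old block + meta) during append, then the new
    // block + meta length is added on finalize, matching the bytes on disk.
    long withFix = dfsUsed - (oldBlockLen + oldMetaLen) + (newBlockLen + newMetaLen); // 211

    System.out.println("withoutFix=" + withoutFix + ", withFix=" + withFix);
  }
}

This mirrors the assertion in the new test below: newDfsUsed should equal oldDfsUsed + appendLength + (newMetaLength - oldMetaLength).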


@@ -2102,4 +2102,46 @@ public void delayGetMetaDataInputStream() {
       DataNodeFaultInjector.set(oldDnInjector);
     }
   }
+
+  @Test(timeout = 30000)
+  public void testAppend() {
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(1)
+          .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
+          .storagesPerDatanode(2)
+          .build();
+      FileSystem fs = cluster.getFileSystem();
+      DataNode dataNode = cluster.getDataNodes().get(0);
+      // Create a test file.
+      Path filePath = new Path("testData");
+      FsDatasetImpl fsDataSetImpl = (FsDatasetImpl) dataNode.getFSDataset();
+      DFSTestUtil.createFile(fs, filePath, 100, (short) 1, 0);
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
+      ReplicaInfo replicaInfo = fsDataSetImpl.getReplicaInfo(block);
+      long oldMetaLength = replicaInfo.getMetadataLength();
+      long oldDfsUsed = fsDataSetImpl.getDfsUsed();
+      // Append to the file; dfsUsed should grow by exactly the appended bytes
+      // plus the change in metadata length.
+      int appendLength = 100;
+      DFSTestUtil.appendFile(fs, filePath, appendLength);
+      block = DFSTestUtil.getFirstBlock(fs, filePath);
+      replicaInfo = fsDataSetImpl.getReplicaInfo(block);
+      long newMetaLength = replicaInfo.getMetadataLength();
+      long newDfsUsed = fsDataSetImpl.getDfsUsed();
+      assert newDfsUsed == oldDfsUsed + appendLength + (newMetaLength - oldMetaLength) :
+          "When appending a file, the dfsUsed statistics of the datanode are incorrect.";
+    } catch (Exception ex) {
+      LOG.info("Exception in testAppend", ex);
+      fail("Exception while testing testAppend");
+    } finally {
+      // Guard against an NPE when the cluster failed to start.
+      if (cluster != null && cluster.isClusterUp()) {
+        cluster.shutdown();
+      }
+    }
+  }
 }