HDFS-17177. ErasureCodingWork should ignore the deleted block while reconstructing blocks (#6024)

This commit is contained in:
huhaiyang 2023-09-11 11:57:52 +08:00 committed by GitHub
parent c5e9510b54
commit 3bd6a751ed
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 16 additions and 7 deletions

View File

@ -48,7 +48,7 @@ public ErasureCodingWork(String blockPoolId, BlockInfo block,
this.blockPoolId = blockPoolId;
this.liveBlockIndices = liveBlockIndices;
this.liveBusyBlockIndices = liveBusyBlockIndices;
this.excludeReconstructedIndices=excludeReconstrutedIndices;
this.excludeReconstructedIndices = excludeReconstrutedIndices;
LOG.debug("Creating an ErasureCodingWork to {} reconstruct ",
block);
}
@ -62,10 +62,18 @@ void chooseTargets(BlockPlacementPolicy blockplacement,
BlockStoragePolicySuite storagePolicySuite,
Set<Node> excludedNodes) {
// TODO: new placement policy for EC considering multiple writers
DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
storagePolicySuite.getPolicy(getStoragePolicyID()), null);
DatanodeStorageInfo[] chosenTargets = null;
// HDFS-14720. If the block is deleted, the block size will become
// BlockCommand.NO_ACK (Long.MAX_VALUE). Such blocks do not need
// to be sent for replication or reconstruction.
if (!getBlock().isDeleted()) {
chosenTargets = blockplacement.chooseTarget(
getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
storagePolicySuite.getPolicy(getStoragePolicyID()), null);
} else {
LOG.warn("ErasureCodingWork could not need choose targets for {}", getBlock());
}
setTargets(chosenTargets);
}

View File

@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.net.Node;
import java.util.List;
@ -47,11 +46,13 @@ assert getSrcNodes().length > 0
// HDFS-14720. If the block is deleted, the block size will become
// BlockCommand.NO_ACK (Long.MAX_VALUE). Such blocks do not need
// to be sent for replication or reconstruction.
if (getBlock().getNumBytes() != BlockCommand.NO_ACK) {
if (!getBlock().isDeleted()) {
chosenTargets = blockplacement.chooseTarget(getSrcPath(),
getAdditionalReplRequired(), getSrcNodes()[0],
getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
storagePolicySuite.getPolicy(getStoragePolicyID()), null);
} else {
LOG.warn("ReplicationWork could not need choose targets for {}", getBlock());
}
setTargets(chosenTargets);
} finally {