HDFS-14720. DataNode shouldn't report block as bad block if the block length is Long.MAX_VALUE. Contributed by hemanthboyina.
parent 3d249301f4
commit 320008bb7c
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.net.Node;
 
 import java.util.List;
@@ -42,10 +43,16 @@ void chooseTargets(BlockPlacementPolicy blockplacement,
     assert getSrcNodes().length > 0
         : "At least 1 source node should have been selected";
     try {
-      DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
-          getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
-          getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
-          storagePolicySuite.getPolicy(getStoragePolicyID()), null);
+      DatanodeStorageInfo[] chosenTargets = null;
+      // HDFS-14720: if the block has been deleted, its reported length becomes
+      // BlockCommand.NO_ACK (Long.MAX_VALUE). Such a block does not need to be
+      // sent for replication or reconstruction.
+      if (getBlock().getNumBytes() != BlockCommand.NO_ACK) {
+        chosenTargets = blockplacement.chooseTarget(getSrcPath(),
+            getAdditionalReplRequired(), getSrcNodes()[0],
+            getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
+            storagePolicySuite.getPolicy(getStoragePolicyID()), null);
+      }
       setTargets(chosenTargets);
     } finally {
       getSrcNodes()[0].decrementPendingReplicationWithoutTargets();
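To make the effect of the new guard concrete, the following is a minimal, self-contained sketch of the same check. NoAckGuardSketch, BlockStub, and shouldScheduleReplication are hypothetical names introduced here for illustration only; the fact taken from the Hadoop source (and the comment in the diff above) is that BlockCommand.NO_ACK equals Long.MAX_VALUE and is used as the sentinel length of a deleted block.

// Minimal sketch, not the actual Hadoop class: BlockStub and
// shouldScheduleReplication are hypothetical stand-ins for illustration.
public class NoAckGuardSketch {

  // In Hadoop, BlockCommand.NO_ACK is Long.MAX_VALUE; it serves as a
  // sentinel length for blocks that have already been deleted.
  static final long NO_ACK = Long.MAX_VALUE;

  // Hypothetical stand-in for a block whose length may carry the sentinel.
  static final class BlockStub {
    final long blockId;
    final long numBytes;
    BlockStub(long blockId, long numBytes) {
      this.blockId = blockId;
      this.numBytes = numBytes;
    }
  }

  // Mirrors the committed check: a block whose length equals NO_ACK was
  // deleted, so no replication or reconstruction targets should be chosen.
  static boolean shouldScheduleReplication(BlockStub block) {
    return block.numBytes != NO_ACK;
  }

  public static void main(String[] args) {
    BlockStub live = new BlockStub(1L, 128L * 1024 * 1024); // normal 128 MB block
    BlockStub deleted = new BlockStub(2L, NO_ACK);          // deleted-block sentinel
    System.out.println(shouldScheduleReplication(live));    // prints: true
    System.out.println(shouldScheduleReplication(deleted)); // prints: false
  }
}

Before this change, chooseTarget was invoked unconditionally, so a block that had already been deleted (length set to the NO_ACK sentinel) could still have replication targets chosen and later be reported as a bad block, which is what the commit title describes.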