From d331762f24b3f22f609366740c9c4f449edc61ac Mon Sep 17 00:00:00 2001
From: Konstantin V Shvachko
Date: Tue, 28 Nov 2017 17:14:23 -0800
Subject: [PATCH] HDFS-12832. INode.getFullPathName may throw
 ArrayIndexOutOfBoundsException lead to NameNode exit. Contributed by
 Konstantin Shvachko.

---
 .../hdfs/server/blockmanagement/BlockManager.java |  2 --
 .../blockmanagement/BlockReconstructionWork.java  | 14 ++++++++++----
 .../server/blockmanagement/ErasureCodingWork.java |  4 ++--
 .../server/blockmanagement/ReplicationWork.java   |  4 ++--
 4 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bdabd81688..4986027b04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1825,8 +1825,6 @@ int computeReconstructionWorkForBlocks(
       }
 
       // choose replication targets: NOT HOLDING THE GLOBAL LOCK
-      // It is costly to extract the filename for which chooseTargets is called,
-      // so for now we pass in the block collection itself.
       final BlockPlacementPolicy placementPolicy =
           placementPolicies.getPolicy(rw.getBlock().getBlockType());
       rw.chooseTargets(placementPolicy, storagePolicySuite, excludedNodes);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
index 57121bd0a1..3f591e8bdd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
@@ -32,7 +32,8 @@ abstract class BlockReconstructionWork {
 
   private final BlockInfo block;
 
-  private final BlockCollection bc;
+  private final String srcPath;
+  private final byte storagePolicyID;
 
   /**
    * An erasure coding reconstruction task has multiple source nodes.
@@ -57,7 +58,8 @@ public BlockReconstructionWork(BlockInfo block,
       int additionalReplRequired,
       int priority) {
     this.block = block;
-    this.bc = bc;
+    this.srcPath = bc.getName();
+    this.storagePolicyID = bc.getStoragePolicyID();
     this.srcNodes = srcNodes;
     this.containingNodes = containingNodes;
     this.liveReplicaStorages = liveReplicaStorages;
@@ -94,8 +96,12 @@ public DatanodeDescriptor[] getSrcNodes() {
     return srcNodes;
   }
 
-  BlockCollection getBc() {
-    return bc;
+  public String getSrcPath() {
+    return srcPath;
+  }
+
+  public byte getStoragePolicyID() {
+    return storagePolicyID;
   }
 
   List<DatanodeStorageInfo> getLiveReplicaStorages() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
index 0ae6f0fdd0..a23b1d5f5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
@@ -58,10 +58,10 @@ void chooseTargets(BlockPlacementPolicy blockplacement,
       Set<Node> excludedNodes) {
     // TODO: new placement policy for EC considering multiple writers
     DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
-        getBc().getName(), getAdditionalReplRequired(), getSrcNodes()[0],
+        getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
         getLiveReplicaStorages(), false, excludedNodes,
         getBlock().getNumBytes(),
-        storagePolicySuite.getPolicy(getBc().getStoragePolicyID()), null);
+        storagePolicySuite.getPolicy(getStoragePolicyID()), null);
     setTargets(chosenTargets);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index 6078b1dfb7..26c38cb164 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -44,10 +44,10 @@ assert getSrcNodes().length > 0
         : "At least 1 source node should have been selected";
     try {
       DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
-          getBc().getName(), getAdditionalReplRequired(), getSrcNodes()[0],
+          getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
           getLiveReplicaStorages(), false, excludedNodes,
           getBlock().getNumBytes(),
-          storagePolicySuite.getPolicy(getBc().getStoragePolicyID()),
+          storagePolicySuite.getPolicy(getStoragePolicyID()),
           null);
       setTargets(chosenTargets);
     } finally {
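
Note (not part of the patch): chooseTargets() runs in the second phase of computeReconstructionWorkForBlocks(), after the namesystem write lock has been released (see the retained "NOT HOLDING THE GLOBAL LOCK" comment), so the old getBc().getName() call resolved the full path via INode.getFullPathName() without the lock; a concurrent rename or delete could then trigger the ArrayIndexOutOfBoundsException named in the subject. The patch instead snapshots the source path and storage policy ID in the BlockReconstructionWork constructor, which runs while the lock is still held. The sketch below illustrates that snapshot-under-lock pattern in isolation; every name in it (FileEntry, ReconstructionTask, SnapshotUnderLockExample, the ReentrantReadWriteLock stand-in) is hypothetical and not a Hadoop API.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative stand-in, not a Hadoop class: a mutable "namespace" entry
// whose full path is only safe to resolve while the namespace lock is held.
class FileEntry {
    private String fullPath;
    private final byte storagePolicyId;

    FileEntry(String fullPath, byte storagePolicyId) {
        this.fullPath = fullPath;
        this.storagePolicyId = storagePolicyId;
    }

    String getFullPath() { return fullPath; }           // unsafe outside the lock
    byte getStoragePolicyId() { return storagePolicyId; }
    void renameTo(String newPath) { this.fullPath = newPath; }
}

// The pattern the patch applies: copy the values needed later while the lock
// is still held, instead of keeping a reference back into the namespace.
class ReconstructionTask {
    private final String srcPath;        // immutable copy, safe without the lock
    private final byte storagePolicyId;  // immutable copy, safe without the lock

    ReconstructionTask(FileEntry entry) {
        this.srcPath = entry.getFullPath();
        this.storagePolicyId = entry.getStoragePolicyId();
    }

    void chooseTargets() {
        // Runs without the namespace lock; uses only the captured values.
        System.out.println("choosing targets for " + srcPath
            + " (policy " + storagePolicyId + ")");
    }
}

public class SnapshotUnderLockExample {
    public static void main(String[] args) {
        ReentrantReadWriteLock namespaceLock = new ReentrantReadWriteLock();
        FileEntry entry = new FileEntry("/data/file1", (byte) 7);

        ReconstructionTask task;
        namespaceLock.writeLock().lock();
        try {
            // Phase 1: build the work item while holding the lock.
            task = new ReconstructionTask(entry);
        } finally {
            namespaceLock.writeLock().unlock();
        }

        // A concurrent rename or delete may now change the entry freely...
        entry.renameTo("/data/renamed");

        // ...but target selection no longer reaches back into the namespace.
        task.chooseTargets();
    }
}

The trade-off, acknowledged by the comment the patch deletes from BlockManager.java, is that resolving the full path up front costs a little extra work for every scheduled block, in exchange for never touching the INode tree outside the lock.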