From 830eb252aaa4fec7ef2ec38cb66f669e8e1ecaa5 Mon Sep 17 00:00:00 2001
From: Jing Zhao <jing9@apache.org>
Date: Tue, 1 Dec 2015 13:05:22 -0800
Subject: [PATCH] HDFS-9485. Make BlockManager#removeFromExcessReplicateMap
 accept BlockInfo instead of Block. Contributed by Mingliang Liu.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                  | 3 +++
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java     | 4 ++--
 .../apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java | 2 +-
 3 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 77d54155ac..3f31f3e5c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1704,6 +1704,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9269. Update the documentation and wrapper for fuse-dfs.
     (Wei-Chiu Chuang via zhz)
 
+    HDFS-9485. Make BlockManager#removeFromExcessReplicateMap accept BlockInfo
+    instead of Block. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ef3a8cc0a3..3033eaa10f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3930,7 +3930,7 @@ public class BlockManager implements BlockStatsMXBean {
     return corruptReplicas.numCorruptReplicas(block);
   }
 
-  public void removeBlockFromMap(Block block) {
+  public void removeBlockFromMap(BlockInfo block) {
     removeFromExcessReplicateMap(block);
     blocksMap.removeBlock(block);
     // If block is removed from blocksMap remove it from corruptReplicasMap
@@ -3940,7 +3940,7 @@
   /**
    * If a block is removed from blocksMap, remove it from excessReplicateMap.
    */
-  private void removeFromExcessReplicateMap(Block block) {
+  private void removeFromExcessReplicateMap(BlockInfo block) {
     for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
       String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
       LightWeightHashSet<BlockInfo> excessReplicas =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index fa87bf285c..0a71d787cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -82,7 +82,7 @@ class FSDirWriteFileOp {
     if (uc == null) {
       return false;
     }
-    fsd.getBlockManager().removeBlockFromMap(block);
+    fsd.getBlockManager().removeBlockFromMap(uc);
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
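
A minimal sketch of the pattern this patch applies, for readers outside the HDFS codebase: narrowing a parameter from a base type (Block) to the subtype the internal maps actually store (BlockInfo) turns a runtime convention into a compile-time guarantee. All names below (MiniBlockManager, addExcess, the toy Block and BlockInfo classes) are simplified stand-ins invented for this example, not the real Hadoop types.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Toy stand-ins: only the subtype relationship mirrors HDFS.
class Block {
  final long id;
  Block(long id) { this.id = id; }
  @Override public boolean equals(Object o) {
    return o instanceof Block && ((Block) o).id == id;
  }
  @Override public int hashCode() { return Long.hashCode(id); }
}

class BlockInfo extends Block {
  BlockInfo(long id) { super(id); }
}

class MiniBlockManager {
  // Keyed by datanode UUID; the values hold BlockInfo, matching what the
  // narrowed method signature now requires of its callers.
  private final Map<String, Set<BlockInfo>> excessReplicateMap = new HashMap<>();

  // Hypothetical helper, only here so the sketch is runnable.
  void addExcess(String datanodeUuid, BlockInfo block) {
    excessReplicateMap.computeIfAbsent(datanodeUuid, k -> new HashSet<>())
        .add(block);
  }

  // After the patch: a bare Block no longer compiles as an argument, so a
  // caller must supply the BlockInfo it already resolved.
  void removeFromExcessReplicateMap(BlockInfo block) {
    for (Set<BlockInfo> excess : excessReplicateMap.values()) {
      excess.remove(block);
    }
  }

  public static void main(String[] args) {
    MiniBlockManager bm = new MiniBlockManager();
    BlockInfo stored = new BlockInfo(42L);
    bm.addExcess("dn-uuid-1", stored);
    bm.removeFromExcessReplicateMap(stored);           // fine: a BlockInfo
    // bm.removeFromExcessReplicateMap(new Block(42L)); // would not compile
  }
}

The FSDirWriteFileOp hunk above shows the caller side of the same idea: that method already holds the resolved under-construction block in uc, so after the change it passes uc directly instead of the looser block reference.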