diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 140be77da6..8c26ee7e53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -951,6 +951,9 @@ Release 2.9.0 - UNRELEASED
     HDFS-9601. NNThroughputBenchmark.BlockReportStats should handle
     NotReplicatedYetException on adding block (iwasakims)
 
+    HDFS-9618. Fix mismatch between log level and guard in
+    BlockManager#computeRecoveryWorkForBlocks (iwasakims)
+
     HDFS-9621. getListing wrongly associates Erasure Coding policy to
     pre-existing replicated files under an EC directory. (jing9)
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bf63708233..d255471419 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1488,7 +1488,7 @@ int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
       namesystem.writeUnlock();
     }
 
-    if (blockLog.isInfoEnabled()) {
+    if (blockLog.isDebugEnabled()) {
       // log which blocks have been scheduled for replication
       for(BlockRecoveryWork rw : recovWork){
         DatanodeStorageInfo[] targets = rw.getTargets();
@@ -1502,8 +1502,7 @@ int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
              rw.getBlock(), targetList);
         }
       }
-    }
-    if (blockLog.isDebugEnabled()) {
+
       blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
           neededReplications.size(), pendingReplications.size());
     }
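
For context, a minimal, self-contained sketch of the guarded-logging idiom this patch enforces; the class, logger, method, and message names below are illustrative only and are not taken from BlockManager. The guard should test the same level as the calls it protects, and with SLF4J's parameterized {} messages a guard is only worth keeping when the log arguments themselves are expensive to compute.

import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch only -- the names here are hypothetical, not Hadoop code.
public class GuardedLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(GuardedLoggingSketch.class);

  void report(List<String> scheduledBlocks, int needed, int pending) {
    // The guard tests the same level (DEBUG) as the call inside it; it is
    // worth keeping because joining the list is not free.
    if (LOG.isDebugEnabled()) {
      LOG.debug("scheduled for replication: {}",
          String.join(", ", scheduledBlocks));
    }

    // No guard needed here: with {} placeholders the message is formatted
    // only when DEBUG is enabled, and the int arguments are cheap to pass.
    LOG.debug("neededReplications = {} pendingReplications = {}",
        needed, pending);
  }
}

This mirrors what the patch does: the guard is retargeted from INFO to DEBUG so it matches the blockLog.debug calls it wraps, and the second, now-redundant isDebugEnabled block is folded into the first.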