From c9e0268216584f1df1a7c6cd25d2cfb2bc6d1d3c Mon Sep 17 00:00:00 2001
From: Zhe Zhang
Date: Tue, 26 May 2015 12:32:16 -0700
Subject: [PATCH] Addendum fix for HDFS-7912.

---
 .../server/blockmanagement/BlockManager.java       |  2 +-
 .../hadoop/hdfs/TestRecoverStripedFile.java        |  3 ++-
 .../TestPendingReplication.java                    |  5 +++--
 .../blockmanagement/TestReplicationPolicy.java     | 18 ++++--------------
 4 files changed, 10 insertions(+), 18 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 8f1f6b71c9..32757f925b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1819,7 +1819,7 @@ private void processPendingReplications() {
            * Use the blockinfo from the blocksmap to be certain we're working
            * with the most up-to-date block information (e.g. genstamp).
            */
-          BlockInfoContiguous bi = blocksMap.getStoredBlock(timedOutItems[i]);
+          BlockInfo bi = blocksMap.getStoredBlock(timedOutItems[i]);
           if (bi == null) {
             continue;
           }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
index e5e324cfbb..9e44761d20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRecoverStripedFile.java
@@ -50,6 +50,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
 
 public class TestRecoverStripedFile {
   public static final Log LOG = LogFactory.getLog(TestRecoverStripedFile.class);
@@ -340,7 +341,7 @@ private LocatedBlocks getLocatedBlocks(Path file) throws IOException {
 
   private void testCreateStripedFile(Path file, int dataLen) throws IOException {
     final byte[] data = new byte[dataLen];
-    DFSUtil.getRandom().nextBytes(data);
+    ThreadLocalRandom.current().nextBytes(data);
     writeContents(file, data);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
index b714afa684..c00037b071 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
@@ -186,7 +186,7 @@ public void testProcessPendingReplications() throws Exception {
 
       block = new Block(1, 1, 0);
       blockInfo = new BlockInfoContiguous(block, (short) 3);
-      pendingReplications.increment(block,
+      pendingReplications.increment(blockInfo,
           DatanodeStorageInfo.toDatanodeDescriptors(
               DFSTestUtil.createDatanodeStorageInfos(1)));
       BlockCollection bc = Mockito.mock(BlockCollection.class);
@@ -201,7 +201,8 @@ public void testProcessPendingReplications() throws Exception {
       // Add a second block to pendingReplications that has no
       // corresponding entry in blocksmap
       block = new Block(2, 2, 0);
-      pendingReplications.increment(block,
+      blockInfo = new BlockInfoContiguous(block, (short) 3);
+      pendingReplications.increment(blockInfo,
           DatanodeStorageInfo.toDatanodeDescriptors(
               DFSTestUtil.createDatanodeStorageInfos(1)));
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 1916b3cd3f..6e92264aa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1209,13 +1209,8 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
 
-<<<<<<< HEAD
-    Block block1 = new Block(ThreadLocalRandom.current().nextLong());
-    Block block2 = new Block(ThreadLocalRandom.current().nextLong());
-=======
-    BlockInfo block1 = genBlockInfo(random.nextLong());
-    BlockInfo block2 = genBlockInfo(random.nextLong());
->>>>>>> 3e6f458... HDFS-7912. Erasure Coding: track BlockInfo instead of Block in UnderReplicatedBlocks and PendingReplicationBlocks. Contributed by Jing Zhao.
+    BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
+    BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
 
     // Adding QUEUE_UNDER_REPLICATED block
     underReplicatedBlocks.add(block1, 0, 1, 1);
@@ -1277,13 +1272,8 @@ public void testupdateNeededReplicationsDoesNotCauseSkippedReplication()
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
 
-<<<<<<< HEAD
-    Block block1 = new Block(ThreadLocalRandom.current().nextLong());
-    Block block2 = new Block(ThreadLocalRandom.current().nextLong());
-=======
-    BlockInfo block1 = genBlockInfo(random.nextLong());
-    BlockInfo block2 = genBlockInfo(random.nextLong());
->>>>>>> 3e6f458... HDFS-7912. Erasure Coding: track BlockInfo instead of Block in UnderReplicatedBlocks and PendingReplicationBlocks. Contributed by Jing Zhao.
+    BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
+    BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
 
     // Adding QUEUE_UNDER_REPLICATED block
     underReplicatedBlocks.add(block1, 0, 1, 1);