From 4852a90e4b077ece2d68595210e62959a9923683 Mon Sep 17 00:00:00 2001
From: Surendra Singh Lilhore
Date: Fri, 13 Sep 2019 00:17:13 +0530
Subject: [PATCH] HDFS-14754. Erasure Coding : The number of Under-Replicated
 Blocks never reduced. Contributed by hemanthboyina.

---
 .../server/blockmanagement/BlockManager.java  |   3 +-
 .../server/namenode/TestRedudantBlocks.java   | 137 ++++++++++++++++++
 2 files changed, 138 insertions(+), 2 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3f718a04b2..6c349ffd5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3437,8 +3437,7 @@ private Block addStoredBlock(final BlockInfo block,
   private boolean shouldProcessExtraRedundancy(NumberReplicas num,
       int expectedNum) {
     final int numCurrent = num.liveReplicas();
-    return numCurrent > expectedNum ||
-        (numCurrent == expectedNum && num.redundantInternalBlocks() > 0);
+    return numCurrent > expectedNum || num.redundantInternalBlocks() > 0;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java
new file mode 100644
index 0000000000..943699aaa7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestRedudantBlocks.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test RedudantBlocks.
+ */
+public class TestRedudantBlocks {
+
+  private MiniDFSCluster cluster;
+  private DistributedFileSystem fs;
+  private final Path dirPath = new Path("/striped");
+  private Path filePath = new Path(dirPath, "file");
+  private final ErasureCodingPolicy ecPolicy =
+      SystemErasureCodingPolicies.getPolicies().get(1);
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final short groupSize = (short) (dataBlocks + parityBlocks);
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 4;
+  private final int blockSize = stripesPerBlock * cellSize;
+  private final int numDNs = groupSize + 1;
+
+  @Before
+  public void setup() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    // disable block recovery
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    SimulatedFSDataset.setFactory(conf);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    fs.enableErasureCodingPolicy(ecPolicy.getName());
+    fs.mkdirs(dirPath);
+    fs.getClient().setErasureCodingPolicy(dirPath.toString(),
+        ecPolicy.getName());
+  }
+
+  @After
+  public void tearDown() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test
+  public void testProcessOverReplicatedAndRedudantBlock() throws Exception {
+    long fileLen = dataBlocks * blockSize;
+    DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock,
+        false);
+    LocatedBlocks lbs = cluster.getNameNodeRpc()
+        .getBlockLocations(filePath.toString(), 0, fileLen);
+    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
+    long gs = bg.getBlock().getGenerationStamp();
+    String bpid = bg.getBlock().getBlockPoolId();
+    long groupId = bg.getBlock().getBlockId();
+    Block blk = new Block(groupId, blockSize, gs);
+    int i = 0;
+    // one missing block
+    for (; i < groupSize - 1; i++) {
+      blk.setBlockId(groupId + i);
+      cluster.injectBlocks(i, Arrays.asList(blk), bpid);
+    }
+    cluster.triggerBlockReports();
+    // one redundant block
+    blk.setBlockId(groupId + 2);
+    cluster.injectBlocks(i, Arrays.asList(blk), bpid);
+
+    // update blocksMap
+    cluster.triggerBlockReports();
+    // add to invalidates
+    cluster.triggerHeartbeats();
+    // datanode delete block
+    cluster.triggerHeartbeats();
+    // update blocksMap
+    cluster.triggerBlockReports();
+
+    HashSet<Long> blockIdsSet = new HashSet<>();
+
+    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0,
+        fileLen);
+    bg = (LocatedStripedBlock) (lbs.get(0));
+
+    final LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(bg,
+        cellSize, dataBlocks, parityBlocks);
+
+    for (LocatedBlock dn : blocks) {
+      if (dn != null) {
+        blockIdsSet.add(dn.getBlock().getBlockId());
+      }
+    }
+    assertEquals(groupSize, blockIdsSet.size());
+
+  }
+}
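
Why the one-line change works: before this patch, redundant internal blocks of a striped group were only processed once liveReplicas() had reached the expected group size. A group that is missing one internal block while carrying a duplicate copy of another therefore never had its duplicate invalidated, and the group stayed under-replicated. The sketch below is a minimal standalone illustration of the predicate change, not Hadoop code: the NumberReplicas class here is a hypothetical stub standing in for org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas, and it assumes (as BlockManager does when counting striped groups) that liveReplicas() counts distinct live internal block indices while extra copies of an index are reported via redundantInternalBlocks().

// PredicateSketch.java -- standalone illustration of the HDFS-14754
// predicate change; NumberReplicas is a minimal stand-in, not the real class.
public class PredicateSketch {

  static final class NumberReplicas {
    private final int live;              // distinct live internal blocks
    private final int redundantInternal; // extra copies of some index

    NumberReplicas(int live, int redundantInternal) {
      this.live = live;
      this.redundantInternal = redundantInternal;
    }

    int liveReplicas() { return live; }
    int redundantInternalBlocks() { return redundantInternal; }
  }

  // Before the patch: duplicates were handled only once the group was full.
  static boolean shouldProcessExtraRedundancyOld(NumberReplicas num,
      int expectedNum) {
    final int numCurrent = num.liveReplicas();
    return numCurrent > expectedNum
        || (numCurrent == expectedNum && num.redundantInternalBlocks() > 0);
  }

  // After the patch: any redundant internal block triggers processing.
  static boolean shouldProcessExtraRedundancyNew(NumberReplicas num,
      int expectedNum) {
    final int numCurrent = num.liveReplicas();
    return numCurrent > expectedNum || num.redundantInternalBlocks() > 0;
  }

  public static void main(String[] args) {
    // The test's layout: an RS(3,2) group expects 5 internal blocks; indices
    // 0-3 are live and a second copy of index 2 is injected, giving
    // 4 distinct live internal blocks and 1 redundant internal block.
    NumberReplicas num = new NumberReplicas(4, 1);
    System.out.println(shouldProcessExtraRedundancyOld(num, 5)); // false
    System.out.println(shouldProcessExtraRedundancyNew(num, 5)); // true
  }
}

In the test above, SystemErasureCodingPolicies.getPolicies().get(1) resolves to the RS(3,2) system policy, so groupSize is 5. Under the old predicate the injected duplicate of index 2 was never queued for invalidation, which is why the under-replicated count never dropped; with the new predicate, the alternating triggerBlockReports()/triggerHeartbeats() calls drive the report, invalidate, delete, and re-report cycle, and the final assertion sees all groupSize distinct internal block IDs once the duplicate is removed and the missing index reconstructed.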