From 76bbd173749f2af4f17946fb37c4c72e2de26764 Mon Sep 17 00:00:00 2001
From: qinyuren <1476659627@qq.com>
Date: Wed, 20 Apr 2022 14:04:27 +0800
Subject: [PATCH] HDFS-16544. EC decoding failed due to invalid buffer (#4179)

---
 .../hadoop/hdfs/StatefulStripeReader.java     |  5 ---
 .../hdfs/TestReadStripedFileWithDecoding.java | 38 +++++++++++++++++++
 2 files changed, 38 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
index 9fb86e513e..730307b443 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
@@ -74,11 +74,6 @@ void prepareDecodeInputs() {
   boolean prepareParityChunk(int index) {
     Preconditions.checkState(index >= dataBlkNum
         && alignedStripe.chunks[index] == null);
-    if (readerInfos[index] != null && readerInfos[index].shouldSkip) {
-      alignedStripe.chunks[index] = new StripingChunk(StripingChunk.MISSING);
-      // we have failed the block reader before
-      return false;
-    }
     final int parityIndex = index - dataBlkNum;
     ByteBuffer buf = dfsStripedInputStream.getParityBuffer().duplicate();
     buf.position(cellSize * parityIndex);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
index 093138beb6..f80cb01bab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
@@ -19,7 +19,9 @@
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -183,4 +185,40 @@ public void testMoreThanOneCorruptedBlock() throws IOException {
           buffer);
     }
   }
+
+  @Test
+  public void testReadWithCorruptedDataBlockAndParityBlock() throws IOException {
+    final Path file = new Path("/corruptedDataBlockAndParityBlock");
+    final int length = BLOCK_SIZE * NUM_DATA_UNITS;
+    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
+    DFSTestUtil.writeFile(dfs, file, bytes);
+
+    // set one dataBlock and the first parityBlock corrupted
+    int dataBlkDelNum = 1;
+    int parityBlkDelNum = 1;
+    int recoverBlkNum = dataBlkDelNum + parityBlkDelNum;
+    int[] dataBlkIndices = {0};
+    int[] parityBlkIndices = {6};
+
+    LocatedBlocks locatedBlocks = ReadStripedFileWithDecodingHelper.getLocatedBlocks(dfs, file);
+    LocatedStripedBlock lastBlock =
+        (LocatedStripedBlock)locatedBlocks.getLastLocatedBlock();
+
+    int[] delBlkIndices = new int[recoverBlkNum];
+    System.arraycopy(dataBlkIndices, 0,
+        delBlkIndices, 0, dataBlkIndices.length);
+    System.arraycopy(parityBlkIndices, 0,
+        delBlkIndices, dataBlkIndices.length, parityBlkIndices.length);
+    ExtendedBlock[] delBlocks = new ExtendedBlock[recoverBlkNum];
+    for (int i = 0; i < recoverBlkNum; i++) {
+      delBlocks[i] = StripedBlockUtil
+          .constructInternalBlock(lastBlock.getBlock(),
+              CELL_SIZE, NUM_DATA_UNITS, delBlkIndices[i]);
+      cluster.corruptBlockOnDataNodes(delBlocks[i]);
+    }
+
+    byte[] buffer = new byte[length + 100];
+    StripedFileTestUtil.verifyStatefulRead(dfs, file, length, bytes,
+        buffer);
+  }
 }
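
A note on the buffer handling involved (illustrative, not part of the patch): the retained
lines of prepareParityChunk() duplicate the shared parity buffer and position it at
cellSize * parityIndex so the decoder is handed a valid input slice for that parity cell.
The standalone sketch below uses only java.nio to show the same duplicate-and-position
pattern; the toy cell size, the explicit limit()/slice() step, and the class name are
assumptions made for this example and are not Hadoop code.

import java.nio.ByteBuffer;

public class ParityBufferSliceDemo {
  public static void main(String[] args) {
    final int cellSize = 4;        // toy cell size; HDFS cells are much larger
    final int numParityUnits = 3;  // e.g. an RS-6-3 policy has three parity units
    ByteBuffer parityBuffer = ByteBuffer.allocate(cellSize * numParityUnits);
    for (int i = 0; i < parityBuffer.capacity(); i++) {
      parityBuffer.put((byte) i);
    }

    // Carve out the cell belonging to the second parity unit without disturbing
    // the shared buffer's own position and limit.
    int parityIndex = 1;
    ByteBuffer buf = parityBuffer.duplicate();
    buf.position(cellSize * parityIndex);
    buf.limit(cellSize * (parityIndex + 1));
    ByteBuffer cell = buf.slice();

    System.out.println("cell capacity = " + cell.capacity()); // prints 4
    System.out.println("first byte    = " + cell.get(0));     // prints 4
  }
}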