From f6e1160ef1e946a5f6c9503b06832e6b84c36edb Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Fri, 17 Apr 2015 18:13:47 -0700
Subject: [PATCH] HDFS-8145. Fix the editlog corruption exposed by failed
 TestAddStripedBlocks. Contributed by Jing Zhao.

---
 .../blockmanagement/BlockInfoStriped.java          |  7 ------
 .../namenode/ErasureCodingZoneManager.java         | 12 ++++------
 .../hdfs/server/namenode/FSDirectory.java          |  2 +-
 .../hdfs/server/namenode/FSEditLogLoader.java      | 14 ++++++-----
 .../hdfs/server/namenode/FSImageFormat.java        |  4 +---
 .../server/namenode/FSImageSerialization.java      | 13 +++++------
 .../blockmanagement/TestBlockInfoStriped.java      | 23 ++++++-------------
 .../hdfs/server/namenode/TestFSImage.java          |  2 +-
 8 files changed, 29 insertions(+), 48 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 9f2f5ba5f8..23e3153b57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -244,13 +244,6 @@ public int numNodes() {
     return num;
   }
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeShort(dataBlockNum);
-    out.writeShort(parityBlockNum);
-    super.write(out);
-  }
-
   /**
    * Convert a complete block to an under construction block.
    * @return BlockInfoUnderConstruction - an under construction block.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
index 0a84083f4f..3f94227902 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -54,10 +54,6 @@ public ErasureCodingZoneManager(FSDirectory dir) {
     this.dir = dir;
   }
 
-  boolean getECPolicy(INodesInPath iip) throws IOException {
-    return getECSchema(iip) != null;
-  }
-
   ECSchema getECSchema(INodesInPath iip) throws IOException {
     ECZoneInfo ecZoneInfo = getECZoneInfo(iip);
     return ecZoneInfo == null ? null : ecZoneInfo.getSchema();
@@ -109,7 +105,7 @@ XAttr createErasureCodingZone(String src, ECSchema schema)
       throw new IOException("Attempt to create an erasure coding zone " +
           "for a file.");
     }
-    if (getECPolicy(srcIIP)) {
+    if (getECSchema(srcIIP) != null) {
       throw new IOException("Directory " + src + " is already in an " +
           "erasure coding zone.");
     }
@@ -132,8 +128,10 @@ XAttr createErasureCodingZone(String src, ECSchema schema)
   void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
       throws IOException {
     assert dir.hasReadLock();
-    if (getECPolicy(srcIIP)
-        != getECPolicy(dstIIP)) {
+    final ECSchema srcSchema = getECSchema(srcIIP);
+    final ECSchema dstSchema = getECSchema(dstIIP);
+    if ((srcSchema != null && !srcSchema.equals(dstSchema)) ||
+        (dstSchema != null && !dstSchema.equals(srcSchema))) {
       throw new IOException(
           src + " can't be moved because the source and destination have " +
           "different erasure coding policies.");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c091484874..7392552133 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1237,7 +1237,7 @@ XAttr createErasureCodingZone(String src, ECSchema schema)
     }
   }
 
-  public boolean getECPolicy(INodesInPath iip) throws IOException {
+  public boolean isInECZone(INodesInPath iip) throws IOException {
     return getECSchema(iip) != null;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 47bfa47d7b..592c421619 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -416,7 +417,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       // TODO whether the file is striped should later be retrieved from iip
-      updateBlocks(fsDir, addCloseOp, iip, newFile, fsDir.getECPolicy(iip));
+      updateBlocks(fsDir, addCloseOp, iip, newFile, fsDir.isInECZone(iip));
       break;
     }
     case OP_CLOSE: {
@@ -437,7 +438,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
       // TODO whether the file is striped should later be retrieved from iip
-      updateBlocks(fsDir, addCloseOp, iip, file, fsDir.getECPolicy(iip));
+      updateBlocks(fsDir, addCloseOp, iip, file, fsDir.isInECZone(iip));
 
       // Now close the file
       if (!file.isUnderConstruction() &&
@@ -496,7 +497,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
       // Update in-memory data structures
       // TODO whether the file is striped should later be retrieved from iip
-      updateBlocks(fsDir, updateOp, iip, oldFile, fsDir.getECPolicy(iip));
+      updateBlocks(fsDir, updateOp, iip, oldFile, fsDir.isInECZone(iip));
 
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId);
@@ -514,7 +515,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
       // add the new block to the INodeFile
       // TODO whether the file is striped should later be retrieved from iip
-      addNewBlock(addBlockOp, oldFile, fsDir.getECPolicy(iip));
+      addNewBlock(addBlockOp, oldFile, fsDir.isInECZone(iip));
       break;
     }
     case OP_SET_REPLICATION: {
@@ -1079,8 +1080,9 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
           // is only executed when loading edits written by prior
           // versions of Hadoop. Current versions always log
           // OP_ADD operations as each block is allocated.
-          newBI = new BlockInfoContiguous(newBlock,
-              file.getPreferredBlockReplication());
+          newBI = isStriped ? new BlockInfoStriped(newBlock,
+              HdfsConstants.NUM_DATA_BLOCKS, HdfsConstants.NUM_PARITY_BLOCKS) :
+              new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication());
         }
         fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBI, file);
         file.addBlock(newBI);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 54d0d30c24..6f485f53fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -771,10 +771,8 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
       if (isStriped) {
         blocks = new Block[numBlocks];
         for (int j = 0; j < numBlocks; j++) {
-          short dataBlockNum = in.readShort();
-          short parityBlockNum = in.readShort();
           blocks[j] = new BlockInfoStriped(new Block(),
-              dataBlockNum, parityBlockNum);
+              HdfsConstants.NUM_DATA_BLOCKS, HdfsConstants.NUM_PARITY_BLOCKS);
           blocks[j].readFields(in);
         }
       } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 58244e5bfe..25febd45f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
@@ -139,17 +140,15 @@ static INodeFile readINodeUnderConstruction(
       blocksStriped = new BlockInfoStriped[numBlocks];
       int i = 0;
       for (; i < numBlocks - 1; i++) {
-        short dataBlockNum = in.readShort();
-        short parityBlockNum = in.readShort();
-        blocksStriped[i] = new BlockInfoStriped(new Block(), dataBlockNum,
-            parityBlockNum);
+        blocksStriped[i] = new BlockInfoStriped(new Block(),
+            HdfsConstants.NUM_DATA_BLOCKS,
+            HdfsConstants.NUM_PARITY_BLOCKS);
         blocksStriped[i].readFields(in);
       }
       if (numBlocks > 0) {
-        short dataBlockNum = in.readShort();
-        short parityBlockNum = in.readShort();
         blocksStriped[i] = new BlockInfoStripedUnderConstruction(new Block(),
-            dataBlockNum, parityBlockNum, BlockUCState.UNDER_CONSTRUCTION, null);
+            HdfsConstants.NUM_DATA_BLOCKS, HdfsConstants.NUM_PARITY_BLOCKS,
+            BlockUCState.UNDER_CONSTRUCTION, null);
         blocksStriped[i].readFields(in);
       }
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index c4db5d4d37..3b689ebd4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
@@ -21,7 +21,6 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
 
@@ -43,12 +42,8 @@ public class TestBlockInfoStriped {
   private static final int TOTAL_NUM_BLOCKS = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
   private static final long BASE_ID = -1600;
   private static final Block baseBlock = new Block(BASE_ID);
-  private BlockInfoStriped info;
-
-  @Before
-  public void setup() {
-    info = new BlockInfoStriped(baseBlock, NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS);
-  }
+  private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
+      NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS);
 
   private Block[] createReportedBlocks(int num) {
     Block[] blocks = new Block[num];
@@ -230,17 +225,14 @@ public void testWrite() {
     long blkID = 1;
     long numBytes = 1;
     long generationStamp = 1;
-    short dataBlockNum = 6;
-    short parityBlockNum = 3;
-    ByteBuffer byteBuffer = ByteBuffer.allocate(Long.SIZE/Byte.SIZE*3
-        + Short.SIZE/Byte.SIZE*2);
-    byteBuffer.putShort(dataBlockNum).putShort(parityBlockNum)
-        .putLong(blkID).putLong(numBytes).putLong(generationStamp);
+    ByteBuffer byteBuffer = ByteBuffer.allocate(Long.SIZE / Byte.SIZE * 3);
+    byteBuffer.putLong(blkID).putLong(numBytes).putLong(generationStamp);
 
     ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
     DataOutput out = new DataOutputStream(byteStream);
-    BlockInfoStriped blk = new BlockInfoStriped(new Block(1,1,1),
-        (short)6,(short)3);
+    BlockInfoStriped blk = new BlockInfoStriped(new Block(blkID, numBytes,
+        generationStamp), NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS);
+
     try {
       blk.write(out);
     } catch(Exception ex) {
@@ -249,5 +241,4 @@ public void testWrite() {
     assertEquals(byteBuffer.array().length, byteStream.toByteArray().length);
     assertArrayEquals(byteBuffer.array(), byteStream.toByteArray());
   }
-
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index c482f1faf2..bb37534667 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -158,7 +158,7 @@ private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration con
     for (int i = 0; i < stripedBlks.length; i++) {
       stripedBlks[i] = new BlockInfoStriped(
           new Block(stripedBlkId + i, preferredBlockSize, timestamp),
-          (short) 6, (short) 3);
+          HdfsConstants.NUM_DATA_BLOCKS, HdfsConstants.NUM_PARITY_BLOCKS);
       file.getStripedBlocksFeature().addBlock(stripedBlks[i]);
     }
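
Reviewer note (illustrative sketch, not part of the patch): the corruption fixed
here comes from the removed BlockInfoStriped#write override, which prepended two
shorts (dataBlockNum, parityBlockNum) to the fixed three-long Block record that
edit-log readers consume, so striped entries desynchronized the stream. The
standalone Java sketch below mirrors that record layout; the class name
BlockWireFormatSketch and its main() are hypothetical demo code, not Hadoop API.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class BlockWireFormatSketch {
      // After this patch a striped block serializes exactly like a contiguous
      // one: three longs (id, numBytes, generationStamp), 24 bytes total.
      static void writeBlock(DataOutputStream out, long id, long numBytes,
          long genStamp) throws IOException {
        out.writeLong(id);
        out.writeLong(numBytes);
        out.writeLong(genStamp);
        // The removed override emitted writeShort(dataBlockNum) and
        // writeShort(parityBlockNum) before these longs, making striped
        // records 4 bytes longer than readers of the shared format expected.
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeBlock(new DataOutputStream(bytes), 1L, 1L, 1L);
        System.out.println(bytes.size()); // 24, as the updated testWrite expects
      }
    }

This is also why the loaders above now pass HdfsConstants.NUM_DATA_BLOCKS and
HdfsConstants.NUM_PARITY_BLOCKS to the BlockInfoStriped constructors instead of
reading the two counts back from the stream.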
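A second note on checkMoveValidity: comparing getECPolicy() booleans let a rename
slip between two zones that use different schemas (both sides returned true),
while the new code compares the schemas themselves with a null-safe test. A
minimal equivalent rendering (hypothetical class name, Object standing in for
ECSchema, and assuming ECSchema#equals is symmetric) is:

    import java.io.IOException;
    import java.util.Objects;

    final class SchemaMoveCheckSketch {
      // Objects.equals(a, b) is true when both are null or a.equals(b) holds,
      // matching the patch's two-sided null check when equals() is symmetric.
      static void checkMoveValidity(Object srcSchema, Object dstSchema,
          String src) throws IOException {
        if (!Objects.equals(srcSchema, dstSchema)) {
          throw new IOException(src + " can't be moved because the source "
              + "and destination have different erasure coding policies.");
        }
      }
    }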