HDFS-8559. Erasure Coding: fix non-protobuf fsimage for striped blocks. (Jing Zhao via yliu)

yliu 2015-06-14 15:39:19 +08:00
parent 683332b36d
commit 49d5cff490
4 changed files with 33 additions and 126 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt

@@ -299,3 +299,6 @@
     HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from
     StripedBlockProto. (Yi Liu via jing9)
+
+    HDFS-8559. Erasure Coding: fix non-protobuf fsimage for striped blocks.
+    (Jing Zhao via yliu)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -365,12 +365,6 @@ public void load(File curFile) throws IOException {
         long maxSequentialBlockId = in.readLong();
         namesystem.getBlockIdManager().setLastAllocatedContiguousBlockId(
             maxSequentialBlockId);
-        if (NameNodeLayoutVersion.supports(
-            NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)) {
-          final long maxStripedBlockId = in.readLong();
-          namesystem.getBlockIdManager().setLastAllocatedStripedBlockId(
-              maxStripedBlockId);
-        }
       } else {
         long startingGenStamp = namesystem.getBlockIdManager()
@@ -759,32 +753,17 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
       atime = in.readLong();
     }
     final long blockSize = in.readLong();
-    final boolean isStriped = NameNodeLayoutVersion.supports(
-        NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)
-        && (in.readBoolean());
     final int numBlocks = in.readInt();
-    // TODO: ECSchema can be restored from persisted file (HDFS-7859).
-    final ECSchema schema = isStriped ?
-        ErasureCodingSchemaManager.getSystemDefaultSchema() : null;
 
     if (numBlocks >= 0) {
       // file
       // read blocks
-      Block[] blocks;
-      if (isStriped) {
-        blocks = new Block[numBlocks];
-        for (int j = 0; j < numBlocks; j++) {
-          blocks[j] = new BlockInfoStriped(new Block(), schema);
-          blocks[j].readFields(in);
-        }
-      } else {
-        blocks = new BlockInfoContiguous[numBlocks];
-        for (int j = 0; j < numBlocks; j++) {
-          blocks[j] = new BlockInfoContiguous(replication);
-          blocks[j].readFields(in);
-        }
-      }
+      Block[] blocks = new BlockInfoContiguous[numBlocks];
+      for (int j = 0; j < numBlocks; j++) {
+        blocks[j] = new BlockInfoContiguous(replication);
+        blocks[j].readFields(in);
+      }
 
       String clientName = "";
       String clientMachine = "";
@@ -803,16 +782,8 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
           // convert the last block to BlockUC
           if (blocks.length > 0) {
             Block lastBlk = blocks[blocks.length - 1];
-            if (isStriped){
-              BlockInfoStriped lastStripedBlk = (BlockInfoStriped) lastBlk;
-              blocks[blocks.length - 1]
-                  = new BlockInfoStripedUnderConstruction(lastBlk,
-                      lastStripedBlk.getSchema());
-            } else {
-              blocks[blocks.length - 1]
-                  = new BlockInfoContiguousUnderConstruction(lastBlk,
-                      replication);
-            }
+            blocks[blocks.length - 1] =
+                new BlockInfoContiguousUnderConstruction(lastBlk, replication);
           }
         }
       }
@@ -825,19 +796,9 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
         counter.increment();
       }
-      INodeFile file;
-      if (isStriped) {
-        file = new INodeFile(inodeId, localName, permissions, modificationTime,
-            atime, new BlockInfoContiguous[0], (short) 0, blockSize);
-        file.addStripedBlocksFeature();
-        for (Block block : blocks) {
-          file.getStripedBlocksFeature().addBlock((BlockInfoStriped) block);
-        }
-      } else {
-        file = new INodeFile(inodeId, localName, permissions,
-            modificationTime, atime, (BlockInfoContiguous[]) blocks,
-            replication, blockSize);
-      }
+      INodeFile file = new INodeFile(inodeId, localName, permissions,
+          modificationTime, atime, (BlockInfoContiguous[]) blocks,
+          replication, blockSize);
       if (underConstruction) {
         file.toUnderConstruction(clientName, clientMachine);
       }
@@ -1315,7 +1276,6 @@ void save(File newFile, FSImageCompression compression) throws IOException {
       out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV2());
       out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
       out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedContiguousBlockId());
-      out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedStripedBlockId());
       out.writeLong(context.getTxId());
       out.writeLong(sourceNamesystem.dir.getLastInodeId());
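
Note: with the striped branches removed, the legacy (non-protobuf) loader treats every file's block list the same way: an int block count followed by one fixed-width triple per block, which is what Block.readFields() consumes. A minimal sketch of that layout, assuming a file entry (numBlocks >= 0; directories store a negative count) and a DataInput positioned at the count; the harness class below is illustrative only, not part of the patch:

import java.io.DataInput;
import java.io.IOException;

class LegacyBlockListSketch {
  // Mirrors the simplified loop in loadINode: every block is decoded
  // identically, with no striped/contiguous distinction left on disk.
  static long[][] readBlockList(DataInput in) throws IOException {
    int numBlocks = in.readInt();      // count written by writeBlocks()
    long[][] blocks = new long[numBlocks][3];
    for (int j = 0; j < numBlocks; j++) {
      blocks[j][0] = in.readLong();    // block id
      blocks[j][1] = in.readLong();    // length in bytes
      blocks[j][2] = in.readLong();    // generation stamp
    }
    return blocks;
  }
}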

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java

@@ -32,12 +32,9 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -50,7 +47,6 @@
 import org.apache.hadoop.io.ShortWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
@@ -128,36 +124,14 @@ static INodeFile readINodeUnderConstruction(
     short blockReplication = in.readShort();
     long modificationTime = in.readLong();
     long preferredBlockSize = in.readLong();
-    final boolean isStriped = NameNodeLayoutVersion.supports(
-        NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)
-        && (in.readBoolean());
-    // TODO: ECSchema can be restored from persisted file (HDFS-7859).
-    final ECSchema schema = isStriped ?
-        ErasureCodingSchemaManager.getSystemDefaultSchema() : null;
     int numBlocks = in.readInt();
-    final BlockInfoContiguous[] blocksContiguous;
-    BlockInfoStriped[] blocksStriped = null;
-    if (isStriped) {
-      blocksContiguous = new BlockInfoContiguous[0];
-      blocksStriped = new BlockInfoStriped[numBlocks];
-      int i = 0;
-      for (; i < numBlocks - 1; i++) {
-        blocksStriped[i] = new BlockInfoStriped(new Block(), schema);
-        blocksStriped[i].readFields(in);
-      }
-      if (numBlocks > 0) {
-        blocksStriped[i] = new BlockInfoStripedUnderConstruction(new Block(),
-            schema, BlockUCState.UNDER_CONSTRUCTION, null);
-        blocksStriped[i].readFields(in);
-      }
-    } else {
-      blocksContiguous = new BlockInfoContiguous[numBlocks];
-      Block blk = new Block();
-      int i = 0;
-      for (; i < numBlocks-1; i++) {
-        blk.readFields(in);
-        blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
-      }
+    final BlockInfoContiguous[] blocksContiguous =
+        new BlockInfoContiguous[numBlocks];
+    Block blk = new Block();
+    int i = 0;
+    for (; i < numBlocks - 1; i++) {
+      blk.readFields(in);
+      blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
+    }
@@ -167,7 +141,6 @@ static INodeFile readINodeUnderConstruction(
       blocksContiguous[i] = new BlockInfoContiguousUnderConstruction(
           blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null);
     }
-    }
 
     PermissionStatus perm = PermissionStatus.read(in);
     String clientName = readString(in);
@@ -180,19 +153,8 @@ static INodeFile readINodeUnderConstruction(
     // Images in the pre-protobuf format will not have the lazyPersist flag,
     // so it is safe to pass false always.
-    INodeFile file;
-    if (isStriped) {
-      file = new INodeFile(inodeId, name, perm, modificationTime,
-          modificationTime, blocksContiguous, (short) 0, preferredBlockSize);
-      file.addStripedBlocksFeature();
-      for (int i = 0; i < numBlocks; i++) {
-        file.getStripedBlocksFeature().addBlock(blocksStriped[i]);
-      }
-    } else {
-      file = new INodeFile(inodeId, name, perm, modificationTime,
-          modificationTime, blocksContiguous, blockReplication,
-          preferredBlockSize);
-    }
+    INodeFile file = new INodeFile(inodeId, name, perm, modificationTime,
+        modificationTime, blocksContiguous, blockReplication, preferredBlockSize);
     file.toUnderConstruction(clientName, clientMachine);
     return file;
   }
@@ -207,8 +169,7 @@ static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons,
     out.writeShort(cons.getFileReplication());
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
-    // whether the file has striped blocks
-    out.writeBoolean(cons.isStriped());
     writeBlocks(cons.getBlocks(), out);
     cons.getPermissionStatus().write(out);
@@ -233,8 +194,7 @@ public static void writeINodeFile(INodeFile file, DataOutput out,
     out.writeLong(file.getModificationTime());
     out.writeLong(file.getAccessTime());
     out.writeLong(file.getPreferredBlockSize());
-    // whether the file has striped blocks
-    out.writeBoolean(file.isStriped());
     writeBlocks(file.getBlocks(), out);
     SnapshotFSImageFormat.saveFileDiffList(file, out);
@@ -347,7 +307,7 @@ private static void writeINodeReference(INodeReference ref, DataOutput out,
     if (!isWithName) {
       Preconditions.checkState(ref instanceof INodeReference.DstReference);
       // dst snapshot id
-      out.writeInt(((INodeReference.DstReference) ref).getDstSnapshotId());
+      out.writeInt(ref.getDstSnapshotId());
     } else {
       out.writeInt(((INodeReference.WithName) ref).getLastSnapshotId());
     }
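
Note: on the writer side, dropping the out.writeBoolean(isStriped()) calls restores the pre-erasure-coding field order of the legacy image, so old readers and this writer agree again: fixed-width scalars followed directly by the block list. A rough sketch of that order, assuming the three-long block encoding noted earlier; the class and method below are hypothetical, name and permission encoding are omitted, and the field order mirrors the writeINodeFile hunks above:

import java.io.DataOutput;
import java.io.IOException;

class LegacyINodeFileFieldsSketch {
  // Replication, mtime, atime, preferred block size, then writeBlocks()
  // output -- with no striped-blocks flag in between anymore.
  static void writeCoreFields(DataOutput out, short replication, long mtime,
      long atime, long preferredBlockSize, long[][] blocks) throws IOException {
    out.writeShort(replication);
    out.writeLong(mtime);
    out.writeLong(atime);
    out.writeLong(preferredBlockSize);
    out.writeInt(blocks.length);       // writeBlocks(): count, then triples
    for (long[] b : blocks) {
      out.writeLong(b[0]);             // block id
      out.writeLong(b[1]);             // num bytes
      out.writeLong(b[2]);             // generation stamp
    }
  }
}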

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java

@@ -203,27 +203,11 @@ private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration con
         fileByLoaded.getPermissionStatus().getPermission());
     assertEquals(mtime, fileByLoaded.getModificationTime());
     assertEquals(isUC ? mtime : atime, fileByLoaded.getAccessTime());
-    assertEquals(0, fileByLoaded.getContiguousBlocks().length);
-    assertEquals(0, fileByLoaded.getFileReplication());
+    // TODO for striped blocks, we currently save and load them as contiguous
+    // blocks to/from legacy fsimage
+    assertEquals(3, fileByLoaded.getContiguousBlocks().length);
     assertEquals(preferredBlockSize, fileByLoaded.getPreferredBlockSize());
-    //check the BlockInfoStriped
-    BlockInfoStriped[] stripedBlksByLoaded =
-        fileByLoaded.getStripedBlocksFeature().getBlocks();
-    assertEquals(3, stripedBlksByLoaded.length);
-    for (int i = 0; i < 3; i++) {
-      assertEquals(stripedBlks[i].getBlockId(),
-          stripedBlksByLoaded[i].getBlockId());
-      assertEquals(stripedBlks[i].getNumBytes(),
-          stripedBlksByLoaded[i].getNumBytes());
-      assertEquals(stripedBlks[i].getGenerationStamp(),
-          stripedBlksByLoaded[i].getGenerationStamp());
-      assertEquals(stripedBlks[i].getDataBlockNum(),
-          stripedBlksByLoaded[i].getDataBlockNum());
-      assertEquals(stripedBlks[i].getParityBlockNum(),
-          stripedBlksByLoaded[i].getParityBlockNum());
-    }
     if (isUC) {
       assertEquals(client,
           fileByLoaded.getFileUnderConstructionFeature().getClientName());