HDFS-8580. Erasure coding: Persist cellSize in BlockInfoStriped and StripedBlocksFeature. Contributed by Walter Su.

commit c12a974ccf
parent 49d5cff490
Author: Jing Zhao
Date:   2015-06-17 11:35:31 -07:00

12 changed files with 94 additions and 62 deletions
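What the patch does, in one picture: every constructor in the striped-block hierarchy now takes the erasure-coding cell size, the value is kept as a final field on BlockInfoStriped, and the fsimage writes it out per file. A minimal sketch of the new call shape against the patched classes (the block ID and the choice of the system default schema here are illustrative, not taken from the patch):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
    import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class CellSizeSketch {
      public static void main(String[] args) {
        ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
        // The default striping cell size; a zone may carry its own value.
        int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
        // cellSize is now an explicit constructor argument...
        BlockInfoStriped blk =
            new BlockInfoStriped(new Block(1L), schema, cellSize);
        // ...and travels with the block object from then on.
        assert blk.getCellSize() == cellSize;
      }
    }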

CHANGES-HDFS-EC-7285.txt

@@ -302,3 +302,6 @@
 HDFS-8559. Erasure Coding: fix non-protobuf fsimage for striped blocks.
 (Jing Zhao via yliu)
+
+HDFS-8580. Erasure coding: Persist cellSize in BlockInfoStriped and
+StripedBlocksFeature. (Walter Su via jing9)

BlockInfoStriped.java

@@ -39,6 +39,7 @@
  */
 public class BlockInfoStriped extends BlockInfo {
   private final ECSchema schema;
+  private final int cellSize;
   /**
    * Always the same size with triplets. Record the block index for each triplet
    * TODO: actually this is only necessary for over-replicated block. Thus can
@@ -46,15 +47,16 @@ public class BlockInfoStriped extends BlockInfo {
    */
   private byte[] indices;

-  public BlockInfoStriped(Block blk, ECSchema schema) {
+  public BlockInfoStriped(Block blk, ECSchema schema, int cellSize) {
     super(blk, (short) (schema.getNumDataUnits() + schema.getNumParityUnits()));
     indices = new byte[schema.getNumDataUnits() + schema.getNumParityUnits()];
     initIndices();
     this.schema = schema;
+    this.cellSize = cellSize;
   }

   BlockInfoStriped(BlockInfoStriped b) {
-    this(b, b.getSchema());
+    this(b, b.getSchema(), b.getCellSize());
     this.setBlockCollection(b.getBlockCollection());
   }

@@ -75,6 +77,10 @@ public ECSchema getSchema() {
     return schema;
   }

+  public int getCellSize() {
+    return cellSize;
+  }
+
   private void initIndices() {
     for (int i = 0; i < indices.length; i++) {
       indices[i] = -1;

@@ -236,7 +242,7 @@ public BlockInfoStripedUnderConstruction convertToBlockUnderConstruction(
       BlockUCState s, DatanodeStorageInfo[] targets) {
     final BlockInfoStripedUnderConstruction ucBlock;
     if(isComplete()) {
-      ucBlock = new BlockInfoStripedUnderConstruction(this, schema,
+      ucBlock = new BlockInfoStripedUnderConstruction(this, schema, cellSize,
           s, targets);
       ucBlock.setBlockCollection(getBlockCollection());
     } else {
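The copy constructor forwards b.getCellSize() and convertToBlockUnderConstruction passes the field straight through, so the value survives both block replacement and the complete-to-under-construction transition. A sketch of that invariant, assuming the same-package context the tests use (the copy constructor is package-private) and the schema/cellSize values from the snippet above:

    // Copy path: cellSize is preserved, not reset to any default.
    BlockInfoStriped original =
        new BlockInfoStriped(new Block(1L), schema, cellSize);
    BlockInfoStriped copy = new BlockInfoStriped(original);
    assert copy.getCellSize() == original.getCellSize();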

BlockInfoStripedUnderConstruction.java

@@ -57,16 +57,17 @@ public class BlockInfoStripedUnderConstruction extends BlockInfoStriped
   /**
    * Constructor with null storage targets.
    */
-  public BlockInfoStripedUnderConstruction(Block blk, ECSchema schema) {
-    this(blk, schema, UNDER_CONSTRUCTION, null);
+  public BlockInfoStripedUnderConstruction(Block blk, ECSchema schema,
+      int cellSize) {
+    this(blk, schema, cellSize, UNDER_CONSTRUCTION, null);
   }

   /**
    * Create a striped block that is currently being constructed.
    */
   public BlockInfoStripedUnderConstruction(Block blk, ECSchema schema,
-      BlockUCState state, DatanodeStorageInfo[] targets) {
-    super(blk, schema);
+      int cellSize, BlockUCState state, DatanodeStorageInfo[] targets) {
+    super(blk, schema, cellSize);
     assert getBlockUCState() != COMPLETE :
       "BlockInfoStripedUnderConstruction cannot be in COMPLETE state";
     this.blockUCState = state;

FSDirWriteFileOp.java

@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

@@ -531,8 +532,9 @@ private static BlockInfo addBlock(FSDirectory fsd, String path,
     // associate new last block for the file
     final BlockInfo blockInfo;
     if (isStriped) {
-      ECSchema ecSchema = FSDirErasureCodingOp.getErasureCodingSchema(
+      ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
           fsd.getFSNamesystem(), inodesInPath);
+      ECSchema ecSchema = ecZone.getSchema();
       short numDataUnits = (short) ecSchema.getNumDataUnits();
       short numParityUnits = (short) ecSchema.getNumParityUnits();
       short numLocations = (short) (numDataUnits + numParityUnits);

@@ -541,8 +543,8 @@ private static BlockInfo addBlock(FSDirectory fsd, String path,
       fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
           numLocations, true);
       blockInfo = new BlockInfoStripedUnderConstruction(block, ecSchema,
-          HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
-          targets);
+          ecZone.getCellSize(),
+          HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
     } else {
       // check quota limits and updated space consumed
       fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
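The caller-side pattern this patch repeats throughout the namenode: rather than asking FSDirErasureCodingOp for only the schema, callers fetch the whole ErasureCodingZone, which bundles the schema with the zone's cell size. Condensed to a fragment, with the names exactly as they appear above:

    ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
        fsd.getFSNamesystem(), inodesInPath);
    ECSchema ecSchema = ecZone.getSchema(); // data/parity layout
    int cellSize = ecZone.getCellSize();    // striping granularity in bytes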

FSEditLogLoader.java

@@ -36,6 +36,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;

@@ -417,9 +418,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       // Update the salient file attributes.
       newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
-      ECSchema ecSchema = FSDirErasureCodingOp.getErasureCodingSchema(
+      ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
           fsDir.getFSNamesystem(), iip);
-      updateBlocks(fsDir, addCloseOp, iip, newFile, ecSchema);
+      updateBlocks(fsDir, addCloseOp, iip, newFile, ecZone);
       break;
     }
     case OP_CLOSE: {

@@ -439,9 +440,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       // Update the salient file attributes.
       file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
       file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID);
-      ECSchema ecSchema = FSDirErasureCodingOp.getErasureCodingSchema(
+      ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
           fsDir.getFSNamesystem(), iip);
-      updateBlocks(fsDir, addCloseOp, iip, file, ecSchema);
+      updateBlocks(fsDir, addCloseOp, iip, file, ecZone);

       // Now close the file
       if (!file.isUnderConstruction() &&

@@ -499,9 +500,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       INodesInPath iip = fsDir.getINodesInPath(path, true);
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
       // Update in-memory data structures
-      ECSchema ecSchema = FSDirErasureCodingOp.getErasureCodingSchema(
+      ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
           fsDir.getFSNamesystem(), iip);
-      updateBlocks(fsDir, updateOp, iip, oldFile, ecSchema);
+      updateBlocks(fsDir, updateOp, iip, oldFile, ecZone);

       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId);

@@ -518,9 +519,9 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       INodesInPath iip = fsDir.getINodesInPath(path, true);
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path);
       // add the new block to the INodeFile
-      ECSchema ecSchema = FSDirErasureCodingOp.getErasureCodingSchema(
+      ErasureCodingZone ecZone = FSDirErasureCodingOp.getErasureCodingZone(
           fsDir.getFSNamesystem(), iip);
-      addNewBlock(addBlockOp, oldFile, ecSchema);
+      addNewBlock(addBlockOp, oldFile, ecZone);
       break;
     }
     case OP_SET_REPLICATION: {

@@ -961,8 +962,8 @@ private static String formatEditLogReplayError(EditLogInputStream in,
   /**
    * Add a new block into the given INodeFile
    */
-  private void addNewBlock(AddBlockOp op, INodeFile file, ECSchema ecSchema)
-      throws IOException {
+  private void addNewBlock(AddBlockOp op, INodeFile file,
+      ErasureCodingZone ecZone) throws IOException {
     BlockInfo[] oldBlocks = file.getBlocks();
     Block pBlock = op.getPenultimateBlock();
     Block newBlock= op.getLastBlock();

@@ -989,9 +990,10 @@ private void addNewBlock(AddBlockOp op, INodeFile file, ECSchema ecSchema)
     }
     // add the new block
     final BlockInfo newBlockInfo;
-    boolean isStriped = ecSchema != null;
+    boolean isStriped = ecZone != null;
     if (isStriped) {
-      newBlockInfo = new BlockInfoStripedUnderConstruction(newBlock, ecSchema);
+      newBlockInfo = new BlockInfoStripedUnderConstruction(newBlock,
+          ecZone.getSchema(), ecZone.getCellSize());
     } else {
       newBlockInfo = new BlockInfoContiguousUnderConstruction(newBlock,
           file.getPreferredBlockReplication());

@@ -1006,7 +1008,8 @@ private void addNewBlock(AddBlockOp op, INodeFile file, ECSchema ecSchema)
    * @throws IOException
    */
   private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
-      INodesInPath iip, INodeFile file, ECSchema ecSchema) throws IOException {
+      INodesInPath iip, INodeFile file, ErasureCodingZone ecZone)
+      throws IOException {
     // Update its block list
     BlockInfo[] oldBlocks = file.getBlocks();
     Block[] newBlocks = op.getBlocks();

@@ -1065,7 +1068,7 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
         throw new IOException("Trying to delete non-existant block " + oldBlock);
       }
     } else if (newBlocks.length > oldBlocks.length) {
-      final boolean isStriped = ecSchema != null;
+      final boolean isStriped = ecZone != null;
       // We're adding blocks
       for (int i = oldBlocks.length; i < newBlocks.length; i++) {
         Block newBlock = newBlocks[i];

@@ -1075,7 +1078,8 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
           // what about an old-version fsync() where fsync isn't called
           // until several blocks in?
           if (isStriped) {
-            newBI = new BlockInfoStripedUnderConstruction(newBlock, ecSchema);
+            newBI = new BlockInfoStripedUnderConstruction(newBlock,
+                ecZone.getSchema(), ecZone.getCellSize());
           } else {
             newBI = new BlockInfoContiguousUnderConstruction(newBlock,
                 file.getPreferredBlockReplication());

@@ -1086,9 +1090,14 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
           // versions of Hadoop. Current versions always log
           // OP_ADD operations as each block is allocated.
           // TODO: ECSchema can be restored from persisted file (HDFS-7859).
-          newBI = isStriped ? new BlockInfoStriped(newBlock,
-              ErasureCodingSchemaManager.getSystemDefaultSchema()) :
-              new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication());
+          if (isStriped) {
+            newBI = new BlockInfoStriped(newBlock,
+                ErasureCodingSchemaManager.getSystemDefaultSchema(),
+                ecZone.getCellSize());
+          } else {
+            newBI = new BlockInfoContiguous(newBlock,
+                file.getPreferredBlockReplication());
+          }
         }
         fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBI, file);
         file.addBlock(newBI);
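During edit-log replay the nullable zone takes over the role the nullable schema used to play as the striped-file marker, and the pre-HDFS-7859 fallback that still assumes the system default schema now at least records the zone's real cell size. The branch both replay paths share, condensed:

    // null zone means the file lives outside any erasure-coding zone.
    boolean isStriped = ecZone != null;
    BlockInfo newBI = isStriped
        ? new BlockInfoStripedUnderConstruction(newBlock,
            ecZone.getSchema(), ecZone.getCellSize())
        : new BlockInfoContiguousUnderConstruction(newBlock,
            file.getPreferredBlockReplication());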

FSImageFormatPBINode.java

@@ -360,8 +360,13 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
       // TODO: HDFS-7859
       ECSchema schema = ErasureCodingSchemaManager.getSystemDefaultSchema();
       stripeFeature = file.addStripedBlocksFeature();
-      for (BlockProto b : bp) {
-        stripeFeature.addBlock(new BlockInfoStriped(PBHelper.convert(b), schema));
+      if (bp.size() > 0) {
+        // if a striped file has block, the cellSize must exist in proto
+        final int cellSize = f.getStripedBlocks().getCellSize();
+        for (BlockProto b : bp) {
+          stripeFeature.addBlock(new BlockInfoStriped(PBHelper.convert(b),
+              schema, cellSize));
+        }
       }
     }

@@ -376,7 +381,7 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
       if (stripeFeature != null) {
         BlockInfoStriped striped = (BlockInfoStriped) lastBlk;
         ucBlk = new BlockInfoStripedUnderConstruction(striped,
-            striped.getSchema());
+            striped.getSchema(), striped.getCellSize());
       } else {
         ucBlk = new BlockInfoContiguousUnderConstruction(lastBlk,
             replication);

@@ -659,14 +664,17 @@ private void save(OutputStream out, INodeFile n) throws IOException {
     FileWithStripedBlocksFeature sb = n.getStripedBlocksFeature();
     if (sb != null) {
-      BlockInfoStriped[] sblocks = sb.getBlocks();
-      if (sblocks != null) {
-        for (BlockInfoStriped sblk : sblocks) {
-          b.addBlocks(PBHelper.convert(sblk));
-        }
-      }
       StripedBlocksFeature.Builder builder =
           StripedBlocksFeature.newBuilder();
+      BlockInfoStriped[] sblocks = sb.getBlocks();
+      if (sblocks != null && sblocks.length > 0) {
+        final int cellSize = sblocks[0].getCellSize();
+        for (BlockInfoStriped sblk : sblocks) {
+          assert cellSize == sblk.getCellSize();
+          b.addBlocks(PBHelper.convert(sblk));
+        }
+        builder.setCellSize(cellSize);
+      }
       b.setStripedBlocks(builder.build());
     }
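The fsimage round trip leans on one invariant: all striped blocks of a file share the same cell size. The saver therefore records sblocks[0].getCellSize() once per file, with the assert enforcing uniformity, and the loader reads the field back only when the block list is non-empty, so an empty striped file writes no cell size at all. The save-side logic in isolation:

    BlockInfoStriped[] sblocks = sb.getBlocks();
    if (sblocks != null && sblocks.length > 0) {
      final int cellSize = sblocks[0].getCellSize();
      for (BlockInfoStriped sblk : sblocks) {
        assert cellSize == sblk.getCellSize(); // one cell size per file
        b.addBlocks(PBHelper.convert(sblk));
      }
      builder.setCellSize(cellSize);           // persisted once, not per block
    }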

FSNamesystem.java

@@ -3675,8 +3675,9 @@ void commitOrCompleteLastBlock(
     final long diff;
     final short replicationFactor;
     if (fileINode.isStriped()) {
-      final ECSchema ecSchema = FSDirErasureCodingOp.getErasureCodingSchema(
-          this, iip);
+      final ErasureCodingZone ecZone = FSDirErasureCodingOp
+          .getErasureCodingZone(this, iip);
+      final ECSchema ecSchema = ecZone.getSchema();
       final short numDataUnits = (short) ecSchema.getNumDataUnits();
       final short numParityUnits = (short) ecSchema.getNumParityUnits();

@@ -3685,7 +3686,7 @@ void commitOrCompleteLastBlock(
           fileINode.getPreferredBlockSize() * numBlocks;
       final BlockInfoStriped striped = new BlockInfoStriped(commitBlock,
-          ecSchema);
+          ecSchema, ecZone.getCellSize());
       final long actualBlockGroupSize = striped.spaceConsumed();
       diff = fullBlockGroupSize - actualBlockGroupSize;
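The quota math here deserves a note: while the last block group is under construction, quota is charged for a full group, (dataUnits + parityUnits) * preferredBlockSize; at commit time the real footprint is recomputed (now with the zone's actual cell size) and the surplus is released. A worked example with assumed values, an RS-6-3 layout and a 128 MB preferred block size:

    short numDataUnits = 6;                      // assumed RS-6-3 schema
    short numParityUnits = 3;
    long preferredBlockSize = 128L << 20;        // assumed 128 MB
    long numBlocks = numDataUnits + numParityUnits;
    long fullBlockGroupSize = preferredBlockSize * numBlocks; // 1,207,959,552 B

    // striped.spaceConsumed() yields the group's true usage (cells written
    // plus parity); for a short final group it is far below the reservation,
    // and diff = fullBlockGroupSize - actualBlockGroupSize goes back to quota.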

fsimage.proto

@@ -94,6 +94,7 @@ message INodeSection {
   message StripedBlocksFeature {
     // store striped blocks related information
+    optional uint32 cellSize = 1;
   }

   message AclFeatureProto {
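Making cellSize an optional field keeps older images readable: an image written before this change simply lacks the field, and a striped file with no blocks never writes it. That is why the loader in FSImageFormatPBINode guards the read on the block count instead of assuming the field is set; the guard, condensed (bp and f are the loader's block list and INode proto from the hunk above):

    if (bp.size() > 0) {
      // A striped file that has blocks always carries cellSize in its image.
      final int cellSize = f.getStripedBlocks().getCellSize();
      // every BlockInfoStriped for this file is rebuilt with this value
    }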

TestBlockInfoStriped.java

@@ -19,6 +19,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingSchemaManager;
 import org.apache.hadoop.io.erasurecode.ECSchema;

@@ -46,8 +47,9 @@ public class TestBlockInfoStriped {
   private static final Block baseBlock = new Block(BASE_ID);
   private static final ECSchema testSchema
       = ErasureCodingSchemaManager.getSystemDefaultSchema();
+  private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
   private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
-      testSchema);
+      testSchema, cellSize);

   private Block[] createReportedBlocks(int num) {
     Block[] blocks = new Block[num];

@@ -235,7 +237,7 @@ public void testWrite() {
     ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
     DataOutput out = new DataOutputStream(byteStream);
     BlockInfoStriped blk = new BlockInfoStriped(new Block(blkID, numBytes,
-        generationStamp), testSchema);
+        generationStamp), testSchema, cellSize);
     try {
       blk.write(out);

TestFSEditLogLoader.java

@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingZone;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;

@@ -448,6 +449,7 @@ public void testAddNewStripedBlock() throws IOException{
     long timestamp = 1426222918;
     short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
     short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
+    int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

     //set the storage policy of the directory
     fs.mkdir(new Path(testDir), new FsPermission("755"));

@@ -463,7 +465,7 @@ public void testAddNewStripedBlock() throws IOException{
     // Add a striped block to the file
     BlockInfoStriped stripedBlk = new BlockInfoStriped(
-        new Block(blkId, blkNumBytes, timestamp), testSchema);
+        new Block(blkId, blkNumBytes, timestamp), testSchema, cellSize);
     INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
     file.toUnderConstruction(clientName, clientMachine);
     file.getStripedBlocksFeature().addBlock(stripedBlk);

@@ -488,6 +490,7 @@ public void testAddNewStripedBlock() throws IOException{
     assertEquals(timestamp, blks[0].getGenerationStamp());
     assertEquals(blockNum, blks[0].getDataBlockNum());
     assertEquals(parityNum, blks[0].getParityBlockNum());
+    assertEquals(cellSize, blks[0].getCellSize());

     cluster.shutdown();
     cluster = null;

@@ -520,6 +523,7 @@ public void testUpdateStripedBlocks() throws IOException{
     long timestamp = 1426222918;
     short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
     short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
+    int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

     //set the storage policy of the directory
     fs.mkdir(new Path(testDir), new FsPermission("755"));

@@ -529,7 +533,7 @@ public void testUpdateStripedBlocks() throws IOException{
     Path p = new Path(testFilePath);
     DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
     BlockInfoStriped stripedBlk = new BlockInfoStriped(
-        new Block(blkId, blkNumBytes, timestamp), testSchema);
+        new Block(blkId, blkNumBytes, timestamp), testSchema, cellSize);
     INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
     file.toUnderConstruction(clientName, clientMachine);
     file.getStripedBlocksFeature().addBlock(stripedBlk);

@@ -567,6 +571,7 @@ public void testUpdateStripedBlocks() throws IOException{
     assertEquals(newTimestamp, blks[0].getGenerationStamp());
     assertEquals(blockNum, blks[0].getDataBlockNum());
     assertEquals(parityNum, blks[0].getParityBlockNum());
+    assertEquals(cellSize, blks[0].getCellSize());

     cluster.shutdown();
     cluster = null;

TestFSImage.java

@@ -70,6 +70,7 @@ public class TestFSImage {
       "image-with-zero-block-size.tar.gz";
   private static final ECSchema testSchema
       = ErasureCodingSchemaManager.getSystemDefaultSchema();
+  private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

   @Test
   public void testPersist() throws IOException {

@@ -162,7 +163,7 @@ private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration con
     for (int i = 0; i < stripedBlks.length; i++) {
       stripedBlks[i] = new BlockInfoStriped(
           new Block(stripedBlkId + i, preferredBlockSize, timestamp),
-          testSchema);
+          testSchema, cellSize);
       file.getStripedBlocksFeature().addBlock(stripedBlks[i]);
     }

TestStripedINodeFile.java

@@ -53,6 +53,7 @@ public class TestStripedINodeFile {
   private static final ECSchema testSchema
       = ErasureCodingSchemaManager.getSystemDefaultSchema();
+  private static final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;

   private static INodeFile createStripedINodeFile() {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,

@@ -69,22 +70,20 @@ public void testBlockStripedFeature()
   @Test
   public void testBlockStripedTotalBlockCount() {
-    ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema);
+        = new BlockInfoStriped(blk, testSchema, cellSize);
     assertEquals(9, blockInfoStriped.getTotalBlockNum());
   }

   @Test
   public void testBlockStripedLength()
       throws IOException, InterruptedException {
-    ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     INodeFile inf = createStripedINodeFile();
     inf.addStripedBlocksFeature();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema);
+        = new BlockInfoStriped(blk, testSchema, cellSize);
     inf.addBlock(blockInfoStriped);
     assertEquals(1, inf.getBlocks().length);
   }

@@ -92,12 +91,11 @@ public void testBlockStripedLength()
   @Test
   public void testBlockStripedConsumedSpace()
       throws IOException, InterruptedException {
-    ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     INodeFile inf = createStripedINodeFile();
     inf.addStripedBlocksFeature();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema);
+        = new BlockInfoStriped(blk, testSchema, cellSize);
     blockInfoStriped.setNumBytes(1);
     inf.addBlock(blockInfoStriped);
     // 0. Calculate the total bytes per stripes <Num Bytes per Stripes>

@@ -119,16 +117,15 @@ public void testBlockStripedConsumedSpace()
   @Test
   public void testMultipleBlockStripedConsumedSpace()
       throws IOException, InterruptedException {
-    ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     INodeFile inf = createStripedINodeFile();
     inf.addStripedBlocksFeature();
     Block blk1 = new Block(1);
     BlockInfoStriped blockInfoStriped1
-        = new BlockInfoStriped(blk1, testSchema);
+        = new BlockInfoStriped(blk1, testSchema, cellSize);
     blockInfoStriped1.setNumBytes(1);
     Block blk2 = new Block(2);
     BlockInfoStriped blockInfoStriped2
-        = new BlockInfoStriped(blk2, testSchema);
+        = new BlockInfoStriped(blk2, testSchema, cellSize);
     blockInfoStriped2.setNumBytes(1);
     inf.addBlock(blockInfoStriped1);
     inf.addBlock(blockInfoStriped2);

@@ -140,12 +137,11 @@ public void testMultipleBlockStripedConsumedSpace()
   @Test
   public void testBlockStripedFileSize()
       throws IOException, InterruptedException {
-    ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     INodeFile inf = createStripedINodeFile();
     inf.addStripedBlocksFeature();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema);
+        = new BlockInfoStriped(blk, testSchema, cellSize);
     blockInfoStriped.setNumBytes(100);
     inf.addBlock(blockInfoStriped);
     // Compute file size should return actual data

@@ -157,12 +153,11 @@ public void testBlockStripedFileSize()
   @Test
   public void testBlockStripedUCFileSize()
       throws IOException, InterruptedException {
-    ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     INodeFile inf = createStripedINodeFile();
     inf.addStripedBlocksFeature();
     Block blk = new Block(1);
     BlockInfoStripedUnderConstruction bInfoStripedUC
-        = new BlockInfoStripedUnderConstruction(blk, testSchema);
+        = new BlockInfoStripedUnderConstruction(blk, testSchema, cellSize);
     bInfoStripedUC.setNumBytes(100);
     inf.addBlock(bInfoStripedUC);
     assertEquals(100, inf.computeFileSize());

@@ -172,12 +167,11 @@ public void testBlockStripedUCFileSize()
   @Test
   public void testBlockStripedComputeQuotaUsage()
       throws IOException, InterruptedException {
-    ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     INodeFile inf = createStripedINodeFile();
     inf.addStripedBlocksFeature();
     Block blk = new Block(1);
     BlockInfoStriped blockInfoStriped
-        = new BlockInfoStriped(blk, testSchema);
+        = new BlockInfoStriped(blk, testSchema, cellSize);
     blockInfoStriped.setNumBytes(100);
     inf.addBlock(blockInfoStriped);

@@ -195,12 +189,11 @@ public void testBlockStripedComputeQuotaUsage()
   @Test
   public void testBlockStripedUCComputeQuotaUsage()
       throws IOException, InterruptedException {
-    ECSchema defaultSchema = ErasureCodingSchemaManager.getSystemDefaultSchema();
     INodeFile inf = createStripedINodeFile();
     inf.addStripedBlocksFeature();
     Block blk = new Block(1);
     BlockInfoStripedUnderConstruction bInfoStripedUC
-        = new BlockInfoStripedUnderConstruction(blk, testSchema);
+        = new BlockInfoStripedUnderConstruction(blk, testSchema, cellSize);
     bInfoStripedUC.setNumBytes(100);
     inf.addBlock(bInfoStripedUC);