HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage. Contributed by Hui Zheng.
Parent: 46dac3595f
Commit: d0d75a8339

BlockInfoStriped.java (org.apache.hadoop.hdfs.server.blockmanagement):

```diff
@@ -20,6 +20,8 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import java.io.DataOutput;
+import java.io.IOException;
 
 /**
  * Subclass of {@link BlockInfo}, presenting a block group in erasure coding.
@@ -206,6 +208,13 @@ public int numNodes() {
     return num;
   }
 
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeShort(dataBlockNum);
+    out.writeShort(parityBlockNum);
+    super.write(out);
+  }
+
   /**
    * Convert a complete block to an under construction block.
    * @return BlockInfoUnderConstruction - an under construction block.
```
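The new write() fixes the on-disk order: the two striped-specific shorts go out before the base Block fields. Below is a standalone sketch of that record layout using plain JDK streams; the class and field names are illustrative, not the HDFS types, but the ordering mirrors the patch.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of the record that BlockInfoStriped#write produces: two shorts
// (data/parity block counts) followed by the three longs that Block#write
// emits (block id, length in bytes, generation stamp).
public class StripedBlockRecord {
  public static byte[] serialize(short dataBlockNum, short parityBlockNum,
      long blockId, long numBytes, long generationStamp) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeShort(dataBlockNum);   // written before the base Block fields
    out.writeShort(parityBlockNum);
    out.writeLong(blockId);         // what super.write(out) contributes
    out.writeLong(numBytes);
    out.writeLong(generationStamp);
    return bytes.toByteArray();     // 2*2 + 3*8 = 28 bytes per block record
  }

  public static void main(String[] args) throws IOException {
    byte[] record = serialize((short) 6, (short) 3, 1L, 1L, 1L);
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(record));
    // A reader must consume the two shorts first, exactly as the loader
    // below does before calling readFields(in) on the remaining fields.
    System.out.println("data=" + in.readShort() + " parity=" + in.readShort()
        + " id=" + in.readLong() + " len=" + in.readLong()
        + " gs=" + in.readLong() + " total=" + record.length + " bytes");
  }
}
```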

FSImageFormat.java (org.apache.hadoop.hdfs.server.namenode):

```diff
@@ -48,13 +48,16 @@
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
```
```diff
@@ -755,17 +758,32 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
         atime = in.readLong();
       }
       final long blockSize = in.readLong();
+      final boolean isStriped = NameNodeLayoutVersion.supports(
+          NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)
+          && (in.readBoolean());
       final int numBlocks = in.readInt();
 
       if (numBlocks >= 0) {
         // file
 
         // read blocks
-        BlockInfoContiguous[] blocks = new BlockInfoContiguous[numBlocks];
+        Block[] blocks;
+        if (isStriped) {
+          blocks = new Block[numBlocks];
+          for (int j = 0; j < numBlocks; j++) {
+            short dataBlockNum = in.readShort();
+            short parityBlockNum = in.readShort();
+            blocks[j] = new BlockInfoStriped(new Block(),
+                dataBlockNum, parityBlockNum);
+            blocks[j].readFields(in);
+          }
+        } else {
+          blocks = new BlockInfoContiguous[numBlocks];
           for (int j = 0; j < numBlocks; j++) {
             blocks[j] = new BlockInfoContiguous(replication);
             blocks[j].readFields(in);
           }
+        }
 
         String clientName = "";
         String clientMachine = "";
```
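Widening the local from BlockInfoContiguous[] to Block[] is what lets a single array carry either block family; striped-only accessors are then reached by a downcast at the point of use, which is exactly what the next hunk does with the cast to BlockInfoStriped. A toy illustration of the idea, with stand-in classes rather than the HDFS hierarchy:

```java
// Stand-ins: both info classes extend a common Block base, so one Block[]
// can hold either kind, and striped-specific fields need a downcast.
class BlockBase { long blockId; }
class ContiguousInfo extends BlockBase { short replication; }
class StripedInfo extends BlockBase { short dataBlockNum, parityBlockNum; }

class LastBlockProbe {
  // Mirrors the loader's pattern: inspect the last element and downcast
  // only when the striped variant is present.
  static short parityOfLast(BlockBase[] blocks) {
    if (blocks.length == 0) {
      return 0;
    }
    BlockBase last = blocks[blocks.length - 1];
    return (last instanceof StripedInfo)
        ? ((StripedInfo) last).parityBlockNum : 0;
  }
}
```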
```diff
@@ -783,9 +801,18 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
           clientMachine = FSImageSerialization.readString(in);
           // convert the last block to BlockUC
           if (blocks.length > 0) {
-            BlockInfoContiguous lastBlk = blocks[blocks.length - 1];
-            blocks[blocks.length - 1] = new BlockInfoContiguousUnderConstruction(
-                lastBlk, replication);
+            Block lastBlk = blocks[blocks.length - 1];
+            if (isStriped){
+              BlockInfoStriped lastStripedBlk = (BlockInfoStriped) lastBlk;
+              blocks[blocks.length - 1]
+                  = new BlockInfoStripedUnderConstruction(lastBlk,
+                      lastStripedBlk.getDataBlockNum(),
+                      lastStripedBlk.getParityBlockNum());
+            } else {
+              blocks[blocks.length - 1]
+                  = new BlockInfoContiguousUnderConstruction(lastBlk,
+                      replication);
+            }
           }
         }
       }
```
```diff
@@ -798,8 +825,19 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode,
         counter.increment();
       }
 
-      final INodeFile file = new INodeFile(inodeId, localName, permissions,
-          modificationTime, atime, blocks, replication, blockSize);
+      INodeFile file;
+      if (isStriped) {
+        file = new INodeFile(inodeId, localName, permissions, modificationTime,
+            atime, new BlockInfoContiguous[0], (short) 0, blockSize);
+        file.addStripedBlocksFeature();
+        for (Block block : blocks) {
+          file.getStripedBlocksFeature().addBlock((BlockInfoStriped) block);
+        }
+      } else {
+        file = new INodeFile(inodeId, localName, permissions,
+            modificationTime, atime, (BlockInfoContiguous[]) blocks,
+            replication, blockSize);
+      }
       if (underConstruction) {
         file.toUnderConstruction(clientName, clientMachine);
       }
```
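Note that the isStriped flag is read only when the image's layout version already supports ERASURE_CODING: an older image never wrote that byte, so unconditionally calling readBoolean() would desynchronize every field after it. A minimal sketch of the gating idea follows; the version constant is a placeholder, not the real value from NameNodeLayoutVersion.

```java
import java.io.DataInput;
import java.io.IOException;

// Minimal sketch of the version gate. NameNode layout versions in HDFS are
// negative integers that decrease as features are added; the cutoff below
// is hypothetical, for illustration only.
class LayoutGate {
  static final int ERASURE_CODING = -64; // hypothetical cutoff version

  static boolean supports(int featureCutoff, int imgVersion) {
    return imgVersion <= featureCutoff; // newer versions are more negative
  }

  // Old images never wrote the isStriped byte, so the flag may only be read
  // when the writer's version is known to have produced it; short-circuit
  // evaluation guarantees readBoolean() is skipped for old images.
  static boolean readIsStriped(DataInput in, int imgVersion) throws IOException {
    return supports(ERASURE_CODING, imgVersion) && in.readBoolean();
  }
}
```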

FSImageSerialization.java (org.apache.hadoop.hdfs.server.namenode):

```diff
@@ -35,6 +35,8 @@
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
@@ -124,21 +126,48 @@ static INodeFile readINodeUnderConstruction(
     short blockReplication = in.readShort();
     long modificationTime = in.readLong();
     long preferredBlockSize = in.readLong();
+    final boolean isStriped = NameNodeLayoutVersion.supports(
+        NameNodeLayoutVersion.Feature.ERASURE_CODING, imgVersion)
+        && (in.readBoolean());
 
     int numBlocks = in.readInt();
-    BlockInfoContiguous[] blocks = new BlockInfoContiguous[numBlocks];
+
+    final BlockInfoContiguous[] blocksContiguous;
+    BlockInfoStriped[] blocksStriped = null;
+    if (isStriped) {
+      blocksContiguous = new BlockInfoContiguous[0];
+      blocksStriped = new BlockInfoStriped[numBlocks];
+      int i = 0;
+      for (; i < numBlocks - 1; i++) {
+        short dataBlockNum = in.readShort();
+        short parityBlockNum = in.readShort();
+        blocksStriped[i] = new BlockInfoStriped(new Block(), dataBlockNum,
+            parityBlockNum);
+        blocksStriped[i].readFields(in);
+      }
+      if (numBlocks > 0) {
+        short dataBlockNum = in.readShort();
+        short parityBlockNum = in.readShort();
+        blocksStriped[i] = new BlockInfoStripedUnderConstruction(new Block(),
+            dataBlockNum, parityBlockNum, BlockUCState.UNDER_CONSTRUCTION, null);
+        blocksStriped[i].readFields(in);
+      }
+    } else {
+      blocksContiguous = new BlockInfoContiguous[numBlocks];
       Block blk = new Block();
       int i = 0;
       for (; i < numBlocks-1; i++) {
         blk.readFields(in);
-        blocks[i] = new BlockInfoContiguous(blk, blockReplication);
+        blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
       }
       // last block is UNDER_CONSTRUCTION
       if(numBlocks > 0) {
         blk.readFields(in);
-        blocks[i] = new BlockInfoContiguousUnderConstruction(
+        blocksContiguous[i] = new BlockInfoContiguousUnderConstruction(
             blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null);
       }
+    }
+
     PermissionStatus perm = PermissionStatus.read(in);
     String clientName = readString(in);
     String clientMachine = readString(in);
@@ -150,8 +179,19 @@ static INodeFile readINodeUnderConstruction(
 
     // Images in the pre-protobuf format will not have the lazyPersist flag,
     // so it is safe to pass false always.
-    INodeFile file = new INodeFile(inodeId, name, perm, modificationTime,
-        modificationTime, blocks, blockReplication, preferredBlockSize);
+    INodeFile file;
+    if (isStriped) {
+      file = new INodeFile(inodeId, name, perm, modificationTime,
+          modificationTime, blocksContiguous, (short) 0, preferredBlockSize);
+      file.addStripedBlocksFeature();
+      for (int i = 0; i < numBlocks; i++) {
+        file.getStripedBlocksFeature().addBlock(blocksStriped[i]);
+      }
+    } else {
+      file = new INodeFile(inodeId, name, perm, modificationTime,
+          modificationTime, blocksContiguous, blockReplication,
+          preferredBlockSize);
+    }
     file.toUnderConstruction(clientName, clientMachine);
     return file;
   }
@@ -166,7 +206,8 @@ static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons,
     out.writeShort(cons.getFileReplication());
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
+    // whether the file has striped blocks
+    out.writeBoolean(cons.isWithStripedBlocks());
     writeBlocks(cons.getBlocks(), out);
     cons.getPermissionStatus().write(out);
 
@@ -179,9 +220,9 @@ static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons,
 
   /**
    * Serialize a {@link INodeFile} node
-   * @param node The node to write
+   * @param file The node to write
    * @param out The {@link DataOutputStream} where the fields are written
-   * @param writeBlock Whether to write block information
+   * @param writeUnderConstruction Whether to write block information
    */
   public static void writeINodeFile(INodeFile file, DataOutput out,
       boolean writeUnderConstruction) throws IOException {
@@ -191,7 +232,8 @@ public static void writeINodeFile(INodeFile file, DataOutput out,
     out.writeLong(file.getModificationTime());
     out.writeLong(file.getAccessTime());
     out.writeLong(file.getPreferredBlockSize());
+    // whether the file has striped blocks
+    out.writeBoolean(file.isWithStripedBlocks());
     writeBlocks(file.getBlocks(), out);
     SnapshotFSImageFormat.saveFileDiffList(file, out);
 
```
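Reader and writer must agree on field order, so writeINodeUnderConstruction gains the same boolean in the same position that readINodeUnderConstruction consumes. Pieced together from the hunks above (fields written earlier in the record are omitted), the under-construction record now looks like this:

```java
// Field order visible in this patch (writer side, writeINodeUnderConstruction):
//   writeShort(replication)
//   writeLong(modificationTime)
//   writeLong(preferredBlockSize)
//   writeBoolean(isWithStripedBlocks)   // new flag added by this patch
//   writeBlocks(blocks)                 // striped block: 2 shorts + Block fields
//   PermissionStatus.write(...)
//   writeString(clientName)             // the reader hunk shows these two
//   writeString(clientMachine)          // strings following the permissions
```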

TestBlockInfoStriped.java (org.apache.hadoop.hdfs.server.blockmanagement):

```diff
@@ -25,8 +25,16 @@
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
 
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.ByteArrayOutputStream;
+import java.nio.ByteBuffer;
+
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.NUM_DATA_BLOCKS;
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.NUM_PARITY_BLOCKS;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 /**
  * Test {@link BlockInfoStriped}
@@ -216,4 +224,30 @@ public void testReplaceBlock() {
       Assert.assertNull(newBlockInfo.getNext());
     }
   }
+
+  @Test
+  public void testWrite() {
+    long blkID = 1;
+    long numBytes = 1;
+    long generationStamp = 1;
+    short dataBlockNum = 6;
+    short parityBlockNum = 3;
+    ByteBuffer byteBuffer = ByteBuffer.allocate(Long.SIZE/Byte.SIZE*3
+        + Short.SIZE/Byte.SIZE*2);
+    byteBuffer.putShort(dataBlockNum).putShort(parityBlockNum)
+        .putLong(blkID).putLong(numBytes).putLong(generationStamp);
+
+    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+    DataOutput out = new DataOutputStream(byteStream);
+    BlockInfoStriped blk = new BlockInfoStriped(new Block(1,1,1),
+        (short)6,(short)3);
+    try {
+      blk.write(out);
+    } catch(Exception ex) {
+      fail("testWrite error:" + ex.getMessage());
+    }
+    assertEquals(byteBuffer.array().length, byteStream.toByteArray().length);
+    assertArrayEquals(byteBuffer.array(), byteStream.toByteArray());
+  }
+
 }
```
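The buffer size in testWrite is worth spelling out: Long.SIZE/Byte.SIZE*3 + Short.SIZE/Byte.SIZE*2 = 8*3 + 2*2 = 28 bytes, i.e. the two shorts (dataBlockNum, parityBlockNum) followed by the three longs of the base Block (id, numBytes, generationStamp). Because the ByteBuffer is filled in exactly the order BlockInfoStriped#write emits, assertArrayEquals verifies both the field order and the width of every field.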

TestFSImage.java (org.apache.hadoop.hdfs.server.namenode):

```diff
@@ -17,18 +17,28 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.EnumSet;
 
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.junit.Assert;
 
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -42,8 +52,8 @@
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -120,6 +130,140 @@ private void testPersistHelper(Configuration conf) throws IOException {
     }
   }
 
+  private void testSaveAndLoadINodeFile(FSNamesystem fsn, Configuration conf,
+      boolean isUC) throws IOException{
+    // construct an INode with StripedBlock for saving and loading
+    long id = 123456789;
+    byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
+    PermissionStatus permissionStatus = new PermissionStatus("testuser_a",
+        "testuser_groups", new FsPermission((short)0x755));
+    long mtime = 1426222916-3600;
+    long atime = 1426222916;
+    BlockInfoContiguous[] blks = new BlockInfoContiguous[0];
+    short replication = 3;
+    long preferredBlockSize = 128*1024*1024;
+    byte storagePolicyID = HdfsConstants.EC_STORAGE_POLICY_ID;
+    INodeFile file = new INodeFile(id, name, permissionStatus, mtime, atime,
+        blks, replication, preferredBlockSize, storagePolicyID);
+    ByteArrayOutputStream bs = new ByteArrayOutputStream();
+    file.addStripedBlocksFeature();
+
+    //construct StripedBlocks for the INode
+    BlockInfoStriped[] stripedBlks = new BlockInfoStriped[3];
+    long stripedBlkId = 10000001;
+    long timestamp = mtime+3600;
+    for (int i = 0; i < stripedBlks.length; i++) {
+      stripedBlks[i] = new BlockInfoStriped(
+          new Block(stripedBlkId + i, preferredBlockSize, timestamp),
+          (short) 6, (short) 3);
+      file.getStripedBlocksFeature().addBlock(stripedBlks[i]);
+    }
+
+    final String client = "testClient";
+    final String clientMachine = "testClientMachine";
+    final String path = "testUnderConstructionPath";
+
+    //save the INode to byte array
+    DataOutput out = new DataOutputStream(bs);
+    if (isUC) {
+      file.toUnderConstruction(client, clientMachine);
+      FSImageSerialization.writeINodeUnderConstruction((DataOutputStream) out,
+          file, path);
+    } else {
+      FSImageSerialization.writeINodeFile(file, out, false);
+    }
+    DataInput in = new DataInputStream(
+        new ByteArrayInputStream(bs.toByteArray()));
+
+    // load the INode from the byte array
+    INodeFile fileByLoaded;
+    if (isUC) {
+      fileByLoaded = FSImageSerialization.readINodeUnderConstruction(in,
+          fsn, fsn.getFSImage().getLayoutVersion());
+    } else {
+      fileByLoaded = (INodeFile) new FSImageFormat.Loader(conf, fsn)
+          .loadINodeWithLocalName(false, in, false);
+    }
+
+    assertEquals(id, fileByLoaded.getId());
+    assertArrayEquals(isUC ? path.getBytes() : name,
+        fileByLoaded.getLocalName().getBytes());
+    assertEquals(permissionStatus.getUserName(),
+        fileByLoaded.getPermissionStatus().getUserName());
+    assertEquals(permissionStatus.getGroupName(),
+        fileByLoaded.getPermissionStatus().getGroupName());
+    assertEquals(permissionStatus.getPermission(),
+        fileByLoaded.getPermissionStatus().getPermission());
+    assertEquals(mtime, fileByLoaded.getModificationTime());
+    assertEquals(isUC ? mtime : atime, fileByLoaded.getAccessTime());
+    assertEquals(0, fileByLoaded.getContiguousBlocks().length);
+    assertEquals(0, fileByLoaded.getBlockReplication());
+    assertEquals(preferredBlockSize, fileByLoaded.getPreferredBlockSize());
+
+    //check the BlockInfoStriped
+    BlockInfoStriped[] stripedBlksByLoaded =
+        fileByLoaded.getStripedBlocksFeature().getBlocks();
+    assertEquals(3, stripedBlksByLoaded.length);
+    for (int i = 0; i < 3; i++) {
+      assertEquals(stripedBlks[i].getBlockId(),
+          stripedBlksByLoaded[i].getBlockId());
+      assertEquals(stripedBlks[i].getNumBytes(),
+          stripedBlksByLoaded[i].getNumBytes());
+      assertEquals(stripedBlks[i].getGenerationStamp(),
+          stripedBlksByLoaded[i].getGenerationStamp());
+      assertEquals(stripedBlks[i].getDataBlockNum(),
+          stripedBlksByLoaded[i].getDataBlockNum());
+      assertEquals(stripedBlks[i].getParityBlockNum(),
+          stripedBlksByLoaded[i].getParityBlockNum());
+    }
+
+    if (isUC) {
+      assertEquals(client,
+          fileByLoaded.getFileUnderConstructionFeature().getClientName());
+      assertEquals(clientMachine,
+          fileByLoaded.getFileUnderConstructionFeature().getClientMachine());
+    }
+  }
+
+  /**
+   * Test if an INodeFile with BlockInfoStriped can be saved by
+   * FSImageSerialization and loaded by FSImageFormat#Loader.
+   */
+  @Test
+  public void testSaveAndLoadInodeFile() throws IOException{
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+      testSaveAndLoadINodeFile(cluster.getNamesystem(), conf, false);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test if an INodeFileUnderConstruction with BlockInfoStriped can be
+   * saved and loaded by FSImageSerialization
+   */
+  @Test
+  public void testSaveAndLoadInodeFileUC() throws IOException{
+    // construct an INode with StripedBlock for saving and loading
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+      testSaveAndLoadINodeFile(cluster.getNamesystem(), conf, true);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Ensure that the digest written by the saver equals to the digest of the
    * file.
```
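The two @Test methods exercise the same save/load round trip that testSaveAndLoadINodeFile implements by hand: serialize into an in-memory buffer, deserialize, then compare field by field. For readers porting that pattern, the core shape is small enough to isolate; this is a hedged sketch where Writer and Reader are stand-ins for the FSImageSerialization calls, not HDFS interfaces.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Generic round-trip helper: write through one callback, read back through
// another, and return the reconstructed value for field-by-field assertions.
final class RoundTrip {
  interface Writer { void write(DataOutputStream out) throws IOException; }
  interface Reader<T> { T read(DataInputStream in) throws IOException; }

  static <T> T roundTrip(Writer w, Reader<T> r) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(bytes)) {
      w.write(out); // e.g. FSImageSerialization.writeINodeFile(...)
    }
    return r.read(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));
  }
}
```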