HDFS-11382. Persist Erasure Coding Policy ID in a new optional field in INodeFile in FSImage. Contributed by Manoj Govindassamy.
commit 55c07bbed2 (parent 5f5b031d1f)
@@ -79,7 +79,7 @@ public HdfsFileStatus(long length, boolean isdir, int block_replication,
       byte storagePolicy, ErasureCodingPolicy ecPolicy) {
     this.length = length;
     this.isdir = isdir;
-    this.block_replication = ecPolicy == null ? (short) block_replication : 0;
+    this.block_replication = (short) block_replication;
     this.blocksize = blocksize;
     this.modification_time = modification_time;
     this.access_time = access_time;
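Note on the hunk above: the HdfsFileStatus constructor no longer forces block_replication to 0 when an erasure coding policy is attached; the NameNode-side INodeFile (changed further down) now decides what replication an EC file reports. A minimal client-side illustration, assuming fs is a FileSystem and /ec/file is a striped file (both hypothetical):

    // getFileStatus/getReplication are the stock Hadoop client APIs.
    FileStatus st = fs.getFileStatus(new Path("/ec/file"));
    short repl = st.getReplication();  // 1 for a striped file after this patch, not 0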
@@ -234,4 +234,10 @@
     <Method name="assertAllResultsEqual" />
     <Bug pattern="NP_LOAD_OF_KNOWN_NULL_VALUE" />
   </Match>
+  <!-- Manually verified that signed byte value involved in bitwise OR is not negative -->
+  <Match>
+    <Class name="org.apache.hadoop.hdfs.server.namenode.INodeFile$HeaderFormat" />
+    <Method name="getBlockLayoutRedundancy" />
+    <Bug pattern="BIT_IOR_OF_SIGNED_BYTE" />
+  </Match>
 </FindBugsFilter>
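The new FindBugs suppression pairs with the getBlockLayoutRedundancy method added below, which ORs a signed byte into a long. A self-contained sketch of what the BIT_IOR_OF_SIGNED_BYTE detector worries about, and why it is harmless here:

    // A negative byte sign-extends when widened, so the OR would set
    // every high bit of the header. EC policy IDs are validated to be
    // non-negative, which FindBugs cannot prove statically.
    byte policyId = 1;               // policy IDs are small positives
    long layout = 1L << 11;          // striped layout bit
    layout |= policyId;              // safe only because policyId >= 0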
@@ -416,21 +416,25 @@ static INodeFile addFileForEditLog(
     assert fsd.hasWriteLock();
     try {
       // check if the file has an EC policy
+      boolean isStriped = false;
       ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
           getErasureCodingPolicy(fsd.getFSNamesystem(), existing);
       if (ecPolicy != null) {
-        replication = ecPolicy.getId();
+        isStriped = true;
       }
-      final BlockType blockType = ecPolicy != null?
+      final BlockType blockType = isStriped ?
           BlockType.STRIPED : BlockType.CONTIGUOUS;
+      final Short replicationFactor = (!isStriped ? replication : null);
+      final Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);
       if (underConstruction) {
         newNode = newINodeFile(id, permissions, modificationTime,
-            modificationTime, replication, preferredBlockSize, storagePolicyId,
-            blockType);
+            modificationTime, replicationFactor, ecPolicyID, preferredBlockSize,
+            storagePolicyId, blockType);
         newNode.toUnderConstruction(clientName, clientMachine);
       } else {
         newNode = newINodeFile(id, permissions, modificationTime, atime,
-            replication, preferredBlockSize, storagePolicyId, blockType);
+            replicationFactor, ecPolicyID, preferredBlockSize,
+            storagePolicyId, blockType);
       }
       newNode.setLocalName(localName);
       INodesInPath iip = fsd.addINode(existing, newNode,
@@ -523,15 +527,19 @@ private static INodesInPath addFile(
     INodesInPath newiip;
     fsd.writeLock();
     try {
+      boolean isStriped = false;
       ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
          getErasureCodingPolicy(fsd.getFSNamesystem(), existing);
       if (ecPolicy != null) {
-        replication = ecPolicy.getId();
+        isStriped = true;
       }
-      final BlockType blockType = ecPolicy != null?
+      final BlockType blockType = isStriped ?
           BlockType.STRIPED : BlockType.CONTIGUOUS;
+      final Short replicationFactor = (!isStriped ? replication : null);
+      final Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);
       INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions,
-          modTime, modTime, replication, preferredBlockSize, blockType);
+          modTime, modTime, replicationFactor, ecPolicyID, preferredBlockSize,
+          blockType);
       newNode.setLocalName(localName);
       newNode.toUnderConstruction(clientName, clientMachine);
       newiip = fsd.addINode(existing, newNode, permissions.getPermission());
@@ -702,17 +710,17 @@ private static boolean completeFileInternal(

   private static INodeFile newINodeFile(
       long id, PermissionStatus permissions, long mtime, long atime,
-      short replication, long preferredBlockSize, byte storagePolicyId,
-      BlockType blockType) {
+      Short replication, Byte ecPolicyID, long preferredBlockSize,
+      byte storagePolicyId, BlockType blockType) {
     return new INodeFile(id, null, permissions, mtime, atime,
-        BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize,
+        BlockInfo.EMPTY_ARRAY, replication, ecPolicyID, preferredBlockSize,
         storagePolicyId, blockType);
   }

   private static INodeFile newINodeFile(long id, PermissionStatus permissions,
-      long mtime, long atime, short replication, long preferredBlockSize,
-      BlockType blockType) {
+      long mtime, long atime, Short replication, Byte ecPolicyID,
+      long preferredBlockSize, BlockType blockType) {
     return newINodeFile(id, permissions, mtime, atime, replication, ecPolicyID,
         preferredBlockSize, (byte)0, blockType);
   }

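With the reworked helpers above, every file-creation path passes exactly one of replication and ecPolicyID and leaves the other null. An illustrative pair of calls against the new signature (variable names are placeholders, not from the patch):

    // Contiguous layout: replication set, policy ID null.
    INodeFile plain = newINodeFile(id, permissions, mtime, atime,
        (short) 3, null, preferredBlockSize, storagePolicyId,
        BlockType.CONTIGUOUS);
    // Striped layout: replication null, policy ID set.
    INodeFile striped = newINodeFile(id, permissions, mtime, atime,
        null, ecPolicy.getId(), preferredBlockSize, storagePolicyId,
        BlockType.STRIPED);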
@@ -896,9 +896,9 @@ public INodeFileAttributes loadINodeFileAttributes(DataInput in)
         in.readShort());
     final long preferredBlockSize = in.readLong();

-    return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
-        accessTime, replication, preferredBlockSize, (byte) 0, null,
-        BlockType.CONTIGUOUS);
+    return new INodeFileAttributes.SnapshotCopy(name, permissions, null,
+        modificationTime, accessTime, replication, null, preferredBlockSize,
+        (byte) 0, null, BlockType.CONTIGUOUS);
   }

   public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
@@ -329,17 +329,19 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
       assert n.getType() == INodeSection.INode.Type.FILE;
       INodeSection.INodeFile f = n.getFile();
       List<BlockProto> bp = f.getBlocksList();
-      short replication = (short) f.getReplication();
       BlockType blockType = PBHelperClient.convert(f.getBlockType());
       LoaderContext state = parent.getLoaderContext();
-      ErasureCodingPolicy ecPolicy = (blockType == BlockType.STRIPED) ?
-          ErasureCodingPolicyManager.getPolicyByPolicyID((byte) replication) :
-          null;
+      boolean isStriped = f.hasErasureCodingPolicyID();
+      Short replication = (!isStriped ? (short) f.getReplication() : null);
+      ErasureCodingPolicy ecPolicy = isStriped ?
+          ErasureCodingPolicyManager.getPolicyByPolicyID(
+              (byte) f.getErasureCodingPolicyID()) : null;
+      Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);

       BlockInfo[] blocks = new BlockInfo[bp.size()];
       for (int i = 0; i < bp.size(); ++i) {
         BlockProto b = bp.get(i);
-        if (blockType == BlockType.STRIPED) {
+        if (isStriped) {
           blocks[i] = new BlockInfoStriped(PBHelperClient.convert(b), ecPolicy);
         } else {
           blocks[i] = new BlockInfoContiguous(PBHelperClient.convert(b),
@@ -352,8 +354,8 @@ private INodeFile loadINodeFile(INodeSection.INode n) {

       final INodeFile file = new INodeFile(n.getId(),
           n.getName().toByteArray(), permissions, f.getModificationTime(),
-          f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
-          (byte)f.getStoragePolicyID(), blockType);
+          f.getAccessTime(), blocks, replication, ecPolicyID,
+          f.getPreferredBlockSize(), (byte)f.getStoragePolicyID(), blockType);

       if (f.hasAcl()) {
         int[] entries = AclEntryStatusFormat.toInt(loadAclEntries(
@@ -376,7 +378,7 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
         BlockInfo lastBlk = file.getLastBlock();
         // replace the last block of file
         final BlockInfo ucBlk;
-        if (blockType == BlockType.STRIPED) {
+        if (isStriped) {
           BlockInfoStriped striped = (BlockInfoStriped) lastBlk;
           ucBlk = new BlockInfoStriped(striped, ecPolicy);
         } else {
@@ -503,10 +505,15 @@ public static INodeSection.INodeFile.Builder buildINodeFile(
         .setModificationTime(file.getModificationTime())
         .setPermission(buildPermissionStatus(file, state.getStringMap()))
         .setPreferredBlockSize(file.getPreferredBlockSize())
-        .setReplication(file.getFileReplication())
         .setStoragePolicyID(file.getLocalStoragePolicyID())
         .setBlockType(PBHelperClient.convert(file.getBlockType()));

+    if (file.isStriped()) {
+      b.setErasureCodingPolicyID(file.getErasureCodingPolicyID());
+    } else {
+      b.setReplication(file.getFileReplication());
+    }
+
     AclFeature f = file.getAclFeature();
     if (f != null) {
       b.setAcl(buildAclEntries(f, state.getStringMap()));
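The writer above and the loader hunk earlier are two halves of one contract: a persisted INodeFile carries either replication or erasureCodingPolicyID, never both, and the protobuf has-bit is what the reader branches on. A condensed reader-side sketch (accessor names are the generated protobuf ones):

    INodeSection.INodeFile f = n.getFile();
    if (f.hasErasureCodingPolicyID()) {              // striped file
      byte ecPolicyID = (byte) f.getErasureCodingPolicyID();
    } else {                                         // contiguous file
      short replication = (short) f.getReplication();
    }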
@@ -62,6 +62,11 @@
 public class INodeFile extends INodeWithAdditionalFields
     implements INodeFileAttributes, BlockCollection {

+  /**
+   * Erasure Coded striped blocks have replication factor of 1.
+   */
+  public static final short DEFAULT_REPL_FOR_STRIPED_BLOCKS = 1;
+
   /** The same as valueOf(inode, path, false). */
   public static INodeFile valueOf(INode inode, String path
       ) throws FileNotFoundException {
@@ -126,7 +131,6 @@ enum HeaderFormat {
    * Different types can be replica or EC
    */
   private static final int LAYOUT_BIT_WIDTH = 1;
-
   private static final int MAX_REDUNDANCY = (1 << 11) - 1;

   HeaderFormat(LongBitFormat previous, int length, long min) {
@@ -134,9 +138,14 @@ enum HeaderFormat {
     }

     static short getReplication(long header) {
-      long layoutRedundancy = BLOCK_LAYOUT_AND_REDUNDANCY.BITS.retrieve(header);
+      if (isStriped(header)) {
+        return DEFAULT_REPL_FOR_STRIPED_BLOCKS;
+      } else {
+        long layoutRedundancy =
+            BLOCK_LAYOUT_AND_REDUNDANCY.BITS.retrieve(header);
         return (short) (layoutRedundancy & MAX_REDUNDANCY);
       }
+    }

     static byte getECPolicyID(long header) {
       long layoutRedundancy = BLOCK_LAYOUT_AND_REDUNDANCY.BITS.retrieve(header);
@@ -158,8 +167,7 @@ static byte getStoragePolicyID(long header) {
     static final long BLOCK_TYPE_MASK_STRIPED = 1 << 11;

     static boolean isStriped(long header) {
-      long layoutRedundancy = BLOCK_LAYOUT_AND_REDUNDANCY.BITS.retrieve(header);
-      return (layoutRedundancy & BLOCK_TYPE_MASK) != 0;
+      return getBlockType(header) == STRIPED;
     }

     static BlockType getBlockType(long header) {
@@ -172,22 +180,40 @@ static BlockType getBlockType(long header) {
       }
     }

-    static long toLong(long preferredBlockSize, short replication,
-        BlockType blockType, byte storagePolicyID) {
+    /**
+     * Construct block layout redundancy based on the given BlockType,
+     * replication factor and EC PolicyID.
+     */
+    static long getBlockLayoutRedundancy(final BlockType blockType,
+        final Short replication, final Byte erasureCodingPolicyID) {
+      long layoutRedundancy = 0;
+      if (blockType == STRIPED) {
+        Preconditions.checkArgument(replication == null &&
+            erasureCodingPolicyID != null);
+        Preconditions.checkArgument(
+            ErasureCodingPolicyManager.getPolicyByPolicyID(
+                erasureCodingPolicyID) != null);
+        layoutRedundancy |= BLOCK_TYPE_MASK_STRIPED;
+        // Following bitwise OR with signed byte erasureCodingPolicyID is safe
+        // as the PolicyID can never be in negative.
+        layoutRedundancy |= erasureCodingPolicyID;
+      } else {
+        Preconditions.checkArgument(replication != null &&
+            erasureCodingPolicyID == null);
         Preconditions.checkArgument(replication >= 0 &&
             replication <= MAX_REDUNDANCY);
+        layoutRedundancy |= replication;
+      }
+      return layoutRedundancy;
+    }
+
+    static long toLong(long preferredBlockSize, long layoutRedundancy,
+        byte storagePolicyID) {
       long h = 0;
       if (preferredBlockSize == 0) {
         preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin();
       }
       h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
-      // For erasure coded files, replication is used to store ec policy id
-      // TODO: this is hacky. Add some utility to generate the layoutRedundancy
-      long layoutRedundancy = 0;
-      if (blockType == STRIPED) {
-        layoutRedundancy |= BLOCK_TYPE_MASK_STRIPED;
-      }
-      layoutRedundancy |= replication;
       h = BLOCK_LAYOUT_AND_REDUNDANCY.BITS.combine(layoutRedundancy, h);
       h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);
       return h;
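For orientation: BLOCK_LAYOUT_AND_REDUNDANCY is a 12-bit slice of the long-valued inode header, with the top bit as the layout flag and the low 11 bits carrying either the replication factor or the EC policy ID. A standalone sketch of that encoding, derived from the constants in the hunks above (not HDFS code):

    static final long BLOCK_TYPE_MASK_STRIPED = 1 << 11;  // layout bit
    static final long MAX_REDUNDANCY = (1 << 11) - 1;     // low 11 bits

    static long encode(boolean striped, long replicationOrPolicyId) {
      long v = striped ? BLOCK_TYPE_MASK_STRIPED : 0;
      return v | (replicationOrPolicyId & MAX_REDUNDANCY);
    }
    static boolean isStriped(long v) { return (v & BLOCK_TYPE_MASK_STRIPED) != 0; }
    static short redundancy(long v) { return (short) (v & MAX_REDUNDANCY); }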
@@ -202,15 +228,17 @@ static long toLong(long preferredBlockSize, short replication,
   INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
       long atime, BlockInfo[] blklist, short replication,
       long preferredBlockSize) {
-    this(id, name, permissions, mtime, atime, blklist, replication,
+    this(id, name, permissions, mtime, atime, blklist, replication, null,
         preferredBlockSize, (byte) 0, CONTIGUOUS);
   }

   INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
-      long atime, BlockInfo[] blklist, short replication,
+      long atime, BlockInfo[] blklist, Short replication, Byte ecPolicyID,
       long preferredBlockSize, byte storagePolicyID, BlockType blockType) {
     super(id, name, permissions, mtime, atime);
-    header = HeaderFormat.toLong(preferredBlockSize, replication, blockType,
+    final long layoutRedundancy = HeaderFormat.getBlockLayoutRedundancy(
+        blockType, replication, ecPolicyID);
+    header = HeaderFormat.toLong(preferredBlockSize, layoutRedundancy,
         storagePolicyID);
     if (blklist != null && blklist.length > 0) {
       for (BlockInfo b : blklist) {
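The constructor is now the single choke point that validates the (replication, ecPolicyID) pairing through getBlockLayoutRedundancy. Constructing a striped inode directly, mirroring the updated test helpers further down (values are illustrative):

    INodeFile striped = new INodeFile(id, name, permissions, mtime, atime,
        BlockInfo.EMPTY_ARRAY, null /*replication*/,
        HdfsConstants.RS_6_3_POLICY_ID /*ecPolicyID*/,
        preferredBlockSize, (byte) 0, BlockType.STRIPED);
    // Passing both, or neither, fails fast with IllegalArgumentException.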
@@ -462,6 +490,9 @@ public final short getFileReplication(int snapshot) {
    * */
   @Override // INodeFileAttributes
   public final short getFileReplication() {
+    if (isStriped()) {
+      return DEFAULT_REPL_FOR_STRIPED_BLOCKS;
+    }
     return getFileReplication(CURRENT_STATE_ID);
   }

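Roughly, what callers observe after the two accessor changes above (illustrative, not from the patch):

    if (inode.isStriped()) {
      inode.getErasureCodingPolicyID(); // the persisted policy ID
      inode.getFileReplication();       // DEFAULT_REPL_FOR_STRIPED_BLOCKS == 1
    } else {
      inode.getFileReplication();       // the real replication factor
    }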
@@ -56,11 +56,13 @@ static class SnapshotCopy extends INodeAttributes.SnapshotCopy

     public SnapshotCopy(byte[] name, PermissionStatus permissions,
         AclFeature aclFeature, long modificationTime, long accessTime,
-        short replication, long preferredBlockSize,
+        Short replication, Byte ecPolicyID, long preferredBlockSize,
         byte storagePolicyID, XAttrFeature xAttrsFeature, BlockType blockType) {
       super(name, permissions, aclFeature, modificationTime, accessTime,
           xAttrsFeature);
-      header = HeaderFormat.toLong(preferredBlockSize, replication, blockType,
+      final long layoutRedundancy = HeaderFormat.getBlockLayoutRedundancy(
+          blockType, replication, ecPolicyID);
+      header = HeaderFormat.toLong(preferredBlockSize, layoutRedundancy,
           storagePolicyID);
     }

@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
@@ -232,9 +233,15 @@ private void loadFileDiffList(InputStream in, INodeFile file, int size)
             fileInPb.getXAttrs(), state.getStringTable()));
       }

+      boolean isStriped =
+          (fileInPb.getBlockType() == BlockTypeProto.STRIPED);
+      Short replication =
+          (!isStriped ? (short) fileInPb.getReplication() : null);
+      Byte ecPolicyID =
+          (isStriped ? (byte) fileInPb.getErasureCodingPolicyID() : null);
       copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
           .toByteArray(), permission, acl, fileInPb.getModificationTime(),
-          fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
+          fileInPb.getAccessTime(), replication, ecPolicyID,
           fileInPb.getPreferredBlockSize(),
           (byte)fileInPb.getStoragePolicyID(), xAttrs,
           PBHelperClient.convert(fileInPb.getBlockType()));
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
|
|||||||
map.put("pathSuffix",
|
map.put("pathSuffix",
|
||||||
printSuffix ? inode.getName().toStringUtf8() : "");
|
printSuffix ? inode.getName().toStringUtf8() : "");
|
||||||
map.put("permission", toString(p.getPermission()));
|
map.put("permission", toString(p.getPermission()));
|
||||||
|
if (f.hasErasureCodingPolicyID()) {
|
||||||
|
map.put("replication", INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS);
|
||||||
|
} else {
|
||||||
map.put("replication", f.getReplication());
|
map.put("replication", f.getReplication());
|
||||||
|
}
|
||||||
map.put("type", inode.getType());
|
map.put("type", inode.getType());
|
||||||
map.put("fileId", inode.getId());
|
map.put("fileId", inode.getId());
|
||||||
map.put("childrenNum", 0);
|
map.put("childrenNum", 0);
|
||||||
|
@@ -496,9 +496,7 @@ private void dumpINodeFile(INodeSection.INodeFile f) {
       o(INODE_SECTION_STORAGE_POLICY_ID, f.getStoragePolicyID());
     }
     if (f.getBlockType() != BlockTypeProto.CONTIGUOUS) {
-      out.print("<" + INODE_SECTION_BLOCK_TYPE + ">");
-      o(SECTION_NAME, f.getBlockType().name());
-      out.print("</" + INODE_SECTION_BLOCK_TYPE + ">\n");
+      o(INODE_SECTION_BLOCK_TYPE, f.getBlockType().name());
     }

     if (f.hasFileUC()) {
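The XML writer fix above also makes the block type a single properly paired element instead of a nested section name. To inspect the result in a saved image, the standard offline image viewer invocation applies, e.g. "hdfs oiv -p XML -i <fsimage file> -o fsimage.xml"; striped files then show a clean blockType element in the dump.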
@@ -141,6 +141,7 @@ message INodeSection {
     optional XAttrFeatureProto xAttrs = 9;
     optional uint32 storagePolicyID = 10;
     optional BlockTypeProto blockType = 11;
+    optional uint32 erasureCodingPolicyID = 12;
   }

   message QuotaByStorageTypeEntryProto {
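proto2 has no single-byte scalar, so the byte-sized policy ID travels as a varint-encoded uint32: for the small IDs in use that is one tag byte plus one value byte, and images written before this change simply lack field 12. A hedged size check using the generated classes (assumes they are on the classpath; not part of the patch):

    INodeSection.INodeFile striped = INodeSection.INodeFile.newBuilder()
        .setErasureCodingPolicyID(1)
        .build();
    System.out.println(striped.getSerializedSize());  // 2: tag byte + varint byte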
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.junit.After;
 import org.junit.Before;
|
|||||||
assertEquals(policy, fs.getErasureCodingPolicy(file));
|
assertEquals(policy, fs.getErasureCodingPolicy(file));
|
||||||
assertEquals(policy, fs.getErasureCodingPolicy(dir));
|
assertEquals(policy, fs.getErasureCodingPolicy(dir));
|
||||||
INode iNode = namesystem.getFSDirectory().getINode(file.toString());
|
INode iNode = namesystem.getFSDirectory().getINode(file.toString());
|
||||||
assertEquals(policy.getId(), iNode.asFile().getFileReplication());
|
assertEquals(policy.getId(), iNode.asFile().getErasureCodingPolicyID());
|
||||||
|
assertEquals(INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS,
|
||||||
|
iNode.asFile().getFileReplication());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -155,7 +155,7 @@ private void testPersistHelper(Configuration conf) throws IOException {

   private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
       boolean isUC) throws IOException{
-    // contruct a INode with StripedBlock for saving and loading
+    // Construct an INode with StripedBlock for saving and loading
     fsn.setErasureCodingPolicy("/", testECPolicy, false);
     long id = 123456789;
     byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
@@ -163,29 +163,30 @@ private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
       "testuser_groups", new FsPermission((short)0x755));
     long mtime = 1426222916-3600;
     long atime = 1426222916;
-    BlockInfoContiguous[] blks = new BlockInfoContiguous[0];
-    short replication = testECPolicy.getId();
+    BlockInfoContiguous[] blocks = new BlockInfoContiguous[0];
+    byte erasureCodingPolicyID = testECPolicy.getId();
     long preferredBlockSize = 128*1024*1024;
     INodeFile file = new INodeFile(id, name, permissionStatus, mtime, atime,
-        blks, replication, preferredBlockSize, (byte) 0, BlockType.STRIPED);
+        blocks, null, erasureCodingPolicyID, preferredBlockSize,
+        (byte) 0, BlockType.STRIPED);
     ByteArrayOutputStream bs = new ByteArrayOutputStream();

-    //construct StripedBlocks for the INode
-    BlockInfoStriped[] stripedBlks = new BlockInfoStriped[3];
+    // Construct StripedBlocks for the INode
+    BlockInfoStriped[] stripedBlocks = new BlockInfoStriped[3];
     long stripedBlkId = 10000001;
     long timestamp = mtime+3600;
-    for (int i = 0; i < stripedBlks.length; i++) {
-      stripedBlks[i] = new BlockInfoStriped(
+    for (int i = 0; i < stripedBlocks.length; i++) {
+      stripedBlocks[i] = new BlockInfoStriped(
           new Block(stripedBlkId + i, preferredBlockSize, timestamp),
           testECPolicy);
-      file.addBlock(stripedBlks[i]);
+      file.addBlock(stripedBlocks[i]);
     }

     final String client = "testClient";
     final String clientMachine = "testClientMachine";
     final String path = "testUnderConstructionPath";

-    //save the INode to byte array
+    // Save the INode to byte array
     DataOutput out = new DataOutputStream(bs);
     if (isUC) {
       file.toUnderConstruction(client, clientMachine);
|
|||||||
FSNamesystem fsn = cluster.getNamesystem();
|
FSNamesystem fsn = cluster.getNamesystem();
|
||||||
INodeFile inode = fsn.dir.getINode(file_10_4.toString()).asFile();
|
INodeFile inode = fsn.dir.getINode(file_10_4.toString()).asFile();
|
||||||
assertTrue(inode.isStriped());
|
assertTrue(inode.isStriped());
|
||||||
|
assertEquals(testECPolicy.getId(), inode.getErasureCodingPolicyID());
|
||||||
BlockInfo[] blks = inode.getBlocks();
|
BlockInfo[] blks = inode.getBlocks();
|
||||||
assertEquals(1, blks.length);
|
assertEquals(1, blks.length);
|
||||||
assertTrue(blks[0].isStriped());
|
assertTrue(blks[0].isStriped());
|
||||||
@ -513,6 +515,9 @@ public void testSupportBlockGroup() throws Exception {
|
|||||||
// check the information of file_3_2
|
// check the information of file_3_2
|
||||||
inode = fsn.dir.getINode(file_3_2.toString()).asFile();
|
inode = fsn.dir.getINode(file_3_2.toString()).asFile();
|
||||||
assertTrue(inode.isStriped());
|
assertTrue(inode.isStriped());
|
||||||
|
assertEquals(ErasureCodingPolicyManager.getPolicyByPolicyID(
|
||||||
|
HdfsConstants.RS_3_2_POLICY_ID).getId(),
|
||||||
|
inode.getErasureCodingPolicyID());
|
||||||
blks = inode.getBlocks();
|
blks = inode.getBlocks();
|
||||||
assertEquals(1, blks.length);
|
assertEquals(1, blks.length);
|
||||||
assertTrue(blks[0].isStriped());
|
assertTrue(blks[0].isStriped());
|
||||||
|
@@ -70,6 +70,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;

@@ -104,15 +105,15 @@ INodeFile createINodeFile(short replication, long preferredBlockSize) {
         null, replication, preferredBlockSize);
   }

-  INodeFile createStripedINodeFile(short replication, long preferredBlockSize) {
+  INodeFile createStripedINodeFile(long preferredBlockSize) {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
-        null, replication, preferredBlockSize,
+        null, null, HdfsConstants.RS_6_3_POLICY_ID, preferredBlockSize,
         HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED);
   }

   private static INodeFile createINodeFile(byte storagePolicyID) {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
-        null, (short)3, 1024L, storagePolicyID, CONTIGUOUS);
+        null, (short)3, null, 1024L, storagePolicyID, CONTIGUOUS);
   }

   @Test
|
|||||||
createINodeFile((byte)16);
|
createINodeFile((byte)16);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testContiguousLayoutRedundancy() {
|
||||||
|
INodeFile inodeFile;
|
||||||
|
try {
|
||||||
|
new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, new Short((short) 3) /*replication*/,
|
||||||
|
HdfsConstants.RS_6_3_POLICY_ID /*ec policy*/,
|
||||||
|
preferredBlockSize, HdfsConstants.WARM_STORAGE_POLICY_ID, CONTIGUOUS);
|
||||||
|
fail("INodeFile construction should fail when both replication and " +
|
||||||
|
"ECPolicy requested!");
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
LOG.info("Expected exception: ", iae);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, null /*replication*/, null /*ec policy*/,
|
||||||
|
preferredBlockSize, HdfsConstants.WARM_STORAGE_POLICY_ID, CONTIGUOUS);
|
||||||
|
fail("INodeFile construction should fail when replication param not " +
|
||||||
|
"provided for contiguous layout!");
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
LOG.info("Expected exception: ", iae);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, Short.MAX_VALUE /*replication*/,
|
||||||
|
null /*ec policy*/, preferredBlockSize,
|
||||||
|
HdfsConstants.WARM_STORAGE_POLICY_ID, CONTIGUOUS);
|
||||||
|
fail("INodeFile construction should fail when replication param is " +
|
||||||
|
"beyond the range supported!");
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
LOG.info("Expected exception: ", iae);
|
||||||
|
}
|
||||||
|
|
||||||
|
final Short replication = new Short((short) 3);
|
||||||
|
try {
|
||||||
|
new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, replication, null /*ec policy*/,
|
||||||
|
preferredBlockSize, HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED);
|
||||||
|
fail("INodeFile construction should fail when replication param is " +
|
||||||
|
"provided for striped layout!");
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
LOG.info("Expected exception: ", iae);
|
||||||
|
}
|
||||||
|
|
||||||
|
inodeFile = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, replication, null /*ec policy*/,
|
||||||
|
preferredBlockSize, HdfsConstants.WARM_STORAGE_POLICY_ID, CONTIGUOUS);
|
||||||
|
|
||||||
|
Assert.assertTrue(!inodeFile.isStriped());
|
||||||
|
Assert.assertEquals(replication.shortValue(),
|
||||||
|
inodeFile.getFileReplication());
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Test for the Replication value. Sets a value and checks if it was set
|
* Test for the Replication value. Sets a value and checks if it was set
|
||||||
* correct.
|
* correct.
|
||||||
@ -237,7 +293,7 @@ public void testGetBlockType() {
|
|||||||
preferredBlockSize = 128*1024*1024;
|
preferredBlockSize = 128*1024*1024;
|
||||||
INodeFile inf = createINodeFile(replication, preferredBlockSize);
|
INodeFile inf = createINodeFile(replication, preferredBlockSize);
|
||||||
assertEquals(inf.getBlockType(), CONTIGUOUS);
|
assertEquals(inf.getBlockType(), CONTIGUOUS);
|
||||||
INodeFile striped = createStripedINodeFile(replication, preferredBlockSize);
|
INodeFile striped = createStripedINodeFile(preferredBlockSize);
|
||||||
assertEquals(striped.getBlockType(), STRIPED);
|
assertEquals(striped.getBlockType(), STRIPED);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

+import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
+import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;

 import java.io.IOException;

|
|||||||
|
|
||||||
private static INodeFile createStripedINodeFile() {
|
private static INodeFile createStripedINodeFile() {
|
||||||
return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
|
return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
|
||||||
null, (short)0, 1024L, HdfsConstants.COLD_STORAGE_POLICY_ID,
|
null, null, HdfsConstants.RS_6_3_POLICY_ID, 1024L,
|
||||||
BlockType.STRIPED);
|
HdfsConstants.COLD_STORAGE_POLICY_ID, BlockType.STRIPED);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
@ -95,6 +98,61 @@ public void testBlockStripedTotalBlockCount() {
|
|||||||
assertEquals(9, blockInfoStriped.getTotalBlockNum());
|
assertEquals(9, blockInfoStriped.getTotalBlockNum());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testStripedLayoutRedundancy() {
|
||||||
|
INodeFile inodeFile;
|
||||||
|
try {
|
||||||
|
new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, new Short((short) 3) /*replication*/,
|
||||||
|
HdfsConstants.RS_6_3_POLICY_ID /*ec policy*/,
|
||||||
|
1024L, HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED);
|
||||||
|
fail("INodeFile construction should fail when both replication and " +
|
||||||
|
"ECPolicy requested!");
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
LOG.info("Expected exception: ", iae);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, null /*replication*/, null /*ec policy*/,
|
||||||
|
1024L, HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED);
|
||||||
|
fail("INodeFile construction should fail when EC Policy param not " +
|
||||||
|
"provided for striped layout!");
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
LOG.info("Expected exception: ", iae);
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, null /*replication*/,
|
||||||
|
Byte.MAX_VALUE /*ec policy*/, 1024L,
|
||||||
|
HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED);
|
||||||
|
fail("INodeFile construction should fail when EC Policy is " +
|
||||||
|
"not in the supported list!");
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
LOG.info("Expected exception: ", iae);
|
||||||
|
}
|
||||||
|
|
||||||
|
final Byte ecPolicyID = HdfsConstants.RS_6_3_POLICY_ID;
|
||||||
|
try {
|
||||||
|
new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, null /*replication*/, ecPolicyID,
|
||||||
|
1024L, HdfsConstants.WARM_STORAGE_POLICY_ID, CONTIGUOUS);
|
||||||
|
fail("INodeFile construction should fail when replication param is " +
|
||||||
|
"provided for striped layout!");
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
LOG.info("Expected exception: ", iae);
|
||||||
|
}
|
||||||
|
|
||||||
|
inodeFile = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
|
||||||
|
null, perm, 0L, 0L, null, null /*replication*/, ecPolicyID,
|
||||||
|
1024L, HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED);
|
||||||
|
|
||||||
|
Assert.assertTrue(inodeFile.isStriped());
|
||||||
|
Assert.assertEquals(ecPolicyID.byteValue(),
|
||||||
|
inodeFile.getErasureCodingPolicyID());
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testBlockStripedLength()
|
public void testBlockStripedLength()
|
||||||
throws IOException, InterruptedException {
|
throws IOException, InterruptedException {
|
||||||
|
@@ -48,6 +48,7 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.Random;
 import java.util.Set;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -75,7 +76,10 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -103,6 +107,7 @@ public class TestOfflineImageViewer {
   private static final int FILES_PER_DIR = 4;
   private static final String TEST_RENEWER = "JobTracker";
   private static File originalFsimage = null;
+  private static int filesECCount = 0;

   // namespace as written to dfs, to be compared with viewer's output
   final static HashMap<String, FileStatus> writtenFiles = Maps.newHashMap();
@@ -129,7 +134,7 @@ public static void createOriginalFSImage() throws IOException {
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
         "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     DistributedFileSystem hdfs = cluster.getFileSystem();

@@ -224,9 +229,38 @@ public static void createOriginalFSImage() throws IOException {
           aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
           aclEntry(ACCESS, OTHER, EXECUTE)));

+      // Create an Erasure Coded dir
+      Path ecDir = new Path("/ec");
+      hdfs.mkdirs(ecDir);
+      dirCount++;
+      ErasureCodingPolicy ecPolicy =
+          ErasureCodingPolicyManager.getPolicyByPolicyID(
+              HdfsConstants.XOR_2_1_POLICY_ID);
+      hdfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy);
+      writtenFiles.put(ecDir.toString(), hdfs.getFileStatus(ecDir));
+
+      // Create an empty Erasure Coded file
+      Path emptyECFile = new Path(ecDir, "EmptyECFile.txt");
+      hdfs.create(emptyECFile).close();
+      writtenFiles.put(emptyECFile.toString(),
+          pathToFileEntry(hdfs, emptyECFile.toString()));
+      filesECCount++;
+
+      // Create a small Erasure Coded file
+      Path smallECFile = new Path(ecDir, "SmallECFile.txt");
+      FSDataOutputStream out = hdfs.create(smallECFile);
+      Random r = new Random();
+      byte[] bytes = new byte[1024 * 10];
+      r.nextBytes(bytes);
+      out.write(bytes);
+      writtenFiles.put(smallECFile.toString(),
+          pathToFileEntry(hdfs, smallECFile.toString()));
+      filesECCount++;
+
       // Write results to the fsimage file
       hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
       hdfs.saveNamespace();
+      hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);

       // Determine location of fsimage file
       originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
@@ -292,7 +326,7 @@ public void testFileDistributionCalculator() throws IOException {
     Matcher matcher = p.matcher(outputString);
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalFiles = Integer.parseInt(matcher.group(1));
-    assertEquals(NUM_DIRS * FILES_PER_DIR + 1, totalFiles);
+    assertEquals(NUM_DIRS * FILES_PER_DIR + filesECCount + 1, totalFiles);

     p = Pattern.compile("totalDirectories = (\\d+)\n");
     matcher = p.matcher(outputString);
|
|||||||
url = new URL("http://localhost:" + port + "/foo");
|
url = new URL("http://localhost:" + port + "/foo");
|
||||||
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
|
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
|
||||||
|
|
||||||
|
// Verify the Erasure Coded empty file status
|
||||||
|
Path emptyECFilePath = new Path("/ec/EmptyECFile.txt");
|
||||||
|
FileStatus actualEmptyECFileStatus =
|
||||||
|
webhdfs.getFileStatus(new Path(emptyECFilePath.toString()));
|
||||||
|
FileStatus expectedEmptyECFileStatus = writtenFiles.get(
|
||||||
|
emptyECFilePath.toString());
|
||||||
|
System.out.println(webhdfs.getFileStatus(new Path(emptyECFilePath
|
||||||
|
.toString())));
|
||||||
|
compareFile(expectedEmptyECFileStatus, actualEmptyECFileStatus);
|
||||||
|
|
||||||
|
// Verify the Erasure Coded small file status
|
||||||
|
Path smallECFilePath = new Path("/ec/SmallECFile.txt");
|
||||||
|
FileStatus actualSmallECFileStatus =
|
||||||
|
webhdfs.getFileStatus(new Path(smallECFilePath.toString()));
|
||||||
|
FileStatus expectedSmallECFileStatus = writtenFiles.get(
|
||||||
|
smallECFilePath.toString());
|
||||||
|
compareFile(expectedSmallECFileStatus, actualSmallECFileStatus);
|
||||||
|
|
||||||
// GETFILESTATUS operation
|
// GETFILESTATUS operation
|
||||||
status = webhdfs.getFileStatus(new Path("/dir0/file0"));
|
status = webhdfs.getFileStatus(new Path("/dir0/file0"));
|
||||||
compareFile(expected, status);
|
compareFile(expected, status);
|
||||||
|
@@ -143,6 +143,8 @@ private void testFileSize(int numBytes) throws IOException,
     // Verify space consumed present in BlockInfoStriped
     FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
+    assertEquals(ErasureCodingPolicyManager.getSystemDefaultPolicy().getId(),
+        fileNode.getErasureCodingPolicyID());
     assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
     long actualFileSize = 0;
     for (BlockInfo blockInfo : fileNode.getBlocks()) {