HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped blocks. Contributed by Kai Sasaki.
parent 11585883a9
commit 26773d9d6c
BlockInfoStriped.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 
 /**
@@ -34,6 +35,7 @@
  * array to record the block index for each triplet.
  */
 public class BlockInfoStriped extends BlockInfo {
+  private final int chunkSize = HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE;
   private final short dataBlockNum;
   private final short parityBlockNum;
   /**
@@ -56,7 +58,7 @@ public BlockInfoStriped(Block blk, short dataBlockNum, short parityBlockNum) {
     this.setBlockCollection(b.getBlockCollection());
   }
 
-  short getTotalBlockNum() {
+  public short getTotalBlockNum() {
    return (short) (dataBlockNum + parityBlockNum);
   }
 
@@ -178,6 +180,14 @@ void replaceBlock(BlockInfo newBlock) {
     }
   }
 
+  public long spaceConsumed() {
+    // For a striped block, the total usage should cover both the data
+    // blocks and the parity blocks, because `getNumBytes` only reports
+    // the actual data size.
+    return ((getNumBytes() - 1) / (dataBlockNum * chunkSize) + 1)
+        * chunkSize * parityBlockNum + getNumBytes();
+  }
+
   @Override
   public final boolean isStriped() {
     return true;
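The arithmetic in the new spaceConsumed() method can be checked in isolation. The following sketch is not part of the change; the 6 data + 3 parity layout and the 64 KB cell size are illustrative stand-ins for the real dataBlockNum, parityBlockNum, and HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE values, and the class and method names are invented for the example.

public class StripedSpaceSketch {
  // Mirrors the formula in BlockInfoStriped#spaceConsumed(): the number of
  // stripes needed to hold numBytes of data (rounded up) determines how much
  // parity is written on top of the data itself.
  static long spaceConsumed(long numBytes, int dataBlockNum,
      int parityBlockNum, int chunkSize) {
    return ((numBytes - 1) / ((long) dataBlockNum * chunkSize) + 1)
        * chunkSize * parityBlockNum + numBytes;
  }

  public static void main(String[] args) {
    int chunkSize = 64 * 1024;      // illustrative striping cell size
    int dataBlockNum = 6;           // illustrative 6 data + 3 parity layout
    int parityBlockNum = 3;
    long fileBytes = 1024L * 1024;  // 1 MB of user data
    // 1 MB needs ceil(1 MB / (6 * 64 KB)) = 3 stripes, so parity adds
    // 3 * 3 * 64 KB = 576 KB and the printed total is 1638400 bytes.
    System.out.println(
        spaceConsumed(fileBytes, dataBlockNum, parityBlockNum, chunkSize));
  }
}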
INodeFile.java

@@ -42,6 +42,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
@@ -676,6 +677,11 @@ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
 
     final long ssDeltaNoReplication;
     short replication;
+
+    if (isStriped()) {
+      return computeQuotaUsageWithStriped(bsps, counts);
+    }
+
     if (last < lastSnapshotId) {
       ssDeltaNoReplication = computeFileSize(true, false);
       replication = getFileReplication();
@@ -698,6 +704,23 @@ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
     return counts;
   }
 
+  /**
+   * Compute the quota usage of a striped file.
+   *
+   * @param bsps the block storage policy suite
+   * @param counts the quota counts to be updated
+   * @return the updated quota counts, with the namespace and storage space
+   *         consumed by this striped file added in
+   */
+  public final QuotaCounts computeQuotaUsageWithStriped(
+      BlockStoragePolicySuite bsps, QuotaCounts counts) {
+    long nsDelta = 1;
+    final long ssDelta = storagespaceConsumed();
+    counts.addNameSpace(nsDelta);
+    counts.addStorageSpace(ssDelta);
+    return counts;
+  }
+
   @Override
   public final ContentSummaryComputationContext computeContentSummary(
       final ContentSummaryComputationContext summary) {
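For quota accounting, the striped path above charges the namespace a single entry for the file and charges storage space with the raw bytes returned by storagespaceConsumed(), i.e. data plus parity, with no replication multiplier; the contiguous path instead scales the file size by its replication factor. Assuming storagespaceConsumed() ultimately sums the block-level spaceConsumed() values, the standalone sketch below contrasts the two charges for the same logical file size; the replication factor 3, the 6 data + 3 parity layout, and the 64 KB cell size are assumed example values, not taken from the patch.

public class QuotaChargeSketch {
  public static void main(String[] args) {
    long fileSize = 600L * 1024 * 1024;              // 600 MB of user data

    // Contiguous file: storage space quota is the file size times the
    // replication factor.
    short replication = 3;
    long replicatedCharge = fileSize * replication;  // 1800 MB

    // Striped file: storage space quota is data plus parity, following the
    // same rounding as BlockInfoStriped#spaceConsumed().
    int dataBlockNum = 6;
    int parityBlockNum = 3;
    long chunkSize = 64 * 1024;
    long stripes = (fileSize - 1) / (dataBlockNum * chunkSize) + 1;
    long stripedCharge =
        fileSize + stripes * chunkSize * parityBlockNum; // 900 MB

    System.out.println(replicatedCharge + " vs " + stripedCharge);
  }
}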
@@ -776,23 +799,37 @@ public final long computeFileSizeNotIncludingLastUcBlock() {
    * @return file size
    */
   public final long computeFileSize(boolean includesLastUcBlock,
-      boolean usePreferredBlockSize4LastUcBlock) {
-    if (blocks == null || blocks.length == 0) {
+      boolean usePreferredBlockSize4LastUcBlock) {
+    BlockInfo[] blockInfos = getBlocks();
+    // In case of contiguous blocks
+    if (blockInfos == null || blockInfos.length == 0) {
       return 0;
     }
-    final int last = blocks.length - 1;
+    final int last = blockInfos.length - 1;
     //check if the last block is BlockInfoUnderConstruction
-    long size = blocks[last].getNumBytes();
-    if (blocks[last] instanceof BlockInfoContiguousUnderConstruction) {
-      if (!includesLastUcBlock) {
-        size = 0;
-      } else if (usePreferredBlockSize4LastUcBlock) {
-        size = getPreferredBlockSize();
-      }
-    }
+    long size = blockInfos[last].getNumBytes();
+    if (blockInfos[last] instanceof BlockInfoContiguousUnderConstruction) {
+      if (!includesLastUcBlock) {
+        size = 0;
+      } else if (usePreferredBlockSize4LastUcBlock) {
+        size = getPreferredBlockSize();
+      }
+    } else if (blockInfos[last] instanceof BlockInfoStripedUnderConstruction) {
+      if (!includesLastUcBlock) {
+        size = 0;
+      } else if (usePreferredBlockSize4LastUcBlock) {
+        // A striped block group spans (data blocks num + parity blocks num)
+        // internal blocks, so the size reserved for an under-construction
+        // striped block group is the preferred block size multiplied by
+        // the total number of blocks in the group.
+        BlockInfoStripedUnderConstruction blockInfoStripedUC
+            = (BlockInfoStripedUnderConstruction) blockInfos[last];
+        size = getPreferredBlockSize() * blockInfoStripedUC.getTotalBlockNum();
+      }
+    }
     //sum other blocks
-    for(int i = 0; i < last; i++) {
-      size += blocks[i].getNumBytes();
+    for (int i = 0; i < last; i++) {
+      size += blockInfos[i].getNumBytes();
     }
     return size;
   }
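The main behavioral change in computeFileSize() is how the last, under-construction block is counted when usePreferredBlockSize4LastUcBlock is set: a striped block group under construction reserves the preferred block size once per internal block (data plus parity), via BlockInfoStripedUnderConstruction#getTotalBlockNum(). The sketch below restates that branch logic outside of INodeFile; the 128 MB preferred block size and the 9-block group are assumed example values.

public class LastUcBlockSizeSketch {
  // Mirrors the last-block branch added to INodeFile#computeFileSize();
  // striped is true when the last block is an under-construction striped
  // block group, and totalBlockNum is dataBlockNum + parityBlockNum.
  static long lastUcBlockSize(boolean striped, boolean includesLastUcBlock,
      boolean usePreferredBlockSize4LastUcBlock, long numBytes,
      long preferredBlockSize, int totalBlockNum) {
    if (!includesLastUcBlock) {
      return 0;                 // caller excludes the in-progress block
    }
    if (!usePreferredBlockSize4LastUcBlock) {
      return numBytes;          // charge only what has been written so far
    }
    // Reserve the full preferred size: once for a contiguous block, and once
    // per internal block for a striped block group.
    return striped ? preferredBlockSize * totalBlockNum : preferredBlockSize;
  }

  public static void main(String[] args) {
    long preferred = 128L * 1024 * 1024;  // assumed 128 MB preferred size
    // An under-construction 6 + 3 block group is counted as 9 * 128 MB.
    System.out.println(lastUcBlockSize(true, true, true, 0, preferred, 9));
  }
}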