HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit is for HDFS-8327 and HDFS-8357). Contributed by Zhe Zhang.

Zhe Zhang 2015-05-11 12:22:12 -07:00 committed by Zhe Zhang
parent 51ea117f88
commit 6bacaa9a52
7 changed files with 23 additions and 55 deletions

View File

@@ -88,13 +88,21 @@ DatanodeStorageInfo getStorageInfo(int index) {
   BlockInfo getPrevious(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-    return (BlockInfo) triplets[index*3+1];
+    BlockInfo info = (BlockInfo)triplets[index*3+1];
+    assert info == null ||
+        info.getClass().getName().startsWith(BlockInfo.class.getName()) :
+        "BlockInfo is expected at " + index*3;
+    return info;
   }
 
   BlockInfo getNext(int index) {
     assert this.triplets != null : "BlockInfo is not initialized";
     assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-    return (BlockInfo) triplets[index*3+2];
+    BlockInfo info = (BlockInfo)triplets[index*3+2];
+    assert info == null || info.getClass().getName().startsWith(
+        BlockInfo.class.getName()) :
+        "BlockInfo is expected at " + index*3;
+    return info;
   }
 
   void setStorageInfo(int index, DatanodeStorageInfo storage) {
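A note on what these accessors walk: each replica i of a block owns three consecutive slots in a single Object[] named triplets, and the new asserts lean on the naming convention that every BlockInfo subclass (BlockInfoContiguous, and the striped variant on this branch) has a fully qualified name that starts with BlockInfo's own, so a startsWith check accepts either flavor. A minimal standalone Java sketch of the layout, with an illustrative class name rather than the Hadoop source:

    // Illustrative stand-in for BlockInfo; only the slot arithmetic and the
    // startsWith assert mirror the hunk above.
    class TripletsSketch {
      // Slot layout per replica i:
      //   triplets[3*i]   -> the storage (DatanodeStorageInfo) holding replica i
      //   triplets[3*i+1] -> previous block in that storage's doubly linked list
      //   triplets[3*i+2] -> next block in that storage's doubly linked list
      private final Object[] triplets;

      TripletsSketch(int replication) {
        this.triplets = new Object[3 * replication];
      }

      TripletsSketch getPrevious(int index) {
        assert this.triplets != null : "BlockInfo is not initialized";
        assert index >= 0 && index * 3 + 1 < triplets.length : "Index is out of bound";
        Object info = triplets[index * 3 + 1];
        // Accept null or any subclass whose name begins with this class's name,
        // just as BlockInfoStriped's name begins with BlockInfo's.
        assert info == null
            || info.getClass().getName().startsWith(TripletsSketch.class.getName())
            : "BlockInfo is expected at " + index * 3;
        return (TripletsSketch) info;
      }
    }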

View File

@@ -47,18 +47,6 @@ protected BlockInfoContiguous(BlockInfoContiguous from) {
     this.setBlockCollection(from.getBlockCollection());
   }
 
-  public BlockCollection getBlockCollection() {
-    return bc;
-  }
-
-  public void setBlockCollection(BlockCollection bc) {
-    this.bc = bc;
-  }
-
-  public boolean isDeleted() {
-    return (bc == null);
-  }
-
   public DatanodeDescriptor getDatanode(int index) {
     DatanodeStorageInfo storage = getStorageInfo(index);
     return storage == null ? null : storage.getDatanodeDescriptor();
@@ -70,32 +58,6 @@ DatanodeStorageInfo getStorageInfo(int index) {
     return (DatanodeStorageInfo)triplets[index*3];
   }
 
-  private BlockInfoContiguous getPrevious(int index) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound";
-    BlockInfoContiguous info = (BlockInfoContiguous)triplets[index*3+1];
-    assert info == null ||
-        info.getClass().getName().startsWith(BlockInfoContiguous.class.getName()) :
-        "BlockInfo is expected at " + index*3;
-    return info;
-  }
-
-  BlockInfoContiguous getNext(int index) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
-    BlockInfoContiguous info = (BlockInfoContiguous)triplets[index*3+2];
-    assert info == null || info.getClass().getName().startsWith(
-        BlockInfoContiguous.class.getName()) :
-        "BlockInfo is expected at " + index*3;
-    return info;
-  }
-
-  private void setStorageInfo(int index, DatanodeStorageInfo storage) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert index >= 0 && index*3 < triplets.length : "Index is out of bound";
-    triplets[index*3] = storage;
-  }
-
   /**
    * Return the previous block on the block list for the datanode at
    * position index. Set the previous block on the list to "to".
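Read together with the first file, these deletions are the subclass half of a pull-up refactor: the same getPrevious/getNext/setStorageInfo logic that disappears from BlockInfoContiguous here was added to the BlockInfo base class above, and the block-collection accessors go the same way. A hedged sketch of the resulting shape (illustrative names, not the Hadoop classes):

    abstract class BaseBlock {
      protected Object[] triplets;

      // Shared once here instead of being duplicated in each subclass.
      BaseBlock getPrevious(int index) {
        return (BaseBlock) triplets[index * 3 + 1];
      }

      BaseBlock getNext(int index) {
        return (BaseBlock) triplets[index * 3 + 2];
      }
    }

    // Contiguous replication and the EC branch's striped layout both inherit
    // the list-walking code unchanged.
    class ContiguousBlock extends BaseBlock { }
    class StripedBlock extends BaseBlock { }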

View File

@@ -2477,7 +2477,7 @@ private void processQueuedMessages(Iterable<ReportedBlockInfo> rbis)
       if (rbi.getReportedState() == null) {
         // This is a DELETE_BLOCK request
         DatanodeStorageInfo storageInfo = rbi.getStorageInfo();
-        removeStoredBlock(rbi.getBlock(),
+        removeStoredBlock(getStoredBlock(rbi.getBlock()),
             storageInfo.getDatanodeDescriptor());
       } else {
         processAndHandleReportedBlock(rbi.getStorageInfo(),
@@ -3222,7 +3222,7 @@ private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
           QUEUE_REASON_FUTURE_GENSTAMP);
       return;
     }
-    removeStoredBlock(block, node);
+    removeStoredBlock(getStoredBlock(block), node);
   }
 
   /**
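Both call sites now resolve the reported Block to the namenode's stored BlockInfo before removal. getStoredBlock is BlockManager's own lookup into the blocks map; the standalone sketch below (illustrative scaffolding, only the lookup-then-remove pattern comes from the diff) shows why the indirection matters: the remove path then operates on the namenode's canonical object, which on this branch may be striped rather than contiguous.

    import java.util.HashMap;
    import java.util.Map;

    class StoredBlockLookupSketch {
      static class Block { final long id; Block(long id) { this.id = id; } }
      static class BlockInfo extends Block { BlockInfo(long id) { super(id); } }

      private final Map<Long, BlockInfo> blocksMap = new HashMap<>();

      // Stand-in for BlockManager.getStoredBlock: reported Block -> stored BlockInfo.
      BlockInfo getStoredBlock(Block reported) {
        return blocksMap.get(reported.id); // null if the block is unknown
      }

      void removeStoredBlock(BlockInfo stored, String node) {
        if (stored == null) {
          return; // stale or unknown report; nothing to remove
        }
        blocksMap.remove(stored.id);
      }

      void onDeleteBlockRequest(Block reported, String node) {
        // Mirrors the patched call: removeStoredBlock(getStoredBlock(block), node)
        removeStoredBlock(getStoredBlock(reported), node);
      }
    }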

View File

@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSPacket;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.RemoteBlockReader2;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
@@ -872,7 +873,7 @@ private int initTargetStreams(boolean[] targetsStatus) {
         unbufIn = saslStreams.in;
 
         out = new DataOutputStream(new BufferedOutputStream(unbufOut,
-            HdfsServerConstants.SMALL_BUFFER_SIZE));
+            DFSUtil.getSmallBufferSize(conf)));
         in = new DataInputStream(unbufIn);
 
         DatanodeInfo source = new DatanodeInfo(datanode.getDatanodeId());
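The erasure-coding worker stops using the fixed HdfsServerConstants.SMALL_BUFFER_SIZE and asks DFSUtil.getSmallBufferSize(conf) instead, so the buffer tracks the configured io.file.buffer.size. The helper below is a sketch of that derivation; the exact formula (half the configured size, capped at 512) is an assumption from memory, not quoted from this diff.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    class SmallBufferSizeSketch {
      // Assumed derivation: scale with io.file.buffer.size but stay small,
      // since these streams mostly carry short control messages.
      static int getSmallBufferSize(Configuration conf) {
        int ioFileBufferSize = conf.getInt(
            CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
            CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
        return Math.min(ioFileBufferSize / 2, 512); // assumption, see lead-in
      }
    }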

View File

@@ -706,11 +706,7 @@ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    */
   public final QuotaCounts computeQuotaUsageWithStriped(
       BlockStoragePolicySuite bsps, QuotaCounts counts) {
-    long nsDelta = 1;
-    final long ssDelta = storagespaceConsumed();
-    counts.addNameSpace(nsDelta);
-    counts.addStorageSpace(ssDelta);
-    return counts;
+    return null;
   }
 
   @Override
@@ -979,11 +975,11 @@ void computeQuotaDeltaForTruncate(
     }
 
     long size = 0;
-    for (BlockInfoContiguous b : blocks) {
+    for (BlockInfo b : blocks) {
       size += b.getNumBytes();
     }
 
-    BlockInfoContiguous[] sblocks = null;
+    BlockInfo[] sblocks = null;
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
     if (sf != null) {
       FileDiff diff = sf.getDiffs().getLast();
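Two separate things happen in this file. The first hunk parks computeQuotaUsageWithStriped as a null-returning stub, which fits the commit's stated purpose of resolving merge conflicts rather than finishing striped quota accounting. The second hunk widens the truncate bookkeeping from BlockInfoContiguous to the abstract BlockInfo, so one loop serves both block layouts; a small sketch of that loop (the wrapper class is illustrative):

    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;

    class TruncateSizeSketch {
      static long totalBytes(BlockInfo[] blocks) {
        long size = 0;
        for (BlockInfo b : blocks) {
          size += b.getNumBytes(); // declared on Block, so it works for contiguous and striped alike
        }
        return size;
      }
    }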

View File

@@ -109,8 +109,8 @@ public void testBlockStripedConsumedSpace()
     // a. <Cell Size> * (<Num Stripes> - 1) * <Total Block Num> = 0
     // b. <Num Bytes> % <Num Bytes per Stripes> = 1
     // c. <Last Stripe Length> * <Parity Block Num> = 1 * 3
-    assertEquals(4, inf.storagespaceConsumedWithStriped());
-    assertEquals(4, inf.storagespaceConsumed());
+    assertEquals(4, inf.storagespaceConsumedWithStriped(null));
+    assertEquals(4, inf.storagespaceConsumed(null));
   }
 
   @Test
@@ -134,8 +134,8 @@ public void testMultipleBlockStripedConsumedSpace()
     inf.addBlock(blockInfoStriped1);
     inf.addBlock(blockInfoStriped2);
     // This is the double size of one block in above case.
-    assertEquals(4 * 2, inf.storagespaceConsumedWithStriped());
-    assertEquals(4 * 2, inf.storagespaceConsumed());
+    assertEquals(4 * 2, inf.storagespaceConsumedWithStriped(null));
+    assertEquals(4 * 2, inf.storagespaceConsumed(null));
   }
 
   @Test
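The comments in the first test spell out the striped-space arithmetic; restated as a formula (the names are mine, the three terms are the test's): consumed = full stripes across all blocks + data in the last partial stripe + that last stripe written to each parity block. A sketch:

    class StripedSpaceSketch {
      static long stripedSpaceConsumed(long cellSize, long numStripes,
          long totalBlockNum, long numBytes, long bytesPerStripe,
          long lastStripeLen, long parityBlockNum) {
        long a = cellSize * (numStripes - 1) * totalBlockNum; // full stripes: 0 in the test
        long b = numBytes % bytesPerStripe;                   // last-stripe data: 1
        long c = lastStripeLen * parityBlockNum;              // parity copies: 1 * 3
        return a + b + c;                                     // 0 + 1 + 3 = 4
      }
    }

With one byte written this yields 4, matching both assertions, and the second test doubles it by adding two such blocks. The new null argument is the parameter these methods gained on trunk (presumably the storage policy); passing null keeps the tests policy-agnostic.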

View File

@@ -20,6 +20,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
@@ -77,7 +78,7 @@ public void testTruncateWithoutSnapshot() {
 
   @Test
   public void testTruncateWithSnapshotNoDivergence() {
     INodeFile file = createMockFile(BLOCKSIZE * 2 + BLOCKSIZE / 2, REPLICATION);
-    addSnapshotFeature(file, file.getBlocks());
+    addSnapshotFeature(file, file.getContiguousBlocks());
     // case 4: truncate to 1.5 blocks
     // all the blocks are in snapshot. truncate need to allocate a new block
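The one-line change swaps getBlocks() for getContiguousBlocks(). A hedged reading: on the EC branch getBlocks() is widened to return the abstract BlockInfo[] (contiguous or striped), while snapshot file diffs still record contiguous blocks, so the test asks for the contiguous view explicitly. Sketched as an interface (illustrative, not Hadoop's):

    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;

    interface FileBlocksView {
      BlockInfo[] getBlocks();                     // contiguous or striped
      BlockInfoContiguous[] getContiguousBlocks(); // contiguous-only, what snapshots track
    }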