HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication. (Contributed by Zhe Zhang)

yliu 2015-05-12 21:29:22 +08:00
parent 8badd82ce2
commit 6d5da94841
18 changed files with 66 additions and 53 deletions

View File

@@ -543,6 +543,9 @@ Release 2.8.0 - UNRELEASED
HDFS-8357. Consolidate parameters of INode.CleanSubtree() into a parameter
objects. (Li Lu via wheat9)
+ HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication.
+ (Contributed by Zhe Zhang)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@@ -58,7 +58,7 @@ public interface BlockCollection {
* Get block replication for the collection
* @return block replication value
*/
- public short getBlockReplication();
+ public short getPreferredBlockReplication();
/**
* @return the storage policy ID.
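
The hunk above is the core interface change; the javadoc shown with it ("Get block replication for the collection") is not touched by this hunk. As a reading aid, the renamed accessor's contract could be documented along these lines; the wording below is a suggestion only and not part of the patch:

/**
 * Get the preferred block replication for the collection.
 * @return the preferred (configured) replication factor for the blocks in
 *         this collection; a target value, not the number of replicas that
 *         currently exist in the cluster
 */
public short getPreferredBlockReplication();
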

View File

@@ -380,7 +380,7 @@ public BlockInfoContiguousUnderConstruction convertToBlockUnderConstruction(
if(isComplete()) {
BlockInfoContiguousUnderConstruction ucBlock =
new BlockInfoContiguousUnderConstruction(this,
- getBlockCollection().getBlockReplication(), s, targets);
+ getBlockCollection().getPreferredBlockReplication(), s, targets);
ucBlock.setBlockCollection(getBlockCollection());
return ucBlock;
}

View File

@@ -1173,7 +1173,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
return;
}
short expectedReplicas =
- b.corrupted.getBlockCollection().getBlockReplication();
+ b.corrupted.getBlockCollection().getPreferredBlockReplication();
// Add replica to the data-node if it is not already there
if (storageInfo != null) {
@@ -1348,7 +1348,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
continue;
}
- requiredReplication = bc.getBlockReplication();
+ requiredReplication = bc.getPreferredBlockReplication();
// get a source data-node
containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1432,7 +1432,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
rw.targets = null;
continue;
}
- requiredReplication = bc.getBlockReplication();
+ requiredReplication = bc.getPreferredBlockReplication();
// do not schedule more if enough replicas is already pending
NumberReplicas numReplicas = countNodes(block);
@@ -2584,7 +2584,7 @@ private Block addStoredBlock(final BlockInfoContiguous block,
}
// handle underReplication/overReplication
- short fileReplication = bc.getBlockReplication();
+ short fileReplication = bc.getPreferredBlockReplication();
if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
neededReplications.remove(storedBlock, numCurrentReplica,
num.decommissionedAndDecommissioning(), fileReplication);
@@ -2815,7 +2815,7 @@ private MisReplicationResult processMisReplicatedBlock(BlockInfoContiguous block
}
// calculate current replication
short expectedReplication =
- block.getBlockCollection().getBlockReplication();
+ block.getBlockCollection().getPreferredBlockReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
// add to under-replicated queue if need to be
@@ -3316,7 +3316,7 @@ void processOverReplicatedBlocksOnReCommission(
while(it.hasNext()) {
final Block block = it.next();
BlockCollection bc = blocksMap.getBlockCollection(block);
- short expectedReplication = bc.getBlockReplication();
+ short expectedReplication = bc.getPreferredBlockReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
if (numCurrentReplica > expectedReplication) {
@@ -3430,7 +3430,7 @@ private void updateNeededReplications(final Block block,
* process it as an over replicated block.
*/
public void checkReplication(BlockCollection bc) {
- final short expected = bc.getBlockReplication();
+ final short expected = bc.getPreferredBlockReplication();
for (Block block : bc.getBlocks()) {
final NumberReplicas n = countNodes(block);
if (isNeededReplication(block, expected, n.liveReplicas())) {
@@ -3469,7 +3469,7 @@ public boolean checkBlocksProperlyReplicated(
*/
private int getReplication(Block block) {
final BlockCollection bc = blocksMap.getBlockCollection(block);
- return bc == null? 0: bc.getBlockReplication();
+ return bc == null? 0: bc.getPreferredBlockReplication();
}
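
Every BlockManager call site above fetches the expected (preferred) replication and compares it with the live replica counts reported by countNodes(). The following self-contained Java sketch illustrates that comparison; it is an illustration only, not BlockManager's actual decision logic, which also weighs pending, decommissioning, corrupt and excess replicas:

// Illustration only: classify a block by comparing its preferred (expected)
// replication factor with the number of live replicas.
public class ReplicationCheck {
  static String classify(int liveReplicas, short preferredReplication) {
    if (liveReplicas < preferredReplication) {
      return "under-replicated";   // BlockManager would queue such a block in neededReplications
    } else if (liveReplicas > preferredReplication) {
      return "over-replicated";    // excess replicas become candidates for deletion
    }
    return "properly replicated";
  }

  public static void main(String[] args) {
    System.out.println(classify(2, (short) 3));  // under-replicated
    System.out.println(classify(4, (short) 3));  // over-replicated
    System.out.println(classify(3, (short) 3));  // properly replicated
  }
}
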

View File

@@ -251,7 +251,7 @@ private void setDecommissioned(DatanodeDescriptor dn) {
private boolean isSufficientlyReplicated(BlockInfoContiguous block,
BlockCollection bc,
NumberReplicas numberReplicas) {
- final int numExpected = bc.getBlockReplication();
+ final int numExpected = bc.getPreferredBlockReplication();
final int numLive = numberReplicas.liveReplicas();
if (!blockManager.isNeededReplication(block, numExpected, numLive)) {
// Block doesn't need replication. Skip.
@@ -288,7 +288,7 @@ private static void logBlockReplicationInfo(Block block, BlockCollection bc,
DatanodeDescriptor srcNode, NumberReplicas num,
Iterable<DatanodeStorageInfo> storages) {
int curReplicas = num.liveReplicas();
- int curExpectedReplicas = bc.getBlockReplication();
+ int curExpectedReplicas = bc.getPreferredBlockReplication();
StringBuilder nodeList = new StringBuilder();
for (DatanodeStorageInfo storage : storages) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
@@ -564,8 +564,8 @@ private void processBlocksForDecomInternal(
// Schedule under-replicated blocks for replication if not already
// pending
- if (blockManager.isNeededReplication(block, bc.getBlockReplication(),
- liveReplicas)) {
+ if (blockManager.isNeededReplication(block,
+ bc.getPreferredBlockReplication(), liveReplicas)) {
if (!blockManager.neededReplications.contains(block) &&
blockManager.pendingReplications.getNumReplicas(block) == 0 &&
namesystem.isPopulatingReplQueues()) {
@@ -573,7 +573,7 @@ private void processBlocksForDecomInternal(
blockManager.neededReplications.add(block,
curReplicas,
num.decommissionedAndDecommissioning(),
- bc.getBlockReplication());
+ bc.getPreferredBlockReplication());
}
}

View File

@@ -387,7 +387,7 @@ static Block[] unprotectedSetReplication(
return null;
}
INodeFile file = inode.asFile();
- final short oldBR = file.getBlockReplication();
+ final short oldBR = file.getPreferredBlockReplication();
// before setFileReplication, check for increasing block replication.
// if replication > oldBR, then newBR == replication.
@@ -399,7 +399,7 @@ static Block[] unprotectedSetReplication(
file.setFileReplication(replication, iip.getLatestSnapshotId());
- final short newBR = file.getBlockReplication();
+ final short newBR = file.getPreferredBlockReplication();
// check newBR < oldBR case.
if (newBR < oldBR) {
long dsDelta = file.storagespaceConsumed(null).getStorageSpace() / newBR;
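
A worked example of the quota arithmetic around oldBR and newBR (the rest of unprotectedSetReplication() is not shown in this hunk, so how the delta is applied is an assumption): storagespaceConsumed() already includes the replication factor, dividing it by newBR yields one replica's worth of bytes, and multiplying by the change in replication gives the storage-space adjustment.

// Illustrative arithmetic only; the values and the final quota update are
// assumed, not taken from this patch. A 100 MB file whose preferred
// replication is lowered from 3 to 2:
public class ReplicationQuotaExample {
  public static void main(String[] args) {
    short oldBR = 3;
    short newBR = 2;
    long consumed = 200L * 1024 * 1024;              // storagespaceConsumed(): 100 MB of data x newBR replicas
    long perReplica = consumed / newBR;              // 100 MB, the dsDelta expression shown above
    long quotaDelta = perReplica * (newBR - oldBR);  // -100 MB of storage-space quota presumably released
    System.out.println("storage-space delta: " + quotaDelta + " bytes");
  }
}
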

View File

@@ -167,9 +167,9 @@ private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
INodeFile target, INodeFile[] srcList) {
QuotaCounts deltas = new QuotaCounts.Builder().build();
- final short targetRepl = target.getBlockReplication();
+ final short targetRepl = target.getPreferredBlockReplication();
for (INodeFile src : srcList) {
- short srcRepl = src.getBlockReplication();
+ short srcRepl = src.getPreferredBlockReplication();
long fileSize = src.computeFileSize();
if (targetRepl != srcRepl) {
deltas.addStorageSpace(fileSize * (targetRepl - srcRepl));

View File

@@ -489,7 +489,7 @@ BlockInfoContiguous addBlock(String path, INodesInPath inodesInPath,
// check quota limits and updated space consumed
updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
- fileINode.getBlockReplication(), true);
+ fileINode.getPreferredBlockReplication(), true);
// associate new last block for the file
BlockInfoContiguousUnderConstruction blockInfo =
@@ -546,7 +546,7 @@ boolean unprotectedRemoveBlock(String path, INodesInPath iip,
// update space consumed
updateCount(iip, 0, -fileNode.getPreferredBlockSize(),
- fileNode.getBlockReplication(), true);
+ fileNode.getPreferredBlockReplication(), true);
return true;
}

View File

@@ -971,7 +971,7 @@ private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
}
// add the new block
BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
- newBlock, file.getBlockReplication());
+ newBlock, file.getPreferredBlockReplication());
fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);
fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
@@ -1050,13 +1050,14 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
// what about an old-version fsync() where fsync isn't called
// until several blocks in?
newBI = new BlockInfoContiguousUnderConstruction(
- newBlock, file.getBlockReplication());
+ newBlock, file.getPreferredBlockReplication());
} else {
// OP_CLOSE should add finalized blocks. This code path
// is only executed when loading edits written by prior
// versions of Hadoop. Current versions always log
// OP_ADD operations as each block is allocated.
- newBI = new BlockInfoContiguous(newBlock, file.getBlockReplication());
+ newBI = new BlockInfoContiguous(newBlock,
+ file.getPreferredBlockReplication());
}
fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);

View File

@@ -2106,7 +2106,7 @@ Block prepareFileForTruncate(INodesInPath iip,
// Add new truncateBlock into blocksMap and
// use oldBlock as a source for copy-on-truncate recovery
truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock,
- file.getBlockReplication());
+ file.getPreferredBlockReplication());
truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
truncatedBlockUC.setTruncateBlock(oldBlock);
file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
@@ -2807,7 +2807,7 @@ private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
final BlockInfoContiguous lastBlock = file.getLastBlock();
if (lastBlock != null) {
final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
- final short repl = file.getBlockReplication();
+ final short repl = file.getPreferredBlockReplication();
delta.addStorageSpace(diff * repl);
final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
.getPolicy(file.getStoragePolicyID());

View File

@@ -355,7 +355,7 @@ public final short getFileReplication() {
}
@Override // BlockCollection
- public short getBlockReplication() {
+ public short getPreferredBlockReplication() {
short max = getFileReplication(CURRENT_STATE_ID);
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) {
@@ -728,7 +728,7 @@ public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) {
blocks = allBlocks;
}
- final short replication = getBlockReplication();
+ final short replication = getPreferredBlockReplication();
for (BlockInfoContiguous b : blocks) {
long blockSize = b.isComplete() ? b.getNumBytes() :
getPreferredBlockSize();
@@ -850,10 +850,10 @@ void computeQuotaDeltaForTruncate(
truncatedBytes -= bi.getNumBytes();
}
- delta.addStorageSpace(-truncatedBytes * getBlockReplication());
+ delta.addStorageSpace(-truncatedBytes * getPreferredBlockReplication());
if (bsps != null) {
List<StorageType> types = bsps.chooseStorageTypes(
- getBlockReplication());
+ getPreferredBlockReplication());
for (StorageType t : types) {
if (t.supportTypeQuota()) {
delta.addTypeSpace(t, -truncatedBytes);
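
The truncated INodeFile hunk is what makes the value "preferred" rather than actual: it starts from the file's current replication and, when the file has snapshot copies, is raised to the highest replication any snapshot still records. The simplified, self-contained model below captures that behavior; the class and field names are made up and merely stand in for INodeFile and FileWithSnapshotFeature:

import java.util.Arrays;
import java.util.List;

// Simplified stand-in for INodeFile#getPreferredBlockReplication(): blocks are
// kept at the highest replication factor that the current file or any snapshot
// copy of it still requires. Not the real HDFS implementation.
public class PreferredReplicationModel {
  private final short currentReplication;
  private final List<Short> snapshotReplications;  // stand-in for the snapshot diff list

  PreferredReplicationModel(short current, List<Short> snapshots) {
    this.currentReplication = current;
    this.snapshotReplications = snapshots;
  }

  short getPreferredBlockReplication() {
    short max = currentReplication;
    for (short s : snapshotReplications) {
      if (s > max) {
        max = s;
      }
    }
    return max;
  }

  public static void main(String[] args) {
    // A file created at replication 3, snapshotted, then reduced to replication 1:
    PreferredReplicationModel file =
        new PreferredReplicationModel((short) 1, Arrays.asList((short) 3));
    System.out.println(file.getPreferredBlockReplication());  // prints 3
  }
}

The TestSnapshotReplication hunks at the end of this commit assert the same behavior from the real INodeFile.
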

View File

@@ -257,15 +257,18 @@ public void blockIdCK(String blockId) {
NumberReplicas numberReplicas= bm.countNodes(block);
out.println("Block Id: " + blockId);
out.println("Block belongs to: "+iNode.getFullPathName());
out.println("No. of Expected Replica: " + bc.getBlockReplication());
out.println("No. of Expected Replica: " +
bc.getPreferredBlockReplication());
out.println("No. of live Replica: " + numberReplicas.liveReplicas());
out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
out.println("No. of stale Replica: " + numberReplicas.replicasOnStaleNodes());
out.println("No. of stale Replica: " +
numberReplicas.replicasOnStaleNodes());
out.println("No. of decommissioned Replica: "
+ numberReplicas.decommissioned());
out.println("No. of decommissioning Replica: "
+ numberReplicas.decommissioning());
out.println("No. of corrupted Replica: " + numberReplicas.corruptReplicas());
out.println("No. of corrupted Replica: " +
numberReplicas.corruptReplicas());
//record datanodes that have corrupted block replica
Collection<DatanodeDescriptor> corruptionRecord = null;
if (bm.getCorruptReplicas(block) != null) {

View File

@@ -151,11 +151,12 @@ public QuotaCounts updateQuotaAndCollectBlocks(
long oldStoragespace;
if (removed.snapshotINode != null) {
short replication = removed.snapshotINode.getFileReplication();
- short currentRepl = file.getBlockReplication();
+ short currentRepl = file.getPreferredBlockReplication();
if (replication > currentRepl) {
long oldFileSizeNoRep = currentRepl == 0
? file.computeFileSize(true, true)
- : oldCounts.getStorageSpace() / file.getBlockReplication();
+ : oldCounts.getStorageSpace() /
+ file.getPreferredBlockReplication();
oldStoragespace = oldFileSizeNoRep * replication;
oldCounts.setStorageSpace(oldStoragespace);

View File

@@ -433,7 +433,7 @@ private List<DatanodeDescriptor> startDecommission(int ... indexes) {
private BlockInfoContiguous addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
BlockCollection bc = Mockito.mock(BlockCollection.class);
- Mockito.doReturn((short)3).when(bc).getBlockReplication();
+ Mockito.doReturn((short)3).when(bc).getPreferredBlockReplication();
BlockInfoContiguous blockInfo = blockOnNodes(blockId, nodes);
bm.blocksMap.addBlockCollection(blockInfo, bc);
@@ -740,7 +740,7 @@ private BlockInfoContiguous addBlockToBM(long blkId) {
BlockInfoContiguous blockInfo =
new BlockInfoContiguous(block, (short) 3);
BlockCollection bc = Mockito.mock(BlockCollection.class);
- Mockito.doReturn((short) 3).when(bc).getBlockReplication();
+ Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
bm.blocksMap.addBlockCollection(blockInfo, bc);
return blockInfo;
}
@@ -750,7 +750,7 @@ private BlockInfoContiguous addUcBlockToBM(long blkId) {
BlockInfoContiguousUnderConstruction blockInfo =
new BlockInfoContiguousUnderConstruction(block, (short) 3);
BlockCollection bc = Mockito.mock(BlockCollection.class);
- Mockito.doReturn((short) 3).when(bc).getBlockReplication();
+ Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
bm.blocksMap.addBlockCollection(blockInfo, bc);
return blockInfo;
}

View File

@@ -1171,7 +1171,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
// queue.
BlockInfoContiguousUnderConstruction info = new BlockInfoContiguousUnderConstruction(block1, (short) 1);
BlockCollection bc = mock(BlockCollection.class);
- when(bc.getBlockReplication()).thenReturn((short)1);
+ when(bc.getPreferredBlockReplication()).thenReturn((short)1);
bm.addBlockCollection(info, bc);
// Adding this block will increase its current replication, and that will
@@ -1215,7 +1215,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
final BlockCollection mbc = mock(BlockCollection.class);
when(mbc.getLastBlock()).thenReturn(info);
when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
- when(mbc.getBlockReplication()).thenReturn((short)1);
+ when(mbc.getPreferredBlockReplication()).thenReturn((short)1);
when(mbc.isUnderConstruction()).thenReturn(true);
ContentSummary cs = mock(ContentSummary.class);
when(cs.getLength()).thenReturn((long)1);

View File

@@ -70,7 +70,7 @@ public void testUpdateQuotaAndCollectBlocks() {
// INode only exists in the snapshot
INodeFile snapshotINode = mock(INodeFile.class);
- when(file.getBlockReplication()).thenReturn(REPL_1);
+ when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
when(diff.getSnapshotINode()).thenReturn(snapshotINode);

View File

@@ -841,12 +841,12 @@ private void testCombineSnapshotDiffImpl(Path snapshotRoot, String modDirStr,
}
INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
- assertEquals(REPLICATION_1, nodeFile13.getBlockReplication());
+ assertEquals(REPLICATION_1, nodeFile13.getPreferredBlockReplication());
TestSnapshotBlocksMap.assertBlockCollection(file13.toString(), 1, fsdir,
blockmanager);
INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
- assertEquals(REPLICATION_1, nodeFile12.getBlockReplication());
+ assertEquals(REPLICATION_1, nodeFile12.getPreferredBlockReplication());
}
/** Test deleting snapshots with modification on the metadata of directory */

View File

@@ -40,7 +40,7 @@
/**
* This class tests the replication handling/calculation of snapshots. In
* particular, {@link INodeFile#getFileReplication()} and
- * {@link INodeFile#getBlockReplication()} are tested to make sure
+ * {@link INodeFile#getPreferredBlockReplication()} are tested to make sure
* the number of replication is calculated correctly with/without snapshots.
*/
public class TestSnapshotReplication {
@@ -81,7 +81,7 @@ public void tearDown() throws Exception {
/**
* Check the replication of a given file. We test both
* {@link INodeFile#getFileReplication()} and
- * {@link INodeFile#getBlockReplication()}.
+ * {@link INodeFile#getPreferredBlockReplication()}.
*
* @param file The given file
* @param replication The expected replication number
@@ -95,10 +95,11 @@ private void checkFileReplication(Path file, short replication,
// INodeFile#getFileReplication().
short fileReplication = hdfs.getFileStatus(file1).getReplication();
assertEquals(replication, fileReplication);
- // Check the correctness of getBlockReplication()
+ // Check the correctness of getPreferredBlockReplication()
INode inode = fsdir.getINode(file1.toString());
assertTrue(inode instanceof INodeFile);
- assertEquals(blockReplication, ((INodeFile) inode).getBlockReplication());
+ assertEquals(blockReplication,
+ ((INodeFile) inode).getPreferredBlockReplication());
}
/**
@@ -137,16 +138,19 @@ INodeFile getINodeFile(Path p) throws Exception {
*/
private void checkSnapshotFileReplication(Path currentFile,
Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
- // First check the getBlockReplication for the INode of the currentFile
+ // First check the getPreferredBlockReplication for the INode of
+ // the currentFile
final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
- assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
+ assertEquals(expectedBlockRep,
+ inodeOfCurrentFile.getPreferredBlockReplication());
// Then check replication for every snapshot
for (Path ss : snapshotRepMap.keySet()) {
final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
final INodeFile ssInode = iip.getLastINode().asFile();
// The replication number derived from the
- // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
- assertEquals(expectedBlockRep, ssInode.getBlockReplication());
+ // INodeFileWithLink#getPreferredBlockReplication should
+ // always == expectedBlockRep
+ assertEquals(expectedBlockRep, ssInode.getPreferredBlockReplication());
// Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(),
ssInode.getFileReplication(iip.getPathSnapshotId()));
@@ -218,8 +222,9 @@ public void testReplicationAfterDeletion() throws Exception {
for (Path ss : snapshotRepMap.keySet()) {
final INodeFile ssInode = getINodeFile(ss);
// The replication number derived from the
- // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
- assertEquals(REPLICATION, ssInode.getBlockReplication());
+ // INodeFileWithLink#getPreferredBlockReplication should
+ // always == expectedBlockRep
+ assertEquals(REPLICATION, ssInode.getPreferredBlockReplication());
// Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(),
ssInode.getFileReplication());