diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7cff8d4aa2..cd477af633 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -543,6 +543,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8357. Consolidate parameters of INode.CleanSubtree() into a parameter
     objects. (Li Lu via wheat9)
 
+    HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication.
+    (Contributed by Zhe Zhang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e9baf8535b..c0a959c926 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -58,7 +58,7 @@ public interface BlockCollection {
    * Get block replication for the collection
    * @return block replication value
    */
-  public short getBlockReplication();
+  public short getPreferredBlockReplication();
 
   /**
    * @return the storage policy ID.
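The interface rename above is the heart of this patch: the returned value is the preferred (configured) replication target, not the number of replicas actually present on datanodes, and for a file with snapshots it is the maximum across the current state and all snapshots. A minimal sketch of that contract, with hypothetical names (the real logic is in the INodeFile hunk later in this patch):

    // Simplified illustration only, not the actual HDFS code: the preferred
    // replication is the highest factor recorded for the file, either in its
    // current state or in any snapshot that still references its blocks.
    static short preferredBlockReplication(short current,
        short[] snapshotReplications) {
      short max = current;
      for (short s : snapshotReplications) {
        if (s > max) {
          max = s;
        }
      }
      return max;
    }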
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index df27882aeb..1ba3536f19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -380,7 +380,7 @@ public BlockInfoContiguousUnderConstruction convertToBlockUnderConstruction(
     if(isComplete()) {
       BlockInfoContiguousUnderConstruction ucBlock =
           new BlockInfoContiguousUnderConstruction(this,
-          getBlockCollection().getBlockReplication(), s, targets);
+          getBlockCollection().getPreferredBlockReplication(), s, targets);
       ucBlock.setBlockCollection(getBlockCollection());
       return ucBlock;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9d9a631d10..ab2607b1bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1173,7 +1173,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
       return;
     }
     short expectedReplicas =
-        b.corrupted.getBlockCollection().getBlockReplication();
+        b.corrupted.getBlockCollection().getPreferredBlockReplication();
 
     // Add replica to the data-node if it is not already there
     if (storageInfo != null) {
@@ -1348,7 +1348,7 @@ int computeReplicationWorkForBlocks(List<List<BlockInfoContiguous>> blocksToReplicate) {
             continue;
           }
 
-          requiredReplication = bc.getBlockReplication();
+          requiredReplication = bc.getPreferredBlockReplication();
 
           // get a source data-node
           containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1432,7 +1432,7 @@ int computeReplicationWorkForBlocks(List<List<BlockInfoContiguous>> blocksToReplicate) {
             rw.targets = null;
             continue;
           }
-          requiredReplication = bc.getBlockReplication();
+          requiredReplication = bc.getPreferredBlockReplication();
 
           // do not schedule more if enough replicas is already pending
           NumberReplicas numReplicas = countNodes(block);
@@ -2584,7 +2584,7 @@ private Block addStoredBlock(final BlockInfoContiguous block,
     }
 
     // handle underReplication/overReplication
-    short fileReplication = bc.getBlockReplication();
+    short fileReplication = bc.getPreferredBlockReplication();
     if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
       neededReplications.remove(storedBlock, numCurrentReplica,
           num.decommissionedAndDecommissioning(), fileReplication);
@@ -2815,7 +2815,7 @@ private MisReplicationResult processMisReplicatedBlock(BlockInfoContiguous block
     }
     // calculate current replication
     short expectedReplication =
-        block.getBlockCollection().getBlockReplication();
+        block.getBlockCollection().getPreferredBlockReplication();
     NumberReplicas num = countNodes(block);
     int numCurrentReplica = num.liveReplicas();
     // add to under-replicated queue if need to be
@@ -3316,7 +3316,7 @@ void processOverReplicatedBlocksOnReCommission(
     while(it.hasNext()) {
       final Block block = it.next();
       BlockCollection bc = blocksMap.getBlockCollection(block);
-      short expectedReplication = bc.getBlockReplication();
+      short expectedReplication = bc.getPreferredBlockReplication();
       NumberReplicas num = countNodes(block);
       int numCurrentReplica = num.liveReplicas();
       if (numCurrentReplica > expectedReplication) {
@@ -3430,7 +3430,7 @@ private void updateNeededReplications(final Block block,
    * process it as an over replicated block.
    */
   public void checkReplication(BlockCollection bc) {
-    final short expected = bc.getBlockReplication();
+    final short expected = bc.getPreferredBlockReplication();
     for (Block block : bc.getBlocks()) {
       final NumberReplicas n = countNodes(block);
       if (isNeededReplication(block, expected, n.liveReplicas())) {
@@ -3469,7 +3469,7 @@ public boolean checkBlocksProperlyReplicated(
    */
   private int getReplication(Block block) {
     final BlockCollection bc = blocksMap.getBlockCollection(block);
-    return bc == null? 0: bc.getBlockReplication();
+    return bc == null? 0: bc.getPreferredBlockReplication();
   }
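Every BlockManager call site above consumes the renamed accessor the same way: as the expected replica count compared against live replicas. A hedged sketch of that recurring comparison, with hypothetical helper names (the real checks also weigh decommissioning replicas and pending replication work):

    // Sketch of the comparison pattern, not the actual BlockManager methods.
    static boolean isUnderReplicated(short preferredReplication, int liveReplicas) {
      return liveReplicas < preferredReplication; // replication work is scheduled
    }

    static boolean isOverReplicated(short preferredReplication, int liveReplicas) {
      return liveReplicas > preferredReplication; // excess replicas can be removed
    }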
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 5c9aec76ad..5f7366e5a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -251,7 +251,7 @@ private void setDecommissioned(DatanodeDescriptor dn) {
   private boolean isSufficientlyReplicated(BlockInfoContiguous block,
       BlockCollection bc, NumberReplicas numberReplicas) {
-    final int numExpected = bc.getBlockReplication();
+    final int numExpected = bc.getPreferredBlockReplication();
     final int numLive = numberReplicas.liveReplicas();
     if (!blockManager.isNeededReplication(block, numExpected, numLive)) {
       // Block doesn't need replication. Skip.
@@ -288,7 +288,7 @@ private static void logBlockReplicationInfo(Block block,
       BlockCollection bc, DatanodeDescriptor srcNode, NumberReplicas num,
       Iterable<DatanodeStorageInfo> storages) {
     int curReplicas = num.liveReplicas();
-    int curExpectedReplicas = bc.getBlockReplication();
+    int curExpectedReplicas = bc.getPreferredBlockReplication();
     StringBuilder nodeList = new StringBuilder();
     for (DatanodeStorageInfo storage : storages) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
@@ -564,8 +564,8 @@ private void processBlocksForDecomInternal(
         // Schedule under-replicated blocks for replication if not already
         // pending
-        if (blockManager.isNeededReplication(block, bc.getBlockReplication(),
-            liveReplicas)) {
+        if (blockManager.isNeededReplication(block,
+            bc.getPreferredBlockReplication(), liveReplicas)) {
           if (!blockManager.neededReplications.contains(block) &&
               blockManager.pendingReplications.getNumReplicas(block) == 0 &&
               namesystem.isPopulatingReplQueues()) {
@@ -573,7 +573,7 @@ private void processBlocksForDecomInternal(
             blockManager.neededReplications.add(block,
                 curReplicas,
                 num.decommissionedAndDecommissioning(),
-                bc.getBlockReplication());
+                bc.getPreferredBlockReplication());
           }
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index d01e2c8586..879738d2f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -387,7 +387,7 @@ static Block[] unprotectedSetReplication(
       return null;
     }
     INodeFile file = inode.asFile();
-    final short oldBR = file.getBlockReplication();
+    final short oldBR = file.getPreferredBlockReplication();
 
     // before setFileReplication, check for increasing block replication.
     // if replication > oldBR, then newBR == replication.
@@ -399,7 +399,7 @@ static Block[] unprotectedSetReplication(
 
     file.setFileReplication(replication, iip.getLatestSnapshotId());
 
-    final short newBR = file.getBlockReplication();
+    final short newBR = file.getPreferredBlockReplication();
     // check newBR < oldBR case.
     if (newBR < oldBR) {
       long dsDelta = file.storagespaceConsumed(null).getStorageSpace() / newBR;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 31a6af7b7b..3f22f51e87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -167,9 +167,9 @@ private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
   private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
       INodeFile target, INodeFile[] srcList) {
     QuotaCounts deltas = new QuotaCounts.Builder().build();
-    final short targetRepl = target.getBlockReplication();
+    final short targetRepl = target.getPreferredBlockReplication();
     for (INodeFile src : srcList) {
-      short srcRepl = src.getBlockReplication();
+      short srcRepl = src.getPreferredBlockReplication();
       long fileSize = src.computeFileSize();
       if (targetRepl != srcRepl) {
         deltas.addStorageSpace(fileSize * (targetRepl - srcRepl));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index b289c390ce..c981626236 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -489,7 +489,7 @@ BlockInfoContiguous addBlock(String path, INodesInPath inodesInPath,
 
       // check quota limits and updated space consumed
       updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
-          fileINode.getBlockReplication(), true);
+          fileINode.getPreferredBlockReplication(), true);
 
       // associate new last block for the file
       BlockInfoContiguousUnderConstruction blockInfo =
@@ -546,7 +546,7 @@ boolean unprotectedRemoveBlock(String path, INodesInPath iip,
 
     // update space consumed
     updateCount(iip, 0, -fileNode.getPreferredBlockSize(),
-        fileNode.getBlockReplication(), true);
+        fileNode.getPreferredBlockReplication(), true);
     return true;
   }
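The FSDirConcatOp hunk above shows the renamed value feeding quota math: concatenating sources into a target re-charges each source's bytes at the target's preferred replication. A self-contained sketch of that storage-space delta, assuming hypothetical names and pre-computed sizes:

    // Simplified model of the storage-space term in computeQuotaDeltas.
    static long concatStorageSpaceDelta(short targetRepl, short[] srcRepls,
        long[] srcSizes) {
      long delta = 0;
      for (int i = 0; i < srcRepls.length; i++) {
        if (srcRepls[i] != targetRepl) {
          // Positive when the target replicates more than the source did.
          delta += srcSizes[i] * (targetRepl - srcRepls[i]);
        }
      }
      return delta;
    }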
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 79641881cd..b7a4870308 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -971,7 +971,7 @@ private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
     }
     // add the new block
     BlockInfoContiguous newBI = new BlockInfoContiguousUnderConstruction(
-          newBlock, file.getBlockReplication());
+          newBlock, file.getPreferredBlockReplication());
     fsNamesys.getBlockManager().addBlockCollection(newBI, file);
     file.addBlock(newBI);
     fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
@@ -1050,13 +1050,14 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
           // what about an old-version fsync() where fsync isn't called
           // until several blocks in?
           newBI = new BlockInfoContiguousUnderConstruction(
-              newBlock, file.getBlockReplication());
+              newBlock, file.getPreferredBlockReplication());
         } else {
           // OP_CLOSE should add finalized blocks. This code path
           // is only executed when loading edits written by prior
           // versions of Hadoop. Current versions always log
           // OP_ADD operations as each block is allocated.
-          newBI = new BlockInfoContiguous(newBlock, file.getBlockReplication());
+          newBI = new BlockInfoContiguous(newBlock,
+              file.getPreferredBlockReplication());
         }
         fsNamesys.getBlockManager().addBlockCollection(newBI, file);
         file.addBlock(newBI);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9e30812127..33aaa72f0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2106,7 +2106,7 @@ Block prepareFileForTruncate(INodesInPath iip,
       // Add new truncateBlock into blocksMap and
       // use oldBlock as a source for copy-on-truncate recovery
       truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock,
-          file.getBlockReplication());
+          file.getPreferredBlockReplication());
       truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
       truncatedBlockUC.setTruncateBlock(oldBlock);
       file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock));
@@ -2807,7 +2807,7 @@ private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
     final BlockInfoContiguous lastBlock = file.getLastBlock();
     if (lastBlock != null) {
       final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
-      final short repl = file.getBlockReplication();
+      final short repl = file.getPreferredBlockReplication();
       delta.addStorageSpace(diff * repl);
       final BlockStoragePolicy policy = dir.getBlockStoragePolicySuite()
           .getPolicy(file.getStoragePolicyID());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3790c74f69..44f23bb5f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -355,7 +355,7 @@ public final short getFileReplication() {
   }
 
   @Override // BlockCollection
-  public short getBlockReplication() {
+  public short getPreferredBlockReplication() {
     short max = getFileReplication(CURRENT_STATE_ID);
     FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
     if (sf != null) {
@@ -728,7 +728,7 @@ public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) {
       blocks = allBlocks;
     }
 
-    final short replication = getBlockReplication();
+    final short replication = getPreferredBlockReplication();
     for (BlockInfoContiguous b : blocks) {
       long blockSize = b.isComplete() ? b.getNumBytes() :
           getPreferredBlockSize();
@@ -850,10 +850,10 @@ void computeQuotaDeltaForTruncate(
       truncatedBytes -= bi.getNumBytes();
     }
 
-    delta.addStorageSpace(-truncatedBytes * getBlockReplication());
+    delta.addStorageSpace(-truncatedBytes * getPreferredBlockReplication());
     if (bsps != null) {
       List<StorageType> types = bsps.chooseStorageTypes(
-          getBlockReplication());
+          getPreferredBlockReplication());
       for (StorageType t : types) {
         if (t.supportTypeQuota()) {
           delta.addTypeSpace(t, -truncatedBytes);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 11e89c9abe..61f8fdb286 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -257,15 +257,18 @@ public void blockIdCK(String blockId) {
       NumberReplicas numberReplicas= bm.countNodes(block);
       out.println("Block Id: " + blockId);
       out.println("Block belongs to: "+iNode.getFullPathName());
-      out.println("No. of Expected Replica: " + bc.getBlockReplication());
+      out.println("No. of Expected Replica: " +
+          bc.getPreferredBlockReplication());
       out.println("No. of live Replica: " + numberReplicas.liveReplicas());
       out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
-      out.println("No. of stale Replica: " + numberReplicas.replicasOnStaleNodes());
+      out.println("No. of stale Replica: " +
+          numberReplicas.replicasOnStaleNodes());
       out.println("No. of decommissioned Replica: "
           + numberReplicas.decommissioned());
       out.println("No. of decommissioning Replica: "
           + numberReplicas.decommissioning());
-      out.println("No. of corrupted Replica: " + numberReplicas.corruptReplicas());
+      out.println("No. of corrupted Replica: " +
+          numberReplicas.corruptReplicas());
       //record datanodes that have corrupted block replica
       Collection<DatanodeDescriptor> corruptionRecord = null;
       if (bm.getCorruptReplicas(block) != null) {
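The FileWithSnapshotFeature hunk below divides the charged storage space by the current preferred replication to recover the un-replicated file size, then re-multiplies by the snapshot's higher replication. A sketch of that arithmetic with hypothetical names; note the real code sidesteps the division when currentRepl is zero by recomputing the file size directly:

    // Simplified: recompute the storage charge under the snapshot's replication.
    static long snapshotStorageSpace(long chargedSpace,
        short currentPreferredRepl, short snapshotRepl) {
      long fileSizeNoReplication = chargedSpace / currentPreferredRepl;
      return fileSizeNoReplication * snapshotRepl;
    }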
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 3bb549b22f..213c186e54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
@@ -151,11 +151,12 @@ public QuotaCounts updateQuotaAndCollectBlocks(
     long oldStoragespace;
     if (removed.snapshotINode != null) {
       short replication = removed.snapshotINode.getFileReplication();
-      short currentRepl = file.getBlockReplication();
+      short currentRepl = file.getPreferredBlockReplication();
       if (replication > currentRepl) {
         long oldFileSizeNoRep = currentRepl == 0 ?
             file.computeFileSize(true, true)
-            : oldCounts.getStorageSpace() / file.getBlockReplication();
+            : oldCounts.getStorageSpace() /
+                file.getPreferredBlockReplication();
         oldStoragespace = oldFileSizeNoRep * replication;
         oldCounts.setStorageSpace(oldStoragespace);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 1e09e19ebb..58210c1ffb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -433,7 +433,7 @@ private List<DatanodeDescriptor> startDecommission(int ... indexes) {
   private BlockInfoContiguous addBlockOnNodes(long blockId,
       List<DatanodeDescriptor> nodes) {
     BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn((short)3).when(bc).getBlockReplication();
+    Mockito.doReturn((short)3).when(bc).getPreferredBlockReplication();
     BlockInfoContiguous blockInfo = blockOnNodes(blockId, nodes);
 
     bm.blocksMap.addBlockCollection(blockInfo, bc);
@@ -740,7 +740,7 @@ private BlockInfoContiguous addBlockToBM(long blkId) {
     BlockInfoContiguous blockInfo =
         new BlockInfoContiguous(block, (short) 3);
     BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn((short) 3).when(bc).getBlockReplication();
+    Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     return blockInfo;
   }
@@ -750,7 +750,7 @@ private BlockInfoContiguous addUcBlockToBM(long blkId) {
     BlockInfoContiguousUnderConstruction blockInfo =
         new BlockInfoContiguousUnderConstruction(block, (short) 3);
     BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn((short) 3).when(bc).getBlockReplication();
+    Mockito.doReturn((short) 3).when(bc).getPreferredBlockReplication();
     bm.blocksMap.addBlockCollection(blockInfo, bc);
     return blockInfo;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 1d6dad8638..f117ef7813 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1171,7 +1171,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
     // queue.
     BlockInfoContiguousUnderConstruction info =
         new BlockInfoContiguousUnderConstruction(block1, (short) 1);
     BlockCollection bc = mock(BlockCollection.class);
-    when(bc.getBlockReplication()).thenReturn((short)1);
+    when(bc.getPreferredBlockReplication()).thenReturn((short)1);
     bm.addBlockCollection(info, bc);
 
     // Adding this block will increase its current replication, and that will
@@ -1215,7 +1215,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication()
     final BlockCollection mbc = mock(BlockCollection.class);
     when(mbc.getLastBlock()).thenReturn(info);
     when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
-    when(mbc.getBlockReplication()).thenReturn((short)1);
+    when(mbc.getPreferredBlockReplication()).thenReturn((short)1);
     when(mbc.isUnderConstruction()).thenReturn(true);
     ContentSummary cs = mock(ContentSummary.class);
     when(cs.getLength()).thenReturn((long)1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
index 831d65d516..1fc06282c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
@@ -70,7 +70,7 @@ public void testUpdateQuotaAndCollectBlocks() {
 
     // INode only exists in the snapshot
     INodeFile snapshotINode = mock(INodeFile.class);
-    when(file.getBlockReplication()).thenReturn(REPL_1);
+    when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
     Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
     Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
     when(diff.getSnapshotINode()).thenReturn(snapshotINode);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index a679183052..97a77ab4a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -841,12 +841,12 @@ private void testCombineSnapshotDiffImpl(Path snapshotRoot, String modDirStr,
     }
 
     INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
-    assertEquals(REPLICATION_1, nodeFile13.getBlockReplication());
+    assertEquals(REPLICATION_1, nodeFile13.getPreferredBlockReplication());
     TestSnapshotBlocksMap.assertBlockCollection(file13.toString(), 1, fsdir,
         blockmanager);
 
     INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
-    assertEquals(REPLICATION_1, nodeFile12.getBlockReplication());
+    assertEquals(REPLICATION_1, nodeFile12.getPreferredBlockReplication());
   }
 
   /** Test deleting snapshots with modification on the metadata of directory */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
index 5264cb71b0..4eac634b9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
@@ -40,7 +40,7 @@
 /**
  * This class tests the replication handling/calculation of snapshots. In
  * particular, {@link INodeFile#getFileReplication()} and
- * {@link INodeFile#getBlockReplication()} are tested to make sure
+ * {@link INodeFile#getPreferredBlockReplication()} are tested to make sure
  * the number of replication is calculated correctly with/without snapshots.
  */
 public class TestSnapshotReplication {
@@ -81,7 +81,7 @@ public void tearDown() throws Exception {
   /**
    * Check the replication of a given file. We test both
    * {@link INodeFile#getFileReplication()} and
-   * {@link INodeFile#getBlockReplication()}.
+   * {@link INodeFile#getPreferredBlockReplication()}.
    *
    * @param file The given file
    * @param replication The expected replication number
@@ -95,10 +95,11 @@ private void checkFileReplication(Path file, short replication,
     // INodeFile#getFileReplication().
     short fileReplication = hdfs.getFileStatus(file1).getReplication();
     assertEquals(replication, fileReplication);
-    // Check the correctness of getBlockReplication()
+    // Check the correctness of getPreferredBlockReplication()
     INode inode = fsdir.getINode(file1.toString());
     assertTrue(inode instanceof INodeFile);
-    assertEquals(blockReplication, ((INodeFile) inode).getBlockReplication());
+    assertEquals(blockReplication,
+        ((INodeFile) inode).getPreferredBlockReplication());
   }
 
   /**
@@ -137,16 +138,19 @@ INodeFile getINodeFile(Path p) throws Exception {
    */
   private void checkSnapshotFileReplication(Path currentFile,
       Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
-    // First check the getBlockReplication for the INode of the currentFile
+    // First check the getPreferredBlockReplication for the INode of
+    // the currentFile
     final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
-    assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
+    assertEquals(expectedBlockRep,
+        inodeOfCurrentFile.getPreferredBlockReplication());
     // Then check replication for every snapshot
     for (Path ss : snapshotRepMap.keySet()) {
       final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
       final INodeFile ssInode = iip.getLastINode().asFile();
       // The replication number derived from the
-      // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
-      assertEquals(expectedBlockRep, ssInode.getBlockReplication());
+      // INodeFileWithLink#getPreferredBlockReplication should
+      // always == expectedBlockRep
+      assertEquals(expectedBlockRep, ssInode.getPreferredBlockReplication());
       // Also check the number derived from INodeFile#getFileReplication
       assertEquals(snapshotRepMap.get(ss).shortValue(),
           ssInode.getFileReplication(iip.getPathSnapshotId()));
@@ -218,8 +222,9 @@ public void testReplicationAfterDeletion() throws Exception {
     for (Path ss : snapshotRepMap.keySet()) {
       final INodeFile ssInode = getINodeFile(ss);
       // The replication number derived from the
-      // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
-      assertEquals(REPLICATION, ssInode.getBlockReplication());
+      // INodeFileWithLink#getPreferredBlockReplication should
+      // always == expectedBlockRep
+      assertEquals(REPLICATION, ssInode.getPreferredBlockReplication());
       // Also check the number derived from INodeFile#getFileReplication
       assertEquals(snapshotRepMap.get(ss).shortValue(),
           ssInode.getFileReplication());
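Taken together, the TestSnapshotReplication changes pin down the behavior the new name describes. An illustrative fragment, condensed from the test's intent (hypothetical; getINodeFile is this class's helper): after snapshotting at replication 3 and lowering the live file to 1, getFileReplication() tracks the current setting while getPreferredBlockReplication() still reports the snapshot-era maximum.

    hdfs.setReplication(file1, (short) 3);
    hdfs.createSnapshot(sub1, "s1");
    hdfs.setReplication(file1, (short) 1);
    INodeFile inode = getINodeFile(file1);
    assertEquals((short) 1, inode.getFileReplication());
    assertEquals((short) 3, inode.getPreferredBlockReplication());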