From ad06a087131d69d173d8e03dce5c97650a530f2e Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Mon, 15 Oct 2012 13:48:56 +0000
Subject: [PATCH] HDFS-4037. Rename the getReplication() method in
 BlockCollection to getBlockReplication().

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1398288 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt         |  7 +++++--
 .../server/blockmanagement/BlockCollection.java     |  4 +++-
 .../hdfs/server/blockmanagement/BlockInfo.java      |  4 ++--
 .../hdfs/server/blockmanagement/BlockManager.java   | 14 +++++++-------
 .../hadoop/hdfs/server/namenode/FSDirectory.java    | 12 ++++++------
 .../hadoop/hdfs/server/namenode/FSEditLog.java      |  4 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java       |  4 ++--
 .../hdfs/server/namenode/FSImageSerialization.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   | 14 +++++++-------
 .../hadoop/hdfs/server/namenode/INodeFile.java      |  4 ++--
 .../namenode/INodeFileUnderConstruction.java        |  2 +-
 .../hdfs/server/namenode/NamenodeJspHelper.java     |  2 +-
 .../server/blockmanagement/TestBlockManager.java    |  2 +-
 .../hadoop/hdfs/server/namenode/TestINodeFile.java  |  2 +-
 14 files changed, 42 insertions(+), 37 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 870a256079..9778ad25ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -350,8 +350,8 @@ Release 2.0.3-alpha - Unreleased
 
     HDFS-3939. NN RPC address cleanup. (eli)
 
-    HDFS-3373. Change DFSClient input stream socket cache to global static and add
-    a thread to cleanup expired cache entries. (John George via szetszwo)
+    HDFS-3373. Change DFSClient input stream socket cache to global static and
+    add a thread to cleanup expired cache entries. (John George via szetszwo)
 
     HDFS-3896. Add descriptions for dfs.namenode.rpc-address and
     dfs.namenode.servicerpc-address to hdfs-default.xml. (Jeff Lord via atm)
@@ -382,6 +382,9 @@ Release 2.0.3-alpha - Unreleased
     HADOOP-8911. CRLF characters in source and text files.
     (Raja Aluri via suresh)
 
+    HDFS-4037. Rename the getReplication() method in BlockCollection to
+    getBlockReplication(). (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e3eecadce0..f344833a0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -19,12 +19,14 @@
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 
 /**
  * This interface is used by the block manager to expose a
  * few characteristics of a collection of Block/BlockUnderConstruction.
  */
+@InterfaceAudience.Private
 public interface BlockCollection {
   /**
    * Get the last block of the collection.
@@ -56,7 +58,7 @@ public interface BlockCollection {
    * Get block replication for the collection
    * @return block replication value
    */
-  public short getReplication();
+  public short getBlockReplication();
 
   /**
    * Get the name of the collection.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 0739aab970..e08af3dd40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -73,7 +73,7 @@ public BlockInfo(Block blk, int replication) {
    * @param from BlockInfo to copy from.
    */
   protected BlockInfo(BlockInfo from) {
-    this(from, from.bc.getReplication());
+    this(from, from.bc.getBlockReplication());
     this.bc = from.bc;
   }
 
@@ -335,7 +335,7 @@ public BlockInfoUnderConstruction convertToBlockUnderConstruction(
       BlockUCState s, DatanodeDescriptor[] targets) {
     if(isComplete()) {
       return new BlockInfoUnderConstruction(
-          this, getBlockCollection().getReplication(), s, targets);
+          this, getBlockCollection().getBlockReplication(), s, targets);
     }
     // the block is already under construction
     BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bd6a494b9f..9af8eb4cbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -997,7 +997,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
 
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason);
-    if (countNodes(b.stored).liveReplicas() >= bc.getReplication()) {
+    if (countNodes(b.stored).liveReplicas() >= bc.getBlockReplication()) {
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(b, node);
     } else if (namesystem.isPopulatingReplQueues()) {
@@ -1135,7 +1135,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
             continue;
           }
 
-          requiredReplication = bc.getReplication();
+          requiredReplication = bc.getBlockReplication();
 
           // get a source data-node
           containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1221,7 +1221,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
             neededReplications.decrementReplicationIndex(priority);
             continue;
           }
-          requiredReplication = bc.getReplication();
+          requiredReplication = bc.getBlockReplication();
 
           // do not schedule more if enough replicas is already pending
           NumberReplicas numReplicas = countNodes(block);
@@ -2089,7 +2089,7 @@ private Block addStoredBlock(final BlockInfo block,
     }
 
     // handle underReplication/overReplication
-    short fileReplication = bc.getReplication();
+    short fileReplication = bc.getBlockReplication();
     if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
       neededReplications.remove(storedBlock, numCurrentReplica,
           num.decommissionedReplicas(), fileReplication);
@@ -2228,7 +2228,7 @@ private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
       return MisReplicationResult.UNDER_CONSTRUCTION;
     }
     // calculate current replication
-    short expectedReplication = bc.getReplication();
+    short expectedReplication = bc.getBlockReplication();
     NumberReplicas num = countNodes(block);
     int numCurrentReplica = num.liveReplicas();
     // add to under-replicated queue if need to be
@@ -2699,7 +2699,7 @@ void processOverReplicatedBlocksOnReCommission(
     while(it.hasNext()) {
       final Block block = it.next();
       BlockCollection bc = blocksMap.getBlockCollection(block);
-      short expectedReplication = bc.getReplication();
+      short expectedReplication = bc.getBlockReplication();
       NumberReplicas num = countNodes(block);
       int numCurrentReplica = num.liveReplicas();
       if (numCurrentReplica > expectedReplication) {
@@ -2845,7 +2845,7 @@ private int getReplication(Block block) {
     if (bc == null) {
       // block does not belong to any file
       return 0;
     }
-    return bc.getReplication();
+    return bc.getBlockReplication();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index fb9f54d21b..21e98dc524 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -345,13 +345,13 @@ BlockInfo addBlock(String path,
 
     // check quota limits and updated space consumed
     updateCount(inodes, inodes.length-1, 0,
-        fileINode.getPreferredBlockSize()*fileINode.getReplication(), true);
+        fileINode.getPreferredBlockSize()*fileINode.getBlockReplication(), true);
 
     // associate new last block for the file
     BlockInfoUnderConstruction blockInfo =
       new BlockInfoUnderConstruction(
           block,
-          fileINode.getReplication(),
+          fileINode.getBlockReplication(),
           BlockUCState.UNDER_CONSTRUCTION,
           targets);
     getBlockManager().addBlockCollection(blockInfo, fileINode);
@@ -442,7 +442,7 @@ void unprotectedRemoveBlock(String path, INodeFileUnderConstruction fileNode,
     // update space consumed
     INode[] pathINodes = getExistingPathINodes(path);
     updateCount(pathINodes, pathINodes.length-1, 0,
-        -fileNode.getPreferredBlockSize()*fileNode.getReplication(), true);
+        -fileNode.getPreferredBlockSize()*fileNode.getBlockReplication(), true);
   }
 
   /**
@@ -821,7 +821,7 @@ Block[] unprotectedSetReplication(String src,
       return null;
     }
     INodeFile fileNode = (INodeFile)inode;
-    final short oldRepl = fileNode.getReplication();
+    final short oldRepl = fileNode.getBlockReplication();
 
     // check disk quota
     long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl);
@@ -2061,7 +2061,7 @@ private HdfsFileStatus createFileStatus(byte[] path, INode node) {
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getReplication();
+      replication = fileNode.getBlockReplication();
       blocksize = fileNode.getPreferredBlockSize();
     }
     return new HdfsFileStatus(
@@ -2091,7 +2091,7 @@ private HdfsLocatedFileStatus createLocatedFileStatus(
     if (node instanceof INodeFile) {
       INodeFile fileNode = (INodeFile)node;
       size = fileNode.computeFileSize(true);
-      replication = fileNode.getReplication();
+      replication = fileNode.getBlockReplication();
       blocksize = fileNode.getPreferredBlockSize();
       loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
           fileNode.getBlocks(), fileNode.computeFileSize(false),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 2286e2ebb5..8f15d79384 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -657,7 +657,7 @@ private void printStatistics(boolean force) {
   public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
     AddOp op = AddOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getReplication())
+      .setReplication(newNode.getBlockReplication())
       .setModificationTime(newNode.getModificationTime())
      .setAccessTime(newNode.getAccessTime())
      .setBlockSize(newNode.getPreferredBlockSize())
@@ -675,7 +675,7 @@ public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
   public void logCloseFile(String path, INodeFile newNode) {
     CloseOp op = CloseOp.getInstance(cache.get())
       .setPath(path)
-      .setReplication(newNode.getReplication())
+      .setReplication(newNode.getBlockReplication())
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 5a874fc136..945164bf3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -592,13 +592,13 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
         // what about an old-version fsync() where fsync isn't called
         // until several blocks in?
         newBI = new BlockInfoUnderConstruction(
-            newBlock, file.getReplication());
+            newBlock, file.getBlockReplication());
       } else {
         // OP_CLOSE should add finalized blocks. This code path
         // is only executed when loading edits written by prior
         // versions of Hadoop. Current versions always log
         // OP_ADD operations as each block is allocated.
-        newBI = new BlockInfo(newBlock, file.getReplication());
+        newBI = new BlockInfo(newBlock, file.getBlockReplication());
       }
       fsNamesys.getBlockManager().addBlockCollection(newBI, file);
       file.addBlock(newBI);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index a8df0f706c..fc0d6556a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -126,7 +126,7 @@ static void writeINodeUnderConstruction(DataOutputStream out,
       String path)
       throws IOException {
     writeString(path, out);
-    out.writeShort(cons.getReplication());
+    out.writeShort(cons.getBlockReplication());
     out.writeLong(cons.getModificationTime());
     out.writeLong(cons.getPreferredBlockSize());
     int nrBlocks = cons.getBlocks().length;
@@ -175,7 +175,7 @@ static void saveINode2Image(INode node,
                  filePerm);
     } else {
       INodeFile fileINode = (INodeFile)node;
-      out.writeShort(fileINode.getReplication());
+      out.writeShort(fileINode.getBlockReplication());
       out.writeLong(fileINode.getModificationTime());
       out.writeLong(fileINode.getAccessTime());
       out.writeLong(fileINode.getPreferredBlockSize());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a907a57ff7..32d367879a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1411,7 +1411,7 @@ private void concatInternal(String target, String [] srcs)
     }
     si.add(trgInode);
 
-    short repl = trgInode.getReplication();
+    short repl = trgInode.getBlockReplication();
 
     // now check the srcs
     boolean endSrc = false; // final src file doesn't have to have full end block
@@ -1431,10 +1431,10 @@ private void concatInternal(String target, String [] srcs)
       }
 
       // check replication and blocks size
-      if(repl != srcInode.getReplication()) {
+      if(repl != srcInode.getBlockReplication()) {
         throw new IllegalArgumentException(src + " and " + target + " "
             + "should have same replication: "
-            + repl + " vs. " + srcInode.getReplication());
+            + repl + " vs. " + srcInode.getBlockReplication());
       }
 
       //boolean endBlock=false;
@@ -1877,7 +1877,7 @@ LocatedBlock prepareFileForWrite(String src, INodeFile file,
       boolean writeToEditLog) throws IOException {
     INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
                                     file.getLocalNameBytes(),
-                                    file.getReplication(),
+                                    file.getBlockReplication(),
                                     file.getModificationTime(),
                                     file.getPreferredBlockSize(),
                                     file.getBlocks(),
@@ -2191,7 +2191,7 @@ LocatedBlock getAdditionalBlock(String src,
       fileLength = pendingFile.computeContentSummary().getLength();
       blockSize = pendingFile.getPreferredBlockSize();
       clientNode = pendingFile.getClientNode();
-      replication = pendingFile.getReplication();
+      replication = pendingFile.getBlockReplication();
     } finally {
       writeUnlock();
     }
@@ -2435,7 +2435,7 @@ private boolean completeFileInternal(String src,
    * them into invalidateBlocks.
    */
   private void checkReplicationFactor(INodeFile file) {
-    short numExpectedReplicas = file.getReplication();
+    short numExpectedReplicas = file.getBlockReplication();
     Block[] pendingBlocks = file.getBlocks();
     int nrBlocks = pendingBlocks.length;
     for (int i = 0; i < nrBlocks; i++) {
@@ -3154,7 +3154,7 @@ private void commitOrCompleteLastBlock(final INodeFileUnderConstruction fileINod
     if (diff > 0) {
       try {
         String path = leaseManager.findPath(fileINode);
-        dir.updateSpaceConsumed(path, 0, -diff * fileINode.getReplication());
+        dir.updateSpaceConsumed(path, 0, -diff * fileINode.getBlockReplication());
       } catch (IOException e) {
         LOG.warn("Unexpected exception while updating disk space.", e);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index f69c351110..26a0b6ed22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -71,7 +71,7 @@ boolean isDirectory() {
 
   /** @return the replication factor of the file. */
   @Override
-  public short getReplication() {
+  public short getBlockReplication() {
     return (short) ((header & HEADERMASK) >> BLOCKBITS);
   }
 
@@ -215,7 +215,7 @@ private long diskspaceConsumed(Block[] blkArr) {
         isUnderConstruction()) {
       size += getPreferredBlockSize() - blkArr[blkArr.length-1].getNumBytes();
     }
-    return size * getReplication();
+    return size * getBlockReplication();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index 188050898b..aff956e3cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -104,7 +104,7 @@ assert allBlocksComplete() : "non-complete blocks! Blocks are: "
Blocks are: " + blocksAsString(); INodeFile obj = new INodeFile(getPermissionStatus(), getBlocks(), - getReplication(), + getBlockReplication(), getModificationTime(), getModificationTime(), getPreferredBlockSize()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index 07f3f5ff77..348f8dae6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -834,7 +834,7 @@ public void toXML(XMLOutputter doc) throws IOException { doc.endTag(); doc.startTag("replication"); - doc.pcdata(""+inode.getReplication()); + doc.pcdata(""+inode.getBlockReplication()); doc.endTag(); doc.startTag("disk_space_consumed"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 07682fe9b2..22bf9b146b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -379,7 +379,7 @@ private List startDecommission(int ... indexes) { private BlockInfo addBlockOnNodes(long blockId, List nodes) { BlockCollection bc = Mockito.mock(BlockCollection.class); - Mockito.doReturn((short)3).when(bc).getReplication(); + Mockito.doReturn((short)3).when(bc).getBlockReplication(); BlockInfo blockInfo = blockOnNodes(blockId, nodes); bm.blocksMap.addBlockCollection(blockInfo, bc); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 346844da73..bb802185ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -48,7 +48,7 @@ public void testReplication () { FsPermission.getDefault()), null, replication, 0L, 0L, preferredBlockSize); assertEquals("True has to be returned in this case", replication, - inf.getReplication()); + inf.getBlockReplication()); } /**