diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bf51c6a6f7..2117c6d8e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -417,6 +417,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3322. Use HdfsDataInputStream and HdfsDataOutputStream in Hdfs.
     (szetszwo)
 
+    HDFS-3339. Change INode to package private. (John George via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 52042b45e8..a812106413 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -55,7 +55,6 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -2380,7 +2379,7 @@ public void removeStoredBlock(Block block, DatanodeDescriptor node) {
       // necessary. In that case, put block on a possibly-will-
       // be-replicated list.
       //
-      INode fileINode = blocksMap.getINode(block);
+      INodeFile fileINode = blocksMap.getINode(block);
       if (fileINode != null) {
         namesystem.decrementSafeBlockCount(block);
         updateNeededReplications(block, -1, 0);
@@ -2612,7 +2611,7 @@ private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
       NumberReplicas num) {
     int curReplicas = num.liveReplicas();
     int curExpectedReplicas = getReplication(block);
-    INode fileINode = blocksMap.getINode(block);
+    INodeFile fileINode = blocksMap.getINode(block);
     Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
     StringBuilder nodeList = new StringBuilder();
     while (nodeIter.hasNext()) {
@@ -2663,7 +2662,7 @@ boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
     while(it.hasNext()) {
       final Block block = it.next();
-      INode fileINode = blocksMap.getINode(block);
+      INodeFile fileINode = blocksMap.getINode(block);
 
       if (fileINode != null) {
         NumberReplicas num = countNodes(block);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index c4bc1cc464..cdad315f7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -20,6 +20,7 @@
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -34,7 +35,8 @@
  * This is a base INode class containing common fields for file and
  * directory inodes.
  */
-public abstract class INode implements Comparable<byte[]>, FSInodeInfo {
+@InterfaceAudience.Private
+abstract class INode implements Comparable<byte[]>, FSInodeInfo {
   /*
    * The inode name is in java UTF8 encoding;
    * The name in HdfsFileStatus should keep the same encoding as this.