diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1abbf22d42..fac4f44038 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -442,6 +442,9 @@ Branch-2 ( Unreleased changes )
     HDFS-3871. Change NameNodeProxies to use RetryUtils.  (Arun C Murthy
     via szetszwo)
 
+    HDFS-3887. Remove redundant chooseTarget methods in BlockPlacementPolicy.
+    (Jing Zhao via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 01ee2a1222..7215aa5582 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1315,8 +1315,9 @@ public DatanodeDescriptor[] chooseTarget(final String src,
       final HashMap<Node, Node> excludedNodes,
       final long blocksize) throws IOException {
     // choose targets for the new block to be allocated.
-    final DatanodeDescriptor targets[] = blockplacement.chooseTarget(
-        src, numOfReplicas, client, excludedNodes, blocksize);
+    final DatanodeDescriptor targets[] = blockplacement.chooseTarget(src,
+        numOfReplicas, client, new ArrayList<DatanodeDescriptor>(), false,
+        excludedNodes, blocksize);
     if (targets.length < minReplication) {
       throw new IOException("File " + src + " could only be replicated to "
           + targets.length + " nodes instead of minReplication (="
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index e3317467bd..4243bcdc65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -70,21 +70,6 @@ abstract DatanodeDescriptor[] chooseTarget(String srcPath,
                                              List<DatanodeDescriptor> chosenNodes,
                                              long blocksize);
 
-  /**
-   * Same as
-   * {{@link #chooseTarget(String, int, DatanodeDescriptor, List, boolean, HashMap, long)}
-   * with returnChosenNodes equal to false.
-   */
-  final DatanodeDescriptor[] chooseTarget(String srcPath,
-                                          int numOfReplicas,
-                                          DatanodeDescriptor writer,
-                                          List<DatanodeDescriptor> chosenNodes,
-                                          HashMap<Node, Node> excludedNodes,
-                                          long blocksize) {
-    return chooseTarget(srcPath, numOfReplicas, writer, chosenNodes, false,
-        excludedNodes, blocksize);
-  }
-
   /**
    * choose numOfReplicas data nodes for writer
    * to re-replicate a block with size blocksize
@@ -131,7 +116,7 @@ DatanodeDescriptor[] chooseTarget(BlockCollection srcBC,
                                     HashMap<Node, Node> excludedNodes,
                                     long blocksize) {
     return chooseTarget(srcBC.getName(), numOfReplicas, writer,
-                        chosenNodes, excludedNodes, blocksize);
+                        chosenNodes, false, excludedNodes, blocksize);
   }
 
   /**
@@ -198,51 +183,6 @@ public static BlockPlacementPolicy getInstance(Configuration conf,
     replicator.initialize(conf, stats, clusterMap);
     return replicator;
   }
-
-  /**
-   * choose numOfReplicas nodes for writer to replicate
-   * a block with size blocksize
-   * If not, return as many as we can.
-   *
-   * @param srcPath a string representation of the file for which chooseTarget is invoked
-   * @param numOfReplicas number of replicas wanted.
-   * @param writer the writer's machine, null if not in the cluster.
-   * @param blocksize size of the data to be written.
-   * @return array of DatanodeDescriptor instances chosen as targets
-   * and sorted as a pipeline.
-   */
-  DatanodeDescriptor[] chooseTarget(String srcPath,
-                                    int numOfReplicas,
-                                    DatanodeDescriptor writer,
-                                    long blocksize) {
-    return chooseTarget(srcPath, numOfReplicas, writer,
-                        new ArrayList<DatanodeDescriptor>(),
-                        blocksize);
-  }
-
-  /**
-   * choose numOfReplicas nodes for writer to replicate
-   * a block with size blocksize
-   * If not, return as many as we can.
-   *
-   * @param srcPath a string representation of the file for which chooseTarget is invoked
-   * @param numOfReplicas number of replicas wanted.
-   * @param writer the writer's machine, null if not in the cluster.
-   * @param blocksize size of the data to be written.
-   * @param excludedNodes datanodes that should not be considered as targets.
-   * @return array of DatanodeDescriptor instances chosen as targets
-   * and sorted as a pipeline.
-   */
-  public DatanodeDescriptor[] chooseTarget(String srcPath,
-                                           int numOfReplicas,
-                                           DatanodeDescriptor writer,
-                                           HashMap<Node, Node> excludedNodes,
-                                           long blocksize) {
-    return chooseTarget(srcPath, numOfReplicas, writer,
-                        new ArrayList<DatanodeDescriptor>(),
-                        excludedNodes,
-                        blocksize);
-  }
 
   /**
    * Adjust rackmap, moreThanOne, and exactlyOne after removing replica on cur.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 0ee13519f6..912dee1037 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -25,6 +25,7 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.EnumSet;
 
 import javax.servlet.ServletContext;
@@ -163,8 +164,9 @@ static DatanodeInfo chooseDatanode(final NameNode namenode,
     final DatanodeDescriptor clientNode = bm.getDatanodeManager(
         ).getDatanodeByHost(getRemoteAddress());
     if (clientNode != null) {
-      final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy(
-          ).chooseTarget(path, 1, clientNode, null, blocksize);
+      final DatanodeDescriptor[] datanodes = bm.getBlockPlacementPolicy()
+          .chooseTarget(path, 1, clientNode,
+              new ArrayList<DatanodeDescriptor>(), false, null, blocksize);
       if (datanodes.length > 0) {
         return datanodes[0];
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index ada74ce7fc..4d7356eb4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -111,30 +111,30 @@ public void testChooseTarget1() throws Exception {
         HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
 
     targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+        2, dataNodes[0], new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
 
-    targets = replicator.chooseTarget(filename,
-                                      4, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
@@ -249,30 +249,30 @@ public void testChooseTarget3() throws Exception {
         (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[1]);
 
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[1]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      4, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[1]);
     for(int i=1; i<4; i++) {
@@ -305,23 +305,23 @@ public void testChoooseTarget4() throws Exception {
     }
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
 
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     for(int i=0; i<3; i++) {
       assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
@@ -350,21 +350,21 @@ public void testChooseTarget5() throws Exception {
         DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, writerDesc, BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, writerDesc,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
-    
-    targets = replicator.chooseTarget(filename,
-                                      1, writerDesc, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 1, writerDesc,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
-    
-    targets = replicator.chooseTarget(filename,
-                                      2, writerDesc, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 2, writerDesc,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    
-    targets = replicator.chooseTarget(filename,
-                                      3, writerDesc, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 3, writerDesc,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index 5fbd44d5e1..d8efd3a029 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -114,31 +114,31 @@ public void testChooseTarget1() throws Exception {
         HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
 
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameNodeGroup(targets[1], targets[2]));
 
-    targets = replicator.chooseTarget(filename,
-                                      4, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
@@ -220,30 +220,30 @@ public void testChooseTarget3() throws Exception {
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[1]);
 
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[1]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      4, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isNodeGroupAware());
@@ -275,23 +275,23 @@ public void testChooseTarget4() throws Exception {
     }
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
 
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     for(int i=0; i<3; i++) {
       assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
@@ -313,21 +313,21 @@ public void testChooseTarget5() throws Exception {
     setupDataNodeCapacity();
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, NODE, BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, NODE,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
-    
-    targets = replicator.chooseTarget(filename,
-                                      1, NODE, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 1, NODE,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
-    
-    targets = replicator.chooseTarget(filename,
-                                      2, NODE, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 2, NODE,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    
-    targets = replicator.chooseTarget(filename,
-                                      3, NODE, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 3, NODE,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
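
The substance of the patch: the convenience overloads of chooseTarget that only filled in default arguments are deleted, and every caller now invokes the seven-argument form chooseTarget(srcPath, numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes, blocksize) directly. Below is a minimal migration sketch for an out-of-tree caller of one of the removed overloads, assuming the Branch-2 classes touched above are on the classpath; the class and helper names are hypothetical and not part of the patch:

    import java.util.ArrayList;
    import java.util.HashMap;

    import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.net.Node;

    class ChooseTargetMigration {
      // Before HDFS-3887 a caller could write:
      //   policy.chooseTarget(src, 3, writer, excluded, blocksize);
      // Afterwards it passes the two defaults explicitly: an empty list of
      // already-chosen replicas, and returnChosenNodes == false.
      static DatanodeDescriptor[] chooseForWrite(BlockPlacementPolicy policy,
          String src, DatanodeDescriptor writer,
          HashMap<Node, Node> excluded, long blocksize) {
        return policy.chooseTarget(src, 3, writer,
            new ArrayList<DatanodeDescriptor>(), false, excluded, blocksize);
      }
    }

Passing new ArrayList<DatanodeDescriptor>() and false reproduces what the removed overloads forwarded, as their deleted bodies above show, so the refactoring is behavior-preserving at every call site touched here.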