From d2b3ba9b8fb76753fa1b51661dacbde74aa5c6df Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Fri, 24 Feb 2017 15:44:11 -0800
Subject: [PATCH] HDFS-11295. Check storage remaining instead of node
 remaining in BlockPlacementPolicyDefault.chooseReplicaToDelete(). Contributed
 by Marton Elek.

---
 .../BlockPlacementPolicyDefault.java          |  2 +-
 .../blockmanagement/DatanodeStorageInfo.java  |  5 +++
 .../TestReplicationPolicy.java                | 35 +++++++++++++------
 .../TestReplicationPolicyWithNodeGroup.java   | 23 +++++++++---
 4 files changed, 49 insertions(+), 16 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index eb546670ab..767633485e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -968,7 +968,7 @@ public DatanodeStorageInfo chooseReplicaToDelete(
       }
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-      long free = node.getRemaining();
+      long free = storage.getRemaining();
       long lastHeartbeat = node.getLastUpdateMonotonic();
       if (lastHeartbeat < oldestHeartbeat) {
         oldestHeartbeat = lastHeartbeat;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index b4c8aaae7c..ab666b7603 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -388,6 +388,11 @@ static DatanodeStorageInfo getDatanodeStorageInfo(
     return null;
   }
 
+  @VisibleForTesting
+  void setRemainingForTests(int remaining) {
+    this.remaining = remaining;
+  }
+
   static enum AddBlockResult {
     ADDED, REPLACED, ALREADY_EXIST
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 1af013d641..27dcbf1856 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -950,24 +950,31 @@ public void testChooseReplicaToDelete() throws Exception {
     List<DatanodeStorageInfo> replicaList = new ArrayList<>();
     final Map<String, List<DatanodeStorageInfo>> rackMap
         = new HashMap<String, List<DatanodeStorageInfo>>();
-
-    dataNodes[0].setRemaining(4*1024*1024);
+
+    storages[0].setRemainingForTests(4*1024*1024);
+    dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
     replicaList.add(storages[0]);
-
-    dataNodes[1].setRemaining(3*1024*1024);
+
+    storages[1].setRemainingForTests(3*1024*1024);
+    dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
     replicaList.add(storages[1]);
-
-    dataNodes[2].setRemaining(2*1024*1024);
+
+    storages[2].setRemainingForTests(2*1024*1024);
+    dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
     replicaList.add(storages[2]);
-
-    dataNodes[5].setRemaining(1*1024*1024);
+
+    //Even if this node has the most space, because the storage[5] has
+    //the lowest it should be chosen in case of block delete.
+    storages[4].setRemainingForTests(100 * 1024 * 1024);
+    storages[5].setRemainingForTests(512 * 1024);
+    dataNodes[5].setRemaining(calculateRemaining(dataNodes[5]));
     replicaList.add(storages[5]);
-
+
     // Refresh the last update time for all the datanodes
     for (int i = 0; i < dataNodes.length; i++) {
       DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
     }
-
+
     List<DatanodeStorageInfo> first = new ArrayList<>();
     List<DatanodeStorageInfo> second = new ArrayList<>();
     replicator.splitNodesWithRack(replicaList, replicaList, rackMap, first,
@@ -999,6 +1006,14 @@ public void testChooseReplicaToDelete() throws Exception {
     assertEquals(chosen, storages[1]);
   }
 
+  private long calculateRemaining(DatanodeDescriptor dataNode) {
+    long sum = 0;
+    for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){
+      sum += storageInfo.getRemaining();
+    }
+    return sum;
+  }
+
   @Test
   public void testChooseReplicasToDelete() throws Exception {
     Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index 2f184bb143..ebd4b81497 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -625,16 +625,21 @@ public void testRereplicate3() throws Exception {
   public void testChooseReplicaToDelete() throws Exception {
     List<DatanodeStorageInfo> replicaList = new ArrayList<>();
     final Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<>();
-    dataNodes[0].setRemaining(4*1024*1024);
+    storages[0].setRemainingForTests(4*1024*1024);
+    dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
     replicaList.add(storages[0]);
 
-    dataNodes[1].setRemaining(3*1024*1024);
+    storages[1].setRemainingForTests(3*1024*1024);
+    dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
     replicaList.add(storages[1]);
 
-    dataNodes[2].setRemaining(2*1024*1024);
+    storages[2].setRemainingForTests(2*1024*1024);
+    dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
     replicaList.add(storages[2]);
 
-    dataNodes[5].setRemaining(1*1024*1024);
+    storages[4].setRemainingForTests(100 * 1024 * 1024);
+    storages[5].setRemainingForTests(512 * 1024);
+    dataNodes[5].setRemaining(calculateRemaining(dataNodes[5]));
     replicaList.add(storages[5]);
 
     List<DatanodeStorageInfo> first = new ArrayList<>();
@@ -671,7 +676,15 @@ public void testChooseReplicaToDelete() throws Exception {
         first, second, excessTypes, rackMap);
     assertEquals(chosen, storages[5]);
   }
-
+
+  private long calculateRemaining(DatanodeDescriptor dataNode) {
+    long sum = 0;
+    for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){
+      sum += storageInfo.getRemaining();
+    }
+    return sum;
+  }
+
   /**
    * Test replica placement policy in case of boundary topology.
    * Rack 2 has only 1 node group & can't be placed with two replicas
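For context, the heart of the change is a single comparison key: the replica chosen for deletion is now ranked by the free space of the specific storage that holds the replica (storage.getRemaining()), not by the aggregate free space of its datanode (node.getRemaining()). The following is a minimal, self-contained sketch of that selection rule only; the class and method names (StorageView, pickLeastFree) are hypothetical and not part of this patch or of the HDFS API, and the real chooseReplicaToDelete() additionally considers heartbeat staleness, which is omitted here.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

// Illustration only: rank candidate replicas by the remaining space of the
// storage that actually holds the replica, not by the datanode's aggregate
// remaining space. StorageView is a hypothetical stand-in for
// DatanodeStorageInfo; it is not an HDFS class.
public class LeastFreeStorageExample {

  static final class StorageView {
    final String node;           // datanode the storage belongs to
    final long storageRemaining; // bytes free on this particular storage
    final long nodeRemaining;    // bytes free across the whole datanode

    StorageView(String node, long storageRemaining, long nodeRemaining) {
      this.node = node;
      this.storageRemaining = storageRemaining;
      this.nodeRemaining = nodeRemaining;
    }
  }

  // Before the patch the comparison key was the node-level value; afterwards
  // it is the storage-level value, so a nearly full disk on an otherwise
  // roomy node is still the preferred deletion target.
  static StorageView pickLeastFree(List<StorageView> candidates) {
    return candidates.stream()
        .min(Comparator.comparingLong((StorageView s) -> s.storageRemaining))
        .orElseThrow(IllegalArgumentException::new);
  }

  public static void main(String[] args) {
    List<StorageView> replicas = Arrays.asList(
        new StorageView("dn0", 4L * 1024 * 1024, 4L * 1024 * 1024),
        new StorageView("dn1", 3L * 1024 * 1024, 3L * 1024 * 1024),
        // dn5 has the most aggregate free space, but the replica sits on a
        // storage with only 512 KB left, so it is the one to delete.
        new StorageView("dn5", 512L * 1024, 100L * 1024 * 1024));

    System.out.println("delete replica on " + pickLeastFree(replicas).node);
    // prints: delete replica on dn5
  }
}

This mirrors the scenario the updated tests set up: storages[4] gets 100 MB and storages[5] only 512 KB on the same node, so the node-level remaining is large while the storage holding the replica is nearly full, and storages[5] must still be chosen for deletion.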