HDFS-16420. Avoid deleting unique data blocks when deleting redundancy striped blocks. (#3880)

Reviewed-by: litao <tomleescut@gmail.com>
Signed-off-by: Takanobu Asanuma <tasanuma@apache.org>
(cherry picked from commit d8862822d2)
Authored by Jackson Wang on 2022-01-14 21:38:11 +08:00; committed by Takanobu Asanuma
parent 6649c2813e
commit 926222a0d0
4 changed files with 72 additions and 2 deletions
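
In short: when trimming excess replicas, adjustSetsWithChosenReplica previously moved the last storage left on the chosen replica's rack into the exactlyOne set unconditionally. For a striped (EC) block, that storage may hold a unique internal block and may never have been a deletion candidate at all, so a unique data block could be scheduled for deletion. The fix guards the move with the result of moreThanOne.remove(remaining) (see the sketch after the adjustSetsWithChosenReplica hunk below), exposes the STRIPED placement policy through a @VisibleForTesting accessor, and adds a regression test.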


@@ -756,6 +756,11 @@ public BlockPlacementPolicy getBlockPlacementPolicy() {
     return placementPolicies.getPolicy(CONTIGUOUS);
   }
 
+  @VisibleForTesting
+  public BlockPlacementPolicy getStriptedBlockPlacementPolicy() {
+    return placementPolicies.getPolicy(STRIPED);
+  }
+
   public void refreshBlockPlacementPolicy(Configuration conf) {
     BlockPlacementPolicies bpp =
         new BlockPlacementPolicies(conf, datanodeManager.getFSClusterStats(),


@@ -196,9 +196,10 @@ public void adjustSetsWithChosenReplica(
     if (moreThanOne.remove(cur)) {
       if (storages.size() == 1) {
         final DatanodeStorageInfo remaining = storages.get(0);
-        moreThanOne.remove(remaining);
-        exactlyOne.add(remaining);
+        if (moreThanOne.remove(remaining)) {
+          exactlyOne.add(remaining);
+        }
       }
     } else {
       exactlyOne.remove(cur);
     }
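
This guard is the heart of the change. To make the set bookkeeping concrete, here is a minimal, self-contained sketch of the before/after behavior. It is plain Java with string stand-ins, not HDFS classes; the storage names are invented and the rackMap bookkeeping is elided:

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class AdjustSetsSketch {
  public static void main(String[] args) {
    // Deletion candidates, split by rack occupancy:
    // moreThanOne = candidates whose rack holds several replicas,
    // exactlyOne  = candidates whose rack holds a single replica.
    Set<String> moreThanOne = new HashSet<>(List.of("d0", "d1"));
    Set<String> exactlyOne = new HashSet<>(List.of("d4"));

    // After "d0" is chosen for deletion, its rack still lists "d2".
    // "d2" was never a candidate: for a striped block it holds a
    // unique internal block.
    List<String> storagesOnRack = new ArrayList<>(List.of("d2"));
    String cur = "d0";

    if (moreThanOne.remove(cur)) {
      if (storagesOnRack.size() == 1) {
        String remaining = storagesOnRack.get(0);
        // Old code: exactlyOne.add(remaining) ran unconditionally,
        // turning the non-candidate "d2" into a deletion candidate.
        // Fixed code: move it only if it really was in moreThanOne.
        if (moreThanOne.remove(remaining)) {
          exactlyOne.add(remaining);
        }
      }
    } else {
      exactlyOne.remove(cur);
    }

    // With the guard, "d2" stays out of both candidate sets.
    System.out.println(moreThanOne); // [d1]
    System.out.println(exactlyOne);  // [d4]
  }
}
```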


@@ -50,6 +50,7 @@ abstract public class BaseReplicationPolicyTest {
   protected NameNode namenode;
   protected DatanodeManager dnManager;
   protected BlockPlacementPolicy replicator;
+  private BlockPlacementPolicy striptedPolicy;
   protected final String filename = "/dummyfile.txt";
   protected DatanodeStorageInfo[] storages;
   protected String blockPlacementPolicy;
@@ -90,6 +91,7 @@ public void setupCluster() throws Exception {
     final BlockManager bm = namenode.getNamesystem().getBlockManager();
     replicator = bm.getBlockPlacementPolicy();
+    striptedPolicy = bm.getStriptedBlockPlacementPolicy();
     cluster = bm.getDatanodeManager().getNetworkTopology();
     dnManager = bm.getDatanodeManager();
     // construct network topology
@@ -111,6 +113,10 @@ void updateHeartbeatWithUsage() {
     }
   }
 
+  public BlockPlacementPolicy getStriptedPolicy() {
+    return striptedPolicy;
+  }
+
   @After
   public void tearDown() throws Exception {
     namenode.stop();


@@ -1018,6 +1018,64 @@ public void testChooseReplicaToDelete() throws Exception {
     assertEquals(chosen, storages[1]);
   }
 
+  /**
+   * Test that chooseReplicaToDelete and adjustSetsWithChosenReplica
+   * work correctly under the EC STRIPED placement policy.
+   */
+  @Test
+  public void testStripedChooseReplicaToDelete() throws Exception {
+    List<DatanodeStorageInfo> replicaList = new ArrayList<>();
+    List<DatanodeStorageInfo> candidate = new ArrayList<>();
+    final Map<String, List<DatanodeStorageInfo>> rackMap
+        = new HashMap<String, List<DatanodeStorageInfo>>();
+
+    replicaList.add(storages[0]);
+    replicaList.add(storages[1]);
+    replicaList.add(storages[2]);
+    replicaList.add(storages[4]);
+
+    candidate.add(storages[0]);
+    candidate.add(storages[2]);
+    candidate.add(storages[4]);
+
+    // Refresh the last update time for all the datanodes
+    for (int i = 0; i < dataNodes.length; i++) {
+      DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
+    }
+
+    List<DatanodeStorageInfo> first = new ArrayList<>();
+    List<DatanodeStorageInfo> second = new ArrayList<>();
+    BlockPlacementPolicy policy = getStriptedPolicy();
+    policy.splitNodesWithRack(replicaList, candidate, rackMap, first,
+        second);
+    // storages[0] is in the first set as its rack has two replica nodes,
+    // while storages[2] and storages[4] are in the second set.
+    assertEquals(1, first.size());
+    assertEquals(2, second.size());
+    List<StorageType> excessTypes = new ArrayList<>();
+    excessTypes.add(StorageType.DEFAULT);
+    DatanodeStorageInfo chosen = ((BlockPlacementPolicyDefault) policy)
+        .chooseReplicaToDelete(first, second, excessTypes, rackMap);
+    // Among all storages, storages[0] is in the rack that has two replica blocks
+    assertEquals(chosen, storages[0]);
+    policy.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
+    assertEquals(0, first.size());
+    assertEquals(2, second.size());
+
+    // Within the second set, storages[2] should be the next to be deleted.
+    excessTypes.add(StorageType.DEFAULT);
+    chosen = ((BlockPlacementPolicyDefault) policy).chooseReplicaToDelete(
+        first, second, excessTypes, rackMap);
+    assertEquals(chosen, storages[2]);
+    policy.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
+    assertEquals(0, first.size());
+    assertEquals(1, second.size());
+
+    chosen = ((BlockPlacementPolicyDefault) policy).chooseReplicaToDelete(
+        first, second, excessTypes, rackMap);
+    assertEquals(chosen, null);
+  }
+
   private long calculateRemaining(DatanodeDescriptor dataNode) {
     long sum = 0;
     for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){