HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type as source and destination in same datanode. Surendra Singh Lilhore.

This commit is contained in:
Rakesh Radhakrishnan 2017-06-09 14:03:13 +05:30 committed by Uma Maheswara Rao Gangumalla
parent d638a7dc03
commit 20f9c62336
2 changed files with 58 additions and 9 deletions

View File

@@ -501,15 +501,20 @@ private boolean findSourceAndTargetToMove(
// avoid choosing a target which already has this block.
for (int i = 0; i < sourceWithStorageList.size(); i++) {
StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(blockInfo,
existingTypeNodePair.dn, expected);
if (chosenTarget != null) {
sourceNodes.add(existingTypeNodePair.dn);
sourceStorageTypes.add(existingTypeNodePair.storageType);
targetNodes.add(chosenTarget.dn);
targetStorageTypes.add(chosenTarget.storageType);
expected.remove(chosenTarget.storageType);
// TODO: We can increment scheduled block count for this node?
// Check whether the block replica is already placed in the expected
// storage type in this source datanode.
if (!expected.contains(existingTypeNodePair.storageType)) {
StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(
blockInfo, existingTypeNodePair.dn, expected);
if (chosenTarget != null) {
sourceNodes.add(existingTypeNodePair.dn);
sourceStorageTypes.add(existingTypeNodePair.storageType);
targetNodes.add(chosenTarget.dn);
targetStorageTypes.add(chosenTarget.storageType);
expected.remove(chosenTarget.storageType);
// TODO: We can increment scheduled block count for this node?
}
}
// To avoid choosing this excludeNodes as targets later
excludeNodes.add(existingTypeNodePair.dn);

View File

@@ -763,6 +763,50 @@ public void testBlockMoveInSameAndRemoteDatanodesWithWARM() throws Exception {
}
}
/**
 * If a replica with the expected storage type already exists on a source
 * DN, then that DN should be skipped — SPS must not choose the same
 * storage type as both source and destination within one datanode.
 */
@Test(timeout = 300000)
public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource()
throws Exception {
// Three DNs, each with one DISK and one ARCHIVE volume.
StorageType[][] diskTypes = new StorageType[][] {
{StorageType.DISK, StorageType.ARCHIVE},
{StorageType.DISK, StorageType.ARCHIVE},
{StorageType.DISK, StorageType.ARCHIVE}};
try {
hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
storagesPerDatanode, capacity);
dfs = hdfsCluster.getFileSystem();
// 1. Write two replicas on DISK.
DFSTestUtil.createFile(dfs, new Path(file), DEFAULT_BLOCK_SIZE,
(short) 2, 0);
// 2. Change policy to COLD, so the third replica will be written to
// ARCHIVE.
dfs.setStoragePolicy(new Path(file), "COLD");
// 3. Change replication factor to 3.
dfs.setReplication(new Path(file), (short) 3);
// Wait until the third replica lands on ARCHIVE (2 DISK + 1 ARCHIVE).
DFSTestUtil
.waitExpectedStorageType(file, StorageType.DISK, 2, 30000, dfs);
DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000,
dfs);
// 4. Change policy to HOT, so all replicas should end up on DISK.
dfs.setStoragePolicy(new Path(file), "HOT");
// 5. Satisfy the policy. The two DNs already holding DISK replicas must
// be skipped; only the ARCHIVE replica needs to move.
dfs.satisfyStoragePolicy(new Path(file));
// 6. All three replicas should move to DISK successfully.
DFSTestUtil
.waitExpectedStorageType(file, StorageType.DISK, 3, 30000, dfs);
} finally {
shutdownCluster();
}
}
/**
* Tests that movements should not be assigned when there is no space in
* target DN.