From 20f9c623360d8ec534f8ddb0b993b4363a359e89 Mon Sep 17 00:00:00 2001 From: Rakesh Radhakrishnan Date: Fri, 9 Jun 2017 14:03:13 +0530 Subject: [PATCH] HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type as source and destination in same datanode. Surendra Singh Lilhore. --- .../namenode/StoragePolicySatisfier.java | 23 ++++++---- .../namenode/TestStoragePolicySatisfier.java | 44 +++++++++++++++++++ 2 files changed, 58 insertions(+), 9 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java index 9e2a4a0962..1b2afa374a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java @@ -501,15 +501,20 @@ private boolean findSourceAndTargetToMove( // avoid choosing a target which already has this block. for (int i = 0; i < sourceWithStorageList.size(); i++) { StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i); - StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(blockInfo, - existingTypeNodePair.dn, expected); - if (chosenTarget != null) { - sourceNodes.add(existingTypeNodePair.dn); - sourceStorageTypes.add(existingTypeNodePair.storageType); - targetNodes.add(chosenTarget.dn); - targetStorageTypes.add(chosenTarget.storageType); - expected.remove(chosenTarget.storageType); - // TODO: We can increment scheduled block count for this node? + + // Check whether the block replica is already placed in the expected + // storage type in this source datanode. 
+ if (!expected.contains(existingTypeNodePair.storageType)) { + StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode( + blockInfo, existingTypeNodePair.dn, expected); + if (chosenTarget != null) { + sourceNodes.add(existingTypeNodePair.dn); + sourceStorageTypes.add(existingTypeNodePair.storageType); + targetNodes.add(chosenTarget.dn); + targetStorageTypes.add(chosenTarget.storageType); + expected.remove(chosenTarget.storageType); + // TODO: We can increment scheduled block count for this node? + } } // To avoid choosing this excludeNodes as targets later excludeNodes.add(existingTypeNodePair.dn); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java index 8e08a1ed88..f1a4169603 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java @@ -763,6 +763,50 @@ public void testBlockMoveInSameAndRemoteDatanodesWithWARM() throws Exception { } } + /** + * If a replica with the expected storage type already exists in the source + * DN, then that DN should be skipped. + */ + @Test(timeout = 300000) + public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource() + throws Exception { + StorageType[][] diskTypes = new StorageType[][] { + {StorageType.DISK, StorageType.ARCHIVE}, + {StorageType.DISK, StorageType.ARCHIVE}, + {StorageType.DISK, StorageType.ARCHIVE}}; + + try { + hdfsCluster = startCluster(config, diskTypes, diskTypes.length, + storagesPerDatanode, capacity); + dfs = hdfsCluster.getFileSystem(); + // 1. Write two replicas on DISK. + DFSTestUtil.createFile(dfs, new Path(file), DEFAULT_BLOCK_SIZE, + (short) 2, 0); + // 2. 
Change policy to COLD, so third replica will be written to ARCHIVE. + dfs.setStoragePolicy(new Path(file), "COLD"); + + // 3. Change replication factor to 3. + dfs.setReplication(new Path(file), (short) 3); + + DFSTestUtil + .waitExpectedStorageType(file, StorageType.DISK, 2, 30000, dfs); + DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, + dfs); + + // 4. Change policy to HOT, so we can move all the blocks to DISK. + dfs.setStoragePolicy(new Path(file), "HOT"); + + // 5. Satisfy the policy. + dfs.satisfyStoragePolicy(new Path(file)); + + // 6. Blocks should move successfully. + DFSTestUtil + .waitExpectedStorageType(file, StorageType.DISK, 3, 30000, dfs); + } finally { + shutdownCluster(); + } + } + /** * Tests that movements should not be assigned when there is no space in * target DN.