diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 75845824bd..59a130e33a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -23,6 +23,9 @@ HDFS-6584: Archival Storage
     HDFS-6847. Support storage policy on directories and include storage policy
     in HdfsFileStatus. (Jing Zhao via szetszwo)
 
+    HDFS-7072. Fix TestBlockManager and TestStorageMover. (Jing Zhao
+    via szetszwo)
+
 Trunk (Unreleased)
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index c363f4b297..58ca2ace25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -109,7 +109,7 @@ public class DatanodeStorageInfo {
 
   private long capacity;
   private long dfsUsed;
-  private long remaining;
+  private volatile long remaining;
   private long blockPoolUsed;
 
   private volatile BlockInfo blockList = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 9b980dc7f1..7c0623cd46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -666,6 +666,7 @@ public class TestBlockManager {
     excessTypes.add(StorageType.DEFAULT);
     Assert.assertTrue(BlockManager.useDelHint(true, delHint, null,
         moreThan1Racks, excessTypes));
+    excessTypes.remove(0);
     excessTypes.add(StorageType.SSD);
     Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
         moreThan1Racks, excessTypes));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
index ceedfc2881..eeea62b5b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java
@@ -588,6 +588,20 @@ public class TestStorageMover {
     }
   }
 
+  private void waitForAllReplicas(int expectedReplicaNum, Path file,
+      DistributedFileSystem dfs) throws Exception {
+    for (int i = 0; i < 5; i++) {
+      LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0,
+          BLOCK_SIZE);
+      LocatedBlock lb = lbs.get(0);
+      if (lb.getLocations().length >= expectedReplicaNum) {
+        return;
+      } else {
+        Thread.sleep(1000);
+      }
+    }
+  }
+
   /**
    * Test DISK is running out of spaces.
    */
@@ -618,6 +632,7 @@ public class TestStorageMover {
         for (; ; hotFileCount++) {
           final Path p = new Path(pathPolicyMap.hot, "file" + hotFileCount);
           DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
+          waitForAllReplicas(replication, p, test.dfs);
         }
       } catch (IOException e) {
         LOG.info("Expected: hotFileCount=" + hotFileCount, e);
@@ -632,6 +647,7 @@ public class TestStorageMover {
         for (; ; hotFileCount_r1++) {
          final Path p = new Path(pathPolicyMap.hot, "file_r1_" + hotFileCount_r1);
           DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, (short) 1, 0L);
+          waitForAllReplicas(1, p, test.dfs);
         }
       } catch (IOException e) {
         LOG.info("Expected: hotFileCount_r1=" + hotFileCount_r1, e);
@@ -699,6 +715,7 @@ public class TestStorageMover {
         for (; ; coldFileCount++) {
           final Path p = new Path(pathPolicyMap.cold, "file" + coldFileCount);
           DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
+          waitForAllReplicas(replication, p, test.dfs);
         }
       } catch (IOException e) {
         LOG.info("Expected: coldFileCount=" + coldFileCount, e);
@@ -713,6 +730,7 @@ public class TestStorageMover {
         for (; ; coldFileCount_r1++) {
          final Path p = new Path(pathPolicyMap.cold, "file_r1_" + coldFileCount_r1);
           DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, (short) 1, 0L);
+          waitForAllReplicas(1, p, test.dfs);
         }
       } catch (IOException e) {
         LOG.info("Expected: coldFileCount_r1=" + coldFileCount_r1, e);
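The sketch below is not part of the patch. It is a minimal, hypothetical Java example of the cross-thread visibility issue that declaring DatanodeStorageInfo#remaining as volatile guards against: one thread updates the 64-bit free-space counter while another thread reads it. All class, method, and variable names in the sketch are invented for illustration; only the volatile keyword corresponds to the hunk above.

    // Hypothetical sketch (not from the Hadoop source tree): a writer thread
    // updates a shared long counter while a reader thread polls it. Without
    // volatile, the reader may never observe the update, and 64-bit long
    // reads/writes are not guaranteed to be atomic.
    public class RemainingVisibilitySketch {
      private static volatile long remaining = 1L << 30;  // pretend 1 GB free

      public static void main(String[] args) throws InterruptedException {
        Thread reader = new Thread(() -> {
          // Simulates a scheduler repeatedly checking free space on a storage.
          while (remaining > 0) {
            // busy-wait until the writer's update becomes visible
          }
          System.out.println("observed remaining=" + remaining);
        });
        Thread writer = new Thread(() -> {
          // Simulates heartbeat handling recording that the storage is full.
          remaining = 0L;
        });
        reader.start();
        writer.start();
        writer.join();
        reader.join();
      }
    }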