From 4b4200f1f87ad40d9c19ba160f706ffd0470a8d4 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Mon, 29 Apr 2019 14:49:35 -0700
Subject: [PATCH] HDFS-13677. Dynamic refresh Disk configuration results in
 overwriting VolumeMap. Contributed by xuzq.

---
 .../fsdataset/impl/FsDatasetImpl.java         |  2 +-
 .../datanode/fsdataset/impl/ReplicaMap.java   | 14 ++++
 .../datanode/TestDataNodeHotSwapVolumes.java  | 68 +++++++++++++++++++
 .../fsdataset/impl/TestReplicaMap.java        | 22 ++++++
 4 files changed, 105 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 6fe0d0f9ac..9cfdbc3253 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -419,7 +419,7 @@ private void activateVolume(
         LOG.error(errorMsg);
         throw new IOException(errorMsg);
       }
-      volumeMap.addAll(replicaMap);
+      volumeMap.mergeAll(replicaMap);
       storageMap.put(sd.getStorageUuid(),
           new DatanodeStorage(sd.getStorageUuid(),
               DatanodeStorage.State.NORMAL,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 73d3c600b9..786078f135 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -166,6 +166,20 @@ ReplicaInfo addAndGet(String bpid, ReplicaInfo replicaInfo) {
   void addAll(ReplicaMap other) {
     map.putAll(other.map);
   }
+
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   */
+  void mergeAll(ReplicaMap other) {
+    other.map.forEach(
+        (bp, replicaInfos) -> {
+          replicaInfos.forEach(
+              replicaInfo -> add(bp, replicaInfo)
+          );
+        }
+    );
+  }
 
   /**
    * Remove the replica's meta information from the map that matches
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index f34ac51ce6..6672cd2375 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -420,6 +420,74 @@ public void testAddOneNewVolume()
     verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding one volume with some blocks on a running MiniDFSCluster
+   * with only one NameNode to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+      throws IOException, ReconfigurationException,
+      InterruptedException, TimeoutException {
+    startDFSCluster(1, 1);
+    String bpid = cluster.getNamesystem().getBlockPoolId();
+    final int numBlocks = 10;
+
+    Path testFile = new Path("/test");
+    createFile(testFile, numBlocks);
+
+    List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
+        cluster.getAllBlockReports(bpid);
+    assertEquals(1, blockReports.size()); // 1 DataNode
+    assertEquals(2, blockReports.get(0).size()); // 2 volumes
+
+    // Now remove the second volume
+    DataNode dn = cluster.getDataNodes().get(0);
+    Collection<String> oldDirs = getDataDirs(dn);
+    String newDirs = oldDirs.iterator().next(); // Keep the first volume.
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+    assertFileLocksReleased(
+        new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
+
+    // Now create another file. The remaining volume should end up with 15
+    // blocks, while the removed volume still holds 5 blocks on disk.
+    createFile(new Path("/test2"), numBlocks);
+    dn.scheduleAllBlockReport(0);
+    blockReports = cluster.getAllBlockReports(bpid);
+
+    assertEquals(1, blockReports.size()); // 1 DataNode
+    assertEquals(1, blockReports.get(0).size()); // 1 volume
+    for (BlockListAsLongs blockList : blockReports.get(0).values()) {
+      assertEquals(15, blockList.getNumberOfBlocks());
+    }
+
+    // Now re-add the original volume and ensure both volumes' blocks are reported.
+    assertThat(
+        "DN did not update its own config",
+        dn.reconfigurePropertyImpl(
+            DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, String.join(",", oldDirs)),
+        is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+    dn.scheduleAllBlockReport(0);
+    blockReports = cluster.getAllBlockReports(bpid);
+
+    assertEquals(1, blockReports.size()); // 1 DataNode
+    assertEquals(2, blockReports.get(0).size()); // 2 volumes
+
+    // The order of the block reports is not guaranteed. As we expect two, take
+    // the min and max block counts and assert on those.
+    int minNumBlocks = Integer.MAX_VALUE;
+    int maxNumBlocks = Integer.MIN_VALUE;
+    for (BlockListAsLongs blockList : blockReports.get(0).values()) {
+      minNumBlocks = Math.min(minNumBlocks, blockList.getNumberOfBlocks());
+      maxNumBlocks = Math.max(maxNumBlocks, blockList.getNumberOfBlocks());
+    }
+    assertEquals(5, minNumBlocks);
+    assertEquals(15, maxNumBlocks);
+  }
+
   @Test(timeout=60000)
   public void testAddVolumesDuringWrite()
       throws IOException, InterruptedException, TimeoutException,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
index 4fa91b040e..1059c08079 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReplicaMap.java
@@ -108,4 +108,26 @@ public void testRemove() {
     map.add(bpid, new FinalizedReplica(block, null, null));
     assertNotNull(map.remove(bpid, block.getBlockId()));
   }
+
+  @Test
+  public void testMergeAll() {
+    ReplicaMap temReplicaMap = new ReplicaMap(new AutoCloseableLock());
+    Block tmpBlock = new Block(5678, 5678, 5678);
+    temReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
+
+    map.mergeAll(temReplicaMap);
+    assertNotNull(map.get(bpid, 1234));
+    assertNotNull(map.get(bpid, 5678));
+  }
+
+  @Test
+  public void testAddAll() {
+    ReplicaMap temReplicaMap = new ReplicaMap(new AutoCloseableLock());
+    Block tmpBlock = new Block(5678, 5678, 5678);
+    temReplicaMap.add(bpid, new FinalizedReplica(tmpBlock, null, null));
+
+    map.addAll(temReplicaMap);
+    assertNull(map.get(bpid, 1234));
+    assertNotNull(map.get(bpid, 5678));
+  }
 }
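
Why the one-line change in FsDatasetImpl matters: ReplicaMap.addAll() delegates to
Map.putAll(), which replaces the whole entry for each block pool id, so re-adding a
volume overwrites every replica already registered for that pool in volumeMap.
mergeAll() instead folds the incoming replicas in one at a time via add(bp, replicaInfo).
Below is a minimal, self-contained sketch of that difference, using plain HashMaps of
bpid -> (blockId -> replica) as a stand-in for ReplicaMap's internal per-block-pool
collections; the class and variable names are illustrative only, not actual HDFS types.

import java.util.HashMap;
import java.util.Map;

public class MergeVsPutAllDemo {

  // Builds a volume map that already holds replica 1234 in block pool BP-1.
  private static Map<String, Map<Long, String>> existingVolumeMap() {
    Map<String, Map<Long, String>> m = new HashMap<>();
    m.computeIfAbsent("BP-1", k -> new HashMap<>()).put(1234L, "replica-1234");
    return m;
  }

  public static void main(String[] args) {
    // Replicas scanned from the re-added volume: replica 5678 in the same pool.
    Map<String, Map<Long, String>> fromReAddedVolume = new HashMap<>();
    fromReAddedVolume.computeIfAbsent("BP-1", k -> new HashMap<>())
        .put(5678L, "replica-5678");

    // Old behaviour (addAll -> Map.putAll): the whole inner map for "BP-1" is
    // replaced, so the already-registered replica 1234 vanishes.
    Map<String, Map<Long, String>> overwritten = existingVolumeMap();
    overwritten.putAll(fromReAddedVolume);
    System.out.println("after putAll: " + overwritten.get("BP-1").keySet()); // [5678]

    // New behaviour (mergeAll): replicas are folded in one by one, so both
    // 1234 and 5678 remain registered for the block pool.
    Map<String, Map<Long, String>> merged = existingVolumeMap();
    fromReAddedVolume.forEach((bpid, replicas) ->
        replicas.forEach((blockId, replica) ->
            merged.computeIfAbsent(bpid, k -> new HashMap<>()).put(blockId, replica)));
    System.out.println("after merge:  " + merged.get("BP-1").keySet()); // [1234, 5678]
  }
}

Under the old putAll() behaviour the kept volume's 15 blocks would vanish from
volumeMap when the removed volume is re-added, which is the situation
testReAddVolumeWithBlocks() reproduces and testAddAll() documents; with mergeAll()
both volumes' replicas stay registered, as testMergeAll() asserts.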