From 4b2c442d4e34f4708fa2ca442208427ca10798c1 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Fri, 25 Aug 2017 10:41:52 -0700
Subject: [PATCH] HDFS-12319. DirectoryScanner will throw IllegalStateException
 when Multiple BP's are present. Contributed by Brahma Reddy Battula.

---
 .../server/datanode/DirectoryScanner.java     |  5 +-
 .../server/datanode/TestDirectoryScanner.java | 60 +++++++++++++++++--
 2 files changed, 58 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 18188dddbc..966bcb0aea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -606,12 +606,11 @@ public class DirectoryScanner implements Runnable {
     public ScanInfoPerBlockPool call() throws IOException {
       String[] bpList = volume.getBlockPoolList();
       ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
+      perfTimer.start();
+      throttleTimer.start();
       for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<>();
 
-        perfTimer.start();
-        throttleTimer.start();
-
         try {
           result.put(bpid, volume.compileReport(bpid, report, this));
         } catch (InterruptedException ex) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 956406daba..c95c71bf59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.util.AutoCloseableLock;
@@ -316,15 +317,22 @@ public class TestDirectoryScanner {
         missingMemoryBlocks, mismatchBlocks, 0);
   }
 
-  private void scan(long totalBlocks, int diffsize, long missingMetaFile, long missingBlockFile,
-      long missingMemoryBlocks, long mismatchBlocks, long duplicateBlocks) throws IOException {
+  private void scan(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) throws IOException {
     scanner.reconcile();
-
+    verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
+        missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+  }
+
+  private void verifyStats(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) {
     assertTrue(scanner.diffs.containsKey(bpid));
     LinkedList<ScanInfo> diff = scanner.diffs.get(bpid);
     assertTrue(scanner.stats.containsKey(bpid));
     DirectoryScanner.Stats stats = scanner.stats.get(bpid);
-
+
     assertEquals(diffsize, diff.size());
     assertEquals(totalBlocks, stats.totalBlocks);
     assertEquals(missingMetaFile, stats.missingMetaFile);
@@ -1035,4 +1043,48 @@ public class TestDirectoryScanner {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testDirectoryScannerInFederatedCluster() throws Exception {
+    //Create Federated cluster with two nameservices and one DN
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
+        .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
+        .numDataNodes(1).build()) {
+      cluster.waitActive();
+      cluster.transitionToActive(1);
+      cluster.transitionToActive(3);
+      DataNode dataNode = cluster.getDataNodes().get(0);
+      fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
+      //Create one block in first nameservice
+      FileSystem fs = cluster.getFileSystem(1);
+      int bp1Files = 1;
+      writeFile(fs, bp1Files);
+      //Create two blocks in second nameservice
+      FileSystem fs2 = cluster.getFileSystem(3);
+      int bp2Files = 2;
+      writeFile(fs2, bp2Files);
+      //Call the Directory scanner
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
+      scanner.setRetainDiffs(true);
+      scanner.reconcile();
+      //Check blocks in corresponding BP
+      bpid = cluster.getNamesystem(1).getBlockPoolId();
+      verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
+      bpid = cluster.getNamesystem(3).getBlockPoolId();
+      verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+    } finally {
+      if (scanner != null) {
+        scanner.shutdown();
+        scanner = null;
+      }
+    }
+  }
+
+  private void writeFile(FileSystem fs, int numFiles) throws IOException {
+    final String fileName = "/" + GenericTestUtils.getMethodName();
+    final Path filePath = new Path(fileName);
+    for (int i = 0; i < numFiles; i++) {
+      DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);
+    }
+  }
 }
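
Note on the failure mode this patch fixes: perfTimer and throttleTimer are Hadoop StopWatch instances, and StopWatch.start() throws IllegalStateException if the watch is already running. With a single block pool the loop body runs once and the misplaced start() calls are harmless; with two or more block pools (federation), the second iteration starts an already-running watch. Below is a minimal standalone sketch of that mechanism; the nested StopWatch class is a simplified stand-in for org.apache.hadoop.util.StopWatch (written so the sketch compiles without Hadoop on the classpath; the real class's internals and message may differ), and the class name and block-pool IDs are illustrative only.

    // Demonstrates the pre-HDFS-12319 failure mode: start() on a running watch.
    public class MultiBpStopWatchDemo {
      /** Simplified stand-in for org.apache.hadoop.util.StopWatch. */
      static final class StopWatch {
        private boolean isStarted;
        StopWatch start() {
          if (isStarted) {
            throw new IllegalStateException("StopWatch is already running");
          }
          isStarted = true;
          return this;
        }
      }

      public static void main(String[] args) {
        StopWatch perfTimer = new StopWatch();
        StopWatch throttleTimer = new StopWatch();
        // Two block pools, as volume.getBlockPoolList() would return on a
        // DataNode serving a federated cluster with two nameservices.
        String[] bpList = {"BP-1", "BP-2"};
        for (String bpid : bpList) {
          // Pre-patch placement: start() inside the per-block-pool loop.
          // The second iteration finds the watches already running and throws.
          perfTimer.start();
          throttleTimer.start();
          System.out.println("compiled report for " + bpid);
        }
        // Hoisting the two start() calls above the loop, as the patch does,
        // lets the scan run cleanly for any number of block pools.
      }
    }

The new testDirectoryScannerInFederatedCluster test exercises exactly this path: one DataNode registered with two block pools, so the scanner's report compiler iterates the bpid loop twice.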