diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 298918b38c..62f1a507fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -1219,10 +1219,28 @@ void generateInputs(int[] ignore) throws IOException { private ExtendedBlock addBlocks(String fileName, String clientName) throws IOException { + DatanodeInfo[] excludeNodes = null; + DatanodeInfo[] dnInfos = clientProto.getDatanodeReport( + HdfsConstants.DatanodeReportType.LIVE); + if (dnInfos != null && dnInfos.length > 0) { + List<DatanodeInfo> tmpNodes = new ArrayList<>(); + String localHost = DNS.getDefaultHost("default", "default"); + for (DatanodeInfo dnInfo : dnInfos) { + if (!localHost.equals(dnInfo.getHostName()) || + (dnInfo.getXferPort() > datanodes.length)) { + tmpNodes.add(dnInfo); + } + } + + if (tmpNodes.size() > 0) { + excludeNodes = tmpNodes.toArray(new DatanodeInfo[tmpNodes.size()]); + } + } + ExtendedBlock prevBlock = null; for(int jdx = 0; jdx < blocksPerFile; jdx++) { LocatedBlock loc = addBlock(fileName, clientName, - prevBlock, null, HdfsConstants.GRANDFATHER_INODE_ID, null); + prevBlock, excludeNodes, HdfsConstants.GRANDFATHER_INODE_ID, null); prevBlock = loc.getBlock(); for(DatanodeInfo dnInfo : loc.getLocations()) { int dnIdx = dnInfo.getXferPort() - 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java index 44bf5b7878..f7a8d92864 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java @@ -166,4 +166,25 @@ public void testNNThroughputForAppendOp() throws Exception { } } } + + /** + * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster + * for block report operation. + */ + @Test(timeout = 120000) + public void testNNThroughputForBlockReportOp() throws Exception { + final Configuration conf = new HdfsConfiguration(); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); + conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16); + try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf). + numDataNodes(3).build()) { + cluster.waitActive(); + final Configuration benchConf = new HdfsConfiguration(); + benchConf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16); + benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16); + NNThroughputBenchmark.runBenchmark(benchConf, + new String[]{"-fs", cluster.getURI().toString(), "-op", + "blockReport", "-datanodes", "3", "-reports", "2"}); + } + } }