From 8a6d5b9151cd4a922372835b18a9a031c9d3475e Mon Sep 17 00:00:00 2001
From: Hui Fei
Date: Wed, 28 Oct 2020 09:13:25 +0800
Subject: [PATCH] HDFS-15652. Make block size from NNThroughputBenchmark
 configurable (#2416)

---
 .../namenode/NNThroughputBenchmark.java       | 41 +++++++++++++------
 .../namenode/TestNNThroughputBenchmark.java   |  8 +++-
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 245f5be39c..513c6094c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -169,6 +169,7 @@ abstract class OperationStatsBase {
 
     protected final String baseDir;
     protected short replication;
+    protected int blockSize;
     protected int numThreads = 0;        // number of threads
     protected int numOpsRequired = 0;    // number of operations requested
     protected int numOpsExecuted = 0;    // number of operations executed
@@ -230,6 +231,7 @@ abstract class OperationStatsBase {
     OperationStatsBase() {
       baseDir = BASE_DIR_NAME + "/" + getOpName();
       replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
+      blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
       numOpsRequired = 10;
       numThreads = 3;
       logLevel = Level.ERROR;
@@ -517,7 +519,8 @@ class CreateFileStats extends OperationStatsBase {
     // Operation types
     static final String OP_CREATE_NAME = "create";
     static final String OP_CREATE_USAGE =
-        "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+        "-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
+        + " [-close]";
 
     protected FileNameGenerator nameGenerator;
     protected String[][] fileNames;
@@ -542,6 +545,9 @@ void parseArguments(List<String> args) {
         if(args.get(i).equals("-files")) {
           if(i+1 == args.size()) printUsage();
           numOpsRequired = Integer.parseInt(args.get(++i));
+        } else if (args.get(i).equals("-blockSize")) {
+          if(i+1 == args.size()) printUsage();
+          blockSize = Integer.parseInt(args.get(++i));
         } else if(args.get(i).equals("-threads")) {
           if(i+1 == args.size()) printUsage();
           numThreads = Integer.parseInt(args.get(++i));
@@ -598,7 +604,7 @@ long executeOp(int daemonId, int inputIdx, String clientName)
           FsPermission.getDefault(), clientName,
           new EnumSetWritable<CreateFlag>(EnumSet
               .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
-          replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null,
+          replication, blockSize, CryptoProtocolVersion.supported(), null,
           null);
       long end = Time.now();
       for (boolean written = !closeUponCreate; !written;
@@ -720,7 +726,8 @@ class OpenFileStats extends CreateFileStats {
     // Operation types
     static final String OP_OPEN_NAME = "open";
     static final String OP_USAGE_ARGS =
-        " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+        " [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
+        + " [-useExisting]";
     static final String OP_OPEN_USAGE =
         "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -752,6 +759,7 @@ void generateInputs(int[] opsPerThread) throws IOException {
           "-op", "create",
           "-threads", String.valueOf(this.numThreads),
           "-files", String.valueOf(numOpsRequired),
+          "-blockSize", String.valueOf(blockSize),
           "-filesPerDir",
           String.valueOf(nameGenerator.getFilesPerDirectory()),
           "-close"};
@@ -782,7 +790,8 @@ void generateInputs(int[] opsPerThread) throws IOException {
     long executeOp(int daemonId, int inputIdx, String ignore)
         throws IOException {
       long start = Time.now();
-      clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, BLOCK_SIZE);
+      clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L,
+          blockSize);
       long end = Time.now();
       return end-start;
     }
@@ -1072,7 +1081,7 @@ class BlockReportStats extends OperationStatsBase {
     static final String OP_BLOCK_REPORT_NAME = "blockReport";
     static final String OP_BLOCK_REPORT_USAGE =
         "-op blockReport [-datanodes T] [-reports N] " +
-        "[-blocksPerReport B] [-blocksPerFile F]";
+        "[-blocksPerReport B] [-blocksPerFile F] [-blockSize S]";
 
     private int blocksPerReport;
     private int blocksPerFile;
@@ -1119,6 +1128,9 @@ void parseArguments(List<String> args) {
         } else if(args.get(i).equals("-blocksPerFile")) {
           if(i+1 == args.size()) printUsage();
           blocksPerFile = Integer.parseInt(args.get(++i));
+        } else if (args.get(i).equals("-blockSize")) {
+          if(i+1 == args.size()) printUsage();
+          blockSize = Integer.parseInt(args.get(++i));
         } else if(!ignoreUnrelatedOptions)
           printUsage();
       }
@@ -1149,7 +1161,7 @@ void generateInputs(int[] ignore) throws IOException {
         String fileName = nameGenerator.getNextFileName("ThroughputBench");
         clientProto.create(fileName, FsPermission.getDefault(), clientName,
             new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
-            BLOCK_SIZE, CryptoProtocolVersion.supported(), null, null);
+            blockSize, CryptoProtocolVersion.supported(), null, null);
         ExtendedBlock lastBlock = addBlocks(fileName, clientName);
         clientProto.complete(fileName, clientName, lastBlock, HdfsConstants.GRANDFATHER_INODE_ID);
       }
@@ -1260,8 +1272,9 @@ void printResults() {
   class ReplicationStats extends OperationStatsBase {
     static final String OP_REPLICATION_NAME = "replication";
     static final String OP_REPLICATION_USAGE =
-        "-op replication [-datanodes T] [-nodesToDecommission D] " +
-        "[-nodeReplicationLimit C] [-totalBlocks B] [-replication R]";
+        "-op replication [-datanodes T] [-nodesToDecommission D] " +
+        "[-nodeReplicationLimit C] [-totalBlocks B] [-blockSize S] " +
+        "[-replication R]";
 
     private final BlockReportStats blockReportObject;
     private int numDatanodes;
@@ -1286,10 +1299,11 @@ class ReplicationStats extends OperationStatsBase {
           / (numDatanodes*numDatanodes);
 
       String[] blkReportArgs = {
-        "-op", "blockReport",
-        "-datanodes", String.valueOf(numDatanodes),
-        "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
-        "-blocksPerFile", String.valueOf(numDatanodes)};
+          "-op", "blockReport",
+          "-datanodes", String.valueOf(numDatanodes),
+          "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
+          "-blocksPerFile", String.valueOf(numDatanodes),
+          "-blockSize", String.valueOf(blockSize)};
       blockReportObject = new BlockReportStats(Arrays.asList(blkReportArgs));
       numDecommissionedBlocks = 0;
       numPendingBlocks = 0;
@@ -1319,6 +1333,9 @@ void parseArguments(List<String> args) {
         } else if(args.get(i).equals("-replication")) {
           if(i+1 == args.size()) printUsage();
           replication = Short.parseShort(args.get(++i));
+        } else if (args.get(i).equals("-blockSize")) {
+          if(i+1 == args.size()) printUsage();
+          blockSize = Integer.parseInt(args.get(++i));
         } else if(!ignoreUnrelatedOptions)
           printUsage();
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index 9f1ebd122c..ec0d6df232 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -49,6 +49,7 @@ public void cleanUp() {
   @Test
   public void testNNThroughput() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
     File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         nameDir.getAbsolutePath());
@@ -63,6 +64,7 @@ public void testNNThroughput() throws Exception {
   @Test(timeout = 120000)
   public void testNNThroughputWithFsOption() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
     File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         nameDir.getAbsolutePath());
@@ -84,6 +86,7 @@ public void testNNThroughputAgainstRemoteNN() throws Exception {
       cluster.waitActive();
 
       final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
       FileSystem.setDefaultUri(benchConf, cluster.getURI());
       NNThroughputBenchmark.runBenchmark(benchConf, new String[]{"-op", "all"});
     } finally {
@@ -101,12 +104,15 @@ public void testNNThroughputAgainstRemoteNN() throws Exception {
   public void testNNThroughputRemoteAgainstNNWithFsOption()
       throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
-      NNThroughputBenchmark.runBenchmark(new HdfsConfiguration(),
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+      NNThroughputBenchmark.runBenchmark(benchConf,
           new String[]{"-fs", cluster.getURI().toString(), "-op", "all"});
     } finally {
       if (cluster != null) {
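
Usage note (illustrative, not part of the patch): with this change each supported
operation accepts a -blockSize flag; when it is omitted, the block size falls back
to dfs.blocksize (DFSConfigKeys.DFS_BLOCK_SIZE_KEY) and then to the class's
built-in BLOCK_SIZE default. Below is a minimal sketch of driving the benchmark
with the new flag, mirroring the NNThroughputBenchmark.runBenchmark(...) calls in
TestNNThroughputBenchmark above; the wrapper class name, thread/file counts, and
the 1048576-byte block size are arbitrary example values, not taken from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark;

    // Hypothetical driver class, for illustration only.
    public class NNThroughputBlockSizeExample {
      public static void main(String[] args) throws Exception {
        // Without -blockSize, files are created with dfs.blocksize from conf;
        // the -blockSize argument below overrides that for this run.
        Configuration conf = new HdfsConfiguration();
        NNThroughputBenchmark.runBenchmark(conf, new String[] {
            "-op", "create", "-threads", "3", "-files", "100",
            "-blockSize", "1048576"});
      }
    }

The same flag should work with the usual command-line invocation of the benchmark
(hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark -op create
-blockSize 1048576 ...), since both paths go through the same parseArguments()
methods shown in the diff.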