From 6ed7670a93da26d475ce1b917abb45ce902c0627 Mon Sep 17 00:00:00 2001
From: He Xiaoqiao
Date: Sat, 17 Jul 2021 21:57:00 +0800
Subject: [PATCH] HDFS-16067. Support Append API in NNThroughputBenchmark.
 Contributed by Renukaprasad C.

---
 .../src/site/markdown/Benchmarking.md         |  1 +
 .../namenode/NNThroughputBenchmark.java       | 52 +++++++++++++++++++
 .../namenode/TestNNThroughputBenchmark.java   | 46 ++++++++++++++++
 3 files changed, 99 insertions(+)

diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
index ebd7086a99..26d5db37d6 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
@@ -58,6 +58,7 @@ Following are all the operations supported along with their respective operation
 |`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] |
 |`open` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
 |`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
+|`append` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-appendNewBlk`] |
 |`fileStatus` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
 |`rename` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
 |`blockReport` | [`-datanodes 10`] [`-reports 30`] [`-blocksPerReport 100`] [`-blocksPerFile 10`] |
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index e861a34fc0..7f6d572fce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -826,6 +826,53 @@ long executeOp(int daemonId, int inputIdx, String ignore)
     }
   }
 
+  /**
+   * Append file statistics.
+   * Measure how many append calls the name-node can handle per second.
+   */
+  class AppendFileStats extends OpenFileStats {
+    // Operation types
+    static final String OP_APPEND_NAME = "append";
+    public static final String APPEND_NEW_BLK = "-appendNewBlk";
+    static final String OP_APPEND_USAGE =
+        "-op " + OP_APPEND_NAME + OP_USAGE_ARGS + " [" + APPEND_NEW_BLK + ']';
+    private boolean appendNewBlk = false;
+
+    AppendFileStats(List<String> args) {
+      super(args);
+    }
+
+    @Override
+    String getOpName() {
+      return OP_APPEND_NAME;
+    }
+
+    @Override
+    void parseArguments(List<String> args) {
+      appendNewBlk = args.contains(APPEND_NEW_BLK);
+      if (this.appendNewBlk) {
+        args.remove(APPEND_NEW_BLK);
+      }
+      super.parseArguments(args);
+    }
+
+    @Override
+    long executeOp(int daemonId, int inputIdx, String ignore)
+        throws IOException {
+      long start = Time.now();
+      String src = fileNames[daemonId][inputIdx];
+      EnumSetWritable<CreateFlag> enumSet = null;
+      if (appendNewBlk) {
+        enumSet = new EnumSetWritable<>(EnumSet.of(CreateFlag.NEW_BLOCK));
+      } else {
+        enumSet = new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
+      }
+      clientProto.append(src, "TestClient", enumSet);
+      long end = Time.now();
+      return end - start;
+    }
+  }
+
   /**
    * List file status statistics.
    *
@@ -1434,6 +1481,7 @@ static void printUsage() {
       + " | \n\t" + MkdirsStats.OP_MKDIRS_USAGE
       + " | \n\t" + OpenFileStats.OP_OPEN_USAGE
       + " | \n\t" + DeleteFileStats.OP_DELETE_USAGE
+      + " | \n\t" + AppendFileStats.OP_APPEND_USAGE
       + " | \n\t" + FileStatusStats.OP_FILE_STATUS_USAGE
       + " | \n\t" + RenameFileStats.OP_RENAME_USAGE
       + " | \n\t" + BlockReportStats.OP_BLOCK_REPORT_USAGE
@@ -1496,6 +1544,10 @@ public int run(String[] aArgs) throws Exception {
       opStat = new DeleteFileStats(args);
       ops.add(opStat);
     }
+    if (runAll || AppendFileStats.OP_APPEND_NAME.equals(type)) {
+      opStat = new AppendFileStats(args);
+      ops.add(opStat);
+    }
     if(runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) {
       opStat = new FileStatusStats(args);
       ops.add(opStat);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index ec0d6df232..44bf5b7878 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -26,8 +26,11 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.util.ExitUtil;
 
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -120,4 +123,47 @@ public void testNNThroughputRemoteAgainstNNWithFsOption() throws Exception {
       }
     }
   }
+
+  /**
+   * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
+   * for append operation.
+   */
+  @Test(timeout = 120000)
+  public void testNNThroughputForAppendOp() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+      FileSystem.setDefaultUri(benchConf, cluster.getURI());
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[] {"-op", "create", "-keepResults", "-files", "3",
+              "-close" });
+      FSNamesystem fsNamesystem = cluster.getNamesystem();
+      DirectoryListing listing =
+          fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
+      HdfsFileStatus[] partialListing = listing.getPartialListing();
+
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[] {"-op", "append", "-files", "3", "-useExisting" });
+      listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
+      HdfsFileStatus[] partialListingAfter = listing.getPartialListing();
+
+      Assert.assertEquals(partialListing.length, partialListingAfter.length);
+      for (int i = 0; i < partialListing.length; i++) {
+        //Check the modification time after append operation
+        Assert.assertNotEquals(partialListing[i].getModificationTime(),
+            partialListingAfter[i].getModificationTime());
+      }
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
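
Usage note: once the patch is applied, the new op is invoked like the other
file operations documented in Benchmarking.md, for example:

  hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark \
      -op append -threads 3 -files 10 -useExisting -appendNewBlk

The benchmark can also be driven programmatically through
NNThroughputBenchmark.runBenchmark(), as the new test above does. The sketch
below mirrors that test flow against a running cluster; the NameNode URI
"hdfs://namenode-host:8020", the class name, and the file count are
illustrative placeholders, not values taken from the patch.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
  import org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark;

  public class AppendBenchmarkDriver {
    public static void main(String[] args) throws Exception {
      Configuration conf = new HdfsConfiguration();
      // Placeholder NameNode URI; substitute the cluster's fs.defaultFS.
      FileSystem.setDefaultUri(conf, "hdfs://namenode-host:8020");

      // Create the target files first and keep them (-keepResults), closing
      // them (-close) so they can be reopened for append.
      NNThroughputBenchmark.runBenchmark(conf,
          new String[] {"-op", "create", "-keepResults", "-files", "10",
              "-close"});

      // Re-append to the same files; -appendNewBlk maps to
      // CreateFlag.NEW_BLOCK, starting each append on a fresh block instead
      // of the last partial block.
      NNThroughputBenchmark.runBenchmark(conf,
          new String[] {"-op", "append", "-files", "10", "-useExisting",
              "-appendNewBlk"});
    }
  }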