diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 26824ba1b7..b001d233de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1884,6 +1884,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9721. Allow Delimited PB OIV tool to run upon fsimage that contains
INodeReference. (Xiao Chen via lei)
+ HDFS-9503. Use generic option -fs for NNThroughputBenchmark instead of
+ -namenode. (Mingliang Liu via shv)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 7fa3803fd5..a8a8ba574b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -75,6 +75,8 @@
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
@@ -94,6 +96,12 @@
* by calling directly the respective name-node method.
* The name-node here is real all other components are simulated.
*
+ * This benchmark supports standard command-line options.
+ *
+ * If you use a remote namenode via the -fs option, its
+ * dfs.namenode.fs-limits.min-block-size configuration
+ * should be set to 16.
+ *
* Command line arguments for the benchmark include:
*
* - total number of operations to be performed,
@@ -107,9 +115,6 @@
* By default the refresh is never called.
* - -keepResults do not clean up the name-space after execution.
* - -useExisting do not recreate the name-space, use existing data.
- * - -namenode will run the test (except {@link ReplicationStats}) against a
- * namenode in another process or on another host. If you use this option,
- * the namenode must have dfs.namenode.fs-limits.min-block-size set to 16.
*
*
* The benchmark first generates inputs for each thread so that the
@@ -124,12 +129,8 @@
public class NNThroughputBenchmark implements Tool {
private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class);
private static final int BLOCK_SIZE = 16;
- private static final String GENERAL_OPTIONS_USAGE =
- " [-keepResults] | [-logLevel L] | [-UGCacheRefreshCount G] |" +
- " [-namenode ]\n" +
- " If using -namenode, set the namenode's " +
- "dfs.namenode.fs-limits.min-block-size to 16. Replication test does not " +
- "support -namenode.";
+ private static final String GENERAL_OPTIONS_USAGE =
+ "[-keepResults] | [-logLevel L] | [-UGCacheRefreshCount G]";
static Configuration config;
static NameNode nameNode;
@@ -139,8 +140,6 @@ public class NNThroughputBenchmark implements Tool {
static RefreshUserMappingsProtocol refreshUserMappingsProto;
static String bpid = null;
- private String namenodeUri = null; // NN URI to use, if specified
-
NNThroughputBenchmark(Configuration conf) throws IOException {
config = conf;
// We do not need many handlers, since each thread simulates a handler
@@ -384,14 +383,6 @@ protected boolean verifyOpArgument(List args) {
args.remove(ugrcIndex);
}
- if (args.indexOf("-namenode") >= 0) {
- try {
- namenodeUri = StringUtils.popOptionWithArgument("-namenode", args);
- } catch (IllegalArgumentException iae) {
- printUsage();
- }
- }
-
String type = args.get(1);
if(OP_ALL_NAME.equals(type)) {
type = getOpName();
@@ -1443,15 +1434,19 @@ static void printUsage() {
+ " | \n\t" + CleanAllStats.OP_CLEAN_USAGE
+ " | \n\t" + GENERAL_OPTIONS_USAGE
);
- System.exit(-1);
+ System.err.println();
+ GenericOptionsParser.printGenericCommandUsage(System.err);
+ System.err.println("If connecting to a remote NameNode with the -fs option, " +
+ "dfs.namenode.fs-limits.min-block-size should be set to 16.");
+ ExitUtil.terminate(-1);
}
- public static void runBenchmark(Configuration conf, List args)
+ public static void runBenchmark(Configuration conf, String[] args)
throws Exception {
NNThroughputBenchmark bench = null;
try {
bench = new NNThroughputBenchmark(conf);
- bench.run(args.toArray(new String[]{}));
+ ToolRunner.run(bench, args);
} finally {
if(bench != null)
bench.close();
@@ -1471,6 +1466,7 @@ public int run(String[] aArgs) throws Exception {
String type = args.get(1);
boolean runAll = OperationStatsBase.OP_ALL_NAME.equals(type);
+ final URI nnUri = FileSystem.getDefaultUri(config);
// Start the NameNode
String[] argv = new String[] {};
@@ -1506,10 +1502,9 @@ public int run(String[] aArgs) throws Exception {
ops.add(opStat);
}
if(runAll || ReplicationStats.OP_REPLICATION_NAME.equals(type)) {
- if (namenodeUri != null || args.contains("-namenode")) {
+ if (nnUri.getScheme() != null && nnUri.getScheme().equals("hdfs")) {
LOG.warn("The replication test is ignored as it does not support " +
- "standalone namenode in another process or on another host. " +
- "Please run replication test without -namenode argument.");
+ "standalone namenode in another process or on another host.");
} else {
opStat = new ReplicationStats(args);
ops.add(opStat);
@@ -1523,7 +1518,10 @@ public int run(String[] aArgs) throws Exception {
printUsage();
}
- if (namenodeUri == null) {
+ if (nnUri.getScheme() == null || nnUri.getScheme().equals("file")) {
+ LOG.info("Remote NameNode is not specified. Creating one.");
+ FileSystem.setDefaultUri(config, "hdfs://localhost:0");
+ config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
nameNode = NameNode.createNameNode(argv, config);
NamenodeProtocols nnProtos = nameNode.getRpcServer();
nameNodeProto = nnProtos;
@@ -1532,10 +1530,8 @@ public int run(String[] aArgs) throws Exception {
refreshUserMappingsProto = nnProtos;
bpid = nameNode.getNamesystem().getBlockPoolId();
} else {
- FileSystem.setDefaultUri(getConf(), namenodeUri);
DistributedFileSystem dfs = (DistributedFileSystem)
FileSystem.get(getConf());
- final URI nnUri = new URI(namenodeUri);
nameNodeProto = DFSTestUtil.getNamenodeProtocolProxy(config, nnUri,
UserGroupInformation.getCurrentUser());
clientProto = dfs.getClient().getNamenode();
@@ -1570,14 +1566,7 @@ private void getBlockPoolId(DistributedFileSystem unused)
}
public static void main(String[] args) throws Exception {
- NNThroughputBenchmark bench = null;
- try {
- bench = new NNThroughputBenchmark(new HdfsConfiguration());
- ToolRunner.run(bench, args);
- } finally {
- if(bench != null)
- bench.close();
- }
+ runBenchmark(new HdfsConfiguration(), args);
}
@Override // Configurable
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index d964230da5..9f1ebd122c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
-import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
@@ -27,11 +26,18 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.util.ExitUtil;
import org.junit.After;
+import org.junit.BeforeClass;
import org.junit.Test;
public class TestNNThroughputBenchmark {
+ @BeforeClass
+ public static void setUp() {
+ ExitUtil.disableSystemExit();
+ }
+
@After
public void cleanUp() {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
@@ -46,10 +52,66 @@ public void testNNThroughput() throws Exception {
File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
- FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
- conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
DFSTestUtil.formatNameNode(conf);
- String[] args = new String[] {"-op", "all"};
- NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
+ NNThroughputBenchmark.runBenchmark(conf, new String[] {"-op", "all"});
+ }
+
+ /**
+ * This test runs all benchmarks defined in {@link NNThroughputBenchmark},
+ * with explicit local -fs option.
+ */
+ @Test(timeout = 120000)
+ public void testNNThroughputWithFsOption() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ nameDir.getAbsolutePath());
+ DFSTestUtil.formatNameNode(conf);
+ NNThroughputBenchmark.runBenchmark(conf,
+ new String[] {"-fs", "file:///", "-op", "all"});
+ }
+
+ /**
+ * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster.
+ */
+ @Test(timeout = 120000)
+ public void testNNThroughputAgainstRemoteNN() throws Exception {
+ final Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ cluster.waitActive();
+
+ final Configuration benchConf = new HdfsConfiguration();
+ FileSystem.setDefaultUri(benchConf, cluster.getURI());
+ NNThroughputBenchmark.runBenchmark(benchConf, new String[]{"-op", "all"});
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+
+ /**
+ * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
+ * with explicit -fs option.
+ */
+ @Test(timeout = 120000)
+ public void testNNThroughputAgainstRemoteNNWithFsOption() throws Exception {
+ final Configuration conf = new HdfsConfiguration();
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ cluster.waitActive();
+
+ NNThroughputBenchmark.runBenchmark(new HdfsConfiguration(),
+ new String[]{"-fs", cluster.getURI().toString(), "-op", "all"});
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
}
}