HDFS-17555. Fix NumberFormatException in NNThroughputBenchmark when dfs.blocksize is configured. (#6894). Contributed by wangzhongwei

Reviewed-by: He Xiaoqiao <hexiaoqiao@apache.org>
Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
Author: gavin.wang (committed by GitHub)
Date:   2024-07-09 16:22:15 +08:00
Commit: 783a852029
Parent: b1d96f6101
3 changed files with 45 additions and 5 deletions


@@ -88,6 +88,7 @@ When running benchmarks with the above operation(s), please provide operation-specific parameters
 |`-totalBlocks` | Number of total blocks to operate. |
 |`-replication` | Replication factor. Will be adjusted to number of data-nodes if it is larger than that. |
 |`-baseDirName` | The base dir name for benchmarks, to support multiple clients submitting benchmark tests at the same time. |
+|`-blockSize` | The block size for new files. |
 
 ### Reports
 
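The new -blockSize entry above is consumed by NNThroughputBenchmark's argument parser (patched below). As a rough, non-authoritative sketch of how the option could be exercised programmatically, mirroring the tests added by this commit and assuming the hadoop-hdfs test artifact is on the classpath (the class name BlockSizeBenchmarkDriver, the placeholder URI, and the chosen values are illustrative, not part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark;

public class BlockSizeBenchmarkDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Point the benchmark at the name-node under test; the URI below is a
    // placeholder, not taken from the patch.
    FileSystem.setDefaultUri(conf, "hdfs://namenode:8020");
    // -blockSize takes a plain byte count; 1048576 bytes (1 MiB) is an
    // illustrative value that satisfies the default minimum block size.
    NNThroughputBenchmark.runBenchmark(conf,
        new String[]{"-op", "create", "-keepResults", "-files", "3",
            "-blockSize", "1048576", "-close"});
  }
}

On the command line the value stays a plain byte count; suffixed values such as 1m are only accepted through the dfs.blocksize configuration key, as the next file shows.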


@@ -168,7 +168,7 @@ abstract class OperationStatsBase {
     private String baseDir;
     protected short replication;
-    protected int blockSize;
+    protected long blockSize;
     protected int numThreads = 0;       // number of threads
     protected int numOpsRequired = 0;   // number of operations requested
     protected int numOpsExecuted = 0;   // number of operations executed
@@ -231,7 +231,7 @@ abstract class OperationStatsBase {
     OperationStatsBase() {
       baseDir = baseDirName + "/" + getOpName();
       replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
-      blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+      blockSize = config.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
       numOpsRequired = 10;
       numThreads = 3;
       logLevel = Level.ERROR;
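The change above, config.getInt to config.getLongBytes, is the core of the fix: getLongBytes resolves binary unit suffixes, while getInt throws NumberFormatException for values such as 1m. A minimal sketch of the difference using Hadoop's Configuration API directly (the 16 MiB default shown is illustrative, not the benchmark's BLOCK_SIZE constant):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class BlockSizeParsingDemo {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "1m");  // dfs.blocksize with a letter suffix

    // getLongBytes() resolves binary suffixes (k, m, g, ...): prints 1048576.
    System.out.println(
        conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16L * 1024 * 1024));

    // getInt() expects a plain integer string and throws NumberFormatException
    // for "1m" -- the failure this commit fixes in the benchmark.
    try {
      conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16 * 1024 * 1024);
    } catch (NumberFormatException expected) {
      System.out.println("getInt rejected the suffixed value: " + expected.getMessage());
    }
  }
}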
@@ -587,7 +587,7 @@ void parseArguments(List<String> args) {
         numOpsRequired = Integer.parseInt(args.get(++i));
       } else if (args.get(i).equals("-blockSize")) {
         if(i+1 == args.size()) printUsage();
-        blockSize = Integer.parseInt(args.get(++i));
+        blockSize = Long.parseLong(args.get(++i));
       } else if(args.get(i).equals("-threads")) {
         if(i+1 == args.size()) printUsage();
         numThreads = Integer.parseInt(args.get(++i));
@@ -1260,7 +1260,7 @@ void parseArguments(List<String> args) {
         blocksPerFile = Integer.parseInt(args.get(++i));
       } else if (args.get(i).equals("-blockSize")) {
         if(i+1 == args.size()) printUsage();
-        blockSize = Integer.parseInt(args.get(++i));
+        blockSize = Long.parseLong(args.get(++i));
       } else if(args.get(i).equals("-baseDirName")) {
         if (i + 1 == args.size()) {
           printUsage();
@@ -1498,7 +1498,7 @@ void parseArguments(List<String> args) {
         replication = Short.parseShort(args.get(++i));
       } else if (args.get(i).equals("-blockSize")) {
         if(i+1 == args.size()) printUsage();
-        blockSize = Integer.parseInt(args.get(++i));
+        blockSize = Long.parseLong(args.get(++i));
       } else if(args.get(i).equals("-baseDirName")) {
         if (i + 1 == args.size()) {
           printUsage();
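All three parseArguments hunks make the same change: the -blockSize command-line value is now parsed with Long.parseLong, so it still has to be a plain byte count, while unit suffixes apply only to dfs.blocksize read from the configuration. A standalone sketch of that parsing pattern (parseBlockSize is a hypothetical helper, not a method of the benchmark):

import java.util.Arrays;
import java.util.List;

public class BlockSizeArgSketch {
  /** Hypothetical helper mirroring the "-blockSize" branches patched above. */
  static long parseBlockSize(List<String> args, long defaultBlockSize) {
    for (int i = 0; i < args.size(); i++) {
      if (args.get(i).equals("-blockSize")) {
        if (i + 1 == args.size()) {
          // The benchmark calls printUsage() here; an exception keeps the sketch self-contained.
          throw new IllegalArgumentException("-blockSize requires a value");
        }
        // Plain byte count: "32" parses, "32m" would still fail on the CLI path.
        return Long.parseLong(args.get(i + 1));
      }
    }
    return defaultBlockSize;
  }

  public static void main(String[] args) {
    List<String> benchArgs = Arrays.asList("-op", "create", "-blockSize", "32");
    System.out.println(parseBlockSize(benchArgs, 16));  // prints 32
  }
}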


@@ -246,4 +246,43 @@ public void testNNThroughputWithBaseDir() throws Exception {
       }
     }
   }
+
+  /**
+   * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
+   * for blockSize with letter suffix.
+   */
+  @Test(timeout = 120000)
+  public void testNNThroughputForBlockSizeWithLetterSuffix() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "1m");
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
+      cluster.waitActive();
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+      benchConf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "1m");
+      FileSystem.setDefaultUri(benchConf, cluster.getURI());
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[]{"-op", "create", "-keepResults", "-files", "3", "-close"});
+    }
+  }
+
+  /**
+   * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
+   * with explicit -blockSize option.
+   */
+  @Test(timeout = 120000)
+  public void testNNThroughputWithBlockSize() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
+      cluster.waitActive();
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+      FileSystem.setDefaultUri(benchConf, cluster.getURI());
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[]{"-op", "create", "-keepResults", "-files", "3",
+              "-blockSize", "32", "-close"});
+    }
+  }
 }