HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark. Contributed by Plamen Jeliazkov.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1554071 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
e7120079bd
commit
5a54b91df9
@@ -618,9 +618,6 @@ Release 2.4.0 - UNRELEASED
     HDFS-5004. Add additional JMX bean for NameNode status data
     (Trevor Lorimer via cos)

-    HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
-    (shv)
-
     HDFS-4994. Audit log getContentSummary() calls. (Robert Parker via kihwal)

     HDFS-5144. Document time unit to NameNodeMetrics. (Akira Ajisaka via
@@ -866,6 +863,12 @@ Release 2.3.0 - UNRELEASED
     HDFS-5662. Can't decommission a DataNode due to file's replication factor
     larger than the rest of the cluster size. (brandonli)

+    HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
+    (shv)
+
+    HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark.
+    (Plamen Jeliazkov via shv)
+
     OPTIMIZATIONS

     BUG FIXES
@ -605,6 +605,98 @@ void printResults() {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Directory creation statistics.
|
||||
*
|
||||
* Each thread creates the same (+ or -1) number of directories.
|
||||
* Directory names are pre-generated during initialization.
|
||||
*/
|
||||
class MkdirsStats extends OperationStatsBase {
|
||||
// Operation types
|
||||
static final String OP_MKDIRS_NAME = "mkdirs";
|
||||
static final String OP_MKDIRS_USAGE = "-op mkdirs [-threads T] [-dirs N] " +
|
||||
"[-dirsPerDir P]";
|
||||
|
||||
protected FileNameGenerator nameGenerator;
|
||||
protected String[][] dirPaths;
|
||||
|
||||
MkdirsStats(List<String> args) {
|
||||
super();
|
||||
parseArguments(args);
|
||||
}
|
||||
|
||||
@Override
|
||||
String getOpName() {
|
||||
return OP_MKDIRS_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
void parseArguments(List<String> args) {
|
||||
boolean ignoreUnrelatedOptions = verifyOpArgument(args);
|
||||
int nrDirsPerDir = 2;
|
||||
for (int i = 2; i < args.size(); i++) { // parse command line
|
||||
if(args.get(i).equals("-dirs")) {
|
||||
if(i+1 == args.size()) printUsage();
|
||||
numOpsRequired = Integer.parseInt(args.get(++i));
|
||||
} else if(args.get(i).equals("-threads")) {
|
||||
if(i+1 == args.size()) printUsage();
|
||||
numThreads = Integer.parseInt(args.get(++i));
|
||||
} else if(args.get(i).equals("-dirsPerDir")) {
|
||||
if(i+1 == args.size()) printUsage();
|
||||
nrDirsPerDir = Integer.parseInt(args.get(++i));
|
||||
} else if(!ignoreUnrelatedOptions)
|
||||
printUsage();
|
||||
}
|
||||
nameGenerator = new FileNameGenerator(getBaseDir(), nrDirsPerDir);
|
||||
}
|
||||
|
||||
@Override
|
||||
void generateInputs(int[] opsPerThread) throws IOException {
|
||||
assert opsPerThread.length == numThreads : "Error opsPerThread.length";
|
||||
nameNodeProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
|
||||
false);
|
||||
LOG.info("Generate " + numOpsRequired + " inputs for " + getOpName());
|
||||
dirPaths = new String[numThreads][];
|
||||
for(int idx=0; idx < numThreads; idx++) {
|
||||
int threadOps = opsPerThread[idx];
|
||||
dirPaths[idx] = new String[threadOps];
|
||||
for(int jdx=0; jdx < threadOps; jdx++)
|
||||
dirPaths[idx][jdx] = nameGenerator.
|
||||
getNextFileName("ThroughputBench");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* returns client name
|
||||
*/
|
||||
@Override
|
||||
String getExecutionArgument(int daemonId) {
|
||||
return getClientName(daemonId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Do mkdirs operation.
|
||||
*/
|
||||
@Override
|
||||
long executeOp(int daemonId, int inputIdx, String clientName)
|
||||
throws IOException {
|
||||
long start = Time.now();
|
||||
nameNodeProto.mkdirs(dirPaths[daemonId][inputIdx],
|
||||
FsPermission.getDefault(), true);
|
||||
long end = Time.now();
|
||||
return end-start;
|
||||
}
|
||||
|
||||
@Override
|
||||
void printResults() {
|
||||
LOG.info("--- " + getOpName() + " inputs ---");
|
||||
LOG.info("nrDirs = " + numOpsRequired);
|
||||
LOG.info("nrThreads = " + numThreads);
|
||||
LOG.info("nrDirsPerDir = " + nameGenerator.getFilesPerDirectory());
|
||||
printStats();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Open file statistics.
|
||||
*
|
||||
@ -1279,6 +1371,7 @@ static void printUsage() {
|
||||
System.err.println("Usage: NNThroughputBenchmark"
|
||||
+ "\n\t" + OperationStatsBase.OP_ALL_USAGE
|
||||
+ " | \n\t" + CreateFileStats.OP_CREATE_USAGE
|
||||
+ " | \n\t" + MkdirsStats.OP_MKDIRS_USAGE
|
||||
+ " | \n\t" + OpenFileStats.OP_OPEN_USAGE
|
||||
+ " | \n\t" + DeleteFileStats.OP_DELETE_USAGE
|
||||
+ " | \n\t" + FileStatusStats.OP_FILE_STATUS_USAGE
|
||||
@ -1328,6 +1421,10 @@ public int run(String[] aArgs) throws Exception {
|
||||
opStat = new CreateFileStats(args);
|
||||
ops.add(opStat);
|
||||
}
|
||||
if(runAll || MkdirsStats.OP_MKDIRS_NAME.equals(type)) {
|
||||
opStat = new MkdirsStats(args);
|
||||
ops.add(opStat);
|
||||
}
|
||||
if(runAll || OpenFileStats.OP_OPEN_NAME.equals(type)) {
|
||||
opStat = new OpenFileStats(args);
|
||||
ops.add(opStat);
|
||||
|
Loading…
Reference in New Issue
Block a user