HDFS-12967. NNBench should support multi-cluster access. Contributed by Chen Zhang.
Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
commit 8a59cd1b8a (parent 90afb7bf8c)
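
The core of the change: everywhere NNBench previously called FileSystem.get(conf), which always binds to the cluster named by fs.defaultFS, it now calls FileSystem.get(new Path(baseDir).toUri(), conf), so the target cluster is taken from the -baseDir path itself. A minimal sketch of the difference, not part of the commit (the remote NameNode address is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CrossClusterFs {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder remote cluster; substitute a reachable NameNode address.
        Path baseDir =
            new Path("hdfs://remote-nn.example.com:8020/benchmarks/NNBench");

        // Before the patch: always resolves against fs.defaultFS.
        FileSystem defaultFs = FileSystem.get(conf);

        // After the patch: resolves against the scheme and authority of baseDir.
        FileSystem targetFs = FileSystem.get(baseDir.toUri(), conf);

        System.out.println("default fs: " + defaultFs.getUri());
        System.out.println("target fs:  " + targetFs.getUri());
      }
    }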
NNBench.java:

@@ -117,7 +117,7 @@ public class NNBench extends Configured implements Tool {
    * @throws IOException on error
    */
   private void cleanupBeforeTestrun() throws IOException {
-    FileSystem tempFS = FileSystem.get(getConf());
+    FileSystem tempFS = FileSystem.get(new Path(baseDir).toUri(), getConf());
 
     // Delete the data directory only if it is the create/write operation
     if (operation.equals(OP_CREATE_WRITE)) {
@@ -193,7 +193,8 @@ private static void displayUsage() {
         "\t-replicationFactorPerFile <Replication factor for the files." +
         " default is 1. This is not mandatory>\n" +
         "\t-baseDir <base DFS path. default is /benchmarks/NNBench. " +
-        "This is not mandatory>\n" +
+        "Supports cross-cluster access by using full path with schema and " +
+        "cluster. This is not mandatory>\n" +
         "\t-readFileAfterOpen <true or false. if true, it reads the file and " +
         "reports the average time to read. This is valid with the open_read " +
         "operation. default is false. This is not mandatory>\n" +
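
With the updated usage text, -baseDir accepts a full URI. A hedged driver example, mirroring the new test added below (the NameNode address is a placeholder; this assumes NNBench's org.apache.hadoop.hdfs package from the jobclient test jar):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.NNBench;
    import org.apache.hadoop.util.Time;
    import org.apache.hadoop.util.ToolRunner;

    public class NNBenchCrossClusterDriver {
      public static void main(String[] args) throws Exception {
        // Placeholder address of the cluster to benchmark against.
        String baseDir = "hdfs://remote-nn.example.com:8020/benchmarks/NNBench";
        String[] benchArgs = {
            "-operation", "create_write",
            "-baseDir", baseDir,
            "-startTime", "" + (Time.now() / 1000 + 3)
        };
        System.exit(ToolRunner.run(new Configuration(), new NNBench(), benchArgs));
      }
    }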
@@ -305,7 +306,7 @@ private void parseInputs(final String[] args) {
    * @throws IOException on error
    */
   private int analyzeResults() throws IOException {
-    final FileSystem fs = FileSystem.get(getConf());
+    final FileSystem fs = FileSystem.get(new Path(baseDir).toUri(), getConf());
     Path reduceDir = new Path(baseDir, OUTPUT_DIR_NAME);
 
     long totalTimeAL1 = 0l;
@@ -644,7 +645,8 @@ public void configure(JobConf conf) {
       setConf(conf);
 
       try {
-        filesystem = FileSystem.get(conf);
+        String dir = conf.get("test.nnbench.basedir");
+        filesystem = FileSystem.get(new Path(dir).toUri(), conf);
       } catch(Exception e) {
         throw new RuntimeException("Cannot get file system.", e);
       }
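
The mapper cannot rely on the default FileSystem either, so it now reads the base directory back out of the job configuration under "test.nnbench.basedir" and resolves the FileSystem from that URI. The write side of the property is outside this excerpt; a sketch of the round trip, assuming NNBench copies -baseDir into that key when setting up the job:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobConf;

    public class BaseDirRoundTrip {
      public static void main(String[] args) throws Exception {
        // Submit side (assumption: NNBench stores -baseDir under this key).
        JobConf job = new JobConf(new Configuration());
        job.set("test.nnbench.basedir",
            "hdfs://remote-nn.example.com:8020/benchmarks/NNBench");

        // Mapper side, as in the patch: bind to the cluster named by the key,
        // not to fs.defaultFS of the cluster running the MapReduce job.
        String dir = job.get("test.nnbench.basedir");
        FileSystem fs = FileSystem.get(new Path(dir).toUri(), job);
        System.out.println("mapper binds to: " + fs.getUri());
      }
    }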
TestNNBench.java:

@@ -27,6 +27,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.HadoopTestCase;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
@@ -73,12 +74,32 @@ public void testNNBenchCreateAndRename() throws Exception {
         getFileSystem().exists(renamedPath));
   }
 
-  private void runNNBench(Configuration conf, String operation)
+  @Test(timeout = 30000)
+  public void testNNBenchCrossCluster() throws Exception {
+    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(new JobConf())
+        .numDataNodes(1).build();
+    dfsCluster.waitClusterUp();
+    String nnAddress = dfsCluster.getNameNode(0).getHostAndPort();
+    String baseDir = "hdfs://" + nnAddress + BASE_DIR;
+    runNNBench(createJobConf(), "create_write", baseDir);
+
+    Path path = new Path(BASE_DIR + "/data/file_0_0");
+    assertTrue("create_write should create the file",
+        dfsCluster.getFileSystem().exists(path));
+    dfsCluster.shutdown();
+  }
+
+  private void runNNBench(Configuration conf, String operation, String baseDir)
       throws Exception {
-    String[] genArgs = { "-operation", operation, "-baseDir", BASE_DIR,
-        "-startTime", "" + (Time.now() / 1000 + 3) };
+    String[] genArgs = {"-operation", operation, "-baseDir", baseDir,
+        "-startTime", "" + (Time.now() / 1000 + 3), "-blockSize", "1024"};
 
     assertEquals(0, ToolRunner.run(conf, new NNBench(), genArgs));
   }
+
+  private void runNNBench(Configuration conf, String operation)
+      throws Exception {
+    runNNBench(conf, operation, BASE_DIR);
+  }
+
 }