MAPREDUCE-7320. organize test directories for ClusterMapReduceTestCase (#2722). Contributed by Ahmed Hussein

(cherry picked from commit e04bcb3a06)
This commit is contained in:
Ahmed Hussein 2021-02-26 13:42:33 -06:00 committed by Jim Brennan
parent e4dcc31114
commit 792329fde9
13 changed files with 118 additions and 67 deletions

View File

@ -229,6 +229,22 @@ public static int uniqueSequenceId() {
return sequence.incrementAndGet();
}
/**
* Creates a fresh root directory for the data/logs of a unit test,
* named after the test class's simple name. Any pre-existing directory
* is deleted first so every run starts from an empty tree.
*
* @param testClass the unit test class whose simple name keys the directory.
* @return the File of the (re)created root directory.
*/
public static File setupTestRootDir(Class<?> testClass) {
File testRootDir = getTestDir(testClass.getSimpleName());
if (testRootDir.exists()) {
// Wipe leftovers from a previous run before recreating the directory.
FileUtil.fullyDelete(testRootDir);
}
// NOTE(review): mkdirs() result is ignored; a failure here surfaces
// later when the test first tries to use the directory.
testRootDir.mkdirs();
return testRootDir;
}
/**
* Get the (created) base directory for tests.
* @return the absolute directory

View File

@ -132,6 +132,10 @@ private static void createJar(File dir, File jarFile) throws IOException {
* @return path to the Jar containing the class.
*/
// Convenience overload: locate the jar for klass without a test
// subdirectory (delegates to getJar(Class, String) with null).
public static String getJar(Class klass) {
return getJar(klass, null);
}
public static String getJar(Class klass, String testSubDir) {
Preconditions.checkNotNull(klass, "klass");
ClassLoader loader = klass.getClassLoader();
if (loader != null) {
@ -154,7 +158,9 @@ else if ("file".equals(url.getProtocol())) {
klassName = klassName.replace(".", "/") + ".class";
path = path.substring(0, path.length() - klassName.length());
File baseDir = new File(path);
File testDir = GenericTestUtils.getTestDir();
File testDir =
testSubDir == null ? GenericTestUtils.getTestDir()
: GenericTestUtils.getTestDir(testSubDir);
testDir = testDir.getAbsoluteFile();
if (!testDir.exists()) {
testDir.mkdirs();

View File

@ -20,9 +20,12 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import java.io.File;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
@ -43,8 +46,18 @@
* The DFS filesystem is formatted before the testcase starts and after it ends.
*/
public abstract class ClusterMapReduceTestCase {
private static File testRootDir;
private static File dfsFolder;
private MiniDFSCluster dfsCluster = null;
private MiniMRCluster mrCluster = null;
private MiniMRClientCluster mrCluster = null;
/**
* Initializes the per-test-class directories; intended to be invoked from
* a subclass's {@code @BeforeClass} hook. Creates a fresh test root
* directory named after the test class and derives the "dfs" subfolder
* that hosts the MiniDFSCluster data.
*
* @param testClass the concrete test class requesting the directories.
* @throws Exception propagated from directory setup.
*/
protected static void setupClassBase(Class<?> testClass) throws Exception {
// setup the test root directory
testRootDir = GenericTestUtils.setupTestRootDir(testClass);
// DFS data lives under <testRoot>/dfs.
dfsFolder = new File(testRootDir, "dfs");
}
/**
* Creates Hadoop Cluster and DFS before a test case is run.
@ -78,37 +91,10 @@ protected synchronized void startCluster(boolean reformatDFS, Properties props)
conf.set((String) entry.getKey(), (String) entry.getValue());
}
}
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.format(reformatDFS).racks(null).build();
ConfigurableMiniMRCluster.setConfiguration(props);
//noinspection deprecation
mrCluster = new ConfigurableMiniMRCluster(2,
getFileSystem().getUri().toString(), 1, conf);
}
}
private static class ConfigurableMiniMRCluster extends MiniMRCluster {
private static Properties config;
public static void setConfiguration(Properties props) {
config = props;
}
public ConfigurableMiniMRCluster(int numTaskTrackers, String namenode,
int numDir, JobConf conf)
throws Exception {
super(0,0, numTaskTrackers, namenode, numDir, null, null, null, conf);
}
public JobConf createJobConf() {
JobConf conf = super.createJobConf();
if (config != null) {
for (Map.Entry entry : config.entrySet()) {
conf.set((String) entry.getKey(), (String) entry.getValue());
}
}
return conf;
dfsCluster =
new MiniDFSCluster.Builder(conf, dfsFolder)
.numDataNodes(2).format(reformatDFS).racks(null).build();
mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
}
}
@ -125,7 +111,7 @@ public JobConf createJobConf() {
*/
protected void stopCluster() throws Exception {
if (mrCluster != null) {
mrCluster.shutdown();
mrCluster.stop();
mrCluster = null;
}
if (dfsCluster != null) {
@ -157,17 +143,13 @@ protected FileSystem getFileSystem() throws IOException {
return dfsCluster.getFileSystem();
}
protected MiniMRCluster getMRCluster() {
return mrCluster;
}
/**
* Returns the path to the root directory for the testcase.
*
* @return path to the root directory for the testcase.
*/
protected Path getTestRootDir() {
return new Path("x").getParent();
return new Path(testRootDir.getPath());
}
/**
@ -194,8 +176,8 @@ protected Path getOutputDir() {
*
* @return configuration that works on the testcase Hadoop instance
*/
protected JobConf createJobConf() {
return mrCluster.createJobConf();
protected JobConf createJobConf() throws IOException {
return new JobConf(mrCluster.getConfig());
}
}

View File

@ -55,7 +55,8 @@ public static MiniMRClientCluster create(Class<?> caller, String identifier,
Path appJar = new Path(testRootDir, "MRAppJar.jar");
// Copy MRAppJar and make it private.
Path appMasterJar = new Path(MiniMRYarnCluster.APPJAR);
Path appMasterJar =
new Path(MiniMRYarnCluster.copyAppJarIntoTestDir(identifier));
fs.copyFromLocalFile(appMasterJar, appJar);
fs.setPermission(appJar, new FsPermission("744"));
@ -64,7 +65,7 @@ public static MiniMRClientCluster create(Class<?> caller, String identifier,
job.addFileToClassPath(appJar);
Path callerJar = new Path(JarFinder.getJar(caller));
Path callerJar = new Path(JarFinder.getJar(caller, identifier));
Path remoteCallerJar = new Path(testRootDir, callerJar.getName());
fs.copyFromLocalFile(callerJar, remoteCallerJar);
fs.setPermission(remoteCallerJar, new FsPermission("744"));

View File

@ -37,6 +37,8 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
@ -58,7 +60,12 @@ public class TestBadRecords extends ClusterMapReduceTestCase {
Arrays.asList("hello08","hello10");
private List<String> input;
@BeforeClass
public static void setupClass() throws Exception {
// Create a fresh per-class test root directory (and its dfs subfolder).
setupClassBase(TestBadRecords.class);
}
public TestBadRecords() {
input = new ArrayList<String>();
for(int i=1;i<=10;i++) {

View File

@ -29,6 +29,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
@ -36,6 +38,12 @@
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertFalse;
public class TestClusterMapReduceTestCase extends ClusterMapReduceTestCase {
@BeforeClass
public static void setupClass() throws Exception {
// Create a fresh per-class test root directory (and its dfs subfolder).
setupClassBase(TestClusterMapReduceTestCase.class);
}
public void _testMapReduce(boolean restart) throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt"));
Writer wr = new OutputStreamWriter(os);
@ -88,7 +96,6 @@ public void _testMapReduce(boolean restart) throws Exception {
reader.close();
assertEquals(4, counter);
}
}
@Test

View File

@ -29,12 +29,19 @@
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
public class TestJobName extends ClusterMapReduceTestCase {
@BeforeClass
public static void setupClass() throws Exception {
// Create a fresh per-class test root directory (and its dfs subfolder).
setupClassBase(TestJobName.class);
}
@Test
public void testComplexName() throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(),

View File

@ -29,10 +29,17 @@
import org.apache.hadoop.mapreduce.TestMRJobClient;
import org.apache.hadoop.mapreduce.tools.CLI;
import org.apache.hadoop.util.Tool;
import org.junit.BeforeClass;
import org.junit.Ignore;
@Ignore
public class TestMRCJCJobClient extends TestMRJobClient {
@BeforeClass
public static void setupClass() throws Exception {
// Create a fresh per-class test root directory (and its dfs subfolder).
setupClassBase(TestMRCJCJobClient.class);
}
private String runJob() throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(),
"text.txt"));

View File

@ -30,6 +30,7 @@
import org.apache.hadoop.util.ToolRunner;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -63,6 +64,11 @@ public class TestMRJobClient extends ClusterMapReduceTestCase {
private static final Logger LOG =
LoggerFactory.getLogger(TestMRJobClient.class);
@BeforeClass
public static void setupClass() throws Exception {
// Create a fresh per-class test root directory (and its dfs subfolder).
setupClassBase(TestMRJobClient.class);
}
private Job runJob(Configuration conf) throws Exception {
String input = "hello1\nhello2\nhello3\n";

View File

@ -19,7 +19,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -31,58 +30,55 @@
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.Assert;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.URL;
public class TestEncryptedShuffle {
private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/" +
TestEncryptedShuffle.class.getSimpleName();
private String classpathDir;
private static File testRootDir;
@BeforeClass
public static void setUp() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
testRootDir =
GenericTestUtils.setupTestRootDir(TestEncryptedShuffle.class);
}
@Before
public void createCustomYarnClasspath() throws Exception {
classpathDir = KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
new File(classpathDir, "core-site.xml").delete();
dfsFolder = new File(testRootDir, String.format("dfs-%d",
Time.monotonicNow()));
}
@After
public void cleanUpMiniClusterSpecialConfig() throws Exception {
new File(classpathDir, "core-site.xml").delete();
String keystoresDir = new File(BASEDIR).getAbsolutePath();
String keystoresDir = testRootDir.getAbsolutePath();
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, classpathDir);
}
private String classpathDir;
private MiniDFSCluster dfsCluster = null;
private MiniMRClientCluster mrCluster = null;
private File dfsFolder;
private void startCluster(Configuration conf) throws Exception {
if (System.getProperty("hadoop.log.dir") == null) {
System.setProperty("hadoop.log.dir", "target/test-dir");
System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath());
}
conf.set("dfs.block.access.token.enable", "false");
conf.set("dfs.permissions", "true");
@ -92,7 +88,7 @@ private void startCluster(Configuration conf) throws Exception {
YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
+ File.pathSeparator + classpathDir;
conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
dfsCluster = new MiniDFSCluster.Builder(conf).build();
dfsCluster = new MiniDFSCluster.Builder(conf, dfsFolder).build();
FileSystem fileSystem = dfsCluster.getFileSystem();
fileSystem.mkdirs(new Path("/tmp"));
fileSystem.mkdirs(new Path("/user"));
@ -129,7 +125,7 @@ private void encryptedShuffleWithCerts(boolean useClientCerts)
throws Exception {
try {
Configuration conf = new Configuration();
String keystoresDir = new File(BASEDIR).getAbsolutePath();
String keystoresDir = testRootDir.getAbsolutePath();
String sslConfsDir =
KeyStoreTestUtil.getClasspathDir(TestEncryptedShuffle.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf,

View File

@ -74,6 +74,7 @@ public MiniMRYarnCluster(String testName) {
this(testName, 1);
}
@SuppressWarnings("deprecation")
public MiniMRYarnCluster(String testName, int noOfNMs) {
this(testName, noOfNMs, false);
}
@ -84,6 +85,10 @@ public MiniMRYarnCluster(String testName, int noOfNMs, boolean enableAHS) {
addService(historyServerWrapper);
}
// Resolves the MR app-master jar (LocalContainerLauncher's jar) under the
// given test subdirectory. NOTE(review): despite the name, this delegates
// to JarFinder.getJar(Class, String) rather than copying a file itself --
// confirm against JarFinder's behavior.
public static String copyAppJarIntoTestDir(String testSubdir) {
return JarFinder.getJar(LocalContainerLauncher.class, testSubdir);
}
public static String getResolvedMRHistoryWebAppURLWithoutScheme(
Configuration conf, boolean isSSLEnabled) {
InetSocketAddress address = null;

View File

@ -31,13 +31,13 @@
import java.util.Properties;
import java.util.StringTokenizer;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.ClusterMapReduceTestCase;
import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.SkipBadRecords;
import org.apache.hadoop.mapred.Utils;
@ -65,7 +65,12 @@ public class TestStreamingBadRecords extends ClusterMapReduceTestCase
private static final String badReducer =
UtilTest.makeJavaCommand(BadApp.class, new String[]{"true"});
private static final int INPUTSIZE=100;
@BeforeClass
public static void setupClass() throws Exception {
// Create a fresh per-class test root directory (and its dfs subfolder).
setupClassBase(TestStreamingBadRecords.class);
}
public TestStreamingBadRecords() throws IOException
{
UtilTest utilTest = new UtilTest(getClass().getName());

View File

@ -41,8 +41,10 @@
import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.conf.HAUtil;
@ -171,8 +173,11 @@ public MiniYARNCluster(
this.numLocalDirs = numLocalDirs;
this.numLogDirs = numLogDirs;
this.enableAHS = enableAHS;
String testSubDir = testName.replace("$", "");
File targetWorkDir = new File("target", testSubDir);
String yarnFolderName = String.format("yarn-%d", Time.monotonicNow());
File targetWorkDirRoot = GenericTestUtils.getTestDir(getName());
// make sure that the folder exists
targetWorkDirRoot.mkdirs();
File targetWorkDir = new File(targetWorkDirRoot, yarnFolderName);
try {
FileContext.getLocalFSFileContext().delete(
new Path(targetWorkDir.getAbsolutePath()), true);
@ -227,6 +232,7 @@ public MiniYARNCluster(
* @param numLocalDirs the number of nm-local-dirs per nodemanager
* @param numLogDirs the number of nm-log-dirs per nodemanager
*/
@SuppressWarnings("deprecation")
public MiniYARNCluster(
String testName, int numResourceManagers, int numNodeManagers,
int numLocalDirs, int numLogDirs) {