Revert "MAPREDUCE-6543. Migrate MR client test cases part 2. Contributed by Dustin Cote."

This reverts commit 2c268cc936.
Akira Ajisaka 2016-03-28 16:05:35 +09:00
parent 01cfee6381
commit 115be193df
80 changed files with 569 additions and 834 deletions
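
For orientation: MAPREDUCE-6543 (part 2) had migrated these client test cases from JUnit 3 to JUnit 4, and this revert puts the JUnit 3 style back — test classes extend junit.framework.TestCase again, the @Test/@Before/@After annotations and static org.junit.Assert imports disappear, and several main() and suite() entry points return. A minimal before/after sketch of the two styles follows; ExampleTest is a hypothetical class, not one of the files changed below.

// JUnit 4 style (introduced by MAPREDUCE-6543, removed by this revert)
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;

public class ExampleTest {
  private int value;

  @Before
  public void setUp() {          // runs before every @Test method
    value = 42;
  }

  @Test
  public void testValue() {      // discovered via the @Test annotation
    assertEquals(42, value);
  }
}

// JUnit 3 style (restored by this revert)
import junit.framework.TestCase;

public class ExampleTest extends TestCase {
  private int value;

  @Override
  protected void setUp() throws Exception {   // framework hook, runs before each test
    super.setUp();
    value = 42;
  }

  public void testValue() {                   // discovered via the "test" name prefix
    assertEquals(42, value);                  // inherited from junit.framework.Assert
  }
}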

View File

@ -28,6 +28,8 @@
import java.util.Date;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -37,9 +39,8 @@
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.*;
import org.junit.Ignore;
import org.junit.Test;
/**
/**
* Distributed i/o benchmark.
* <p>
* This test writes into or reads from a specified number of files.
@ -67,7 +68,7 @@
* </ul>
*/
@Ignore
public class DFSCIOTest {
public class DFSCIOTest extends TestCase {
// Constants
private static final Log LOG = LogFactory.getLog(DFSCIOTest.class);
private static final int TEST_TYPE_READ = 0;
@ -97,7 +98,6 @@ public class DFSCIOTest {
*
* @throws Exception
*/
@Test
public void testIOs() throws Exception {
testIOs(10, 10);
}

View File

@ -34,6 +34,8 @@
import java.net.InetSocketAddress;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@ -48,15 +50,8 @@
import org.apache.hadoop.mapred.lib.LongSumReducer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
public class TestFileSystem {
public class TestFileSystem extends TestCase {
private static final Log LOG = FileSystem.LOG;
private static Configuration conf = new Configuration();
@ -71,7 +66,6 @@ public class TestFileSystem {
private static Path READ_DIR = new Path(ROOT, "fs_read");
private static Path DATA_DIR = new Path(ROOT, "fs_data");
@Test
public void testFs() throws Exception {
testFs(10 * MEGA, 100, 0);
}
@ -96,7 +90,6 @@ public static void testFs(long megaBytes, int numFiles, long seed)
fs.delete(READ_DIR, true);
}
@Test
public static void testCommandFormat() throws Exception {
// This should go to TestFsShell.java when it is added.
CommandFormat cf;
@ -495,7 +488,6 @@ public static void main(String[] args) throws Exception {
}
}
@Test
public void testFsCache() throws Exception {
{
long now = System.currentTimeMillis();
@ -569,7 +561,6 @@ static void checkPath(MiniDFSCluster cluster, FileSystem fileSys) throws IOExcep
+ StringUtils.toUpperCase(add.getHostName()) + ":" + add.getPort()));
}
@Test
public void testFsClose() throws Exception {
{
Configuration conf = new Configuration();
@ -578,7 +569,6 @@ public void testFsClose() throws Exception {
}
}
@Test
public void testFsShutdownHook() throws Exception {
final Set<FileSystem> closed = Collections.synchronizedSet(new HashSet<FileSystem>());
Configuration conf = new Configuration();
@ -610,7 +600,7 @@ public void testFsShutdownHook() throws Exception {
assertTrue(closed.contains(fsWithoutAuto));
}
@Test
public void testCacheKeysAreCaseInsensitive()
throws Exception
{

View File

@ -23,18 +23,19 @@
import java.io.OutputStreamWriter;
import java.io.File;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test Job History Log Analyzer.
*
* @see JHLogAnalyzer
*/
public class TestJHLA {
public class TestJHLA extends TestCase {
private static final Log LOG = LogFactory.getLog(JHLogAnalyzer.class);
private String historyLog = System.getProperty("test.build.data",
"build/test/data") + "/history/test.log";
@ -132,7 +133,6 @@ public void tearDown() throws Exception {
/**
* Run log analyzer in test mode for file test.log.
*/
@Test
public void testJHLA() {
String[] args = {"-test", historyLog, "-jobDelimiter", ".!!FILE=.*!!"};
JHLogAnalyzer.main(args);

View File

@ -32,25 +32,21 @@
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapred.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class TestSequenceFileMergeProgress {
public class TestSequenceFileMergeProgress extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static final int RECORDS = 10000;
@Test
public void testMergeProgressWithNoCompression() throws IOException {
runTest(SequenceFile.CompressionType.NONE);
}
@Test
public void testMergeProgressWithRecordCompression() throws IOException {
runTest(SequenceFile.CompressionType.RECORD);
}
@Test
public void testMergeProgressWithBlockCompression() throws IOException {
runTest(SequenceFile.CompressionType.BLOCK);
}
@ -96,7 +92,7 @@ public void runTest(CompressionType compressionType) throws IOException {
count++;
}
assertEquals(RECORDS, count);
assertEquals(1.0f, rIter.getProgress().get(), 0.0000);
assertEquals(1.0f, rIter.getProgress().get());
}
}

View File

@ -17,11 +17,10 @@
*/
package org.apache.hadoop.mapred;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.util.Map;
@ -42,7 +41,7 @@
* <p/>
* The DFS filesystem is formatted before the testcase starts and after it ends.
*/
public abstract class ClusterMapReduceTestCase {
public abstract class ClusterMapReduceTestCase extends TestCase {
private MiniDFSCluster dfsCluster = null;
private MiniMRCluster mrCluster = null;
@ -51,8 +50,9 @@ public abstract class ClusterMapReduceTestCase {
*
* @throws Exception
*/
@Before
public void setUp() throws Exception {
protected void setUp() throws Exception {
super.setUp();
startCluster(true, null);
}
@ -139,9 +139,9 @@ protected void stopCluster() throws Exception {
*
* @throws Exception
*/
@After
public void tearDown() throws Exception {
protected void tearDown() throws Exception {
stopCluster();
super.tearDown();
}
/**

View File

@ -28,13 +28,13 @@
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.apache.hadoop.mapred.AuditLogger.Keys;
import org.apache.hadoop.net.NetUtils;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import junit.framework.TestCase;
/**
* Tests {@link AuditLogger}.
*/
public class TestAuditLogger {
public class TestAuditLogger extends TestCase {
private static final String USER = "test";
private static final String OPERATION = "oper";
private static final String TARGET = "tgt";
@ -44,7 +44,6 @@ public class TestAuditLogger {
/**
* Test the AuditLog format with key-val pair.
*/
@Test
public void testKeyValLogFormat() {
StringBuilder actLog = new StringBuilder();
StringBuilder expLog = new StringBuilder();
@ -115,7 +114,6 @@ private void testFailureLogFormat(boolean checkIP) {
/**
* Test {@link AuditLogger} without IP set.
*/
@Test
public void testAuditLoggerWithoutIP() throws Exception {
// test without ip
testSuccessLogFormat(false);
@ -139,7 +137,6 @@ public void ping() {
/**
* Test {@link AuditLogger} with IP set.
*/
@Test
public void testAuditLoggerWithIP() throws Exception {
Configuration conf = new Configuration();
// start the IPC server

View File

@ -40,11 +40,6 @@
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNotNull;
@Ignore
public class TestBadRecords extends ClusterMapReduceTestCase {
@ -211,8 +206,7 @@ private List<String> getProcessed(List<String> inputs, List<String> badRecs) {
}
return processed;
}
@Test
public void testBadMapRed() throws Exception {
JobConf conf = createJobConf();
conf.setMapperClass(BadMapper.class);

View File

@ -29,12 +29,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertFalse;
public class TestClusterMapReduceTestCase extends ClusterMapReduceTestCase {
public void _testMapReduce(boolean restart) throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt"));
@ -91,17 +85,14 @@ public void _testMapReduce(boolean restart) throws Exception {
}
@Test
public void testMapReduce() throws Exception {
_testMapReduce(false);
}
@Test
public void testMapReduceRestarting() throws Exception {
_testMapReduce(true);
}
@Test
public void testDFSRestart() throws Exception {
Path file = new Path(getInputDir(), "text.txt");
OutputStream os = getFileSystem().create(file);
@ -118,7 +109,6 @@ public void testDFSRestart() throws Exception {
}
@Test
public void testMRConfig() throws Exception {
JobConf conf = createJobConf();
assertNull(conf.get("xyz"));

View File

@ -21,15 +21,15 @@
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.UtilsForTests.RandomInputFormat;
import org.apache.hadoop.mapreduce.MRConfig;
import org.junit.Test;
import junit.framework.TestCase;
import java.io.*;
import java.util.*;
/**
* TestCollect checks if the collect can handle simultaneous invocations.
*/
public class TestCollect
public class TestCollect extends TestCase
{
final static Path OUTPUT_DIR = new Path("build/test/test.collect.output");
static final int NUM_FEEDERS = 10;
@ -127,7 +127,7 @@ public void configure(JobConf conf) throws IOException {
conf.setNumMapTasks(1);
conf.setNumReduceTasks(1);
}
@Test
public void testCollect() throws IOException {
JobConf conf = new JobConf();
configure(conf);
@ -144,5 +144,9 @@ public void testCollect() throws IOException {
fs.delete(OUTPUT_DIR, true);
}
}
public static void main(String[] args) throws IOException {
new TestCollect().testCollect();
}
}

View File

@ -21,29 +21,28 @@
import java.io.FileOutputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/**
* check for the job submission options of
* -libjars -files -archives
*/
@Ignore
public class TestCommandLineJobSubmission {
// Input output paths for this..
public class TestCommandLineJobSubmission extends TestCase {
// Input output paths for this..
// these are all dummy and does not test
// much in map reduce except for the command line
// params
static final Path input = new Path("/test/input/");
static final Path output = new Path("/test/output");
File buildDir = new File(System.getProperty("test.build.data", "/tmp"));
@Test
public void testJobShell() throws Exception {
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;

View File

@ -23,12 +23,11 @@
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionHelper;
import org.apache.hadoop.mapreduce.lib.fieldsel.TestMRFieldSelection;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import junit.framework.TestCase;
import java.text.NumberFormat;
public class TestFieldSelection {
public class TestFieldSelection extends TestCase {
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
@ -36,7 +35,6 @@ public class TestFieldSelection {
idFormat.setGroupingUsed(false);
}
@Test
public void testFieldSelection() throws Exception {
launch();
}

View File

@ -17,14 +17,12 @@
*/
package org.apache.hadoop.mapred;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.io.Writer;
@ -32,7 +30,7 @@
import java.util.Set;
import java.util.HashSet;
public class TestFileInputFormatPathFilter {
public class TestFileInputFormatPathFilter extends TestCase {
public static class DummyFileInputFormat extends FileInputFormat {
@ -57,12 +55,12 @@ public RecordReader getRecordReader(InputSplit split, JobConf job,
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestFileInputFormatPathFilter");
@Before
public void setUp() throws Exception {
tearDown();
localFs.mkdirs(workDir);
}
@After
public void tearDown() throws Exception {
if (localFs.exists(workDir)) {
localFs.delete(workDir, true);
@ -131,19 +129,18 @@ private void _testInputFiles(boolean withFilter, boolean withGlob) throws Except
assertEquals(createdFiles, computedFiles);
}
@Test
public void testWithoutPathFilterWithoutGlob() throws Exception {
_testInputFiles(false, false);
}
@Test
public void testWithoutPathFilterWithGlob() throws Exception {
_testInputFiles(false, true);
}
@Test
public void testWithPathFilterWithoutGlob() throws Exception {
_testInputFiles(true, false);
}
@Test
public void testWithPathFilterWithGlob() throws Exception {
_testInputFiles(true, true);
}

View File

@ -20,11 +20,10 @@
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.net.NetworkTopology;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
public class TestGetSplitHosts extends TestCase {
public class TestGetSplitHosts {
@Test
public void testGetSplitHosts() throws Exception {
int numBlocks = 3;

View File

@ -21,12 +21,11 @@
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.junit.Test;
import static org.junit.Assert.fail;
import static org.junit.Assert.assertEquals;
public class TestIFileStreams {
@Test
import junit.framework.TestCase;
public class TestIFileStreams extends TestCase {
public void testIFileStream() throws Exception {
final int DLEN = 100;
DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
@ -43,7 +42,7 @@ public void testIFileStream() throws Exception {
}
ifis.close();
}
@Test
public void testBadIFileStream() throws Exception {
final int DLEN = 100;
DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
@ -74,7 +73,7 @@ public void testBadIFileStream() throws Exception {
}
fail("Did not detect bad data in checksum");
}
@Test
public void testBadLength() throws Exception {
final int DLEN = 100;
DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);

View File

@ -17,15 +17,14 @@
*/
package org.apache.hadoop.mapred;
import junit.framework.TestCase;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class TestInputPath {
@Test
public class TestInputPath extends TestCase {
public void testInputPath() throws Exception {
JobConf jobConf = new JobConf();
Path workingDir = jobConf.getWorkingDirectory();

View File

@ -26,6 +26,8 @@
import java.util.Iterator;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@ -34,11 +36,8 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.JavaSerializationComparator;
import org.apache.hadoop.mapreduce.MRConfig;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
public class TestJavaSerialization {
public class TestJavaSerialization extends TestCase {
private static String TEST_ROOT_DIR =
new File(System.getProperty("test.build.data", "/tmp")).toURI()
@ -91,7 +90,7 @@ private void cleanAndCreateInput(FileSystem fs) throws IOException {
wr.write("b a\n");
wr.close();
}
@Test
public void testMapReduceJob() throws Exception {
JobConf conf = new JobConf(TestJavaSerialization.class);
@ -150,7 +149,6 @@ public void testMapReduceJob() throws Exception {
* coupled to Writable types, if so, the job will fail.
*
*/
@Test
public void testWriteToSequencefile() throws Exception {
JobConf conf = new JobConf(TestJavaSerialization.class);
conf.setJobName("JavaSerialization");

View File

@ -29,13 +29,8 @@
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
public class TestJobName extends ClusterMapReduceTestCase {
@Test
public void testComplexName() throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(),
"text.txt"));
@ -70,7 +65,6 @@ public void testComplexName() throws Exception {
reader.close();
}
@Test
public void testComplexNameWithRegex() throws Exception {
OutputStream os = getFileSystem().create(new Path(getInputDir(),
"text.txt"));

View File

@ -21,6 +21,8 @@
import java.io.DataOutputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -30,15 +32,11 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
/**
* A JUnit test to test Job System Directory with Mini-DFS.
*/
public class TestJobSysDirWithDFS {
public class TestJobSysDirWithDFS extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestJobSysDirWithDFS.class.getName());
@ -117,7 +115,7 @@ static void runWordCount(MiniMRCluster mr, JobConf jobConf, String sysDir)
// between Job Client & Job Tracker
assertTrue(result.job.isSuccessful());
}
@Test
public void testWithDFS() throws IOException {
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;

View File

@ -20,6 +20,7 @@
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
@ -27,11 +28,8 @@
import org.apache.hadoop.io.compress.*;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class TestKeyValueTextInputFormat {
public class TestKeyValueTextInputFormat extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestKeyValueTextInputFormat.class.getName());
@ -49,7 +47,7 @@ public class TestKeyValueTextInputFormat {
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestKeyValueTextInputFormat");
@Test
public void testFormat() throws Exception {
JobConf job = new JobConf();
Path file = new Path(workDir, "test.txt");
@ -136,7 +134,7 @@ private LineReader makeStream(String str) throws IOException {
(str.getBytes("UTF-8")),
defaultConf);
}
@Test
public void testUTF8() throws Exception {
LineReader in = null;
@ -155,7 +153,7 @@ public void testUTF8() throws Exception {
}
}
}
@Test
public void testNewLines() throws Exception {
LineReader in = null;
try {
@ -221,8 +219,7 @@ private static List<Text> readSplit(KeyValueTextInputFormat format,
/**
* Test using the gzip codec for reading
*/
@Test
public void testGzip() throws IOException {
public static void testGzip() throws IOException {
JobConf job = new JobConf();
CompressionCodec gzip = new GzipCodec();
ReflectionUtils.setConf(gzip, job);

View File

@ -35,15 +35,14 @@
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.lib.LazyOutputFormat;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
/**
* A JUnit test to test the Map-Reduce framework's feature to create part
* files only if there is an explicit output.collect. This helps in preventing
* 0 byte files
*/
public class TestLazyOutput {
public class TestLazyOutput extends TestCase {
private static final int NUM_HADOOP_SLAVES = 3;
private static final int NUM_MAPS_PER_NODE = 2;
private static final Path INPUT = new Path("/testlazy/input");
@ -133,7 +132,7 @@ public void createInput(FileSystem fs, int numMappers) throws Exception {
}
}
@Test
public void testLazyOutput() throws Exception {
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;

View File

@ -17,6 +17,16 @@
*/
package org.apache.hadoop.mapred;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
@ -26,21 +36,9 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.Text;
import org.junit.After;
import org.junit.Test;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@SuppressWarnings("deprecation")
public class TestMRCJCFileInputFormat {
public class TestMRCJCFileInputFormat extends TestCase {
Configuration conf = new Configuration();
MiniDFSCluster dfs = null;
@ -52,7 +50,6 @@ private MiniDFSCluster newDFSCluster(JobConf conf) throws Exception {
.build();
}
@Test
public void testLocality() throws Exception {
JobConf job = new JobConf(conf);
dfs = newDFSCluster(job);
@ -112,7 +109,6 @@ private void createInputs(FileSystem fs, Path inDir, String fileName)
DFSTestUtil.waitReplication(fs, path, replication);
}
@Test
public void testNumInputs() throws Exception {
JobConf job = new JobConf(conf);
dfs = newDFSCluster(job);
@ -161,7 +157,6 @@ public RecordReader<Text, Text> getRecordReader(InputSplit split,
}
}
@Test
public void testMultiLevelInput() throws Exception {
JobConf job = new JobConf(conf);
@ -200,7 +195,6 @@ public void testMultiLevelInput() throws Exception {
}
@SuppressWarnings("rawtypes")
@Test
public void testLastInputSplitAtSplitBoundary() throws Exception {
FileInputFormat fif = new FileInputFormatForTest(1024l * 1024 * 1024,
128l * 1024 * 1024);
@ -214,7 +208,6 @@ public void testLastInputSplitAtSplitBoundary() throws Exception {
}
@SuppressWarnings("rawtypes")
@Test
public void testLastInputSplitExceedingSplitBoundary() throws Exception {
FileInputFormat fif = new FileInputFormatForTest(1027l * 1024 * 1024,
128l * 1024 * 1024);
@ -228,7 +221,6 @@ public void testLastInputSplitExceedingSplitBoundary() throws Exception {
}
@SuppressWarnings("rawtypes")
@Test
public void testLastInputSplitSingleSplit() throws Exception {
FileInputFormat fif = new FileInputFormatForTest(100l * 1024 * 1024,
128l * 1024 * 1024);
@ -313,7 +305,7 @@ static void writeFile(Configuration conf, Path name,
DFSTestUtil.waitReplication(fileSys, name, replication);
}
@After
@Override
public void tearDown() throws Exception {
if (dfs != null) {
dfs.shutdown();

View File

@ -18,25 +18,18 @@
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobStatus;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.io.*;
import java.net.URI;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
public class TestMRCJCFileOutputCommitter {
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.JobContextImpl;
import org.apache.hadoop.mapred.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.JobStatus;
public class TestMRCJCFileOutputCommitter extends TestCase {
private static Path outDir = new Path(
System.getProperty("test.build.data", "/tmp"), "output");
@ -74,7 +67,6 @@ private void setConfForFileOutputCommitter(JobConf job) {
}
@SuppressWarnings("unchecked")
@Test
public void testCommitter() throws Exception {
JobConf job = new JobConf();
setConfForFileOutputCommitter(job);
@ -116,7 +108,6 @@ public void testCommitter() throws Exception {
FileUtil.fullyDelete(new File(outDir.toString()));
}
@Test
public void testAbort() throws IOException {
JobConf job = new JobConf();
setConfForFileOutputCommitter(job);
@ -170,7 +161,6 @@ public boolean delete(Path p, boolean recursive) throws IOException {
}
}
@Test
public void testFailAbort() throws IOException {
JobConf job = new JobConf();
job.set(FileSystem.FS_DEFAULT_NAME_KEY, "faildel:///");

View File

@ -22,6 +22,8 @@
import java.io.IOException;
import java.util.List;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
@ -38,8 +40,6 @@
import org.apache.hadoop.mapreduce.split.JobSplitWriter;
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/**
* Validates map phase progress.
@ -55,7 +55,7 @@
* once mapTask.run() is finished. Sort phase progress in map task is not
* validated here.
*/
public class TestMapProgress {
public class TestMapProgress extends TestCase {
public static final Log LOG = LogFactory.getLog(TestMapProgress.class);
private static String TEST_ROOT_DIR;
static {
@ -234,8 +234,7 @@ private void createInputFile(Path rootDir) throws IOException {
/**
* Validates map phase progress after each record is processed by map task
* using custom task reporter.
*/
@Test
*/
public void testMapProgress() throws Exception {
JobConf job = new JobConf();
fs = FileSystem.getLocal(job);

View File

@ -44,8 +44,8 @@
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.mapred.Task.TaskReporter;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import junit.framework.TestCase;
@SuppressWarnings(value={"unchecked", "deprecation"})
/**
@ -56,7 +56,7 @@
* framework's merge on the reduce side will merge the partitions created to
* generate the final output which is sorted on the key.
*/
public class TestMerge {
public class TestMerge extends TestCase {
private static final int NUM_HADOOP_DATA_NODES = 2;
// Number of input files is same as the number of mappers.
private static final int NUM_MAPPERS = 10;
@ -69,7 +69,6 @@ public class TestMerge {
// Where output goes.
private static final Path OUTPUT = new Path("/testplugin/output");
@Test
public void testMerge() throws Exception {
MiniDFSCluster dfsCluster = null;
MiniMRClientCluster mrCluster = null;

View File

@ -18,16 +18,14 @@
package org.apache.hadoop.mapred;
import org.junit.Test;
import java.io.IOException;
import junit.framework.TestCase;
/**
* A Unit-test to test bringup and shutdown of Mini Map-Reduce Cluster.
*/
public class TestMiniMRBringup {
public class TestMiniMRBringup extends TestCase {
@Test
public void testBringUp() throws IOException {
MiniMRCluster mr = null;
try {

View File

@ -18,23 +18,20 @@
package org.apache.hadoop.mapred;
import java.io.*;
import junit.framework.TestCase;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.MRCaching.TestResult;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
/**
* A JUnit test to test caching with DFS
*
*/
@Ignore
public class TestMiniMRDFSCaching {
public class TestMiniMRDFSCaching extends TestCase {
@Test
public void testWithDFS() throws IOException {
MiniMRCluster mr = null;
MiniDFSCluster dfs = null;
@ -73,4 +70,9 @@ public void testWithDFS() throws IOException {
}
}
}
public static void main(String[] argv) throws Exception {
TestMiniMRDFSCaching td = new TestMiniMRDFSCaching();
td.testWithDFS();
}
}

View File

@ -21,17 +21,17 @@
import java.util.BitSet;
import java.util.HashMap;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class TestMultiFileInputFormat {
public class TestMultiFileInputFormat extends TestCase{
private static JobConf job = new JobConf();
@ -79,8 +79,7 @@ private Path initFiles(FileSystem fs, int numFiles, int numBytes) throws IOExcep
FileInputFormat.setInputPaths(job, multiFileDir);
return multiFileDir;
}
@Test
public void testFormat() throws IOException {
LOG.info("Test started");
LOG.info("Max split count = " + MAX_SPLIT_COUNT);
@ -123,8 +122,7 @@ public void testFormat() throws IOException {
}
LOG.info("Test Finished");
}
@Test
public void testFormatWithLessPathsThanSplits() throws Exception {
MultiFileInputFormat<Text,Text> format = new DummyMultiFileInputFormat();
FileSystem fs = FileSystem.getLocal(job);
@ -137,4 +135,9 @@ public void testFormatWithLessPathsThanSplits() throws Exception {
initFiles(fs, 2, 500);
assertEquals(2, format.getSplits(job, 4).length);
}
public static void main(String[] args) throws Exception{
TestMultiFileInputFormat test = new TestMultiFileInputFormat();
test.testFormat();
}
}

View File

@ -27,19 +27,16 @@
import java.io.OutputStream;
import java.util.Arrays;
import junit.framework.TestCase;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
*
* test MultiFileSplit class
*/
public class TestMultiFileSplit {
public class TestMultiFileSplit extends TestCase{
@Test
public void testReadWrite() throws Exception {
MultiFileSplit split = new MultiFileSplit(new JobConf(), new Path[] {new Path("/test/path/1"), new Path("/test/path/2")}, new long[] {100,200});
@ -73,7 +70,6 @@ public void testReadWrite() throws Exception {
* test method getLocations
* @throws IOException
*/
@Test
public void testgetLocations() throws IOException{
JobConf job= new JobConf();

View File

@ -17,6 +17,10 @@
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -28,17 +32,12 @@
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.Ignore;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
/**
* This test checks whether the task caches are created and used properly.
*/
@Ignore
public class TestMultipleLevelCaching {
public class TestMultipleLevelCaching extends TestCase {
private static final int MAX_LEVEL = 5;
final Path inDir = new Path("/cachetesting");
final Path outputPath = new Path("/output");
@ -72,7 +71,6 @@ private static String getRack(int id, int level) {
return rack.toString();
}
@Test
public void testMultiLevelCaching() throws Exception {
for (int i = 1 ; i <= MAX_LEVEL; ++i) {
testCachingAtLevel(i);

View File

@ -18,19 +18,15 @@
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.MultipleTextOutputFormat;
import org.junit.Test;
import java.io.*;
import junit.framework.TestCase;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import org.apache.hadoop.mapred.lib.*;
public class TestMultipleTextOutputFormat {
public class TestMultipleTextOutputFormat extends TestCase {
private static JobConf defaultConf = new JobConf();
private static FileSystem localFs = null;
@ -87,8 +83,7 @@ private static void test2(JobConf job) throws IOException {
writeData(rw);
rw.close(null);
}
@Test
public void testFormat() throws Exception {
JobConf job = new JobConf();
job.set(JobContext.TASK_ATTEMPT_ID, attempt);
@ -150,4 +145,8 @@ public void testFormat() throws Exception {
//System.out.printf("File_2 output: %s\n", output);
assertEquals(output, expectedOutput.toString());
}
public static void main(String[] args) throws Exception {
new TestMultipleTextOutputFormat().testFormat();
}
}

View File

@ -19,18 +19,17 @@
package org.apache.hadoop.mapred;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestReduceFetch extends TestReduceFetchFromPartialMem {
static {
setSuite(TestReduceFetch.class);
}
/**
* Verify that all segments are read from disk
* @throws Exception might be thrown
*/
@Test
public void testReduceFromDisk() throws Exception {
final int MAP_TASKS = 8;
JobConf job = mrCluster.createJobConf();
@ -54,7 +53,6 @@ public void testReduceFromDisk() throws Exception {
* Verify that no segment hits disk.
* @throws Exception might be thrown
*/
@Test
public void testReduceFromMem() throws Exception {
final int MAP_TASKS = 3;
JobConf job = mrCluster.createJobConf();

View File

@ -18,6 +18,10 @@
package org.apache.hadoop.mapred;
import junit.extensions.TestSetup;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -26,9 +30,7 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.mapreduce.MRConfig;
import java.io.DataInput;
import java.io.DataOutput;
@ -37,27 +39,34 @@
import java.util.Formatter;
import java.util.Iterator;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class TestReduceFetchFromPartialMem {
public class TestReduceFetchFromPartialMem extends TestCase {
protected static MiniMRCluster mrCluster = null;
protected static MiniDFSCluster dfsCluster = null;
protected static TestSuite mySuite;
@Before
public void setUp() throws Exception {
Configuration conf = new Configuration();
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
mrCluster = new MiniMRCluster(2,
dfsCluster.getFileSystem().getUri().toString(), 1);
protected static void setSuite(Class<? extends TestCase> klass) {
mySuite = new TestSuite(klass);
}
@After
public void tearDown() throws Exception {
if (dfsCluster != null) { dfsCluster.shutdown(); }
if (mrCluster != null) { mrCluster.shutdown(); }
static {
setSuite(TestReduceFetchFromPartialMem.class);
}
public static Test suite() {
TestSetup setup = new TestSetup(mySuite) {
protected void setUp() throws Exception {
Configuration conf = new Configuration();
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
mrCluster = new MiniMRCluster(2,
dfsCluster.getFileSystem().getUri().toString(), 1);
}
protected void tearDown() throws Exception {
if (dfsCluster != null) { dfsCluster.shutdown(); }
if (mrCluster != null) { mrCluster.shutdown(); }
}
};
return setup;
}
private static final String tagfmt = "%04d";
@ -69,7 +78,6 @@ private static int getValLen(int id, int nMaps) {
}
/** Verify that at least one segment does not hit disk */
@Test
public void testReduceFromPartialMem() throws Exception {
final int MAP_TASKS = 7;
JobConf job = mrCluster.createJobConf();
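
A note on the change just above (and the matching one in TestDatamerge further down): the revert replaces the per-method @Before/@After fixture with JUnit 3's junit.extensions.TestSetup decorator, whose setUp and tearDown run once around the whole suite, so the shared mini clusters are started and shut down a single time rather than before and after every test method. A minimal sketch of that idiom, using a hypothetical FakeCluster in place of the real MiniDFSCluster/MiniMRCluster:

import junit.extensions.TestSetup;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;

public class ExampleClusterTest extends TestCase {

  // Hypothetical stand-in for an expensive shared fixture.
  static final class FakeCluster {
    boolean running;
    void start()    { running = true; }
    void shutdown() { running = false; }
  }

  private static FakeCluster cluster;

  public static Test suite() {
    // TestSetup decorates the suite: its setUp/tearDown wrap the whole
    // run once, not each individual test method.
    return new TestSetup(new TestSuite(ExampleClusterTest.class)) {
      protected void setUp() throws Exception {
        cluster = new FakeCluster();
        cluster.start();
      }
      protected void tearDown() throws Exception {
        cluster.shutdown();
      }
    };
  }

  public void testClusterIsShared() {
    assertTrue(cluster.running);
  }
}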

View File

@ -17,6 +17,10 @@
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
@ -26,17 +30,11 @@
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.Progressable;
import org.junit.Test;
import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* This test exercises the ValueIterator.
*/
public class TestReduceTask {
public class TestReduceTask extends TestCase {
static class NullProgress implements Progressable {
public void progress() { }
@ -121,10 +119,9 @@ public void runValueIterator(Path tmpDir, Pair[] vals,
}
assertEquals(vals.length, i);
// make sure we have progress equal to 1.0
assertEquals(1.0f, rawItr.getProgress().get(),0.0000);
assertEquals(1.0f, rawItr.getProgress().get());
}
@Test
public void testValueIterator() throws Exception {
Path tmpDir = new Path("build/test/test.reduce.task");
Configuration conf = new Configuration();
@ -132,8 +129,7 @@ public void testValueIterator() throws Exception {
runValueIterator(tmpDir, testCase, conf, null);
}
}
@Test
public void testValueIteratorWithCompression() throws Exception {
Path tmpDir = new Path("build/test/test.reduce.task.compression");
Configuration conf = new Configuration();

View File

@ -18,26 +18,19 @@
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import java.io.IOException;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
public class TestSequenceFileAsBinaryInputFormat {
import junit.framework.TestCase;
import org.apache.commons.logging.*;
public class TestSequenceFileAsBinaryInputFormat extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static final int RECORDS = 10000;
@Test
public void testBinary() throws IOException {
JobConf job = new JobConf();
FileSystem fs = FileSystem.getLocal(job);

View File

@ -18,35 +18,24 @@
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.junit.Test;
import java.io.IOException;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
public class TestSequenceFileAsBinaryOutputFormat {
import junit.framework.TestCase;
import org.apache.commons.logging.*;
public class TestSequenceFileAsBinaryOutputFormat extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestSequenceFileAsBinaryOutputFormat.class.getName());
private static final int RECORDS = 10000;
// A random task attempt id for testing.
private static final String attempt = "attempt_200707121733_0001_m_000000_0";
@Test
public void testBinary() throws IOException {
JobConf job = new JobConf();
FileSystem fs = FileSystem.getLocal(job);
@ -140,8 +129,7 @@ public void testBinary() throws IOException {
assertEquals("Some records not found", RECORDS, count);
}
@Test
public void testSequenceOutputClassDefaultsToMapRedOutputClass()
public void testSequenceOutputClassDefaultsToMapRedOutputClass()
throws IOException {
JobConf job = new JobConf();
FileSystem fs = FileSystem.getLocal(job);
@ -175,7 +163,6 @@ public void testSequenceOutputClassDefaultsToMapRedOutputClass()
job));
}
@Test
public void testcheckOutputSpecsForbidRecordCompression() throws IOException {
JobConf job = new JobConf();
FileSystem fs = FileSystem.getLocal(job);

View File

@ -18,29 +18,22 @@
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import java.util.BitSet;
import java.util.Random;
import org.apache.commons.logging.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.conf.*;
public class TestSequenceFileAsTextInputFormat {
public class TestSequenceFileAsTextInputFormat extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static int MAX_LENGTH = 10000;
private static Configuration conf = new Configuration();
@Test
public void testFormat() throws Exception {
JobConf job = new JobConf(conf);
FileSystem fs = FileSystem.getLocal(conf);
@ -119,4 +112,8 @@ public void testFormat() throws Exception {
}
}
public static void main(String[] args) throws Exception {
new TestSequenceFileAsTextInputFormat().testFormat();
}
}

View File

@ -18,21 +18,17 @@
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.*;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.conf.*;
public class TestSequenceFileInputFilter {
public class TestSequenceFileInputFilter extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static final int MAX_LENGTH = 15000;
@ -101,8 +97,7 @@ private int countRecords(int numSplits) throws IOException {
}
return count;
}
@Test
public void testRegexFilter() throws Exception {
// set the filter class
LOG.info("Testing Regex Filter with patter: \\A10*");
@ -126,7 +121,6 @@ public void testRegexFilter() throws Exception {
fs.delete(inDir, true);
}
@Test
public void testPercentFilter() throws Exception {
LOG.info("Testing Percent Filter with frequency: 1000");
// set the filter class
@ -153,8 +147,7 @@ public void testPercentFilter() throws Exception {
// clean up
fs.delete(inDir, true);
}
@Test
public void testMD5Filter() throws Exception {
// set the filter class
LOG.info("Testing MD5 Filter with frequency: 1000");
@ -175,4 +168,9 @@ public void testMD5Filter() throws Exception {
// clean up
fs.delete(inDir, true);
}
public static void main(String[] args) throws Exception {
TestSequenceFileInputFilter filter = new TestSequenceFileInputFilter();
filter.testRegexFilter();
}
}

View File

@ -18,28 +18,22 @@
package org.apache.hadoop.mapred;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.junit.Test;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import java.util.BitSet;
import java.util.Random;
import org.apache.commons.logging.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.conf.*;
public class TestSequenceFileInputFormat {
public class TestSequenceFileInputFormat extends TestCase {
private static final Log LOG = FileInputFormat.LOG;
private static int MAX_LENGTH = 10000;
private static Configuration conf = new Configuration();
@Test
public void testFormat() throws Exception {
JobConf job = new JobConf(conf);
FileSystem fs = FileSystem.getLocal(conf);
@ -116,4 +110,8 @@ public void testFormat() throws Exception {
}
}
public static void main(String[] args) throws Exception {
new TestSequenceFileInputFormat().testFormat();
}
}

View File

@ -17,20 +17,18 @@
*/
package org.apache.hadoop.mapred;
import java.util.Iterator;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.SortedRanges.Range;
import org.junit.Test;
import java.util.Iterator;
import static org.junit.Assert.assertEquals;
public class TestSortedRanges {
private static final Log LOG =
public class TestSortedRanges extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestSortedRanges.class);
@Test
public void testAdd() {
SortedRanges sr = new SortedRanges();
sr.add(new Range(2,9));
@ -68,8 +66,7 @@ public void testAdd() {
assertEquals(77, it.next().longValue());
}
@Test
public void testRemove() {
SortedRanges sr = new SortedRanges();
sr.add(new Range(2,19));

View File

@ -18,6 +18,12 @@
package org.apache.hadoop.mapred;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.URI;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -28,20 +34,14 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.Progressable;
import org.junit.Test;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.URI;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* A JUnit test to test that jobs' output filenames are not HTML-encoded (cf HADOOP-1795).
*/
public class TestSpecialCharactersInOutputPath {
public class TestSpecialCharactersInOutputPath extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestSpecialCharactersInOutputPath.class.getName());
@ -96,8 +96,7 @@ public static boolean launchJob(URI fileSys,
LOG.info("job is complete: " + runningJob.isSuccessful());
return (runningJob.isSuccessful());
}
@Test
public void testJobWithDFS() throws IOException {
String namenode = null;
MiniDFSCluster dfs = null;

View File

@ -19,18 +19,14 @@
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.mapred.StatisticsCollector.TimeWindow;
import org.apache.hadoop.mapred.StatisticsCollector.Stat;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
public class TestStatisticsCollector {
public class TestStatisticsCollector extends TestCase{
@SuppressWarnings("rawtypes")
@Test
public void testMovingWindow() throws Exception {
StatisticsCollector collector = new StatisticsCollector(1);
TimeWindow window = new TimeWindow("test", 6, 2);

View File

@ -17,15 +17,6 @@
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
@ -35,10 +26,18 @@
import java.io.OutputStreamWriter;
import java.io.Writer;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
public class TestUserDefinedCounters {
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
public class TestUserDefinedCounters extends TestCase {
private static String TEST_ROOT_DIR =
new File(System.getProperty("test.build.data", "/tmp")).toURI()
.toString().replace(' ', '+')
@ -76,7 +75,6 @@ private void cleanAndCreateInput(FileSystem fs) throws IOException {
wr.close();
}
@Test
public void testMapReduceJob() throws Exception {
JobConf conf = new JobConf(TestUserDefinedCounters.class);

View File

@ -18,6 +18,12 @@
package org.apache.hadoop.mapred;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
@ -25,15 +31,8 @@
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.util.GenericsUtil;
import org.junit.Test;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import static org.junit.Assert.assertTrue;
public class TestWritableJobConf {
public class TestWritableJobConf extends TestCase {
private static final Configuration CONF = new Configuration();
@ -79,17 +78,15 @@ private void assertEquals(Configuration conf1, Configuration conf2) {
}
}
assertTrue(map1.equals(map2));
assertEquals(map1, map2);
}
@Test
public void testEmptyConfiguration() throws Exception {
JobConf conf = new JobConf();
Configuration deser = serDeser(conf);
assertEquals(conf, deser);
}
@Test
public void testNonEmptyConfiguration() throws Exception {
JobConf conf = new JobConf();
conf.set("a", "A");
@ -98,7 +95,6 @@ public void testNonEmptyConfiguration() throws Exception {
assertEquals(conf, deser);
}
@Test
public void testConfigurationWithDefaults() throws Exception {
JobConf conf = new JobConf(false);
conf.set("a", "A");

View File

@ -18,10 +18,6 @@
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
@ -42,6 +38,8 @@
import java.util.List;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -115,7 +113,7 @@
* Test YarnRunner and make sure the client side plugin works
* fine
*/
public class TestYARNRunner {
public class TestYARNRunner extends TestCase {
private static final Log LOG = LogFactory.getLog(TestYARNRunner.class);
private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);

View File

@ -22,6 +22,11 @@
import java.io.IOException;
import java.util.Iterator;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.extensions.TestSetup;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
@ -49,27 +54,23 @@
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
public class TestDatamerge {
public class TestDatamerge extends TestCase {
private static MiniDFSCluster cluster = null;
@Before
public void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
public static Test suite() {
TestSetup setup = new TestSetup(new TestSuite(TestDatamerge.class)) {
protected void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
protected void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
};
return setup;
}
private static SequenceFile.Writer[] createWriters(Path testdir,
@ -245,22 +246,18 @@ private static void joinAs(String jointype,
base.getFileSystem(job).delete(base, true);
}
@Test
public void testSimpleInnerJoin() throws Exception {
joinAs("inner", InnerJoinChecker.class);
}
@Test
public void testSimpleOuterJoin() throws Exception {
joinAs("outer", OuterJoinChecker.class);
}
@Test
public void testSimpleOverride() throws Exception {
joinAs("override", OverrideChecker.class);
}
@Test
public void testNestedJoin() throws Exception {
// outer(inner(S1,...,Sn),outer(S1,...Sn))
final int SOURCES = 3;
@ -353,7 +350,6 @@ public void testNestedJoin() throws Exception {
}
@Test
public void testEmptyJoin() throws Exception {
JobConf job = new JobConf();
Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));

View File

@ -26,6 +26,8 @@
import java.util.Arrays;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.FloatWritable;
@ -34,12 +36,8 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class TestTupleWritable {
public class TestTupleWritable extends TestCase {
private TupleWritable makeTuple(Writable[] writs) {
Writable[] sub1 = { writs[1], writs[2] };
@ -102,7 +100,6 @@ private int verifIter(Writable[] writs, TupleWritable t, int i) {
return i;
}
@Test
public void testIterable() throws Exception {
Random r = new Random();
Writable[] writs = {
@ -124,7 +121,6 @@ public void testIterable() throws Exception {
verifIter(writs, t, 0);
}
@Test
public void testNestedIterable() throws Exception {
Random r = new Random();
Writable[] writs = {
@ -143,7 +139,6 @@ public void testNestedIterable() throws Exception {
assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
@Test
public void testWritable() throws Exception {
Random r = new Random();
Writable[] writs = {
@ -167,7 +162,6 @@ public void testWritable() throws Exception {
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
}
@Test
public void testWideWritable() throws Exception {
Writable[] manyWrits = makeRandomWritables(131);
@ -186,8 +180,7 @@ public void testWideWritable() throws Exception {
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
assertEquals("All tuple data has not been read from the stream",-1,in.read());
}
@Test
public void testWideWritable2() throws Exception {
Writable[] manyWrits = makeRandomWritables(71);
@ -209,7 +202,6 @@ public void testWideWritable2() throws Exception {
* Tests a tuple writable with more than 64 values and the values set written
* spread far apart.
*/
@Test
public void testSparseWideWritable() throws Exception {
Writable[] manyWrits = makeRandomWritables(131);
@ -228,7 +220,7 @@ public void testSparseWideWritable() throws Exception {
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
assertEquals("All tuple data has not been read from the stream",-1,in.read());
}
@Test
public void testWideTuple() throws Exception {
Text emptyText = new Text("Should be empty");
Writable[] values = new Writable[64];
@ -248,7 +240,7 @@ public void testWideTuple() throws Exception {
}
}
}
@Test
public void testWideTuple2() throws Exception {
Text emptyText = new Text("Should be empty");
Writable[] values = new Writable[64];
@ -272,7 +264,6 @@ public void testWideTuple2() throws Exception {
/**
* Tests that we can write more than 64 values.
*/
@Test
public void testWideTupleBoundary() throws Exception {
Text emptyText = new Text("Should not be set written");
Writable[] values = new Writable[65];
@ -296,7 +287,6 @@ public void testWideTupleBoundary() throws Exception {
/**
* Tests compatibility with pre-0.21 versions of TupleWritable
*/
@Test
public void testPreVersion21Compatibility() throws Exception {
Writable[] manyWrits = makeRandomWritables(64);
PreVersion21TupleWritable oldTuple = new PreVersion21TupleWritable(manyWrits);
@ -314,7 +304,7 @@ public void testPreVersion21Compatibility() throws Exception {
assertTrue("Tuple writable is unable to read pre-0.21 versions of TupleWritable", oldTuple.isCompatible(dTuple));
assertEquals("All tuple data has not been read from the stream",-1,in.read());
}
@Test
public void testPreVersion21CompatibilityEmptyTuple() throws Exception {
Writable[] manyWrits = new Writable[0];
PreVersion21TupleWritable oldTuple = new PreVersion21TupleWritable(manyWrits);


@ -21,6 +21,8 @@
import java.io.DataOutput;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
@ -33,16 +35,13 @@
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class TestWrappedRecordReaderClassloader {
public class TestWrappedRecordReaderClassloader extends TestCase {
/**
* Tests the class loader set by {@link JobConf#setClassLoader(ClassLoader)}
* is inherited by any {@link WrappedRecordReader}s created by
* {@link CompositeRecordReader}
*/
@Test
public void testClassLoader() throws Exception {
JobConf job = new JobConf();
Fake_ClassLoader classLoader = new Fake_ClassLoader();


@ -20,6 +20,8 @@
import java.io.DataOutputStream;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -30,12 +32,9 @@
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertEquals;
public class TestDelegatingInputFormat {
@Test
public class TestDelegatingInputFormat extends TestCase {
public void testSplitting() throws Exception {
JobConf conf = new JobConf();
MiniDFSCluster dfs = null;


@ -20,14 +20,13 @@
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class TestLineInputFormat {
public class TestLineInputFormat extends TestCase {
private static int MAX_LENGTH = 200;
private static JobConf defaultConf = new JobConf();
@ -44,7 +43,7 @@ public class TestLineInputFormat {
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestLineInputFormat");
@Test
public void testFormat() throws Exception {
JobConf job = new JobConf();
Path file = new Path(workDir, "test.txt");


@ -36,6 +36,7 @@
* @see TestDelegatingInputFormat
*/
public class TestMultipleInputs {
@Test
public void testAddInputPathWithFormat() {
final JobConf conf = new JobConf();
@ -48,6 +49,7 @@ public void testAddInputPathWithFormat() {
assertEquals(KeyValueTextInputFormat.class, inputs.get(new Path("/bar"))
.getClass());
}
@Test
public void testAddInputPathWithMapper() {
final JobConf conf = new JobConf();


@ -22,14 +22,13 @@
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapred.lib.*;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import junit.framework.TestCase;
import java.io.*;
import java.util.*;
import java.text.NumberFormat;
public class TestAggregates {
public class TestAggregates extends TestCase {
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
@ -37,7 +36,7 @@ public class TestAggregates {
idFormat.setGroupingUsed(false);
}
@Test
public void testAggregates() throws Exception {
launch();
}


@ -19,13 +19,13 @@
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
public class TestConstructQuery {
public class TestConstructQuery extends TestCase {
private String[] fieldNames = new String[] { "id", "name", "value" };
private String[] nullFieldNames = new String[] { null, null, null };
private String expected = "INSERT INTO hadoop_output (id,name,value) VALUES (?,?,?);";
@ -33,15 +33,15 @@ public class TestConstructQuery {
private DBOutputFormat<DBWritable, NullWritable> format
= new DBOutputFormat<DBWritable, NullWritable>();
@Test
public void testConstructQuery() {
public void testConstructQuery() {
String actual = format.constructQuery("hadoop_output", fieldNames);
assertEquals(expected, actual);
actual = format.constructQuery("hadoop_output", nullFieldNames);
assertEquals(nullExpected, actual);
}
@Test
public void testSetOutput() throws IOException {
JobConf job = new JobConf();
DBOutputFormat.setOutput(job, "hadoop_output", fieldNames);
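For context on the strings asserted above: constructQuery builds a parameterized INSERT statement from the table name and the column names, and the companion nullExpected case covers field names that are all null. A minimal sketch of the call the two assertions exercise (illustrative only, not part of this patch):

// Illustrative sketch, not part of this patch.
DBOutputFormat<DBWritable, NullWritable> fmt =
    new DBOutputFormat<DBWritable, NullWritable>();
String q = fmt.constructQuery("hadoop_output",
    new String[] { "id", "name", "value" });
// q is "INSERT INTO hadoop_output (id,name,value) VALUES (?,?,?);"
String qNull = fmt.constructQuery("hadoop_output",
    new String[] { null, null, null });
// qNull is the 'nullExpected' form asserted above (the column list is
// omitted when the field names are null).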


@ -44,13 +44,10 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import junit.framework.TestCase;
@Ignore
public class TestPipes {
public class TestPipes extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestPipes.class.getName());
@ -69,7 +66,7 @@ static void cleanup(FileSystem fs, Path p) throws IOException {
fs.delete(p, true);
assertFalse("output not cleaned up", fs.exists(p));
}
@Test
public void testPipes() throws IOException {
if (System.getProperty("compile.c++") == null) {
LOG.info("compile.c++ is not defined, so skipping TestPipes");


@ -17,42 +17,36 @@
*/
package org.apache.hadoop.mapreduce;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.mapred.LocalJobRunner;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
/**
* Stress tests for the LocalJobRunner
*/
public class TestLocalRunner {
public class TestLocalRunner extends TestCase {
private static final Log LOG = LogFactory.getLog(TestLocalRunner.class);


@ -17,23 +17,6 @@
*/
package org.apache.hadoop.mapreduce;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.mapred.ClusterMapReduceTestCase;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.tools.CLI;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Test;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
@ -48,11 +31,23 @@
import java.io.PrintStream;
import java.util.Arrays;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.RemoteIterator;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.ClusterMapReduceTestCase;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.tools.CLI;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
test CLI class. CLI class implemented the Tool interface.
@ -108,7 +103,7 @@ public void checkOutputSpecs(JobContext job) throws IOException {
throw new IOException();
}
}
@Test
public void testJobSubmissionSpecsAndFiles() throws Exception {
Configuration conf = createJobConf();
Job job = MapReduceTestUtil.createJob(conf, getInputDir(), getOutputDir(),
@ -132,7 +127,7 @@ public void testJobSubmissionSpecsAndFiles() throws Exception {
/**
* main test method
*/
@Test
public void testJobClient() throws Exception {
Configuration conf = createJobConf();
Job job = runJob(conf);
@ -185,7 +180,8 @@ private void testfailTask(Configuration conf) throws Exception {
runTool(conf, jc, new String[] { "-fail-task", taid.toString() }, out);
String answer = new String(out.toByteArray(), "UTF-8");
assertTrue(answer.contains("Killed task " + taid + " by failing it"));
Assert
.assertTrue(answer.contains("Killed task " + taid + " by failing it"));
}
/**
@ -203,7 +199,7 @@ private void testKillTask(Configuration conf) throws Exception {
runTool(conf, jc, new String[] { "-kill-task", taid.toString() }, out);
String answer = new String(out.toByteArray(), "UTF-8");
assertTrue(answer.contains("Killed task " + taid));
Assert.assertTrue(answer.contains("Killed task " + taid));
}
/**
@ -690,7 +686,6 @@ public void testChangingJobPriority(String jobId, Configuration conf)
* Test -list option displays job name.
* The name is capped to 20 characters for display.
*/
@Test
public void testJobName() throws Exception {
Configuration conf = createJobConf();
CLI jc = createJobClient();


@ -25,6 +25,8 @@
import java.util.Arrays;
import java.util.List;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@ -40,16 +42,13 @@
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/**
* A JUnit test to test the Map-Reduce framework's feature to create part
* files only if there is an explicit output.collect. This helps in preventing
* 0 byte files
*/
public class TestMapReduceLazyOutput {
public class TestMapReduceLazyOutput extends TestCase {
private static final int NUM_HADOOP_SLAVES = 3;
private static final int NUM_MAPS_PER_NODE = 2;
private static final Path INPUT = new Path("/testlazy/input");
@ -123,7 +122,7 @@ public void createInput(FileSystem fs, int numMappers) throws Exception {
}
}
@Test
public void testLazyOutput() throws Exception {
MiniDFSCluster dfs = null;
MiniMRCluster mr = null;
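The class comment above describes the feature under test: when the real output format is wrapped in LazyOutputFormat, part files are only created once a task actually writes a record, so tasks that emit nothing leave no zero-byte files behind. A minimal job-setup sketch of that wrapping (illustrative only, not part of this patch; job name and paths are placeholders, imports as in the hunk above):

// Illustrative sketch, not part of this patch.
Job job = Job.getInstance(new Configuration(), "lazy-output-example");
// Wrap the real output format; part-* files are created lazily,
// only on the first record actually written by a task.
LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);
FileOutputFormat.setOutputPath(job, new Path("/tmp/lazy-output-example"));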


@ -27,6 +27,8 @@
import java.util.ArrayList;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -41,15 +43,12 @@
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
/**
* A JUnit test to test the Map-Reduce framework's support for the
* "mark-reset" functionality in Reduce Values Iterator
*/
public class TestValueIterReset {
public class TestValueIterReset extends TestCase {
private static final int NUM_MAPS = 1;
private static final int NUM_TESTS = 4;
private static final int NUM_VALUES = 40;
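The class comment above refers to the "mark-reset" support for the reduce-side value iterator; the public API for it is org.apache.hadoop.mapreduce.MarkableIterator. A minimal reducer sketch of the pattern this test exercises (illustrative only, not part of this patch):

// Illustrative sketch, not part of this patch.
public void reduce(IntWritable key, Iterable<IntWritable> values, Context context)
    throws IOException, InterruptedException {
  MarkableIterator<IntWritable> mitr =
      new MarkableIterator<IntWritable>(values.iterator());
  mitr.mark();                        // remember this position in the value stream
  while (mitr.hasNext()) {
    mitr.next();                      // first pass over the values
  }
  mitr.reset();                       // rewind to the mark
  while (mitr.hasNext()) {
    context.write(key, mitr.next());  // second pass over the same values
  }
}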
@ -519,7 +518,6 @@ public void createInput() throws Exception {
}
}
@Test
public void testValueIterReset() {
try {
Configuration conf = new Configuration();


@ -18,7 +18,6 @@
package org.apache.hadoop.mapreduce;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ -27,6 +26,7 @@
import java.io.IOException;
import java.nio.ByteBuffer;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
@ -44,7 +44,8 @@
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.junit.Test;
public class TestYarnClientProtocolProvider {
public class TestYarnClientProtocolProvider extends TestCase {
private static final RecordFactory recordFactory = RecordFactoryProvider.
getRecordFactory(null);


@ -18,24 +18,22 @@
package org.apache.hadoop.mapreduce.lib.aggregate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.junit.Test;
import junit.framework.TestCase;
import java.io.*;
import java.text.NumberFormat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestMapReduceAggregates {
public class TestMapReduceAggregates extends TestCase {
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
@ -43,7 +41,7 @@ public class TestMapReduceAggregates {
idFormat.setGroupingUsed(false);
}
@Test
public void testAggregates() throws Exception {
launch();
}
@ -124,4 +122,11 @@ public static void launch() throws Exception {
fs.delete(OUTPUT_DIR, true);
fs.delete(INPUT_DIR, true);
}
/**
* Launches all the tasks in order.
*/
public static void main(String[] argv) throws Exception {
launch();
}
}


@ -19,15 +19,14 @@
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
public class TestDBOutputFormat {
public class TestDBOutputFormat extends TestCase {
private String[] fieldNames = new String[] { "id", "name", "value" };
private String[] nullFieldNames = new String[] { null, null, null };
private String expected = "INSERT INTO hadoop_output " +
@ -36,17 +35,15 @@ public class TestDBOutputFormat {
private DBOutputFormat<DBWritable, NullWritable> format
= new DBOutputFormat<DBWritable, NullWritable>();
@Test
public void testConstructQuery() {
public void testConstructQuery() {
String actual = format.constructQuery("hadoop_output", fieldNames);
assertEquals(expected, actual);
actual = format.constructQuery("hadoop_output", nullFieldNames);
assertEquals(nullExpected, actual);
}
@Test
public void testSetOutput() throws IOException {
Job job = Job.getInstance(new Configuration());
DBOutputFormat.setOutput(job, "hadoop_output", fieldNames);


@ -17,15 +17,15 @@
*/
package org.apache.hadoop.mapreduce.lib.db;
import org.junit.Test;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import junit.framework.TestCase;
public class TestIntegerSplitter {
public class TestIntegerSplitter extends TestCase {
private long [] toLongArray(List<Long> in) {
long [] out = new long[in.size()];
for (int i = 0; i < in.size(); i++) {
@ -70,14 +70,12 @@ public void assertLongArrayEquals(long [] expected, long [] actual) {
}
}
@Test
public void testEvenSplits() throws SQLException {
List<Long> splits = new IntegerSplitter().split(10, 0, 100);
long [] expected = { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
assertLongArrayEquals(expected, toLongArray(splits));
}
@Test
public void testOddSplits() throws SQLException {
List<Long> splits = new IntegerSplitter().split(10, 0, 95);
long [] expected = { 0, 9, 18, 27, 36, 45, 54, 63, 72, 81, 90, 95 };
@ -85,14 +83,12 @@ public void testOddSplits() throws SQLException {
}
@Test
public void testSingletonSplit() throws SQLException {
List<Long> splits = new IntegerSplitter().split(1, 5, 5);
long [] expected = { 5, 5 };
assertLongArrayEquals(expected, toLongArray(splits));
}
@Test
public void testSingletonSplit2() throws SQLException {
// Same test, but overly-high numSplits
List<Long> splits = new IntegerSplitter().split(5, 5, 5);
@ -100,7 +96,6 @@ public void testSingletonSplit2() throws SQLException {
assertLongArrayEquals(expected, toLongArray(splits));
}
@Test
public void testTooManySplits() throws SQLException {
List<Long> splits = new IntegerSplitter().split(5, 3, 5);
long [] expected = { 3, 4, 5 };


@ -17,16 +17,15 @@
*/
package org.apache.hadoop.mapreduce.lib.db;
import org.junit.Test;
import java.io.IOException;
import java.math.BigDecimal;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import junit.framework.TestCase;
public class TestTextSplitter {
public class TestTextSplitter extends TestCase {
public String formatArray(Object [] ar) {
StringBuilder sb = new StringBuilder();
@ -63,56 +62,48 @@ public void assertArrayEquals(Object [] expected, Object [] actual) {
}
}
@Test
public void testStringConvertEmpty() {
TextSplitter splitter = new TextSplitter();
BigDecimal emptyBigDec = splitter.stringToBigDecimal("");
assertEquals(BigDecimal.ZERO, emptyBigDec);
}
@Test
public void testBigDecConvertEmpty() {
TextSplitter splitter = new TextSplitter();
String emptyStr = splitter.bigDecimalToString(BigDecimal.ZERO);
assertEquals("", emptyStr);
}
@Test
public void testConvertA() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("A"));
assertEquals("A", out);
}
@Test
public void testConvertZ() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("Z"));
assertEquals("Z", out);
}
@Test
public void testConvertThreeChars() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("abc"));
assertEquals("abc", out);
}
@Test
public void testConvertStr() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("big str"));
assertEquals("big str", out);
}
@Test
public void testConvertChomped() {
TextSplitter splitter = new TextSplitter();
String out = splitter.bigDecimalToString(splitter.stringToBigDecimal("AVeryLongStringIndeed"));
assertEquals("AVeryLon", out);
}
@Test
public void testAlphabetSplit() throws SQLException {
// This should give us 25 splits, one per letter.
TextSplitter splitter = new TextSplitter();
@ -122,7 +113,6 @@ public void testAlphabetSplit() throws SQLException {
assertArrayEquals(expected, splits.toArray(new String [0]));
}
@Test
public void testCommonPrefix() throws SQLException {
// Splits between 'Hand' and 'Hardy'
TextSplitter splitter = new TextSplitter();


@ -18,19 +18,15 @@
package org.apache.hadoop.mapreduce.lib.fieldsel;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.junit.Test;
import junit.framework.TestCase;
import java.text.NumberFormat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestMRFieldSelection {
public class TestMRFieldSelection extends TestCase {
private static NumberFormat idFormat = NumberFormat.getInstance();
static {
@ -38,7 +34,6 @@ public class TestMRFieldSelection {
idFormat.setGroupingUsed(false);
}
@Test
public void testFieldSelection() throws Exception {
launch();
}
@ -119,4 +114,11 @@ public static void constructInputOutputData(StringBuffer inputData,
System.out.println("ExpectedData:");
System.out.println(expectedOutput.toString());
}
/**
* Launches all the tasks in order.
*/
public static void main(String[] argv) throws Exception {
launch();
}
}


@ -18,12 +18,11 @@
package org.apache.hadoop.mapreduce.lib.input;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
@ -32,18 +31,12 @@
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.junit.Test;
import java.io.IOException;
import java.util.Random;
import junit.framework.TestCase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestMRSequenceFileAsBinaryInputFormat {
public class TestMRSequenceFileAsBinaryInputFormat extends TestCase {
private static final int RECORDS = 10000;
@Test
public void testBinary() throws IOException, InterruptedException {
Job job = Job.getInstance();
FileSystem fs = FileSystem.getLocal(job.getConfiguration());


@ -18,13 +18,11 @@
package org.apache.hadoop.mapreduce.lib.input;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import java.util.*;
import junit.framework.TestCase;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
@ -33,19 +31,12 @@
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.junit.Test;
import org.apache.hadoop.conf.*;
import java.util.BitSet;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class TestMRSequenceFileAsTextInputFormat {
public class TestMRSequenceFileAsTextInputFormat extends TestCase {
private static int MAX_LENGTH = 10000;
private static Configuration conf = new Configuration();
@Test
public void testFormat() throws Exception {
Job job = Job.getInstance(conf);
FileSystem fs = FileSystem.getLocal(conf);
@ -121,4 +112,8 @@ public void testFormat() throws Exception {
}
}
public static void main(String[] args) throws Exception {
new TestMRSequenceFileAsTextInputFormat().testFormat();
}
}


@ -18,14 +18,14 @@
package org.apache.hadoop.mapreduce.lib.input;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
@ -34,15 +34,10 @@
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.junit.Test;
import org.apache.hadoop.conf.*;
import java.io.IOException;
import java.util.Random;
import static org.junit.Assert.assertEquals;
public class TestMRSequenceFileInputFilter {
private static final Log LOG =
public class TestMRSequenceFileInputFilter extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestMRSequenceFileInputFilter.class.getName());
private static final int MAX_LENGTH = 15000;
@ -118,8 +113,7 @@ private int countRecords(int numSplits)
}
return count;
}
@Test
public void testRegexFilter() throws Exception {
// set the filter class
LOG.info("Testing Regex Filter with patter: \\A10*");
@ -144,7 +138,6 @@ public void testRegexFilter() throws Exception {
fs.delete(inDir, true);
}
@Test
public void testPercentFilter() throws Exception {
LOG.info("Testing Percent Filter with frequency: 1000");
// set the filter class
@ -172,8 +165,7 @@ public void testPercentFilter() throws Exception {
// clean up
fs.delete(inDir, true);
}
@Test
public void testMD5Filter() throws Exception {
// set the filter class
LOG.info("Testing MD5 Filter with frequency: 1000");
@ -195,4 +187,9 @@ public void testMD5Filter() throws Exception {
// clean up
fs.delete(inDir, true);
}
public static void main(String[] args) throws Exception {
TestMRSequenceFileInputFilter filter = new TestMRSequenceFileInputFilter();
filter.testRegexFilter();
}
}


@ -18,28 +18,17 @@
package org.apache.hadoop.mapreduce.lib.input;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.junit.Test;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.List;
import static org.junit.Assert.assertEquals;
public class TestNLineInputFormat {
public class TestNLineInputFormat extends TestCase {
private static int MAX_LENGTH = 200;
private static Configuration conf = new Configuration();
@ -56,8 +45,7 @@ public class TestNLineInputFormat {
private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "."), "data"),
"TestNLineInputFormat");
@Test
public void testFormat() throws Exception {
Job job = Job.getInstance(conf);
Path file = new Path(workDir, "test.txt");
@ -128,4 +116,8 @@ void checkFormat(Job job, int expectedN, int lastN)
}
}
}
public static void main(String[] args) throws Exception {
new TestNLineInputFormat().testFormat();
}
}


@ -19,6 +19,11 @@
import java.io.IOException;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.extensions.TestSetup;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
@ -32,31 +37,23 @@
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestJoinDatamerge {
public class TestJoinDatamerge extends TestCase {
private static MiniDFSCluster cluster = null;
@BeforeClass
public static void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
@AfterClass
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
public static Test suite() {
TestSetup setup = new TestSetup(new TestSuite(TestJoinDatamerge.class)) {
protected void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
}
protected void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
};
return setup;
}
private static SequenceFile.Writer[] createWriters(Path testdir,
@ -114,7 +111,7 @@ private static abstract class SimpleCheckerMapBase<V extends Writable>
extends Mapper<IntWritable, V, IntWritable, IntWritable>{
protected final static IntWritable one = new IntWritable(1);
int srcs;
public void setup(Context context) {
srcs = context.getConfiguration().getInt("testdatamerge.sources", 0);
assertTrue("Invalid src count: " + srcs, srcs > 0);
@ -126,7 +123,7 @@ private static abstract class SimpleCheckerReduceBase
protected final static IntWritable one = new IntWritable(1);
int srcs;
public void setup(Context context) {
srcs = context.getConfiguration().getInt("testdatamerge.sources", 0);
assertTrue("Invalid src count: " + srcs, srcs > 0);
@ -275,12 +272,10 @@ private static void joinAs(String jointype,
base.getFileSystem(conf).delete(base, true);
}
@Test
public void testSimpleInnerJoin() throws Exception {
joinAs("inner", InnerJoinMapChecker.class, InnerJoinReduceChecker.class);
}
@Test
public void testSimpleOuterJoin() throws Exception {
joinAs("outer", OuterJoinMapChecker.class, OuterJoinReduceChecker.class);
}
@ -327,13 +322,11 @@ private static int countProduct(IntWritable key, Path[] src,
}
return product;
}
@Test
public void testSimpleOverride() throws Exception {
joinAs("override", OverrideMapChecker.class, OverrideReduceChecker.class);
}
@Test
public void testNestedJoin() throws Exception {
// outer(inner(S1,...,Sn),outer(S1,...Sn))
final int SOURCES = 3;
@ -429,7 +422,6 @@ public void testNestedJoin() throws Exception {
}
@Test
public void testEmptyJoin() throws Exception {
Configuration conf = new Configuration();
Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));


@ -20,6 +20,11 @@
import java.io.IOException;
import java.util.List;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.extensions.TestSetup;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -31,14 +36,8 @@
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestJoinProperties {
public class TestJoinProperties extends TestCase {
private static MiniDFSCluster cluster = null;
final static int SOURCES = 3;
@ -47,19 +46,21 @@ public class TestJoinProperties {
static Path[] src;
static Path base;
@BeforeClass
public static void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
base = cluster.getFileSystem().makeQualified(new Path("/nested"));
src = generateSources(conf);
}
@AfterClass
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
public static Test suite() {
TestSetup setup = new TestSetup(new TestSuite(TestJoinProperties.class)) {
protected void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
base = cluster.getFileSystem().makeQualified(new Path("/nested"));
src = generateSources(conf);
}
protected void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
};
return setup;
}
// Sources from 0 to srcs-2 have IntWritable key and IntWritable value
@ -232,7 +233,6 @@ private void testExpr4(Configuration conf) throws Exception {
}
// outer(outer(A, B), C) == outer(A,outer(B, C)) == outer(A, B, C)
@Test
public void testOuterAssociativity() throws Exception {
Configuration conf = new Configuration();
testExpr1(conf, "outer", TestType.OUTER_ASSOCIATIVITY, 33);
@ -241,7 +241,6 @@ public void testOuterAssociativity() throws Exception {
}
// inner(inner(A, B), C) == inner(A,inner(B, C)) == inner(A, B, C)
@Test
public void testInnerAssociativity() throws Exception {
Configuration conf = new Configuration();
testExpr1(conf, "inner", TestType.INNER_ASSOCIATIVITY, 2);
@ -250,7 +249,6 @@ public void testInnerAssociativity() throws Exception {
}
// override(inner(A, B), A) == A
@Test
public void testIdentity() throws Exception {
Configuration conf = new Configuration();
testExpr4(conf);


@ -24,6 +24,8 @@
import java.util.Arrays;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.FloatWritable;
@ -31,13 +33,8 @@
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestJoinTupleWritable {
public class TestJoinTupleWritable extends TestCase {
private TupleWritable makeTuple(Writable[] writs) {
Writable[] sub1 = { writs[1], writs[2] };
@ -100,7 +97,6 @@ private int verifIter(Writable[] writs, TupleWritable t, int i) {
return i;
}
@Test
public void testIterable() throws Exception {
Random r = new Random();
Writable[] writs = {
@ -122,7 +118,6 @@ public void testIterable() throws Exception {
verifIter(writs, t, 0);
}
@Test
public void testNestedIterable() throws Exception {
Random r = new Random();
Writable[] writs = {
@ -141,7 +136,6 @@ public void testNestedIterable() throws Exception {
assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
@Test
public void testWritable() throws Exception {
Random r = new Random();
Writable[] writs = {
@ -165,7 +159,6 @@ public void testWritable() throws Exception {
assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
}
@Test
public void testWideWritable() throws Exception {
Writable[] manyWrits = makeRandomWritables(131);
@ -185,8 +178,7 @@ public void testWideWritable() throws Exception {
assertEquals("All tuple data has not been read from the stream",
-1, in.read());
}
@Test
public void testWideWritable2() throws Exception {
Writable[] manyWrits = makeRandomWritables(71);
@ -209,7 +201,6 @@ public void testWideWritable2() throws Exception {
* Tests a tuple writable with more than 64 values and the values set written
* spread far apart.
*/
@Test
public void testSparseWideWritable() throws Exception {
Writable[] manyWrits = makeRandomWritables(131);
@ -229,8 +220,7 @@ public void testSparseWideWritable() throws Exception {
assertEquals("All tuple data has not been read from the stream",
-1, in.read());
}
@Test
public void testWideTuple() throws Exception {
Text emptyText = new Text("Should be empty");
Writable[] values = new Writable[64];
@ -251,8 +241,7 @@ public void testWideTuple() throws Exception {
}
}
}
@Test
public void testWideTuple2() throws Exception {
Text emptyText = new Text("Should be empty");
Writable[] values = new Writable[64];
@ -277,7 +266,6 @@ public void testWideTuple2() throws Exception {
/**
* Tests that we can write more than 64 values.
*/
@Test
public void testWideTupleBoundary() throws Exception {
Text emptyText = new Text("Should not be set written");
Writable[] values = new Writable[65];


@ -17,32 +17,23 @@
*/
package org.apache.hadoop.mapreduce.lib.join;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.MapReduceTestUtil.Fake_RR;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class TestWrappedRRClassloader {
public class TestWrappedRRClassloader extends TestCase {
/**
* Tests the class loader set by
* {@link Configuration#setClassLoader(ClassLoader)}
* is inherited by any {@link WrappedRecordReader}s created by
* {@link CompositeRecordReader}
*/
@Test
public void testClassLoader() throws Exception {
Configuration conf = new Configuration();
Fake_ClassLoader classLoader = new Fake_ClassLoader();
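The Javadoc above states the contract being verified: a ClassLoader installed on the Configuration must be the one that CompositeRecordReader hands to the WrappedRecordReaders it builds. A minimal sketch of the configuration side of that contract (illustrative only, not part of this patch; the anonymous loader stands in for the test's Fake_ClassLoader):

// Illustrative sketch, not part of this patch.
Configuration conf = new Configuration();
ClassLoader custom =
    new ClassLoader(Thread.currentThread().getContextClassLoader()) { };
conf.setClassLoader(custom);
// Readers later instantiated from this conf (e.g. through ReflectionUtils)
// resolve user classes through the custom loader:
assert conf.getClassLoader() == custom;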


@ -18,17 +18,12 @@
package org.apache.hadoop.mapreduce.lib.output;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapreduce.InputFormat;
@ -43,22 +38,16 @@
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.junit.Test;
import java.io.IOException;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class TestMRSequenceFileAsBinaryOutputFormat {
public class TestMRSequenceFileAsBinaryOutputFormat extends TestCase {
private static final Log LOG =
LogFactory.getLog(TestMRSequenceFileAsBinaryOutputFormat.class.getName());
private static final int RECORDS = 10000;
@Test
public void testBinary() throws IOException, InterruptedException {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
@ -155,8 +144,7 @@ public void testBinary() throws IOException, InterruptedException {
assertEquals("Some records not found", RECORDS, count);
}
@Test
public void testSequenceOutputClassDefaultsToMapRedOutputClass()
public void testSequenceOutputClassDefaultsToMapRedOutputClass()
throws IOException {
Job job = Job.getInstance();
// Setting Random class to test getSequenceFileOutput{Key,Value}Class
@ -184,8 +172,7 @@ public void testSequenceOutputClassDefaultsToMapRedOutputClass()
SequenceFileAsBinaryOutputFormat.getSequenceFileOutputValueClass(job));
}
@Test
public void testcheckOutputSpecsForbidRecordCompression()
public void testcheckOutputSpecsForbidRecordCompression()
throws IOException {
Job job = Job.getInstance();
FileSystem fs = FileSystem.getLocal(job.getConfiguration());


@ -22,14 +22,11 @@
import org.apache.hadoop.io.BinaryComparable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
public class TestBinaryPartitioner {
public class TestBinaryPartitioner extends TestCase {
@Test
public void testDefaultOffsets() {
Configuration conf = new Configuration();
BinaryPartitioner<?> partitioner =
@ -53,8 +50,7 @@ public void testDefaultOffsets() {
partition2 = partitioner.getPartition(key2, null, 10);
assertTrue(partition1 != partition2);
}
@Test
public void testCustomOffsets() {
Configuration conf = new Configuration();
BinaryComparable key1 = new BytesWritable(new byte[] { 1, 2, 3, 4, 5 });
@ -79,8 +75,7 @@ public void testCustomOffsets() {
partition2 = partitioner.getPartition(key2, null, 10);
assertEquals(partition1, partition2);
}
@Test
public void testLowerBound() {
Configuration conf = new Configuration();
BinaryPartitioner.setLeftOffset(conf, 0);
@ -92,8 +87,7 @@ public void testLowerBound() {
int partition2 = partitioner.getPartition(key2, null, 10);
assertTrue(partition1 != partition2);
}
@Test
public void testUpperBound() {
Configuration conf = new Configuration();
BinaryPartitioner.setRightOffset(conf, 4);


@ -19,17 +19,14 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import junit.framework.TestCase;
public class TestKeyFieldHelper {
public class TestKeyFieldHelper extends TestCase {
private static final Log LOG = LogFactory.getLog(TestKeyFieldHelper.class);
/**
* Test is key-field-helper's parse option.
*/
@Test
public void testparseOption() throws Exception {
KeyFieldHelper helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
@ -215,7 +212,6 @@ public void testparseOption() throws Exception {
/**
* Test is key-field-helper's getWordLengths.
*/
@Test
public void testGetWordLengths() throws Exception {
KeyFieldHelper helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");
@ -274,7 +270,6 @@ public void testGetWordLengths() throws Exception {
/**
* Test is key-field-helper's getStartOffset/getEndOffset.
*/
@Test
public void testgetStartEndOffset() throws Exception {
KeyFieldHelper helper = new KeyFieldHelper();
helper.setKeyFieldSeparator("\t");


@ -19,16 +19,14 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import junit.framework.TestCase;
public class TestMRKeyFieldBasedPartitioner {
public class TestMRKeyFieldBasedPartitioner extends TestCase {
/**
* Test is key-field-based partitioned works with empty key.
*/
@Test
public void testEmptyKey() throws Exception {
int numReducers = 10;
KeyFieldBasedPartitioner<Text, Text> kfbp =


@ -23,6 +23,8 @@
import java.util.Arrays;
import java.util.Comparator;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
@ -39,11 +41,8 @@
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.WritableSerialization;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class TestTotalOrderPartitioner {
public class TestTotalOrderPartitioner extends TestCase {
private static final Text[] splitStrings = new Text[] {
// -inf // 0
@ -141,7 +140,6 @@ private static <T> Path writePartitionFile(
return p;
}
@Test
public void testTotalOrderWithCustomSerialization() throws Exception {
TotalOrderPartitioner<String, NullWritable> partitioner =
new TotalOrderPartitioner<String, NullWritable>();
@ -167,7 +165,6 @@ public void testTotalOrderWithCustomSerialization() throws Exception {
}
}
@Test
public void testTotalOrderMemCmp() throws Exception {
TotalOrderPartitioner<Text,NullWritable> partitioner =
new TotalOrderPartitioner<Text,NullWritable>();
@ -187,7 +184,6 @@ public void testTotalOrderMemCmp() throws Exception {
}
}
@Test
public void testTotalOrderBinarySearch() throws Exception {
TotalOrderPartitioner<Text,NullWritable> partitioner =
new TotalOrderPartitioner<Text,NullWritable>();
@ -220,7 +216,6 @@ public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
}
}
@Test
public void testTotalOrderCustomComparator() throws Exception {
TotalOrderPartitioner<Text,NullWritable> partitioner =
new TotalOrderPartitioner<Text,NullWritable>();


@ -20,6 +20,8 @@
import java.io.File;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -28,27 +30,20 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.mapreduce.util.MRAsyncDiskService;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* A test for MRAsyncDiskService.
*/
public class TestMRAsyncDiskService {
public class TestMRAsyncDiskService extends TestCase {
public static final Log LOG = LogFactory.getLog(TestMRAsyncDiskService.class);
private static String TEST_ROOT_DIR = new Path(System.getProperty(
"test.build.data", "/tmp")).toString();
@Before
public void setUp() {
@Override
protected void setUp() {
FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
}


@ -17,6 +17,7 @@
*/
package org.apache.hadoop.mapreduce.v2;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
@ -28,25 +29,22 @@
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.net.InetAddress;
import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.InetAddress;
import java.security.PrivilegedExceptionAction;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class TestMiniMRProxyUser {
public class TestMiniMRProxyUser extends TestCase {
private MiniDFSCluster dfsCluster = null;
private MiniMRCluster mrCluster = null;
@Before
public void setUp() throws Exception {
protected void setUp() throws Exception {
super.setUp();
if (System.getProperty("hadoop.log.dir") == null) {
System.setProperty("hadoop.log.dir", "/tmp");
}
@ -93,14 +91,15 @@ protected JobConf getJobConf() {
return mrCluster.createJobConf();
}
@After
public void tearDown() throws Exception {
@Override
protected void tearDown() throws Exception {
if (mrCluster != null) {
mrCluster.shutdown();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
super.tearDown();
}
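The hunk above shows the other recurring pattern in this revert, for per-test fixtures: the JUnit 4 @Before/@After methods become overrides of TestCase's protected setUp()/tearDown(), with the explicit super calls chained back in. A compact side-by-side sketch (illustrative only, not part of this patch):

// Illustrative sketch, not part of this patch.
// JUnit 4 form (removed by this revert):
//   @Before public void setUp() throws Exception    { /* per-test init */ }
//   @After  public void tearDown() throws Exception { /* per-test cleanup */ }
// JUnit 3 form (restored by this revert):
protected void setUp() throws Exception {
  super.setUp();       // chain the TestCase lifecycle explicitly
  // per-test initialisation
}
protected void tearDown() throws Exception {
  // per-test cleanup
  super.tearDown();
}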
private void mrRun() throws Exception {
@ -126,13 +125,11 @@ private void mrRun() throws Exception {
assertTrue(runJob.isComplete());
assertTrue(runJob.isSuccessful());
}
@Test
public void __testCurrentUser() throws Exception {
mrRun();
}
@Test
public void testValidProxyUser() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createProxyUser("u1", UserGroupInformation.getLoginUser());
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@ -145,7 +142,6 @@ public Void run() throws Exception {
});
}
@Test
public void ___testInvalidProxyUser() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createProxyUser("u2", UserGroupInformation.getLoginUser());
ugi.doAs(new PrivilegedExceptionAction<Void>() {


@ -17,6 +17,7 @@
*/
package org.apache.hadoop.mapreduce.v2;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
@ -27,22 +28,17 @@
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.net.InetAddress;
import static org.junit.Assert.assertNull;
public class TestNonExistentJob {
public class TestNonExistentJob extends TestCase {
private MiniDFSCluster dfsCluster = null;
private MiniMRCluster mrCluster = null;
@Before
public void setUp() throws Exception {
protected void setUp() throws Exception {
super.setUp();
if (System.getProperty("hadoop.log.dir") == null) {
System.setProperty("hadoop.log.dir", "/tmp");
}
@ -82,17 +78,17 @@ protected JobConf getJobConf() {
return mrCluster.createJobConf();
}
@After
public void tearDown() throws Exception {
@Override
protected void tearDown() throws Exception {
if (mrCluster != null) {
mrCluster.shutdown();
}
if (dfsCluster != null) {
dfsCluster.shutdown();
}
super.tearDown();
}
@Test
public void testGetInvalidJob() throws Exception {
RunningJob runJob = new JobClient(getJobConf()).getJob(JobID.forName("job_0_0"));
assertNull(runJob);


@ -42,11 +42,6 @@
import org.apache.hadoop.mapred.SkipBadRecords;
import org.apache.hadoop.mapred.Utils;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestStreamingBadRecords extends ClusterMapReduceTestCase
{
@ -73,8 +68,7 @@ public TestStreamingBadRecords() throws IOException
utilTest.redirectIfAntJunit();
}
@Before
public void setUp() throws Exception {
protected void setUp() throws Exception {
Properties props = new Properties();
props.setProperty(JTConfig.JT_RETIREJOBS, "false");
props.setProperty(JTConfig.JT_PERSIST_JOBSTATUS, "false");
@ -248,7 +242,6 @@ public void testNarrowDown() throws Exception {
}
*/
@Test
public void testNoOp() {
// Added to avoid warnings when running this disabled test
}