MAPREDUCE-6292. Use org.junit package instead of junit.framework in TestCombineFileInputFormat. (aajisaka)

This commit is contained in:
Akira Ajisaka 2015-03-25 19:00:35 +09:00
parent b6dea9776b
commit c770df49b4
2 changed files with 25 additions and 14 deletions

View File

@@ -389,6 +389,9 @@ Release 2.7.0 - UNRELEASED
MAPREDUCE-6265. Make ContainerLauncherImpl.INITIAL_POOL_SIZE configurable
to better control to launch/kill containers. (Zhihai Xu via ozawa)
MAPREDUCE-6292. Use org.junit package instead of junit.framework in
TestCombineFileInputFormat. (aajisaka)
OPTIMIZATIONS
MAPREDUCE-6169. MergeQueue should release reference to the current item

View File

@@ -29,9 +29,6 @@
import java.util.concurrent.TimeoutException;
import java.util.zip.GZIPOutputStream;
import org.junit.Assert;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -60,7 +57,11 @@
import com.google.common.collect.HashMultiset;
public class TestCombineFileInputFormat extends TestCase {
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestCombineFileInputFormat {
private static final String rack1[] = new String[] {
"/r1"
@@ -221,6 +222,7 @@ public RecordReader<Text,Text> createRecordReader(InputSplit split,
}
}
@Test
public void testRecordReaderInit() throws InterruptedException, IOException {
// Test that we properly initialize the child recordreader when
// CombineFileInputFormat and CombineFileRecordReader are used.
@@ -258,6 +260,7 @@ public void testRecordReaderInit() throws InterruptedException, IOException {
rr.getCurrentKey().toString());
}
@Test
public void testReinit() throws Exception {
// Test that a split containing multiple files works correctly,
// with the child RecordReader getting its initialize() method
@@ -296,6 +299,7 @@ public void testReinit() throws Exception {
assertFalse(rr.nextKeyValue());
}
@Test
public void testSplitPlacement() throws Exception {
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
@@ -725,6 +729,7 @@ private static void writeDataAndSetReplication(FileSystem fileSys, Path name,
DFSTestUtil.waitReplication(fileSys, name, replication);
}
@Test
public void testNodeDistribution() throws IOException, InterruptedException {
DummyInputFormat inFormat = new DummyInputFormat();
int numBlocks = 60;
@@ -774,20 +779,21 @@ public void testNodeDistribution() throws IOException, InterruptedException {
maxSplitSize, minSizeNode, minSizeRack, splits);
int expectedSplitCount = (int) (totLength / maxSplitSize);
Assert.assertEquals(expectedSplitCount, splits.size());
assertEquals(expectedSplitCount, splits.size());
// Ensure 90+% of the splits have node local blocks.
// 100% locality may not always be achieved.
int numLocalSplits = 0;
for (InputSplit inputSplit : splits) {
Assert.assertEquals(maxSplitSize, inputSplit.getLength());
assertEquals(maxSplitSize, inputSplit.getLength());
if (inputSplit.getLocations().length == 1) {
numLocalSplits++;
}
}
Assert.assertTrue(numLocalSplits >= 0.9 * splits.size());
assertTrue(numLocalSplits >= 0.9 * splits.size());
}
@Test
public void testNodeInputSplit() throws IOException, InterruptedException {
// Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on
// both nodes. The grouping ensures that both nodes get splits instead of
@@ -826,18 +832,19 @@ public void testNodeInputSplit() throws IOException, InterruptedException {
maxSize, minSizeNode, minSizeRack, splits);
int expectedSplitCount = (int)(totLength/maxSize);
Assert.assertEquals(expectedSplitCount, splits.size());
assertEquals(expectedSplitCount, splits.size());
HashMultiset<String> nodeSplits = HashMultiset.create();
for(int i=0; i<expectedSplitCount; ++i) {
InputSplit inSplit = splits.get(i);
Assert.assertEquals(maxSize, inSplit.getLength());
Assert.assertEquals(1, inSplit.getLocations().length);
assertEquals(maxSize, inSplit.getLength());
assertEquals(1, inSplit.getLocations().length);
nodeSplits.add(inSplit.getLocations()[0]);
}
Assert.assertEquals(3, nodeSplits.count(locations[0]));
Assert.assertEquals(3, nodeSplits.count(locations[1]));
assertEquals(3, nodeSplits.count(locations[0]));
assertEquals(3, nodeSplits.count(locations[1]));
}
@Test
public void testSplitPlacementForCompressedFiles() throws Exception {
MiniDFSCluster dfs = null;
FileSystem fileSys = null;
@@ -1190,6 +1197,7 @@ public void testSplitPlacementForCompressedFiles() throws Exception {
/**
* Test that CFIF can handle missing blocks.
*/
@Test
public void testMissingBlocks() throws Exception {
String namenode = null;
MiniDFSCluster dfs = null;