MAPREDUCE-2797. Update mapreduce tests and RAID for HDFS-2239.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1156215 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date:   2011-08-10 15:10:20 +00:00
Parent: 5d5b1c6c10
Commit: 2d653c994c

5 changed files with 11 additions and 7 deletions


@@ -381,6 +381,8 @@ Trunk (unreleased changes)
     MAPREDUCE-2760. mapreduce.jobtracker.split.metainfo.maxsize typoed
     in mapred-default.xml. (todd via eli)
+
+    MAPREDUCE-2797. Update mapreduce tests and RAID for HDFS-2239. (szetszwo)
 
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES


@@ -543,7 +543,7 @@ String getSourceFile(String parity, String prefix) throws IOException {
     }
     // remove the prefix
     String src = parity.substring(prefix.length());
-    if (NameNodeRaidUtil.getFileInfo(namesystem.dir, src, true) == null) {
+    if (NameNodeRaidUtil.getFileInfo(namesystem, src, true) == null) {
       return null;
     }
     return src;
@@ -575,7 +575,7 @@ String getParityFile(String src) throws IOException {
   private String getParityFile(String parityPrefix, String src)
       throws IOException {
     String parity = parityPrefix + src;
-    if (NameNodeRaidUtil.getFileInfo(namesystem.dir, parity, true) == null) {
+    if (NameNodeRaidUtil.getFileInfo(namesystem, parity, true) == null) {
       return null;
     }
     return parity;
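
Both hunks above are the same fix: HDFS-2239 reduced the access levels of FSNamesystem's fields and methods, so RAID code can no longer reach through namesystem.dir to the FSDirectory, and the NameNodeRaidUtil helper now takes the FSNamesystem itself. A minimal sketch of what the updated helper might look like, assuming it lives in the namenode package and that FSNamesystem exposes a getFileInfo(src, resolveLink) lookup (neither detail is confirmed by this diff):

    package org.apache.hadoop.hdfs.server.namenode;

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    /** Hypothetical sketch of the RAID-facing helper after HDFS-2239. */
    public class NameNodeRaidUtil {
      /** Look up file status via the namesystem, not its FSDirectory. */
      public static HdfsFileStatus getFileInfo(final FSNamesystem namesystem,
          final String src, final boolean resolveLink) throws IOException {
        // namesystem.dir is no longer accessible to outside callers
        // (HDFS-2239), so delegate to the namesystem's own lookup
        // (assumed API).
        return namesystem.getFileInfo(src, resolveLink);
      }
    }

Callers such as getSourceFile and getParityFile then pass the namesystem reference they already hold, as the two hunks above show.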


@@ -32,6 +32,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -147,7 +148,7 @@ public static void setUp() throws Exception {
         dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null,
         jConf);
-    dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
     FileSystem fs = dfsCluster.getFileSystem();
     p1 = new Path("file1");
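
The token-related tests make the same move: instead of calling getDelegationTokenSecretManager() on the FSNamesystem directly, they go through the NameNodeAdapter test utility, which sits in the namenode package and can reach members whose visibility HDFS-2239 reduced. A plausible sketch of the adapter method, assuming it simply forwards the call:

    package org.apache.hadoop.hdfs.server.namenode;

    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;

    /** Hypothetical sketch of the test-side adapter in the namenode package. */
    public class NameNodeAdapter {
      /** Expose the namesystem's delegation-token secret manager to tests. */
      public static DelegationTokenSecretManager getDtSecretManager(
          final FSNamesystem ns) {
        // Same-package access still works even if the accessor is no
        // longer public after HDFS-2239.
        return ns.getDelegationTokenSecretManager();
      }
    }

With such an adapter in place, the setUp() calls in the remaining test files reduce to the one-liner seen in the hunks below.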


@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -157,7 +158,7 @@ public static void setUp() throws Exception {
     createTokenFileJson();
     verifySecretKeysInJSONFile();
-    dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
     FileSystem fs = dfsCluster.getFileSystem();
     p1 = new Path("file1");
@@ -303,7 +304,7 @@ public void testGetTokensForHftpFS() throws IOException, URISyntaxException {
     HftpFileSystem hfs = mock(HftpFileSystem.class);
     DelegationTokenSecretManager dtSecretManager =
-        dfsCluster.getNamesystem().getDelegationTokenSecretManager();
+        NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem());
     String renewer = "renewer";
     jConf.set(JTConfig.JT_USER_NAME,renewer);
     DelegationTokenIdentifier dtId =


@@ -40,6 +40,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
@@ -185,8 +186,7 @@ public static void setUp() throws Exception {
     createTokenFileJson();
     verifySecretKeysInJSONFile();
-    dfsCluster.getNamesystem()
-        .getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
     FileSystem fs = dfsCluster.getFileSystem();
     p1 = new Path("file1");