MAPREDUCE-2839. Fixed TokenCache to get delegation tokens using both new and old apis. Contributed by Siddharth Seth.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1157420 13f79535-47bb-0310-9956-ffa450edef68
parent 2cd4c6ad6d
commit a464402cb8
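For orientation before the diff: the fix makes TokenCache ask each FileSystem for delegation tokens through the newer list-returning API, then fall back to the older single-token API. A minimal standalone sketch of that pattern, assuming the FileSystem and Credentials APIs visible in the patch below (the class and method names of the sketch itself are illustrative, not part of the patch):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class DualApiTokenFetch {
  // Collects delegation tokens from fs into credentials, trying the new
  // API first and the old one as a fallback, mirroring the patched logic.
  static void collect(FileSystem fs, Credentials credentials, String renewer)
      throws IOException {
    // New API: may return several tokens (e.g. viewfs fans out over mounts).
    List<Token<?>> tokens = fs.getDelegationTokens(renewer);
    if (tokens != null) {
      for (Token<?> t : tokens) {
        credentials.addToken(t.getService(), t);
      }
    }
    // Old API: single token, still needed for implementations such as hftp
    // that never implemented getDelegationTokens().
    Token<?> t = fs.getDelegationToken(renewer);
    if (t != null) {
      // The actual patch keys this token by the file system name rather than
      // by t.getService(); keying by service here is a simplification.
      credentials.addToken(t.getService(), t);
    }
  }
}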
CHANGES.txt
@@ -399,6 +399,9 @@ Trunk (unreleased changes)
     MAPREDUCE-2541. Fixed a race condition in IndexCache.removeMap. (Binglin
     Chang via acmurthy)
 
+    MAPREDUCE-2839. Fixed TokenCache to get delegation tokens using both new
+    and old apis. (Siddharth Seth via acmurthy)
+
 Release 0.22.0 - Unreleased
 
 
TokenCache.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.mapreduce.security;
 
 import java.io.IOException;
-import java.net.URI;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -28,10 +30,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobTracker;
@@ -139,6 +138,16 @@ static void obtainTokensForNamenodesInternal(FileSystem fs,
         return;
       }
     }
+    List<Token<?>> tokens = fs.getDelegationTokens(delegTokenRenewer);
+    if (tokens != null) {
+      for (Token<?> token : tokens) {
+        credentials.addToken(token.getService(), token);
+        LOG.info("Got dt for " + fs.getUri() + ";uri=" + fsName +
+            ";t.service=" + token.getService());
+      }
+    }
+    // Call getDelegationToken as well for now - for FS implementations
+    // which may not have implemented getDelegationTokens (hftp)
     Token<?> token = fs.getDelegationToken(delegTokenRenewer);
     if (token != null) {
       Text fsNameText = new Text(fsName);
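The ordering above matters: the new call harvests every token a composite file system can mint, while the trailing old call keeps file systems that only know getDelegationToken() working. A hedged sketch of how a job client of this era would reach this code through the public wrapper (the paths are examples, and the wrapper signature is an assumption based on the surrounding source, not shown in this diff):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.security.TokenCache;

public class SubmitWithTokens {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = new Job(conf);
    Path in = new Path("hdfs:///user/alice/in");   // illustrative paths
    Path out = new Path("hdfs:///user/alice/out");
    // Walks each path's FileSystem and funnels it through the internal
    // method patched above, storing the tokens in the job's credentials.
    TokenCache.obtainTokensForNamenodes(job.getCredentials(),
        new Path[] { in, out }, conf);
  }
}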
TestTokenCache.java
@@ -30,7 +30,9 @@
 import java.net.URISyntaxException;
 import java.security.NoSuchAlgorithmException;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import javax.crypto.KeyGenerator;
@@ -38,8 +40,10 @@
 
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -149,6 +153,7 @@ private void populateTokens(Job job) {
 
   @BeforeClass
   public static void setUp() throws Exception {
+
     Configuration conf = new Configuration();
     conf.set("hadoop.security.auth_to_local", "RULE:[2:$1]");
     dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
@@ -334,6 +339,14 @@ public Token<DelegationTokenIdentifier> answer(InvocationOnMock invocation)
         return t;
       }}).when(hfs).getDelegationToken(renewer);
 
+    //when(hfs.getDelegationTokens()).thenReturn((Token<? extends TokenIdentifier>) t);
+    Mockito.doAnswer(new Answer<List<Token<DelegationTokenIdentifier>>>(){
+      @Override
+      public List<Token<DelegationTokenIdentifier>> answer(InvocationOnMock invocation)
+          throws Throwable {
+        return Collections.singletonList(t);
+      }}).when(hfs).getDelegationTokens(renewer);
+
     //when(hfs.getCanonicalServiceName).thenReturn(fs_addr);
     Mockito.doAnswer(new Answer<String>(){
       @Override
@@ -378,4 +391,40 @@ public void testGetJTPrincipal() throws IOException {
     assertEquals("Failed to substitute HOSTNAME_PATTERN with hostName",
         serviceName + hostName + domainName, TokenCache.getJTPrincipal(conf));
   }
+
+  @Test
+  public void testGetTokensForViewFS() throws IOException, URISyntaxException {
+    Configuration conf = new Configuration(jConf);
+    FileSystem dfs = dfsCluster.getFileSystem();
+    String serviceName = dfs.getCanonicalServiceName();
+
+    Path p1 = new Path("/mount1");
+    Path p2 = new Path("/mount2");
+    p1 = dfs.makeQualified(p1);
+    p2 = dfs.makeQualified(p2);
+
+    conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
+    conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
+    Credentials credentials = new Credentials();
+    Path lp1 = new Path("viewfs:///dir1");
+    Path lp2 = new Path("viewfs:///dir2");
+    Path[] paths = new Path[2];
+    paths[0] = lp1;
+    paths[1] = lp2;
+    TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
+
+    Collection<Token<? extends TokenIdentifier>> tns =
+        credentials.getAllTokens();
+    assertEquals("number of tokens is not 1", 1, tns.size());
+
+    boolean found = false;
+    for (Token<? extends TokenIdentifier> tt : tns) {
+      System.out.println("token=" + tt);
+      if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
+          && tt.getService().equals(new Text(serviceName))) {
+        found = true;
+      }
+      assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
+    }
+  }
 }
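The new test hinges on de-duplication by service: both viewfs mount links resolve to the single MiniDFSCluster namenode, so TokenCache's Credentials map, keyed by token service, ends up holding exactly one HDFS token covering /dir1 and /dir2. A hedged sketch of the same behavior outside the test harness, against a secure cluster (host, port, mount paths, and the renewer name are illustrative):

import java.net.URI;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

public class ViewFsTokenProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two viewfs links onto the same underlying HDFS authority, using the
    // same mount-table keys as the test above.
    conf.set("fs.viewfs.mounttable.default.link./dir1",
        "hdfs://nn.example.com:8020/mount1");
    conf.set("fs.viewfs.mounttable.default.link./dir2",
        "hdfs://nn.example.com:8020/mount2");
    FileSystem viewFs = FileSystem.get(new URI("viewfs:///"), conf);
    // The list-returning API fans out over the mounts; both links yield
    // tokens for the same service, so a service-keyed store keeps only one.
    List<Token<?>> tokens = viewFs.getDelegationTokens("renewer");
    for (Token<?> t : tokens) {
      System.out.println("service=" + t.getService());
    }
  }
}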