HDFS-3268. FileContext API mishandles token service and is incompatible with HA. Contributed by Daryn Sharp.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326747 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Todd Lipcon 2012-04-16 19:08:29 +00:00
parent 551468385c
commit 258da66cc7
5 changed files with 35 additions and 8 deletions

View File

@ -516,6 +516,9 @@ Release 2.0.0 - UNRELEASED
HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. (atm)
HDFS-3268. FileContext API mishandles token service and incompatible with
HA (Daryn Sharp via todd)
BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

View File

@ -20,7 +20,6 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
@ -391,11 +390,15 @@ public Path getLinkTarget(Path p) throws IOException {
return new Path(dfs.getLinkTarget(getUriPath(p)));
}
/**
 * Returns the canonical token service name for this file system, as
 * reported by the underlying {@code DFSClient}.
 *
 * @return the token service name, or null if tokens are not in use
 */
@Override
public String getCanonicalServiceName() {
  // The client tracks the token service (dtService), so simply delegate.
  final String service = dfs.getCanonicalServiceName();
  return service;
}
/**
 * Gets a delegation token for this file system and returns it as a
 * single-element list.
 *
 * @param renewer the designated token renewer; may be null
 * @return a list containing exactly one delegation token
 * @throws IOException if the token cannot be obtained from the client
 */
@Override //AbstractFileSystem
public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
// A null renewer is passed through as null rather than wrapped in Text.
Token<DelegationTokenIdentifier> result = dfs
.getDelegationToken(renewer == null ? null : new Text(renewer));
// Overwrite the token's service with this file system's canonical service
// name. NOTE(review): presumably this keeps the service consistent with
// HA logical URIs per HDFS-3268 — confirm against DFSClient.dtService.
result.setService(new Text(this.getCanonicalServiceName()));
List<Token<?>> tokenList = new ArrayList<Token<?>>();
tokenList.add(result);
return tokenList;

View File

@ -637,6 +637,16 @@ public FsServerDefaults getServerDefaults() throws IOException {
return serverDefaults;
}
/**
 * Get a canonical token service name for this client's tokens. Null should
 * be returned if the client is not using tokens.
 *
 * @return the token service for the client, or null when no token service
 *         is configured
 */
@InterfaceAudience.LimitedPrivate( { "HDFS" })
public String getCanonicalServiceName() {
  // No token service configured means this client is not using tokens.
  if (dtService == null) {
    return null;
  }
  return dtService.toString();
}
/**
* @see ClientProtocol#getDelegationToken(Text)
*/

View File

@ -848,12 +848,7 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
*/
@Override
public String getCanonicalServiceName() {
  // Defect in the captured text: the stripped diff left the REMOVED
  // HA-branching body (getUri()/HAUtil checks) in place alongside the
  // ADDED delegating return, producing an unreachable statement after an
  // if/else in which both branches return — a Java compile error. The
  // commit's intended body is the single delegation below: DFSClient
  // already knows the correct token service (including HA logical URIs),
  // so the file system no longer recomputes it here.
  return dfs.getCanonicalServiceName();
}
/**

View File

@ -30,6 +30,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
@ -223,6 +224,21 @@ public void testDFSGetCanonicalServiceName() throws Exception {
token.cancel(dfs.getConf());
}
/**
 * Verifies that the AbstractFileSystem created from an HA logical URI
 * reports the logical-URI token service, stamps that service on issued
 * delegation tokens, and that such tokens can be renewed and cancelled.
 */
@Test
public void testHdfsGetCanonicalServiceName() throws Exception {
  final Configuration conf = dfs.getConf();
  final URI logicalUri = HATestUtil.getLogicalUri(cluster);
  final String expectedService =
      HAUtil.buildTokenServiceForLogicalUri(logicalUri).toString();
  final AbstractFileSystem fs =
      AbstractFileSystem.createFileSystem(logicalUri, conf);
  assertEquals(expectedService, fs.getCanonicalServiceName());
  final String renewer =
      UserGroupInformation.getCurrentUser().getShortUserName();
  final Token<?> token = fs.getDelegationTokens(renewer).get(0);
  assertEquals(expectedService, token.getService().toString());
  // make sure the logical uri is handled correctly
  token.renew(conf);
  token.cancel(conf);
}
/** Token operations a test case may exercise against a file system. */
enum TokenTestAction {
  RENEW,
  CANCEL
}