diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ce87883546..4764b9db94 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -79,11 +79,19 @@ Trunk (unreleased changes)
HADOOP-7899. Generate proto java files as part of the build. (tucu)
- HADOOP-7574. Improve FSShell -stat, add user/group elements (XieXianshan via harsh)
+ HADOOP-7574. Improve FSShell -stat, add user/group elements.
+ (XieXianshan via harsh)
- HADOOP-7348. Change 'addnl' in getmerge util to be a flag '-nl' instead (XieXianshan via harsh)
+ HADOOP-7348. Change 'addnl' in getmerge util to be a flag '-nl' instead.
+ (XieXianshan via harsh)
- HADOOP-7919. Remove the unused hadoop.logfile.* properties from the core-default.xml file. (harsh)
+ HADOOP-7919. Remove the unused hadoop.logfile.* properties from the
+ core-default.xml file. (harsh)
+
+ HADOOP-7808. Port HADOOP-7510 - Add configurable option to use original
+ hostname in token instead of IP to allow server IP change.
+ (Daryn Sharp via suresh)
+
BUGS
@@ -241,6 +249,9 @@ Release 0.23.1 - Unreleased
HADOOP-7948. Shell scripts created by hadoop-dist/pom.xml to build tar do not
properly propagate failure. (cim_michajlomatijkiw via tucu)
+ HADOOP-7949. Updated maxIdleTime default in the code to match
+ core-default.xml. (eli)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 7c9b25c957..f0ca72b00e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -51,7 +51,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
/** How often does RPC client send pings to RPC server */
public static final String IPC_PING_INTERVAL_KEY = "ipc.ping.interval";
/** Default value for IPC_PING_INTERVAL_KEY */
- public static final int IPC_PING_INTERVAL_DEFAULT = 60000;
+ public static final int IPC_PING_INTERVAL_DEFAULT = 60000; // 1 min
/** Enables pings from RPC client to the server */
public static final String IPC_CLIENT_PING_KEY = "ipc.client.ping";
/** Default value of IPC_CLIENT_PING_KEY */
@@ -114,5 +114,11 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_USER_MAPPINGS =
"security.refresh.user.mappings.protocol.acl";
+
+ public static final String HADOOP_SECURITY_TOKEN_SERVICE_USE_IP =
+ "hadoop.security.token.service.use_ip";
+ public static final boolean HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT =
+ true;
+
}
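
For reference, the new switch is read like any other boolean configuration entry. A minimal sketch (illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class TokenServiceUseIpCheck {
      public static void main(String[] args) {
        // Defaults to true, i.e. token service strings are keyed by IP.
        Configuration conf = new Configuration();
        boolean useIp = conf.getBoolean(
            CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP,
            CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT);
        System.out.println("tokens keyed by IP: " + useIp);
      }
    }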
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 534046a9ab..7953411b57 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -165,7 +165,7 @@ public class CommonConfigurationKeysPublic {
public static final String IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY =
"ipc.client.connection.maxidletime";
/** Default value for IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY */
- public static final int IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT = 10000;
+ public static final int IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT = 10000; // 10s
/** See core-default.xml */
public static final String IPC_CLIENT_CONNECT_MAX_RETRIES_KEY =
"ipc.client.connect.max.retries";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 4fe9d77573..64f7c68a6d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -47,6 +47,7 @@
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
@@ -186,6 +187,15 @@ public void initialize(URI name, Configuration conf) throws IOException {
/** Returns a URI whose scheme and authority identify this FileSystem.*/
public abstract URI getUri();
+ /**
+ * Resolve the uri's hostname, and add the default port if it is not in the uri
+ * @return the canonical URI
+ * @see NetUtils#getCanonicalUri(URI, int)
+ */
+ protected URI getCanonicalUri() {
+ return NetUtils.getCanonicalUri(getUri(), getDefaultPort());
+ }
+
/**
* Get the default port for this file system.
* @return the default port or 0 if there isn't one
@@ -195,8 +205,13 @@ protected int getDefaultPort() {
}
/**
- * Get a canonical name for this file system.
- * @return a URI string that uniquely identifies this file system
+ * Get a canonical service name for this file system. The token cache is
+ * the only user of this value, and uses it to look up this filesystem's
+ * service tokens. The token cache will not attempt to acquire tokens if the
+ * service is null.
+ * @return a service string that uniquely identifies this file system, or
+ * null if the filesystem does not implement tokens
+ * @see SecurityUtil#buildDTServiceName(URI, int)
*/
public String getCanonicalServiceName() {
return SecurityUtil.buildDTServiceName(getUri(), getDefaultPort());
@@ -487,32 +502,31 @@ protected FileSystem() {
*/
protected void checkPath(Path path) {
URI uri = path.toUri();
- if (uri.getScheme() == null) // fs is relative
- return;
- String thisScheme = this.getUri().getScheme();
String thatScheme = uri.getScheme();
- String thisAuthority = this.getUri().getAuthority();
- String thatAuthority = uri.getAuthority();
+ if (thatScheme == null) // fs is relative
+ return;
+ URI thisUri = getCanonicalUri();
+ String thisScheme = thisUri.getScheme();
//authority and scheme are not case sensitive
if (thisScheme.equalsIgnoreCase(thatScheme)) {// schemes match
- if (thisAuthority == thatAuthority || // & authorities match
- (thisAuthority != null &&
- thisAuthority.equalsIgnoreCase(thatAuthority)))
- return;
-
+ String thisAuthority = thisUri.getAuthority();
+ String thatAuthority = uri.getAuthority();
if (thatAuthority == null && // path's authority is null
thisAuthority != null) { // fs has an authority
- URI defaultUri = getDefaultUri(getConf()); // & is the conf default
- if (thisScheme.equalsIgnoreCase(defaultUri.getScheme()) &&
- thisAuthority.equalsIgnoreCase(defaultUri.getAuthority()))
- return;
- try { // or the default fs's uri
- defaultUri = get(getConf()).getUri();
- } catch (IOException e) {
- throw new RuntimeException(e);
+ URI defaultUri = getDefaultUri(getConf());
+ if (thisScheme.equalsIgnoreCase(defaultUri.getScheme())) {
+ uri = defaultUri; // schemes match, so use this uri instead
+ } else {
+ uri = null; // can't determine auth of the path
}
- if (thisScheme.equalsIgnoreCase(defaultUri.getScheme()) &&
- thisAuthority.equalsIgnoreCase(defaultUri.getAuthority()))
+ }
+ if (uri != null) {
+ // canonicalize uri before comparing with this fs
+ uri = NetUtils.getCanonicalUri(uri, getDefaultPort());
+ thatAuthority = uri.getAuthority();
+ if (thisAuthority == thatAuthority || // authorities match
+ (thisAuthority != null &&
+ thisAuthority.equalsIgnoreCase(thatAuthority)))
return;
}
}
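
The rewritten checkPath() canonicalizes both the filesystem's uri and the path's uri before comparing authorities, so short, partial, and fully qualified hostnames all match once the resolver qualifies them. A rough sketch of the comparison (illustrative; assumes "host" resolves to "host.a.b" and 123 is the filesystem's default port, as in the new test below):

    // Both sides are canonicalized with the fs default port before the
    // case-insensitive authority comparison.
    URI thisUri = NetUtils.getCanonicalUri(URI.create("myfs://host"), 123);
    URI thatUri = NetUtils.getCanonicalUri(URI.create("myfs://host.a.b:123"), 123);
    boolean matches =
        thisUri.getAuthority().equalsIgnoreCase(thatUri.getAuthority());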
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index f59085c87a..cedf802228 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -77,6 +77,15 @@ public URI getUri() {
return fs.getUri();
}
+ /**
+ * Returns a qualified URI whose scheme and authority identify this
+ * FileSystem.
+ */
+ @Override
+ protected URI getCanonicalUri() {
+ return fs.getCanonicalUri();
+ }
+
/** Make sure that a path specifies a FileSystem. */
public Path makeQualified(Path path) {
return fs.makeQualified(path);
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index c636493911..5fe97eac1b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -48,6 +48,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.RpcPayloadHeader.*;
import org.apache.hadoop.io.IOUtils;
@@ -88,8 +89,6 @@ public class Client {
private SocketFactory socketFactory; // how to create sockets
private int refCount = 1;
- final static String PING_INTERVAL_NAME = "ipc.ping.interval";
- final static int DEFAULT_PING_INTERVAL = 60000; // 1 min
final static int PING_CALL_ID = -1;
/**
@@ -99,7 +98,7 @@ public class Client {
* @param pingInterval the ping interval
*/
final public static void setPingInterval(Configuration conf, int pingInterval) {
- conf.setInt(PING_INTERVAL_NAME, pingInterval);
+ conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, pingInterval);
}
/**
@@ -110,7 +109,8 @@ final public static void setPingInterval(Configuration conf, int pingInterval) {
* @return the ping interval
*/
final static int getPingInterval(Configuration conf) {
- return conf.getInt(PING_INTERVAL_NAME, DEFAULT_PING_INTERVAL);
+ return conf.getInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
+ CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);
}
/**
@@ -123,7 +123,7 @@ final static int getPingInterval(Configuration conf) {
* @return the timeout period in milliseconds. -1 if no timeout value is set
*/
final public static int getTimeout(Configuration conf) {
- if (!conf.getBoolean("ipc.client.ping", true)) {
+ if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true)) {
return getPingInterval(conf);
}
return -1;
@@ -425,7 +425,7 @@ private synchronized boolean setupSaslConnection(final InputStream in2,
*/
private synchronized boolean updateAddress() throws IOException {
// Do a fresh lookup with the old host name.
- InetSocketAddress currentAddr = new InetSocketAddress(
+ InetSocketAddress currentAddr = NetUtils.createSocketAddrForHost(
server.getHostName(), server.getPort());
if (!server.equals(currentAddr)) {
@@ -1347,15 +1347,19 @@ public static ConnectionId getConnectionId(InetSocketAddress addr,
Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
Configuration conf) throws IOException {
String remotePrincipal = getRemotePrincipal(conf, addr, protocol);
- boolean doPing = conf.getBoolean("ipc.client.ping", true);
+ boolean doPing =
+ conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
return new ConnectionId(addr, protocol, ticket,
rpcTimeout, remotePrincipal,
- conf.getInt("ipc.client.connection.maxidletime", 10000), // 10s
- conf.getInt("ipc.client.connect.max.retries", 10),
conf.getInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT),
- conf.getBoolean("ipc.client.tcpnodelay", false),
+ conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
+ CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT),
+ conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
+ CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT),
+ conf.getBoolean(CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
+ CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_DEFAULT),
doPing,
(doPing ? Client.getPingInterval(conf) : 0));
}
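
With the string literals replaced by constants, ping configuration reads the same everywhere. A short sketch (illustrative, not part of the patch):

    Configuration conf = new Configuration();
    Client.setPingInterval(conf, 30000);  // writes ipc.ping.interval
    conf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
    int interval = conf.getInt(
        CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
        CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);  // -> 30000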
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index b9220a6df5..8046786685 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -62,6 +62,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
@@ -378,7 +379,9 @@ private class Listener extends Thread {
//-tion (for idle connections) ran
private long cleanupInterval = 10000; //the minimum interval between
//two cleanup runs
- private int backlogLength = conf.getInt("ipc.server.listen.queue.size", 128);
+ private int backlogLength = conf.getInt(
+ CommonConfigurationKeysPublic.IPC_SERVER_LISTEN_QUEUE_SIZE_KEY,
+ CommonConfigurationKeysPublic.IPC_SERVER_LISTEN_QUEUE_SIZE_DEFAULT);
public Listener() throws IOException {
address = new InetSocketAddress(bindAddress, port);
@@ -1712,12 +1715,18 @@ protected Server(String bindAddress, int port,
} else {
this.readThreads = conf.getInt(
CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,
- CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_DEFAULT);
+ CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_DEFAULT);
}
this.callQueue = new LinkedBlockingQueue<Call>(maxQueueSize);
- this.maxIdleTime = 2*conf.getInt("ipc.client.connection.maxidletime", 1000);
- this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10);
- this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000);
+ this.maxIdleTime = 2 * conf.getInt(
+ CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
+ CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
+ this.maxConnectionsToNuke = conf.getInt(
+ CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,
+ CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_DEFAULT);
+ this.thresholdIdleConnections = conf.getInt(
+ CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,
+ CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_DEFAULT);
this.secretManager = (SecretManager<TokenIdentifier>) secretManager;
this.authorize =
conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
@@ -1729,7 +1738,9 @@ protected Server(String bindAddress, int port,
this.port = listener.getAddress().getPort();
this.rpcMetrics = RpcMetrics.create(this);
this.rpcDetailedMetrics = RpcDetailedMetrics.create(this.port);
- this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);
+ this.tcpNoDelay = conf.getBoolean(
+ CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
+ CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_DEFAULT);
// Create the responder here
responder = new Responder();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/Util.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/Util.java
index e68325b09d..166a846fdf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/Util.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/spi/Util.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.net.NetUtils;
/**
* Static utility methods
@@ -56,14 +57,7 @@ public static List<InetSocketAddress> parse(String specs, int defaultPort) {
else {
String[] specStrings = specs.split("[ ,]+");
for (String specString : specStrings) {
- int colon = specString.indexOf(':');
- if (colon < 0 || colon == specString.length() - 1) {
- result.add(new InetSocketAddress(specString, defaultPort));
- } else {
- String hostname = specString.substring(0, colon);
- int port = Integer.parseInt(specString.substring(colon+1));
- result.add(new InetSocketAddress(hostname, port));
- }
+ result.add(NetUtils.createSocketAddr(specString, defaultPort));
}
}
return result;
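
The hand-rolled colon parsing is replaced by NetUtils.createSocketAddr, which handles bare hosts, host:port pairs, and full URIs uniformly. An illustrative call (hostnames are made up; imports of List and InetSocketAddress assumed):

    // "carbon1:2003" keeps its explicit port; "carbon2" falls back to 2003.
    List<InetSocketAddress> addrs = Util.parse("carbon1:2003,carbon2", 2003);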
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java
index 2ddf3c5fb5..aa5d715078 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/Servers.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.net.NetUtils;
/**
* Helpers to handle server addresses
@@ -57,14 +58,7 @@ public static List<InetSocketAddress> parse(String specs, int defaultPort) {
else {
String[] specStrings = specs.split("[ ,]+");
for (String specString : specStrings) {
- int colon = specString.indexOf(':');
- if (colon < 0 || colon == specString.length() - 1) {
- result.add(new InetSocketAddress(specString, defaultPort));
- } else {
- String hostname = specString.substring(0, colon);
- int port = Integer.parseInt(specString.substring(colon+1));
- result.add(new InetSocketAddress(hostname, port));
- }
+ result.add(NetUtils.createSocketAddr(specString, defaultPort));
}
}
return result;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index ceaccb285b..752c0be8bc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -37,6 +37,7 @@
import java.util.Map.Entry;
import java.util.regex.Pattern;
import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
import javax.net.SocketFactory;
@@ -45,11 +46,17 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.ReflectionUtils;
+import com.google.common.annotations.VisibleForTesting;
+
+//this will need to be replaced someday when there is a suitable replacement
+import sun.net.dns.ResolverConfiguration;
+import sun.net.util.IPAddressUtil;
+
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class NetUtils {
@@ -65,6 +72,26 @@ public class NetUtils {
/** Base URL of the Hadoop Wiki: {@value} */
public static final String HADOOP_WIKI = "http://wiki.apache.org/hadoop/";
+ private static HostResolver hostResolver;
+
+ static {
+ // SecurityUtil requires a more secure host resolver if tokens are
+ // using hostnames
+ setUseQualifiedHostResolver(!SecurityUtil.getTokenServiceUseIp());
+ }
+
+ /**
+ * This method is intended for use only by SecurityUtil!
+ * @param flag whether the qualified or standard host resolver is used
+ * to create socket addresses
+ */
+ @InterfaceAudience.Private
+ public static void setUseQualifiedHostResolver(boolean flag) {
+ hostResolver = flag
+ ? new QualifiedHostResolver()
+ : new StandardHostResolver();
+ }
+
/**
* Get the socket factory for the given class according to its
* configuration parameter
@@ -178,43 +205,256 @@ public static InetSocketAddress createSocketAddr(String target,
throw new IllegalArgumentException("Target address cannot be null." +
helpText);
}
- int colonIndex = target.indexOf(':');
- if (colonIndex < 0 && defaultPort == -1) {
- throw new RuntimeException("Not a host:port pair: " + target +
- helpText);
- }
- String hostname;
- int port = -1;
- if (!target.contains("/")) {
- if (colonIndex == -1) {
- hostname = target;
- } else {
- // must be the old style :
- hostname = target.substring(0, colonIndex);
- String portStr = target.substring(colonIndex + 1);
- try {
- port = Integer.parseInt(portStr);
- } catch (NumberFormatException nfe) {
- throw new IllegalArgumentException(
- "Can't parse port '" + portStr + "'"
- + helpText);
- }
- }
- } else {
- // a new uri
- URI addr = new Path(target).toUri();
- hostname = addr.getHost();
- port = addr.getPort();
+ boolean hasScheme = target.contains("://");
+ URI uri = null;
+ try {
+ uri = hasScheme ? URI.create(target) : URI.create("dummyscheme://"+target);
+ } catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException(
+ "Does not contain a valid host:port authority: " + target + helpText
+ );
}
+ String host = uri.getHost();
+ int port = uri.getPort();
if (port == -1) {
port = defaultPort;
}
-
- if (getStaticResolution(hostname) != null) {
- hostname = getStaticResolution(hostname);
+ String path = uri.getPath();
+
+ if ((host == null) || (port < 0) ||
+ (!hasScheme && path != null && !path.isEmpty()))
+ {
+ throw new IllegalArgumentException(
+ "Does not contain a valid host:port authority: " + target + helpText
+ );
}
- return new InetSocketAddress(hostname, port);
+ return createSocketAddrForHost(host, port);
+ }
+
+ /**
+ * Create a socket address with the given host and port. The hostname
+ * might be replaced with another host that was set via
+ * {@link #addStaticResolution(String, String)}. The value of
+ * hadoop.security.token.service.use_ip will determine whether the
+ * standard java host resolver is used, or if the fully qualified resolver
+ * is used.
+ * @param host the hostname or IP use to instantiate the object
+ * @param port the port number
+ * @return InetSocketAddress
+ */
+ public static InetSocketAddress createSocketAddrForHost(String host, int port) {
+ String staticHost = getStaticResolution(host);
+ String resolveHost = (staticHost != null) ? staticHost : host;
+
+ InetSocketAddress addr;
+ try {
+ InetAddress iaddr = hostResolver.getByName(resolveHost);
+ // if there is a static entry for the host, make the returned
+ // address look like the original given host
+ if (staticHost != null) {
+ iaddr = InetAddress.getByAddress(host, iaddr.getAddress());
+ }
+ addr = new InetSocketAddress(iaddr, port);
+ } catch (UnknownHostException e) {
+ addr = InetSocketAddress.createUnresolved(host, port);
+ }
+ return addr;
+ }
+
+ interface HostResolver {
+ InetAddress getByName(String host) throws UnknownHostException;
+ }
+
+ /**
+ * Uses standard java host resolution
+ */
+ static class StandardHostResolver implements HostResolver {
+ public InetAddress getByName(String host) throws UnknownHostException {
+ return InetAddress.getByName(host);
+ }
+ }
+
+ /**
+ * This is an alternate resolver with important properties that the standard
+ * java resolver lacks:
+ * 1) The hostname is fully qualified. This avoids security issues when
+ * hosts in the cluster do not all share the same search domains. It
+ * also prevents other hosts from performing unnecessary dns searches.
+ * In contrast, InetAddress simply returns the host as given.
+ * 2) The InetAddress is instantiated with an exact host and IP to prevent
+ * further unnecessary lookups. InetAddress may perform an unnecessary
+ * reverse lookup for an IP.
+ * 3) A call to getHostName() will always return the qualified hostname, or
+ * more importantly, the IP if instantiated with an IP. This avoids
+ * unnecessary dns timeouts if the host is not resolvable.
+ * 4) Point 3 also ensures that if the host is re-resolved, ex. during a
+ * connection re-attempt, a reverse lookup to host and forward
+ * lookup to IP is not performed, since the reverse/forward mappings may
+ * not always return the same IP. If the client initiated a connection
+ * with an IP, then that IP is all that should ever be contacted.
+ *
+ * NOTE: this resolver is only used if:
+ * hadoop.security.token.service.use_ip=false
+ */
+ protected static class QualifiedHostResolver implements HostResolver {
+ @SuppressWarnings("unchecked")
+ private List<String> searchDomains =
+ ResolverConfiguration.open().searchlist();
+
+ /**
+ * Create an InetAddress with a fully qualified hostname of the given
+ * hostname. InetAddress does not qualify an incomplete hostname that
+ * is resolved via the domain search list.
+ * {@link InetAddress#getCanonicalHostName()} will fully qualify the
+ * hostname, but it always returns the A record whereas the given hostname
+ * may be a CNAME.
+ *
+ * @param host a hostname or ip address
+ * @return InetAddress with the fully qualified hostname or ip
+ * @throws UnknownHostException if host does not exist
+ */
+ public InetAddress getByName(String host) throws UnknownHostException {
+ InetAddress addr = null;
+
+ if (IPAddressUtil.isIPv4LiteralAddress(host)) {
+ // use ipv4 address as-is
+ byte[] ip = IPAddressUtil.textToNumericFormatV4(host);
+ addr = InetAddress.getByAddress(host, ip);
+ } else if (IPAddressUtil.isIPv6LiteralAddress(host)) {
+ // use ipv6 address as-is
+ byte[] ip = IPAddressUtil.textToNumericFormatV6(host);
+ addr = InetAddress.getByAddress(host, ip);
+ } else if (host.endsWith(".")) {
+ // a rooted host ends with a dot, ex. "host."
+ // rooted hosts never use the search path, so only try an exact lookup
+ addr = getByExactName(host);
+ } else if (host.contains(".")) {
+ // the host contains a dot (domain), ex. "host.domain"
+ // try an exact host lookup, then fallback to search list
+ addr = getByExactName(host);
+ if (addr == null) {
+ addr = getByNameWithSearch(host);
+ }
+ } else {
+ // it's a simple host with no dots, ex. "host"
+ // try the search list, then fallback to exact host
+ InetAddress loopback = InetAddress.getByName(null);
+ if (host.equalsIgnoreCase(loopback.getHostName())) {
+ addr = InetAddress.getByAddress(host, loopback.getAddress());
+ } else {
+ addr = getByNameWithSearch(host);
+ if (addr == null) {
+ addr = getByExactName(host);
+ }
+ }
+ }
+ // unresolvable!
+ if (addr == null) {
+ throw new UnknownHostException(host);
+ }
+ return addr;
+ }
+
+ InetAddress getByExactName(String host) {
+ InetAddress addr = null;
+ // InetAddress will use the search list unless the host is rooted
+ // with a trailing dot. The trailing dot will disable any use of the
+ // search path in a lower level resolver. See RFC 1535.
+ String fqHost = host;
+ if (!fqHost.endsWith(".")) fqHost += ".";
+ try {
+ addr = getInetAddressByName(fqHost);
+ // can't leave the hostname as rooted or other parts of the system
+ // malfunction, ex. kerberos principals are lacking proper host
+ // equivalence for rooted/non-rooted hostnames
+ addr = InetAddress.getByAddress(host, addr.getAddress());
+ } catch (UnknownHostException e) {
+ // ignore, caller will throw if necessary
+ }
+ return addr;
+ }
+
+ InetAddress getByNameWithSearch(String host) {
+ InetAddress addr = null;
+ if (host.endsWith(".")) { // already qualified?
+ addr = getByExactName(host);
+ } else {
+ for (String domain : searchDomains) {
+ String dot = !domain.startsWith(".") ? "." : "";
+ addr = getByExactName(host + dot + domain);
+ if (addr != null) break;
+ }
+ }
+ return addr;
+ }
+
+ // implemented as a separate method to facilitate unit testing
+ InetAddress getInetAddressByName(String host) throws UnknownHostException {
+ return InetAddress.getByName(host);
+ }
+
+ void setSearchDomains(String ... domains) {
+ searchDomains = Arrays.asList(domains);
+ }
+ }
+
+ /**
+ * This is for testing only!
+ */
+ @VisibleForTesting
+ static void setHostResolver(HostResolver newResolver) {
+ hostResolver = newResolver;
+ }
+
+ /**
+ * Resolve the uri's hostname, and add the default port if it is not in the uri
+ * @param uri to resolve
+ * @param defaultPort if none is given
+ * @return the canonical URI
+ */
+ public static URI getCanonicalUri(URI uri, int defaultPort) {
+ // skip if there is no authority, ie. "file" scheme or relative uri
+ String host = uri.getHost();
+ if (host == null) {
+ return uri;
+ }
+ String fqHost = canonicalizeHost(host);
+ int port = uri.getPort();
+ // short out if already canonical with a port
+ if (host.equals(fqHost) && port != -1) {
+ return uri;
+ }
+ // reconstruct the uri with the canonical host and port
+ try {
+ uri = new URI(uri.getScheme(), uri.getUserInfo(),
+ fqHost, (port == -1) ? defaultPort : port,
+ uri.getPath(), uri.getQuery(), uri.getFragment());
+ } catch (URISyntaxException e) {
+ throw new IllegalArgumentException(e);
+ }
+ return uri;
+ }
+
+ // cache the canonicalized hostnames; the cache currently isn't expired,
+ // but the canonicals will only change if the host's resolver configuration
+ // changes
+ private static final ConcurrentHashMap<String, String> canonicalizedHostCache =
+ new ConcurrentHashMap<String, String>();
+
+ private static String canonicalizeHost(String host) {
+ // check if the host has already been canonicalized
+ String fqHost = canonicalizedHostCache.get(host);
+ if (fqHost == null) {
+ try {
+ fqHost = hostResolver.getByName(host).getHostName();
+ // slight race condition, but won't hurt
+ canonicalizedHostCache.put(host, fqHost);
+ } catch (UnknownHostException e) {
+ fqHost = host;
+ }
+ }
+ return fqHost;
}
/**
@@ -279,8 +519,8 @@ public static List<String> getAllStaticResolutions() {
*/
public static InetSocketAddress getConnectAddress(Server server) {
InetSocketAddress addr = server.getListenerAddress();
- if (addr.getAddress().getHostAddress().equals("0.0.0.0")) {
- addr = new InetSocketAddress("127.0.0.1", addr.getPort());
+ if (addr.getAddress().isAnyLocalAddress()) {
+ addr = createSocketAddrForHost("127.0.0.1", addr.getPort());
}
return addr;
}
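
The rewritten createSocketAddr parses its target through java.net.URI, so host:port pairs, bare hosts, and full URIs are accepted uniformly, while a scheme-less target with a path is rejected. A sketch of the behavior (hostnames are illustrative):

    InetSocketAddress a = NetUtils.createSocketAddr("nn1:8020");         // host:port
    InetSocketAddress b = NetUtils.createSocketAddr("hdfs://nn1:8020");  // full uri
    InetSocketAddress c = NetUtils.createSocketAddr("nn1", 8020);        // default port
    // NetUtils.createSocketAddr("nn1/data", 8020) -> IllegalArgumentException

Unresolvable hosts no longer throw here: createSocketAddrForHost falls back to an unresolved InetSocketAddress, so a caller can retry after the server's DNS entry changes.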
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index b200ca51f5..a5e8c5d2b5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
@@ -50,6 +51,35 @@ public class SecurityUtil {
public static final Log LOG = LogFactory.getLog(SecurityUtil.class);
public static final String HOSTNAME_PATTERN = "_HOST";
+ // controls whether buildTokenService will use an ip or host/ip as given
+ // by the user
+ private static boolean useIpForTokenService;
+
+ static {
+ boolean useIp = new Configuration().getBoolean(
+ CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP,
+ CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT);
+ setTokenServiceUseIp(useIp);
+ }
+
+ /**
+ * For use only by tests and initialization
+ */
+ @InterfaceAudience.Private
+ static void setTokenServiceUseIp(boolean flag) {
+ useIpForTokenService = flag;
+ NetUtils.setUseQualifiedHostResolver(!flag);
+ }
+
+ /**
+ * Intended only for temporary use by NetUtils. Do not use.
+ * @return whether tokens use an IP address
+ */
+ @InterfaceAudience.Private
+ public static boolean getTokenServiceUseIp() {
+ return useIpForTokenService;
+ }
+
/**
* Find the original TGT within the current subject's credentials. Cross-realm
* TGT's of the form "krbtgt/TWO.COM@ONE.COM" may be present.
@@ -263,29 +293,20 @@ public static void login(final Configuration conf,
}
/**
- * create service name for Delegation token ip:port
- * @param uri
- * @param defPort
- * @return "ip:port"
+ * create the service name for a Delegation token
+ * @param uri of the service
+ * @param defPort is used if the uri lacks a port
+ * @return the token service, or null if no authority
+ * @see #buildTokenService(InetSocketAddress)
*/
public static String buildDTServiceName(URI uri, int defPort) {
- int port = uri.getPort();
- if(port == -1)
- port = defPort;
-
- // build the service name string "/ip:port"
- // for whatever reason using NetUtils.createSocketAddr(target).toString()
- // returns "localhost/ip:port"
- StringBuffer sb = new StringBuffer();
- String host = uri.getHost();
- if (host != null) {
- host = NetUtils.normalizeHostName(host);
- } else {
- host = "";
+ String authority = uri.getAuthority();
+ if (authority == null) {
+ return null;
}
- sb.append(host).append(":").append(port);
- return sb.toString();
- }
+ InetSocketAddress addr = NetUtils.createSocketAddr(authority, defPort);
+ return buildTokenService(addr).toString();
+ }
/**
* Get the host name from the principal name of format /host@realm.
@@ -367,22 +388,58 @@ public static TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
return null;
}
+ /**
+ * Decode the given token's service field into an InetSocketAddress
+ * @param token from which to obtain the service
+ * @return InetSocketAddress for the service
+ */
+ public static InetSocketAddress getTokenServiceAddr(Token<?> token) {
+ return NetUtils.createSocketAddr(token.getService().toString());
+ }
+
/**
* Set the given token's service to the format expected by the RPC client
* @param token a delegation token
* @param addr the socket for the rpc connection
*/
public static void setTokenService(Token<?> token, InetSocketAddress addr) {
- token.setService(buildTokenService(addr));
+ Text service = buildTokenService(addr);
+ if (token != null) {
+ token.setService(service);
+ LOG.info("Acquired token "+token); // Token#toString() prints service
+ } else {
+ LOG.warn("Failed to get token for service "+service);
+ }
}
/**
* Construct the service key for a token
* @param addr InetSocketAddress of remote connection with a token
- * @return "ip:port"
+ * @return "ip:port" or "host:port" depending on the value of
+ * hadoop.security.token.service.use_ip
*/
public static Text buildTokenService(InetSocketAddress addr) {
- String host = addr.getAddress().getHostAddress();
+ String host = null;
+ if (useIpForTokenService) {
+ if (addr.isUnresolved()) { // host has no ip address
+ throw new IllegalArgumentException(
+ new UnknownHostException(addr.getHostName())
+ );
+ }
+ host = addr.getAddress().getHostAddress();
+ } else {
+ host = addr.getHostName().toLowerCase();
+ }
return new Text(host + ":" + addr.getPort());
}
+
+ /**
+ * Construct the service key for a token
+ * @param uri of remote connection with a token
+ * @return "ip:port" or "host:port" depending on the value of
+ * hadoop.security.token.service.use_ip
+ */
+ public static Text buildTokenService(URI uri) {
+ return buildTokenService(NetUtils.createSocketAddr(uri.getAuthority()));
+ }
}
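
The token service string now depends on hadoop.security.token.service.use_ip: with it true (the default) the service is "ip:port" and an unresolved host is an error; with it false the lowercased hostname is used. A sketch (hostname is illustrative and assumed resolvable):

    InetSocketAddress addr = NetUtils.createSocketAddr("nn.example.com:8020");
    Text service = SecurityUtil.buildTokenService(addr);
    // use_ip=true  -> e.g. "192.0.2.1:8020"
    //                 (IllegalArgumentException if the host has no address)
    // use_ip=false -> "nn.example.com:8020"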
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java
new file mode 100644
index 0000000000..70c77a5161
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java
@@ -0,0 +1,365 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.net.URI;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.net.NetUtilsTestResolver;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Test;
+
+public class TestFileSystemCanonicalization extends TestCase {
+ static String[] authorities = {
+ "myfs://host",
+ "myfs://host.a",
+ "myfs://host.a.b",
+ };
+
+ static String[] ips = {
+ "myfs://127.0.0.1"
+ };
+
+
+ @Test
+ public void testSetupResolver() throws Exception {
+ NetUtilsTestResolver.install();
+ }
+
+ // no ports
+
+ @Test
+ public void testShortAuthority() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://host", "myfs://host.a.b:123");
+ verifyPaths(fs, authorities, -1, true);
+ verifyPaths(fs, authorities, 123, true);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testPartialAuthority() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://host.a", "myfs://host.a.b:123");
+ verifyPaths(fs, authorities, -1, true);
+ verifyPaths(fs, authorities, 123, true);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testFullAuthority() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://host.a.b", "myfs://host.a.b:123");
+ verifyPaths(fs, authorities, -1, true);
+ verifyPaths(fs, authorities, 123, true);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ // with default ports
+
+ @Test
+ public void testShortAuthorityWithDefaultPort() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://host:123", "myfs://host.a.b:123");
+ verifyPaths(fs, authorities, -1, true);
+ verifyPaths(fs, authorities, 123, true);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testPartialAuthorityWithDefaultPort() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://host.a:123", "myfs://host.a.b:123");
+ verifyPaths(fs, authorities, -1, true);
+ verifyPaths(fs, authorities, 123, true);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testFullAuthorityWithDefaultPort() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://host.a.b:123", "myfs://host.a.b:123");
+ verifyPaths(fs, authorities, -1, true);
+ verifyPaths(fs, authorities, 123, true);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ // with non-standard ports
+
+ @Test
+ public void testShortAuthorityWithOtherPort() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://host:456", "myfs://host.a.b:456");
+ verifyPaths(fs, authorities, -1, false);
+ verifyPaths(fs, authorities, 123, false);
+ verifyPaths(fs, authorities, 456, true);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testPartialAuthorityWithOtherPort() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://host.a:456", "myfs://host.a.b:456");
+ verifyPaths(fs, authorities, -1, false);
+ verifyPaths(fs, authorities, 123, false);
+ verifyPaths(fs, authorities, 456, true);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testFullAuthorityWithOtherPort() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://host.a.b:456", "myfs://host.a.b:456");
+ verifyPaths(fs, authorities, -1, false);
+ verifyPaths(fs, authorities, 123, false);
+ verifyPaths(fs, authorities, 456, true);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ // ips
+
+ @Test
+ public void testIpAuthority() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://127.0.0.1", "myfs://127.0.0.1:123");
+ verifyPaths(fs, authorities, -1, false);
+ verifyPaths(fs, authorities, 123, false);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, true);
+ verifyPaths(fs, ips, 123, true);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testIpAuthorityWithDefaultPort() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://127.0.0.1:123", "myfs://127.0.0.1:123");
+ verifyPaths(fs, authorities, -1, false);
+ verifyPaths(fs, authorities, 123, false);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, true);
+ verifyPaths(fs, ips, 123, true);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testIpAuthorityWithOtherPort() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://127.0.0.1:456", "myfs://127.0.0.1:456");
+ verifyPaths(fs, authorities, -1, false);
+ verifyPaths(fs, authorities, 123, false);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, true);
+ }
+
+ // bad stuff
+
+ @Test
+ public void testMismatchedSchemes() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs2://simple", "myfs2://simple:123");
+ verifyPaths(fs, authorities, -1, false);
+ verifyPaths(fs, authorities, 123, false);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testMismatchedHosts() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs://simple", "myfs://simple:123");
+ verifyPaths(fs, authorities, -1, false);
+ verifyPaths(fs, authorities, 123, false);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testNullAuthority() throws Exception {
+ FileSystem fs = getVerifiedFS("myfs:///", "myfs:///");
+ verifyPaths(fs, new String[]{ "myfs://" }, -1, true);
+ verifyPaths(fs, authorities, -1, false);
+ verifyPaths(fs, authorities, 123, false);
+ verifyPaths(fs, authorities, 456, false);
+ verifyPaths(fs, ips, -1, false);
+ verifyPaths(fs, ips, 123, false);
+ verifyPaths(fs, ips, 456, false);
+ }
+
+ @Test
+ public void testAuthorityFromDefaultFS() throws Exception {
+ Configuration config = new Configuration();
+ String defaultFsKey = CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
+
+ FileSystem fs = getVerifiedFS("myfs://host", "myfs://host.a.b:123", config);
+ verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
+
+ config.set(defaultFsKey, "myfs://host");
+ verifyPaths(fs, new String[]{ "myfs://" }, -1, true);
+
+ config.set(defaultFsKey, "myfs2://host");
+ verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
+
+ config.set(defaultFsKey, "myfs://host:123");
+ verifyPaths(fs, new String[]{ "myfs://" }, -1, true);
+
+ config.set(defaultFsKey, "myfs://host:456");
+ verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
+ }
+
+ FileSystem getVerifiedFS(String authority, String canonical) throws Exception {
+ return getVerifiedFS(authority, canonical, new Configuration());
+ }
+
+ // create a fs from the authority, then check its uri against the given uri
+ // and the canonical. then try to fetch paths using the canonical
+ FileSystem getVerifiedFS(String authority, String canonical, Configuration conf)
+ throws Exception {
+ URI uri = URI.create(authority);
+ URI canonicalUri = URI.create(canonical);
+
+ FileSystem fs = new DummyFileSystem(uri, conf);
+ assertEquals(uri, fs.getUri());
+ assertEquals(canonicalUri, fs.getCanonicalUri());
+ verifyCheckPath(fs, "/file", true);
+ return fs;
+ }
+
+ void verifyPaths(FileSystem fs, String[] uris, int port, boolean shouldPass) {
+ for (String uri : uris) {
+ if (port != -1) uri += ":"+port;
+ verifyCheckPath(fs, uri+"/file", shouldPass);
+ }
+ }
+
+ void verifyCheckPath(FileSystem fs, String path, boolean shouldPass) {
+ Path rawPath = new Path(path);
+ Path fqPath = null;
+ Exception e = null;
+ try {
+ fqPath = fs.makeQualified(rawPath);
+ } catch (IllegalArgumentException iae) {
+ e = iae;
+ }
+ if (shouldPass) {
+ assertEquals(null, e);
+ String pathAuthority = rawPath.toUri().getAuthority();
+ if (pathAuthority == null) {
+ pathAuthority = fs.getUri().getAuthority();
+ }
+ assertEquals(pathAuthority, fqPath.toUri().getAuthority());
+ } else {
+ assertNotNull("did not fail", e);
+ assertEquals("Wrong FS: "+rawPath+", expected: "+fs.getUri(),
+ e.getMessage());
+ }
+ }
+
+ static class DummyFileSystem extends FileSystem {
+ URI uri;
+ static int defaultPort = 123;
+
+ DummyFileSystem(URI uri, Configuration conf) throws IOException {
+ this.uri = uri;
+ setConf(conf);
+ }
+
+ @Override
+ public URI getUri() {
+ return uri;
+ }
+
+ @Override
+ protected int getDefaultPort() {
+ return defaultPort;
+ }
+
+ @Override
+ public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+ throw new IOException("not supposed to be here");
+ }
+
+ @Override
+ public FSDataOutputStream create(Path f, FsPermission permission,
+ boolean overwrite, int bufferSize, short replication, long blockSize,
+ Progressable progress) throws IOException {
+ throw new IOException("not supposed to be here");
+ }
+
+ @Override
+ public FSDataOutputStream append(Path f, int bufferSize,
+ Progressable progress) throws IOException {
+ throw new IOException("not supposed to be here");
+ }
+
+ @Override
+ public boolean rename(Path src, Path dst) throws IOException {
+ throw new IOException("not supposed to be here");
+ }
+
+ @Override
+ public boolean delete(Path f, boolean recursive) throws IOException {
+ throw new IOException("not supposed to be here");
+ }
+
+ @Override
+ public FileStatus[] listStatus(Path f) throws IOException {
+ throw new IOException("not supposed to be here");
+ }
+
+ @Override
+ public void setWorkingDirectory(Path new_dir) {
+ }
+
+ @Override
+ public Path getWorkingDirectory() {
+ return new Path("/");
+ }
+
+ @Override
+ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
+ throw new IOException("not supposed to be here");
+ }
+
+ @Override
+ public FileStatus getFileStatus(Path f) throws IOException {
+ throw new IOException("not supposed to be here");
+ }
+ }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
index 59eefca045..55484913cc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
@@ -213,8 +214,7 @@ public MiniProtocol run() throws IOException {
token = p.getDelegationToken(new Text(RENEWER));
currentUgi = UserGroupInformation.createUserForTesting(MINI_USER,
GROUP_NAMES);
- token.setService(new Text(addr.getAddress().getHostAddress()
- + ":" + addr.getPort()));
+ SecurityUtil.setTokenService(token, addr);
currentUgi.addToken(token);
return p;
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index 0b186a1eb1..9246fd5d72 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -40,6 +40,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.net.NetUtils;
@@ -286,10 +287,7 @@ private void doDigestRpc(Server server, TestTokenSecretManager sm
.getUserName()));
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
- Text host = new Text(addr.getAddress().getHostAddress() + ":"
- + addr.getPort());
- token.setService(host);
- LOG.info("Service IP address for token is " + host);
+ SecurityUtil.setTokenService(token, addr);
current.addToken(token);
TestSaslProtocol proxy = null;
@@ -311,14 +309,17 @@ private void doDigestRpc(Server server, TestTokenSecretManager sm
public void testPingInterval() throws Exception {
Configuration newConf = new Configuration(conf);
newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
- conf.setInt(Client.PING_INTERVAL_NAME, Client.DEFAULT_PING_INTERVAL);
+ conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
+ CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);
+
// set doPing to true
- newConf.setBoolean("ipc.client.ping", true);
+ newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
ConnectionId remoteId = ConnectionId.getConnectionId(
new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
- assertEquals(Client.DEFAULT_PING_INTERVAL, remoteId.getPingInterval());
+ assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
+ remoteId.getPingInterval());
// set doPing to false
- newConf.setBoolean("ipc.client.ping", false);
+ newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
remoteId = ConnectionId.getConnectionId(
new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
assertEquals(0, remoteId.getPingInterval());
@@ -358,10 +359,7 @@ public void testPerConnectionConf() throws Exception {
.getUserName()));
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
- Text host = new Text(addr.getAddress().getHostAddress() + ":"
- + addr.getPort());
- token.setService(host);
- LOG.info("Service IP address for token is " + host);
+ SecurityUtil.setTokenService(token, addr);
current.addToken(token);
Configuration newConf = new Configuration(conf);
@@ -448,10 +446,7 @@ public void testDigestAuthMethod() throws Exception {
.getUserName()));
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
- Text host = new Text(addr.getAddress().getHostAddress() + ":"
- + addr.getPort());
- token.setService(host);
- LOG.info("Service IP address for token is " + host);
+ SecurityUtil.setTokenService(token, addr);
current.addToken(token);
current.doAs(new PrivilegedExceptionAction