HDFS-2683. Authority-based lookup of proxy provider fails if path becomes canonicalized. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1214579 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2011-12-15 00:42:50 +00:00
parent b77c107ccb
commit afd0333196
3 changed files with 56 additions and 12 deletions
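The failure mode is easy to see with plain java.net.URI: path canonicalization appends the default NameNode port to the authority, so a config lookup keyed on the full authority ("host:port") no longer matches a key that was configured by host alone. A minimal sketch, not part of the commit (the class name and main method are illustrative; the key prefix matches the value of DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX):

import java.net.URI;

public class CanonicalizationDemo {
  private static final String KEY_PREFIX = "dfs.client.failover.proxy.provider";

  public static void main(String[] args) {
    URI logical = URI.create("hdfs://ha-nn-uri/foo");        // as configured
    URI canonical = URI.create("hdfs://ha-nn-uri:8020/foo"); // after canonicalization

    // Old lookup: keyed on the full authority, so the canonicalized URI
    // yields "...provider.ha-nn-uri:8020" and misses the configured key.
    System.out.println(KEY_PREFIX + "." + logical.getAuthority());
    System.out.println(KEY_PREFIX + "." + canonical.getAuthority());

    // New lookup: keyed on the host only, identical for both URIs.
    System.out.println(KEY_PREFIX + "." + canonical.getHost());
  }
}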

hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt

@@ -53,3 +53,5 @@ HDFS-2634. Standby needs to ingest latest edit logs before transitioning to acti
 HDFS-2671. NN should throw StandbyException in response to RPCs in STANDBY state (todd)
 
 HDFS-2680. DFSClient should construct failover proxy with exponential backoff (todd)
+
+HDFS-2683. Authority-based lookup of proxy provider fails if path becomes canonicalized (todd)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -313,7 +313,8 @@ public DFSClient(URI nameNodeUri, Configuration conf,
     this.socketCache = new SocketCache(dfsClientConf.socketCacheCapacity);
 
-    Class<?> failoverProxyProviderClass = getFailoverProxyProviderClass(authority, conf);
+    Class<?> failoverProxyProviderClass = getFailoverProxyProviderClass(
+        nameNodeUri, conf);
 
     if (nameNodeUri != null && failoverProxyProviderClass != null) {
       FailoverProxyProvider failoverProxyProvider = (FailoverProxyProvider)
@@ -353,15 +354,32 @@ public DFSClient(URI nameNodeUri, Configuration conf,
       }
     }
 
-  private Class<?> getFailoverProxyProviderClass(String authority, Configuration conf)
+  private Class<?> getFailoverProxyProviderClass(URI nameNodeUri, Configuration conf)
       throws IOException {
-    String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + authority;
+    if (nameNodeUri == null) {
+      return null;
+    }
+    String host = nameNodeUri.getHost();
+    String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + host;
     try {
-      return conf.getClass(configKey, null);
+      Class<?> ret = conf.getClass(configKey, null);
+      if (ret != null) {
+        // If we found a proxy provider, then this URI should be a logical NN.
+        // Given that, it shouldn't have a non-default port number.
+        int port = nameNodeUri.getPort();
+        if (port > 0 && port != NameNode.DEFAULT_PORT) {
+          throw new IOException(
+              "Port " + port + " specified in URI " + nameNodeUri +
+              " but host '" + host + "' is a logical (HA) namenode" +
+              " and does not use port information.");
+        }
+      }
+      return ret;
     } catch (RuntimeException e) {
       if (e.getCause() instanceof ClassNotFoundException) {
         throw new IOException("Could not load failover proxy provider class "
-            + conf.get(configKey) + " which is configured for authority " + authority,
+            + conf.get(configKey) + " which is configured for authority " + nameNodeUri,
             e);
       } else {
         throw e;
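For context, a hedged sketch of the client-side setup this lookup serves (the wrapper class and main method are illustrative, not from the commit; the config key and provider class are the real ones, and the nameservice/namenode address keys are assumed to be set as configureFailoverFs in the test below sets them):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;

public class HaClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The key suffix is the logical host name only; with this patch a
    // canonicalized URI carrying the default port resolves to the same key.
    conf.set("dfs.client.failover.proxy.provider.ha-nn-uri",
        ConfiguredFailoverProxyProvider.class.getName());

    // Both URIs now find the provider, since the lookup uses only the host:
    FileSystem fs = FileSystem.get(URI.create("hdfs://ha-nn-uri/"), conf);
    FileSystem fs2 = FileSystem.get(URI.create("hdfs://ha-nn-uri:8020/"), conf);

    // A non-default port on a logical URI now fails fast with
    // "... does not use port information." instead of silently
    // missing the provider key.
  }
}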

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java

@@ -20,6 +20,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -31,7 +32,9 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -43,6 +46,7 @@ public class TestDFSClientFailover {
 
   private Configuration conf = new Configuration();
   private MiniDFSCluster cluster;
+  private static final String LOGICAL_HOSTNAME = "ha-nn-uri";
 
   @Before
   public void setUpCluster() throws IOException {
@@ -65,10 +69,6 @@ public void tearDownCluster() throws IOException {
   public void testDfsClientFailover() throws IOException, URISyntaxException {
     InetSocketAddress nnAddr1 = cluster.getNameNode(0).getNameNodeAddress();
     InetSocketAddress nnAddr2 = cluster.getNameNode(1).getNameNodeAddress();
-    String nameServiceId1 = DFSUtil.getNameServiceIdFromAddress(conf, nnAddr1,
-        DFS_NAMENODE_RPC_ADDRESS_KEY);
-    String nameServiceId2 = DFSUtil.getNameServiceIdFromAddress(conf, nnAddr2,
-        DFS_NAMENODE_RPC_ADDRESS_KEY);
 
     ClientProtocol nn1 = DFSUtil.createNamenode(nnAddr1, conf);
     ClientProtocol nn2 = DFSUtil.createNamenode(nnAddr2, conf);
@@ -89,9 +89,34 @@ public void testDfsClientFailover() throws IOException, URISyntaxException {
     cluster.getNameNode(0).stop();
     AppendTestUtil.check(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY);
 
+    // Check that it functions even if the URL becomes canonicalized
+    // to include a port number.
+    Path withPort = new Path("hdfs://" + LOGICAL_HOSTNAME + ":" +
+        NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
+    FileSystem fs2 = withPort.getFileSystem(fs.getConf());
+    assertTrue(fs2.exists(withPort));
+
     fs.close();
   }
 
+  /**
+   * Regression test for HDFS-2683.
+   */
+  @Test
+  public void testLogicalUriShouldNotHavePorts() {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + ".foo",
+        ConfiguredFailoverProxyProvider.class.getName());
+    Path p = new Path("hdfs://foo:12345/");
+    try {
+      p.getFileSystem(conf).exists(p);
+      fail("Did not fail with fake FS");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "does not use port information", ioe);
+    }
+  }
+
   public static FileSystem configureFailoverFs(MiniDFSCluster cluster, Configuration conf)
       throws IOException, URISyntaxException {
     InetSocketAddress nnAddr1 = cluster.getNameNode(0).getNameNodeAddress();
@@ -99,7 +124,6 @@ public static FileSystem configureFailoverFs(MiniDFSCluster cluster, Configurati
     String nsId = "nameserviceId1";
-    final String logicalNameNodeId = "ha-nn-uri";
     String nameNodeId1 = "nn1";
     String nameNodeId2 = "nn2";
@@ -114,10 +138,10 @@ public static FileSystem configureFailoverFs(MiniDFSCluster cluster, Configurati
     conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, nsId);
     conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY, nsId),
         nameNodeId1 + "," + nameNodeId2);
-    conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalNameNodeId,
+    conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + LOGICAL_HOSTNAME,
         ConfiguredFailoverProxyProvider.class.getName());
-    FileSystem fs = FileSystem.get(new URI("hdfs://" + logicalNameNodeId), conf);
+    FileSystem fs = FileSystem.get(new URI("hdfs://" + LOGICAL_HOSTNAME), conf);
     return fs;
   }