From f105784d6a28d2a0cedb619f0951de93d995e9da Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Thu, 28 Jun 2012 01:25:32 +0000
Subject: [PATCH] svn merge -c -1354790 for reverting HDFS-3576 since it
 requires more changes.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1354792 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 ---
 .../src/main/java/org/apache/hadoop/fs/Hdfs.java   |  7 +++----
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java      |  1 -
 .../apache/hadoop/hdfs/DistributedFileSystem.java  |  2 +-
 .../org/apache/hadoop/hdfs/NameNodeProxies.java    |  2 +-
 .../token/delegation/DelegationTokenSelector.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java      |  6 +++---
 .../hadoop/hdfs/tools/NNHAServiceTarget.java       |  2 +-
 .../apache/hadoop/hdfs/TestDFSClientFailover.java  |  7 ++++---
 .../hadoop/hdfs/TestDefaultNameNodePort.java       | 12 +++++-------
 .../balancer/TestBalancerWithHANameNodes.java      |  4 ++--
 .../java/org/apache/hadoop/fs/TestFileSystem.java  | 15 +++++++--------
 12 files changed, 29 insertions(+), 36 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5cf4a7ddc1..a708333c08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -250,9 +250,6 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3572. Cleanup code which inits SPNEGO in HttpServer (todd)
 
-    HDFS-3576. Move the constant NameNode.DEFAULT_PORT to
-    DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT. (Brandon Li via szetszwo)
-
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 1d2832a5b1..b31960c974 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -43,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -71,8 +71,7 @@ public class Hdfs extends AbstractFileSystem {
    * @throws IOException
    */
   Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
-    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true,
-        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
 
     if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
@@ -87,7 +86,7 @@ public class Hdfs extends AbstractFileSystem {
 
   @Override
   public int getUriDefaultPort() {
-    return DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
+    return NameNode.DEFAULT_PORT;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 467b345cf4..146ed8358f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -87,7 +87,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
   public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT;
   public static final String DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address";
-  public static final int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
   public static final String DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
   public static final String DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
   public static final long DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index fb6f9dc10a..8c0ed10d2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -734,7 +734,7 @@ public class DistributedFileSystem extends FileSystem {
 
   @Override
   protected int getDefaultPort() {
-    return DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
+    return NameNode.DEFAULT_PORT;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index 326ba5595d..cc6517daa5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -402,7 +402,7 @@
     // If we found a proxy provider, then this URI should be a logical NN.
     // Given that, it shouldn't have a non-default port number.
     int port = nameNodeUri.getPort();
-    if (port > 0 && port != DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
+    if (port > 0 && port != NameNode.DEFAULT_PORT) {
       throw new IOException("Port " + port + " specified in URI "
           + nameNodeUri + " but host '" + host
           + "' is a logical (HA) namenode"
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
index 3822939944..293611e377 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
@@ -22,7 +22,7 @@ import java.util.Collection;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -57,7 +57,7 @@
     Text serviceName = SecurityUtil.buildTokenService(nnUri);
     final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
 
-    int nnRpcPort = DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
+    int nnRpcPort = NameNode.DEFAULT_PORT;
     if (nnServiceName != null) {
       nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 6caf8d3741..6416db1d9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -214,6 +214,7 @@ public class NameNode {
     }
   }
 
+  public static final int DEFAULT_PORT = 8020;
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
   public static final HAState ACTIVE_STATE = new ActiveState();
@@ -269,7 +270,7 @@
   }
 
   public static InetSocketAddress getAddress(String address) {
-    return NetUtils.createSocketAddr(address, DFS_NAMENODE_RPC_PORT_DEFAULT);
+    return NetUtils.createSocketAddr(address, DEFAULT_PORT);
   }
 
   /**
@@ -328,8 +329,7 @@
   public static URI getUri(InetSocketAddress namenode) {
     int port = namenode.getPort();
-    String portString = (port == DFS_NAMENODE_RPC_PORT_DEFAULT) ?
-        "" : (":"+port);
+    String portString = port == DEFAULT_PORT ? "" : (":"+port);
     return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
         + namenode.getHostName()+portString);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
index af1a1b70ed..38f5123de2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
@@ -77,7 +77,7 @@
           "Unable to determine service address for namenode '" + nnId + "'");
     }
     this.addr = NetUtils.createSocketAddr(serviceAddr,
-        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+        NameNode.DEFAULT_PORT);
 
     this.autoFailoverEnabled = targetConf.getBoolean(
         DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index 5769f48a57..a88e8a74ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -81,9 +82,9 @@ public class TestDFSClientFailover {
 
     // Check that it functions even if the URL becomes canonicalized
     // to include a port number.
-    Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster)
-        + ":" + DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + "/"
-        + TEST_FILE.toUri().getPath());
+    Path withPort = new Path("hdfs://" +
+        HATestUtil.getLogicalHostname(cluster) + ":" +
+        NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
     FileSystem fs2 = withPort.getFileSystem(fs.getConf());
     assertTrue(fs2.exists(withPort));
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
index 799d28cf6e..6a66e947e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
@@ -31,9 +31,9 @@ public class TestDefaultNameNodePort extends TestCase {
 
   public void testGetAddressFromString() throws Exception {
     assertEquals(NameNode.getAddress("foo").getPort(),
-                 DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+                 NameNode.DEFAULT_PORT);
     assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
-                 DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+                 NameNode.DEFAULT_PORT);
     assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
                  555);
     assertEquals(NameNode.getAddress("foo:555").getPort(),
@@ -43,20 +43,18 @@
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(NameNode.getAddress(conf).getPort(),
-                 DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
     assertEquals(NameNode.getAddress(conf).getPort(), 555);
     FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(NameNode.getAddress(conf).getPort(),
-                 DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
   }
 
   public void testGetUri() {
     assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
                  URI.create("hdfs://foo:555"));
     assertEquals(NameNode.getUri(new InetSocketAddress("foo",
-                                                       DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
+                                                       NameNode.DEFAULT_PORT)),
                  URI.create("hdfs://foo"));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index c1762d3553..9d13a2b619 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -25,7 +25,6 @@ import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -33,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 
 import org.junit.Test;
@@ -67,7 +67,7 @@ public class TestBalancerWithHANameNodes {
     assertEquals(capacities.length, racks.length);
     int numOfDatanodes = capacities.length;
     NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
-    nn1Conf.setIpcPort(DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+    nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
     Configuration copiedConf = new Configuration(conf);
     cluster = new MiniDFSCluster.Builder(copiedConf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java
index 6c978f0b38..f299cb6740 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java
@@ -39,7 +39,6 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.fs.shell.CommandFormat;
@@ -510,10 +509,10 @@ public class TestFileSystem extends TestCase {
 
     {
      try {
-        runTestCache(DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+        runTestCache(NameNode.DEFAULT_PORT);
      } catch(java.net.BindException be) {
-        LOG.warn("Cannot test NameNode's default RPC port (="
-            + DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + ")", be);
+        LOG.warn("Cannot test NameNode.DEFAULT_PORT (="
+            + NameNode.DEFAULT_PORT + ")", be);
      }
 
      runTestCache(0);
@@ -536,11 +535,11 @@
       }
     }
 
-    if (port == DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
+    if (port == NameNode.DEFAULT_PORT) {
       //test explicit default port
-      URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(),
-          DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, uri.getPath(),
-          uri.getQuery(), uri.getFragment());
+      URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(),
+          uri.getHost(), NameNode.DEFAULT_PORT, uri.getPath(),
+          uri.getQuery(), uri.getFragment());
       LOG.info("uri2=" + uri2);
       FileSystem fs = FileSystem.get(uri2, conf);
       checkPath(cluster, fs);
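For context on what this revert restores: after the patch, the default NameNode RPC port is again published as NameNode.DEFAULT_PORT rather than DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT. The sketch below is a hypothetical standalone snippet, not part of the patch; the class name and the host "foo.example.com" are illustrative. It exercises exactly the behavior the reverted TestDefaultNameNodePort hunks assert: an authority without an explicit port resolves to the default port, an explicit port wins, and NameNode.getUri() omits the port when it equals the default.

// Hypothetical usage sketch; relies only on the NameNode APIs restored above.
import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class DefaultPortSketch {
  public static void main(String[] args) {
    // No port in the authority: getAddress() falls back to
    // NameNode.DEFAULT_PORT (8020).
    InetSocketAddress noPort = NameNode.getAddress("foo.example.com");
    System.out.println(noPort.getPort()); // 8020

    // An explicit port overrides the default.
    InetSocketAddress explicit = NameNode.getAddress("hdfs://foo.example.com:555");
    System.out.println(explicit.getPort()); // 555

    // getUri() drops the port when it equals the default.
    URI uri = NameNode.getUri(
        new InetSocketAddress("foo.example.com", NameNode.DEFAULT_PORT));
    System.out.println(uri); // hdfs://foo.example.com
  }
}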