HDFS-3576. Move the definition of the constant NameNode.DEFAULT_PORT to DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT. Contributed by Brandon Li

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1354790 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-06-28 01:11:22 +00:00
parent 5770a453f3
commit 44389399d4
12 changed files with 36 additions and 29 deletions
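For downstream code the change is a straight rename: the default NameNode RPC port is still 8020, but it is now published on DFSConfigKeys instead of NameNode. A minimal sketch of a caller after this commit (the wrapper class and main method are hypothetical, for illustration only):

import org.apache.hadoop.hdfs.DFSConfigKeys;

// Hypothetical caller; only the constant reference comes from this commit.
public class DefaultPortExample {
  public static void main(String[] args) {
    // Previously: NameNode.DEFAULT_PORT (removed by this commit).
    int port = DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
    System.out.println("default NameNode RPC port = " + port); // prints 8020
  }
}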


@@ -250,6 +250,9 @@ Branch-2 ( Unreleased changes )
     HDFS-3572. Cleanup code which inits SPNEGO in HttpServer (todd)
+    HDFS-3576. Move the constant NameNode.DEFAULT_PORT to
+    DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT.  (Brandon Li via szetszwo)
   OPTIMIZATIONS
     HDFS-2982. Startup performance suffers when there are many edit log


@@ -33,6 +33,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -42,7 +43,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -71,7 +71,8 @@ public class Hdfs extends AbstractFileSystem {
    * @throws IOException
    */
   Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
-    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
+    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true,
+        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);

     if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
@@ -86,7 +87,7 @@ public class Hdfs extends AbstractFileSystem {

   @Override
   public int getUriDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }

   @Override

@@ -87,6 +87,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
   public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT;
   public static final String DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address";
+  public static final int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
   public static final String DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
   public static final String DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
   public static final long DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;


@@ -734,7 +734,7 @@ public void setTimes(Path p, long mtime, long atime
   @Override
   protected int getDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }

   @Override


@@ -402,7 +402,7 @@ private static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass
     // If we found a proxy provider, then this URI should be a logical NN.
     // Given that, it shouldn't have a non-default port number.
     int port = nameNodeUri.getPort();
-    if (port > 0 && port != NameNode.DEFAULT_PORT) {
+    if (port > 0 && port != DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
       throw new IOException("Port " + port + " specified in URI "
           + nameNodeUri + " but host '" + host
           + "' is a logical (HA) namenode"


@@ -22,7 +22,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -57,7 +57,7 @@ public Token<DelegationTokenIdentifier> selectToken(
     Text serviceName = SecurityUtil.buildTokenService(nnUri);
     final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);

-    int nnRpcPort = NameNode.DEFAULT_PORT;
+    int nnRpcPort = DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
     if (nnServiceName != null) {
       nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort();
     }


@@ -214,7 +214,6 @@ public long getProtocolVersion(String protocol,
     }
   }

-  public static final int DEFAULT_PORT = 8020;
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
   public static final HAState ACTIVE_STATE = new ActiveState();
@@ -270,7 +269,7 @@ public static NameNodeMetrics getNameNodeMetrics() {
   }

   public static InetSocketAddress getAddress(String address) {
-    return NetUtils.createSocketAddr(address, DEFAULT_PORT);
+    return NetUtils.createSocketAddr(address, DFS_NAMENODE_RPC_PORT_DEFAULT);
   }

   /**
@@ -329,7 +328,8 @@ public static InetSocketAddress getAddress(URI filesystemURI) {
   public static URI getUri(InetSocketAddress namenode) {
     int port = namenode.getPort();
-    String portString = port == DEFAULT_PORT ? "" : (":"+port);
+    String portString = (port == DFS_NAMENODE_RPC_PORT_DEFAULT) ?
+        "" : (":"+port);
     return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
         + namenode.getHostName()+portString);
   }


@@ -77,7 +77,7 @@ public NNHAServiceTarget(Configuration conf,
           "Unable to determine service address for namenode '" + nnId + "'");
     }
     this.addr = NetUtils.createSocketAddr(serviceAddr,
-        NameNode.DEFAULT_PORT);
+        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);

     this.autoFailoverEnabled = targetConf.getBoolean(
         DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,


@@ -31,7 +31,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -82,9 +81,9 @@ public void testDfsClientFailover() throws IOException, URISyntaxException {
     // Check that it functions even if the URL becomes canonicalized
     // to include a port number.
-    Path withPort = new Path("hdfs://" +
-        HATestUtil.getLogicalHostname(cluster) + ":" +
-        NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
+    Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster)
+        + ":" + DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + "/"
+        + TEST_FILE.toUri().getPath());
     FileSystem fs2 = withPort.getFileSystem(fs.getConf());
     assertTrue(fs2.exists(withPort));


@@ -31,9 +31,9 @@ public class TestDefaultNameNodePort extends TestCase {
   public void testGetAddressFromString() throws Exception {
     assertEquals(NameNode.getAddress("foo").getPort(),
-        NameNode.DEFAULT_PORT);
+        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
-        NameNode.DEFAULT_PORT);
+        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
         555);
     assertEquals(NameNode.getAddress("foo:555").getPort(),
@@ -43,18 +43,20 @@ public void testGetAddressFromString() throws Exception {
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress(conf).getPort(),
+        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
     assertEquals(NameNode.getAddress(conf).getPort(), 555);
     FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress(conf).getPort(),
+        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
   }

   public void testGetUri() {
     assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
         URI.create("hdfs://foo:555"));
     assertEquals(NameNode.getUri(new InetSocketAddress("foo",
-        NameNode.DEFAULT_PORT)),
+        DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
         URI.create("hdfs://foo"));
   }
 }


@@ -25,6 +25,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -32,7 +33,6 @@
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.junit.Test;
@@ -67,7 +67,7 @@ public void testBalancerWithHANameNodes() throws Exception {
     assertEquals(capacities.length, racks.length);
     int numOfDatanodes = capacities.length;
     NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
-    nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
+    nn1Conf.setIpcPort(DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     Configuration copiedConf = new Configuration(conf);
     cluster = new MiniDFSCluster.Builder(copiedConf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())


@@ -39,6 +39,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.fs.shell.CommandFormat;
@@ -509,10 +510,10 @@ public FileSystem run() throws IOException {
     {
       try {
-        runTestCache(NameNode.DEFAULT_PORT);
+        runTestCache(DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
       } catch(java.net.BindException be) {
-        LOG.warn("Cannot test NameNode.DEFAULT_PORT (="
-            + NameNode.DEFAULT_PORT + ")", be);
+        LOG.warn("Cannot test NameNode's default RPC port (="
+            + DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + ")", be);
       }

       runTestCache(0);
@@ -535,11 +536,11 @@ static void runTestCache(int port) throws Exception {
       }
     }

-    if (port == NameNode.DEFAULT_PORT) {
+    if (port == DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
       //test explicit default port
-      URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(),
-          uri.getHost(), NameNode.DEFAULT_PORT, uri.getPath(),
-          uri.getQuery(), uri.getFragment());
+      URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(),
+          DFSConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT, uri.getPath(),
+          uri.getQuery(), uri.getFragment());
       LOG.info("uri2=" + uri2);
       FileSystem fs = FileSystem.get(uri2, conf);
       checkPath(cluster, fs);