From 737df8b67b972155b12ed615e23f3f1e8e4e9ca9 Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Sat, 31 Dec 2011 01:53:23 +0000
Subject: [PATCH] HDFS-2716. Configuration needs to allow different dfs.http.addresses for each HA NN. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1226020 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES.HDFS-1623.txt              |  2 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java       | 28 ++++++------
 .../hadoop/hdfs/server/namenode/NameNode.java      | 18 +++++---
 .../server/namenode/SecondaryNameNode.java         | 11 ++++-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java        | 43 ++++++++++++++++---
 5 files changed, 77 insertions(+), 25 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
index db4c8e881c..e733586305 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
@@ -79,3 +79,5 @@ HDFS-1972. Fencing mechanism for block invalidations and replications (todd)
 HDFS-2714. Fix test cases which use standalone FSNamesystems (todd)
 
 HDFS-2692. Fix bugs related to failover from/into safe mode. (todd)
+
+HDFS-2716. Configuration needs to allow different dfs.http.addresses for each HA NN (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 8bc1720513..29cb3b3339 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -337,7 +337,7 @@ private static Collection<String> emptyAsSingletonNull(Collection<String> coll)
    * @param nsId the nameservice ID to look at, or null for non-federated
    * @return collection of namenode Ids
    */
-  static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
+  public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
     String key = addSuffix(DFS_HA_NAMENODES_KEY, nsId);
     return conf.getTrimmedStringCollection(key);
   }
@@ -644,24 +644,28 @@ public static String getInfoServer(
       DFS_NAMENODE_HTTPS_ADDRESS_KEY : DFS_NAMENODE_HTTP_ADDRESS_KEY;
     String httpAddressDefault = (securityOn && httpsAddress) ?
       DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT : DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
+
+    String suffixes[];
     if (namenodeAddr != null) {
       // if non-default namenode, try reverse look up 
       // the nameServiceID if it is available
-      String nameServiceId = DFSUtil.getNameServiceIdFromAddress(
-          conf, namenodeAddr,
+      suffixes = getSuffixIDs(conf, namenodeAddr,
           DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
           DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+    } else {
+      suffixes = new String[2];
+    }
 
-      if (nameServiceId != null) {
-        httpAddress = conf.get(DFSUtil.addKeySuffixes(
-            httpAddressKey, nameServiceId));
-      }
+    return getSuffixedConf(conf, httpAddressKey, httpAddressDefault, suffixes);
+  }
+
+  private static String getSuffixedConf(Configuration conf,
+      String key, String defaultVal, String[] suffixes) {
+    String ret = conf.get(DFSUtil.addKeySuffixes(key, suffixes));
+    if (ret != null) {
+      return ret;
     }
-    // else - Use non-federation style configuration
-    if (httpAddress == null) {
-      httpAddress = conf.get(httpAddressKey, httpAddressDefault);
-    }
-    return httpAddress;
+    return conf.get(key, defaultVal);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 54d4d2f290..fc0c22eeea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -531,11 +531,12 @@ protected NameNode(Configuration conf, NamenodeRole role)
     this.conf = conf;
     this.role = role;
     String nsId = getNameServiceId(conf);
+    String namenodeId = HAUtil.getNameNodeId(conf, nsId);
     this.haEnabled = HAUtil.isHAEnabled(conf, nsId);
     this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf);
     this.haContext = createHAContext();
     try {
-      initializeGenericKeys(conf, nsId);
+      initializeGenericKeys(conf, nsId, namenodeId);
       initialize(conf);
       if (!haEnabled) {
         state = ACTIVE_STATE;
@@ -852,17 +853,24 @@ public static NameNode createNameNode(String argv[], Configuration conf)
    * @param conf
    *          Configuration object to lookup specific key and to set the value
    *          to the key passed. Note the conf object is modified
-   * @param nameserviceId name service Id
+   * @param nameserviceId name service Id (to distinguish federated NNs)
+   * @param namenodeId the namenode ID (to distinguish HA NNs)
    * @see DFSUtil#setGenericConf(Configuration, String, String, String...)
    */
-  public static void initializeGenericKeys(Configuration conf, String
-      nameserviceId) {
-    String namenodeId = HAUtil.getNameNodeId(conf, nameserviceId);
+  public static void initializeGenericKeys(Configuration conf,
+      String nameserviceId, String namenodeId) {
     if ((nameserviceId == null || nameserviceId.isEmpty()) &&
         (namenodeId == null || namenodeId.isEmpty())) {
       return;
     }
 
+    if (nameserviceId != null) {
+      conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+    }
+    if (namenodeId != null) {
+      conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
+    }
+
     DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
         NAMESERVICE_SPECIFIC_KEYS);
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 11f77cc08b..9231f11d8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
+import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
@@ -177,8 +178,14 @@ public SecondaryNameNode(Configuration conf) throws IOException {
   public SecondaryNameNode(Configuration conf,
       CommandLineOpts commandLineOpts) throws IOException {
     try {
-      NameNode.initializeGenericKeys(conf,
-          DFSUtil.getSecondaryNameServiceId(conf));
+      String nsId = DFSUtil.getSecondaryNameServiceId(conf);
+      if (HAUtil.isHAEnabled(conf, nsId)) {
+        LOG.fatal("Cannot use SecondaryNameNode in an HA cluster." +
+            " The Standby Namenode will perform checkpointing.");
+        shutdown();
+        return;
+      }
+      NameNode.initializeGenericKeys(conf, nsId, null);
       initialize(conf, commandLineOpts);
     } catch(IOException e) {
       shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 5fb5bd70e8..9773a50d54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -258,20 +258,51 @@ public void testDefaultNamenode() throws IOException {
    * copied to generic keys when the namenode starts.
    */
   @Test
-  public void testConfModification() {
+  public void testConfModificationFederationOnly() {
     final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
-    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+    String nsId = "ns1";
+
+    conf.set(DFS_FEDERATION_NAMESERVICES, nsId);
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, nsId);
 
     // Set the nameservice specific keys with nameserviceId in the config key
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       // Note: value is same as the key
-      conf.set(DFSUtil.addKeySuffixes(key, nameserviceId), key);
+      conf.set(DFSUtil.addKeySuffixes(key, nsId), key);
     }
 
     // Initialize generic keys from specific keys
-    NameNode.initializeGenericKeys(conf, nameserviceId);
+    NameNode.initializeGenericKeys(conf, nsId, null);
+
+    // Retrieve the keys without nameserviceId and Ensure generic keys are set
+    // to the correct value
+    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+      assertEquals(key, conf.get(key));
+    }
+  }
+
+  /**
+   * Test to ensure nameservice specific keys in the configuration are
+   * copied to generic keys when the namenode starts.
+   */
+  @Test
+  public void testConfModificationFederationAndHa() {
+    final HdfsConfiguration conf = new HdfsConfiguration();
+    String nsId = "ns1";
+    String nnId = "nn1";
+
+    conf.set(DFS_FEDERATION_NAMESERVICES, nsId);
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, nsId);
+    conf.set(DFS_HA_NAMENODES_KEY + "." + nsId, nnId);
+
+    // Set the nameservice specific keys with nameserviceId in the config key
+    for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
+      // Note: value is same as the key
+      conf.set(DFSUtil.addKeySuffixes(key, nsId, nnId), key);
+    }
+
+    // Initialize generic keys from specific keys
+    NameNode.initializeGenericKeys(conf, nsId, nnId);
 
     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // to the correct value
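
For context, the configuration shape this change enables: each NameNode in an HA nameservice gets its own dfs.namenode.http-address key, suffixed with the nameservice ID and namenode ID, instead of all NNs sharing one generic key. The sketch below is illustrative only and is not part of the patch; the key names follow the constants used above (DFS_FEDERATION_NAMESERVICES, DFS_HA_NAMENODES_KEY, DFS_NAMENODE_HTTP_ADDRESS_KEY), while the class name and host:port values are placeholders. In a real deployment these keys would live in hdfs-site.xml.

import org.apache.hadoop.conf.Configuration;

public class HaHttpAddressConfigExample {
  public static void main(String[] args) {
    // Start from an empty Configuration so only the keys set here are present.
    Configuration conf = new Configuration(false);

    // One federated nameservice, "ns1", with two HA namenodes, "nn1" and "nn2".
    conf.set("dfs.federation.nameservices", "ns1");
    conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");

    // Per-namenode HTTP addresses, distinguished by the ".ns1.nn1" / ".ns1.nn2" suffixes.
    conf.set("dfs.namenode.http-address.ns1.nn1", "nn1.example.com:50070");
    conf.set("dfs.namenode.http-address.ns1.nn2", "nn2.example.com:50070");

    // Each suffixed key resolves independently, so the two NNs no longer
    // have to share a single dfs.namenode.http-address value.
    System.out.println(conf.get("dfs.namenode.http-address.ns1.nn1"));
    System.out.println(conf.get("dfs.namenode.http-address.ns1.nn2"));
  }
}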
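
The new private getSuffixedConf() helper in DFSUtil resolves a key by first trying the suffixed form and only then falling back to the generic key with its default. Below is a minimal standalone sketch of that lookup order; the lookup() helper is hypothetical, mirrors the patch's logic rather than being the patched DFSUtil code, and the key names and values are the same placeholders as in the previous example.

import org.apache.hadoop.conf.Configuration;

public class SuffixedLookupSketch {
  // Prefer "key.suffix1.suffix2...", else fall back to the generic "key"
  // with the supplied default. Null or empty suffixes are skipped, similar
  // in spirit to DFSUtil.addKeySuffixes().
  static String lookup(Configuration conf, String key, String defaultVal,
      String... suffixes) {
    StringBuilder suffixed = new StringBuilder(key);
    for (String s : suffixes) {
      if (s != null && !s.isEmpty()) {
        suffixed.append('.').append(s);
      }
    }
    String value = conf.get(suffixed.toString());
    return value != null ? value : conf.get(key, defaultVal);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("dfs.namenode.http-address.ns1.nn1", "nn1.example.com:50070");

    // Suffixed key present: the per-NN value wins.
    System.out.println(lookup(conf, "dfs.namenode.http-address",
        "0.0.0.0:50070", "ns1", "nn1"));
    // No suffixed key configured for nn2: falls back to the generic default.
    System.out.println(lookup(conf, "dfs.namenode.http-address",
        "0.0.0.0:50070", "ns1", "nn2"));
  }
}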