diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
index 9ded5e8efc..a0d3c1785b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
@@ -244,3 +244,5 @@ HDFS-2920. fix remaining TODO items. (atm and todd)
 HDFS-3027. Implement a simple NN health check. (atm)
 
 HDFS-3023. Optimize entries in edits log for persistBlocks call. (todd)
+
+HDFS-2979. Balancer should use logical uri for creating failover proxy with HA enabled. (atm)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index f4a861089b..e63ed0d26b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -28,9 +28,11 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.StringTokenizer;
 
 import javax.net.SocketFactory;
@@ -43,6 +45,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
@@ -605,6 +608,68 @@ public String toString() {
       "nnId=" + namenodeId + ";addr=" + addr + "]";
     }
   }
+
+  /**
+   * Get a URI for each configured nameservice. If a nameservice is
+   * HA-enabled, then the logical URI of the nameservice is returned. If the
+   * nameservice is not HA-enabled, then a URI corresponding to an RPC address
+   * of the single NN for that nameservice is returned, preferring the service
+   * RPC address over the client RPC address.
+   *
+   * @param conf configuration
+   * @return a collection of all configured NN URIs, preferring service
+   *         addresses
+   */
+  public static Collection<URI> getNsServiceRpcUris(Configuration conf) {
+    return getNameServiceUris(conf,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
+
+  /**
+   * Get a URI for each configured nameservice. If a nameservice is
+   * HA-enabled, then the logical URI of the nameservice is returned. If the
+   * nameservice is not HA-enabled, then a URI corresponding to the address of
+   * the single NN for that nameservice is returned.
+   *
+   * @param conf configuration
+   * @param keys configuration keys to try in order to get the URI for non-HA
+   *        nameservices
+   * @return a collection of all configured NN URIs
+   */
+  public static Collection<URI> getNameServiceUris(Configuration conf,
+      String... keys) {
+    Set<URI> ret = new HashSet<URI>();
+    for (String nsId : getNameServiceIds(conf)) {
+      if (HAUtil.isHAEnabled(conf, nsId)) {
+        // Add the logical URI of the nameservice.
+        try {
+          ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId));
+        } catch (URISyntaxException ue) {
+          throw new IllegalArgumentException(ue);
+        }
+      } else {
+        // Add the URI corresponding to the address of the NN.
+        for (String key : keys) {
+          String addr = conf.get(concatSuffixes(key, nsId));
+          if (addr != null) {
+            ret.add(createUri(HdfsConstants.HDFS_URI_SCHEME,
+                NetUtils.createSocketAddr(addr)));
+            break;
+          }
+        }
+      }
+    }
+    // Add the generic configuration keys.
+    for (String key : keys) {
+      String addr = conf.get(key);
+      if (addr != null) {
+        ret.add(createUri("hdfs", NetUtils.createSocketAddr(addr)));
+        break;
+      }
+    }
+    return ret;
+  }
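[Note] For orientation, a minimal sketch of how the new helpers resolve URIs, assuming a hypothetical non-HA nameservice "ns1" (the nameservice id and address below are illustrative, not taken from this patch):

    // Illustrative configuration: a single non-HA nameservice "ns1".
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
    conf.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"),
        "nn.example.com:8020");
    // Yields [hdfs://nn.example.com:8020]. If HA were enabled for "ns1",
    // the same call would yield the logical URI [hdfs://ns1] instead.
    Collection<URI> uris = DFSUtil.getNsServiceRpcUris(conf);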
 
   /**
    * Given the InetSocketAddress this method returns the nameservice Id
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index bc7c13a914..e808af623c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -24,8 +24,8 @@
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.net.URI;
 import java.text.DateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -39,7 +39,6 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -1380,8 +1379,7 @@ private ReturnStatus run(int iteration, Formatter formatter) {
    * for each namenode,
    * execute a {@link Balancer} to work through all datanodes once.
    */
-  static int run(Map<String, Map<String, InetSocketAddress>> namenodes,
-      final Parameters p,
+  static int run(Collection<URI> namenodes, final Parameters p,
       Configuration conf) throws IOException, InterruptedException {
     final long sleeptime = 2000*conf.getLong(
         DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
@@ -1395,10 +1393,8 @@ static int run(Map<String, Map<String, InetSocketAddress>> namenodes,
     final List<NameNodeConnector> connectors
         = new ArrayList<NameNodeConnector>(namenodes.size());
     try {
-      for(Entry<String, Map<String, InetSocketAddress>> entry :
-          namenodes.entrySet()) {
-        connectors.add(
-            new NameNodeConnector(entry.getValue().values(), conf));
+      for (URI uri : namenodes) {
+        connectors.add(new NameNodeConnector(uri, conf));
       }
 
       boolean done = false;
@@ -1480,8 +1476,7 @@ public int run(String[] args) {
     try {
       checkReplicationPolicyCompatibility(conf);
 
-      final Map<String, Map<String, InetSocketAddress>> namenodes =
-          DFSUtil.getNNServiceRpcAddresses(conf);
+      final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
       return Balancer.run(namenodes, parse(args), conf);
     } catch (IOException e) {
       System.out.println(e + ".  Exiting ...");
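[Note] The net effect of the Balancer changes: the entry point now iterates plain URIs and hands each one straight to a connector. A condensed sketch of that flow, using only names that appear in the hunks above:

    // One NameNodeConnector per configured nameservice URI; with HA the URI
    // is logical (e.g. hdfs://ns1) and the proxy underneath handles failover.
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    List<NameNodeConnector> connectors =
        new ArrayList<NameNodeConnector>(namenodes.size());
    for (URI uri : namenodes) {
      connectors.add(new NameNodeConnector(uri, conf));
    }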
Exiting ..."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java index eab6273c22..c4208b7951 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java @@ -21,9 +21,7 @@ import java.io.IOException; import java.io.OutputStream; import java.net.InetAddress; -import java.net.InetSocketAddress; import java.net.URI; -import java.util.Collection; import java.util.EnumSet; import org.apache.commons.logging.Log; @@ -38,7 +36,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; -import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; @@ -46,8 +43,6 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Daemon; -import com.google.common.collect.Lists; - /** * The class provides utilities for {@link Balancer} to access a NameNode */ @@ -56,7 +51,7 @@ class NameNodeConnector { private static final Log LOG = Balancer.LOG; private static final Path BALANCER_ID_PATH = new Path("/system/balancer.id"); - final InetSocketAddress namenodeAddress; + final URI nameNodeUri; final String blockpoolID; final NamenodeProtocol namenode; @@ -70,10 +65,9 @@ class NameNodeConnector { private BlockTokenSecretManager blockTokenSecretManager; private Daemon keyupdaterthread; // AccessKeyUpdater thread - NameNodeConnector(Collection haNNs, + NameNodeConnector(URI nameNodeUri, Configuration conf) throws IOException { - this.namenodeAddress = Lists.newArrayList(haNNs).get(0); - URI nameNodeUri = NameNode.getUri(this.namenodeAddress); + this.nameNodeUri = nameNodeUri; this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri, NamenodeProtocol.class) @@ -186,7 +180,7 @@ void close() { @Override public String toString() { - return getClass().getSimpleName() + "[namenodeAddress=" + namenodeAddress + return getClass().getSimpleName() + "[namenodeUri=" + nameNodeUri + ", id=" + blockpoolID + "]"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 409dd37525..7854f95f88 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; import static org.junit.Assert.assertEquals; import java.io.BufferedOutputStream; @@ -38,9 +40,11 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.concurrent.TimeoutException; import org.apache.hadoop.conf.Configuration; @@ 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 409dd37525..7854f95f88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 
 import java.io.BufferedOutputStream;
@@ -38,9 +40,11 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -52,6 +56,7 @@
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -74,6 +79,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
+import com.google.common.base.Joiner;
+
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
 
@@ -681,4 +688,21 @@ public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
 
     return BlockOpResponseProto.parseDelimitedFrom(in);
   }
+
+  public static void setFederatedConfiguration(MiniDFSCluster cluster,
+      Configuration conf) {
+    Set<String> nameservices = new HashSet<String>();
+    for (NameNodeInfo info : cluster.getNameNodeInfos()) {
+      assert info.nameserviceId != null;
+      nameservices.add(info.nameserviceId);
+      conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
+          info.nameserviceId), DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
+          info.nameNode.getNameNodeAddress()).toString());
+      conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+          info.nameserviceId), DFSUtil.createUri(HdfsConstants.HDFS_URI_SCHEME,
+          info.nameNode.getNameNodeAddress()).toString());
+    }
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, Joiner.on(",")
+        .join(nameservices));
+  }
 }
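[Note] The new test helper publishes a running cluster's NN addresses and nameservice list back into a configuration so that DFSUtil can enumerate them. A sketch of the intended call pattern, mirroring its use in TestBalancerWithMultipleNameNodes further down (the node counts are arbitrary):

    // Build a federated cluster from a copy of conf, then write the
    // cluster's addresses and nameservice list back into the original conf.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration(conf))
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
        .numDataNodes(2)
        .build();
    cluster.waitActive();
    DFSTestUtil.setFederatedConfiguration(cluster, conf);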
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index d69dc0a7da..584446ac13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -316,8 +316,13 @@ public class DataNodeProperties {
   static class NameNodeInfo {
     final NameNode nameNode;
     final Configuration conf;
-    NameNodeInfo(NameNode nn, Configuration conf) {
+    final String nameserviceId;
+    final String nnId;
+    NameNodeInfo(NameNode nn, String nameserviceId, String nnId,
+        Configuration conf) {
       this.nameNode = nn;
+      this.nameserviceId = nameserviceId;
+      this.nnId = nnId;
       this.conf = conf;
     }
   }
@@ -674,6 +679,10 @@ public static URI formatSharedEditsDir(File baseDir, int minNN, int maxNN)
     return fileAsURI(new File(baseDir, "shared-edits-" +
         minNN + "-through-" + maxNN));
   }
+
+  public NameNodeInfo[] getNameNodeInfos() {
+    return this.nameNodes;
+  }
 
   private void initNameNodeConf(Configuration conf,
       String nameserviceId, String nnId,
@@ -763,7 +772,8 @@ private void createNameNode(int nnIndex, Configuration conf,
         .getHostPortString(nn.getHttpAddress()));
     DFSUtil.setGenericConf(conf, nameserviceId, nnId,
         DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    nameNodes[nnIndex] = new NameNodeInfo(nn, new Configuration(conf));
+    nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId,
+        new Configuration(conf));
   }
 
   /**
@@ -1264,7 +1274,7 @@ public synchronized void shutdownNameNode(int nnIndex) {
       nn.stop();
       nn.join();
       Configuration conf = nameNodes[nnIndex].conf;
-      nameNodes[nnIndex] = new NameNodeInfo(null, conf);
+      nameNodes[nnIndex] = new NameNodeInfo(null, null, null, conf);
     }
   }
 
@@ -1307,10 +1317,12 @@ public synchronized void restartNameNode(int nnIndex) throws IOException {
    */
   public synchronized void restartNameNode(int nnIndex, boolean waitActive)
       throws IOException {
+    String nameserviceId = nameNodes[nnIndex].nameserviceId;
+    String nnId = nameNodes[nnIndex].nnId;
     Configuration conf = nameNodes[nnIndex].conf;
     shutdownNameNode(nnIndex);
     NameNode nn = NameNode.createNameNode(new String[] {}, conf);
-    nameNodes[nnIndex] = new NameNodeInfo(nn, conf);
+    nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, conf);
     if (waitActive) {
      waitClusterUp();
      LOG.info("Restarted the namenode");
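[Note] Because NameNodeInfo now records which nameservice and NN id each slot belongs to, a restart can rebuild the same entry, and test helpers can read the ids back via the new accessor. A small sketch (NameNodeInfo is package-private, so this assumes test code in org.apache.hadoop.hdfs):

    // Enumerate the ids the cluster registered for each NN slot.
    for (MiniDFSCluster.NameNodeInfo info : cluster.getNameNodeInfos()) {
      System.out.println(info.nameserviceId + "/" + info.nnId);
    }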
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index e49bb107e2..a9b62c3aea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -25,6 +25,8 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
@@ -41,6 +43,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 public class TestDFSUtil {
@@ -233,11 +237,12 @@ public void checkNameServiceId(Configuration conf, String addr,
    * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
    */
  @Test
-  public void testSingleNamenode() {
+  public void testSingleNamenode() throws URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DEFAULT_ADDRESS);
 
     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
@@ -247,6 +252,10 @@ public void testSingleNamenode() {
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertFalse(isDefault);
+
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(1, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://" + DEFAULT_ADDRESS)));
   }
 
   /** Tests to ensure default namenode is used as fallback */
@@ -407,13 +416,14 @@ public void testGetServerInfo() {
   }
 
   @Test
-  public void testHANameNodesWithFederation() {
+  public void testHANameNodesWithFederation() throws URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
 
     final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
     final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
     final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
     final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
 
     // Two nameservices, each with two NNs.
     conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
@@ -460,6 +470,11 @@ public void testHANameNodesWithFederation() {
     // Ditto for nameservice IDs, if multiple are defined
     assertEquals(null, DFSUtil.getNamenodeNameServiceId(conf));
     assertEquals(null, DFSUtil.getSecondaryNameServiceId(conf));
+
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(2, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://ns1")));
+    assertTrue(uris.contains(new URI("hdfs://ns2")));
   }
 
   @Test
@@ -509,4 +524,34 @@ public void testSubstituteForWildcardAddress() throws IOException {
     assertEquals("127.0.0.1:12345",
         DFSUtil.substituteForWildcardAddress("127.0.0.1:12345", "foo"));
   }
+
+  @Test
+  public void testGetNNUris() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+
+    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
+    final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
+    final String NS2_NN_HOST = "ns2-nn.example.com:8020";
+    final String NN_HOST = "nn.example.com:8020";
+
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
+
+    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"),
+        NS2_NN_HOST);
+
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN_HOST);
+
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+
+    assertEquals(3, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://ns1")));
+    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_HOST)));
+    assertTrue(uris.contains(new URI("hdfs://" + NN_HOST)));
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 2dddb1b6e0..81b03a568e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
-import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
@@ -338,8 +338,7 @@ private void runBalancer(Configuration conf,
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
 
     // start rebalancing
-    Map<String, Map<String, InetSocketAddress>> namenodes =
-        DFSUtil.getNNServiceRpcAddresses(conf);
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
     assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index e064534da4..9d13a2b619 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -18,9 +18,10 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
-import java.net.InetSocketAddress;
-import java.util.Map;
+import java.net.URI;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -67,12 +68,13 @@ public void testBalancerWithHANameNodes() throws Exception {
     int numOfDatanodes = capacities.length;
     NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
     nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
-    MiniDFSNNTopology simpleHATopology = new MiniDFSNNTopology()
-        .addNameservice(new MiniDFSNNTopology.NSConf(null).addNN(nn1Conf)
-            .addNN(new MiniDFSNNTopology.NNConf("nn2")));
-    cluster = new MiniDFSCluster.Builder(conf).nnTopology(simpleHATopology)
-        .numDataNodes(capacities.length).racks(racks).simulatedCapacities(
-            capacities).build();
+    Configuration copiedConf = new Configuration(conf);
+    cluster = new MiniDFSCluster.Builder(copiedConf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(capacities.length)
+        .racks(racks)
+        .simulatedCapacities(capacities)
+        .build();
     HATestUtil.setFailoverConfigurations(cluster, conf);
     try {
       cluster.waitActive();
@@ -89,14 +91,12 @@ public void testBalancerWithHANameNodes() throws Exception {
       // start up an empty node with the same capacity and on the same rack
       cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
           new long[] { newNodeCapacity });
-
-      HATestUtil.setFailoverConfigurations(cluster, conf, NameNode.getUri(
-          cluster.getNameNode(0).getNameNodeAddress()).getHost());
       totalCapacity += newNodeCapacity;
       TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
          cluster);
-      Map<String, Map<String, InetSocketAddress>> namenodes = DFSUtil
-          .getNNServiceRpcAddresses(conf);
+      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      assertEquals(1, namenodes.size());
+      assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
       final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
       assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
       TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
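[Note] On the test restructuring above: building the cluster from a copied Configuration keeps the cluster's internal address rewrites out of the conf that the balancer later reads, so once HATestUtil.setFailoverConfigurations has run, that conf resolves to exactly one logical URI. Spelled out under that assumption:

    // conf now carries only the logical nameservice, so the balancer
    // sees a single logical URI rather than per-NN physical addresses.
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));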
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index 0245615a48..b130e027b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -18,10 +18,10 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
-import java.util.Map;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -40,8 +40,8 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -157,8 +157,7 @@ static void runBalancer(Suite s,
     LOG.info("BALANCER 1");
 
     // start rebalancing
-    final Map<String, Map<String, InetSocketAddress>> namenodes =
-        DFSUtil.getNNServiceRpcAddresses(s.conf);
+    final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, s.conf);
     Assert.assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
@@ -252,7 +251,8 @@ private void unevenDistribution(final int nNameNodes,
     final ExtendedBlock[][] blocks;
     {
       LOG.info("UNEVEN 1");
-      final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+      final MiniDFSCluster cluster = new MiniDFSCluster
+          .Builder(new Configuration(conf))
           .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
           .numDataNodes(nDataNodes)
           .racks(racks)
@@ -261,6 +261,7 @@ private void unevenDistribution(final int nNameNodes,
       LOG.info("UNEVEN 2");
       try {
         cluster.waitActive();
+        DFSTestUtil.setFederatedConfiguration(cluster, conf);
         LOG.info("UNEVEN 3");
         final Suite s = new Suite(cluster, nNameNodes, nDataNodes, conf);
         blocks = generateBlocks(s, usedSpacePerNN);
@@ -327,13 +328,15 @@ private void runTest(final int nNameNodes, long[] capacities, String[] racks,
     Assert.assertEquals(nDataNodes, racks.length);
 
     LOG.info("RUN_TEST -1");
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    final MiniDFSCluster cluster = new MiniDFSCluster
+        .Builder(new Configuration(conf))
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
        .numDataNodes(nDataNodes)
        .racks(racks)
        .simulatedCapacities(capacities)
        .build();
     LOG.info("RUN_TEST 0");
+    DFSTestUtil.setFederatedConfiguration(cluster, conf);
 
     try {
       cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index 42b5612571..bf919cea7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
@@ -188,6 +189,12 @@ public static String getLogicalHostname(MiniDFSCluster cluster) {
     return String.format(LOGICAL_HOSTNAME, cluster.getInstanceId());
   }
 
+  public static URI getLogicalUri(MiniDFSCluster cluster)
+      throws URISyntaxException {
+    return new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
+        getLogicalHostname(cluster));
+  }
+
   public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
       List<Integer> txids) throws InterruptedException {
     long start = System.currentTimeMillis();
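[Note] For completeness, a hedged sketch of how the new HATestUtil helper composes with a client in HA tests; FileSystem.get with an explicit URI is standard Hadoop API, and the variable names here are illustrative:

    // Derive the cluster's logical URI and open a client against it; the
    // configured failover proxy provider picks the active NN underneath.
    URI logicalUri = HATestUtil.getLogicalUri(cluster);
    FileSystem fs = FileSystem.get(logicalUri, conf);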