From 01b17c40cf20df576fb18315c2019645d5050ddf Mon Sep 17 00:00:00 2001
From: Aaron Myers
Date: Wed, 29 Feb 2012 23:42:52 +0000
Subject: [PATCH] HDFS-2979. Balancer should use logical uri for creating
 failover proxy with HA enabled. Contributed by Aaron T. Myers.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1295340 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES.HDFS-1623.txt           |  2 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    | 65 +++++++++++++++++++
 .../hadoop/hdfs/server/balancer/Balancer.java   | 15 ++---
 .../server/balancer/NameNodeConnector.java      | 14 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   | 16 +++--
 .../org/apache/hadoop/hdfs/TestDFSUtil.java     | 49 +++++++++++++-
 .../hdfs/server/balancer/TestBalancer.java      |  7 +-
 .../balancer/TestBalancerWithHANameNodes.java   | 25 ++++---
 .../TestBalancerWithMultipleNameNodes.java      |  9 ++-
 .../hdfs/server/namenode/ha/HATestUtil.java     |  7 ++
 10 files changed, 158 insertions(+), 51 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
index 4790bd7f66..01553a7c45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
@@ -242,3 +242,5 @@ HDFS-2958. Sweep for remaining proxy construction which doesn't go through failo
 HDFS-2920. fix remaining TODO items. (atm and todd)
 
 HDFS-3027. Implement a simple NN health check. (atm)
+
+HDFS-2979. Balancer should use logical uri for creating failover proxy with HA enabled. (atm)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index f4a861089b..e63ed0d26b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -28,9 +28,11 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 import java.util.StringTokenizer;
 
 import javax.net.SocketFactory;
@@ -43,6 +45,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
@@ -605,6 +608,68 @@ public String toString() {
       "nnId=" + namenodeId + ";addr=" + addr + "]";
     }
   }
+
+  /**
+   * Get a URI for each configured nameservice. If a nameservice is
+   * HA-enabled, then the logical URI of the nameservice is returned. If the
+   * nameservice is not HA-enabled, then a URI corresponding to an RPC address
+   * of the single NN for that nameservice is returned, preferring the service
+   * RPC address over the client RPC address.
+   *
+   * @param conf configuration
+   * @return a collection of all configured NN URIs, preferring service
+   *         addresses
+   */
+  public static Collection<URI> getNsServiceRpcUris(Configuration conf) {
+    return getNameServiceUris(conf,
+        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
+
+  /**
+   * Get a URI for each configured nameservice. If a nameservice is
+   * HA-enabled, then the logical URI of the nameservice is returned. If the
+   * nameservice is not HA-enabled, then a URI corresponding to the address of
+   * the single NN for that nameservice is returned.
+   *
+   * @param conf configuration
+   * @param keys configuration keys to try in order to get the URI for non-HA
+   *        nameservices
+   * @return a collection of all configured NN URIs
+   */
+  public static Collection<URI> getNameServiceUris(Configuration conf,
+      String... keys) {
+    Set<URI> ret = new HashSet<URI>();
+    for (String nsId : getNameServiceIds(conf)) {
+      if (HAUtil.isHAEnabled(conf, nsId)) {
+        // Add the logical URI of the nameservice.
+        try {
+          ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId));
+        } catch (URISyntaxException ue) {
+          throw new IllegalArgumentException(ue);
+        }
+      } else {
+        // Add the URI corresponding to the address of the NN.
+        for (String key : keys) {
+          String addr = conf.get(concatSuffixes(key, nsId));
+          if (addr != null) {
+            ret.add(createUri(HdfsConstants.HDFS_URI_SCHEME,
+                NetUtils.createSocketAddr(addr)));
+            break;
+          }
+        }
+      }
+    }
+    // Add the generic configuration keys.
+    for (String key : keys) {
+      String addr = conf.get(key);
+      if (addr != null) {
+        ret.add(createUri("hdfs", NetUtils.createSocketAddr(addr)));
+        break;
+      }
+    }
+    return ret;
+  }
 
   /**
    * Given the InetSocketAddress this method returns the nameservice Id
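
Note: the resolution rules of the new helper are easiest to see in isolation. Below is a minimal, self-contained sketch, not part of the patch; the hostnames and nameservice ids are illustrative, mirroring the new testGetNNUris test further down. An HA-enabled nameservice collapses to its logical URI, while a non-HA nameservice yields a URI for the first address key that is set, which is why getNsServiceRpcUris() prefers the service RPC address.

import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class NameServiceUriSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // ns1 declares two NNs, so it is HA-enabled and collapses to hdfs://ns1.
    conf.set("dfs.federation.nameservices", "ns1,ns2");
    conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.ns1.nn1", "ns1-nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.ns1.nn2", "ns1-nn2.example.com:8020");
    // ns2 is not HA-enabled; its service RPC address wins over the client one.
    conf.set("dfs.namenode.servicerpc-address.ns2", "ns2-nn.example.com:8021");
    conf.set("dfs.namenode.rpc-address.ns2", "ns2-nn.example.com:8020");

    // Expected output: hdfs://ns1 and hdfs://ns2-nn.example.com:8021
    Collection<URI> uris = DFSUtil.getNsServiceRpcUris(conf);
    for (URI uri : uris) {
      System.out.println(uri);
    }
  }
}
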
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index bc7c13a914..e808af623c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -24,8 +24,8 @@
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.net.URI;
 import java.text.DateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -39,7 +39,6 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -1380,8 +1379,7 @@ private ReturnStatus run(int iteration, Formatter formatter) {
    * for each namenode,
    * execute a {@link Balancer} to work through all datanodes once.
    */
-  static int run(Map<String, Map<String, InetSocketAddress>> namenodes,
-      final Parameters p,
+  static int run(Collection<URI> namenodes, final Parameters p,
       Configuration conf) throws IOException, InterruptedException {
     final long sleeptime = 2000*conf.getLong(
         DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
@@ -1395,10 +1393,8 @@ static int run(Map<String, Map<String, InetSocketAddress>> namenodes,
     final List<NameNodeConnector> connectors
         = new ArrayList<NameNodeConnector>(namenodes.size());
     try {
-      for(Entry<String, Map<String, InetSocketAddress>> entry :
-          namenodes.entrySet()) {
-        connectors.add(
-            new NameNodeConnector(entry.getValue().values(), conf));
+      for (URI uri : namenodes) {
+        connectors.add(new NameNodeConnector(uri, conf));
       }
 
       boolean done = false;
@@ -1480,8 +1476,7 @@ public int run(String[] args) {
     try {
       checkReplicationPolicyCompatibility(conf);
 
-      final Map<String, Map<String, InetSocketAddress>> namenodes =
-          DFSUtil.getNNServiceRpcAddresses(conf);
+      final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
       return Balancer.run(namenodes, parse(args), conf);
     } catch (IOException e) {
       System.out.println(e + ". Exiting ...");
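
Note: Balancer.run() now hands each URI straight to NameNodeConnector (changed below), which passes it to NameNodeProxies.createProxy(). For a logical URI such as hdfs://ns1 this is what makes failover work: the proxy is built from the nameservice's failover configuration rather than from one physical address. A minimal sketch of that call, assuming the ns1 configuration from the earlier sketch plus a configured failover proxy provider:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

public class FailoverProxySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // No physical host in the URI: proxy creation resolves "ns1" against the
    // failover configuration instead of dereferencing a single address.
    URI logicalUri = new URI("hdfs://ns1");
    NamenodeProtocol namenode = NameNodeProxies
        .createProxy(conf, logicalUri, NamenodeProtocol.class)
        .getProxy();
    System.out.println("created proxy for " + logicalUri);
  }
}
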
Exiting ..."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java index eab6273c22..c4208b7951 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java @@ -21,9 +21,7 @@ import java.io.IOException; import java.io.OutputStream; import java.net.InetAddress; -import java.net.InetSocketAddress; import java.net.URI; -import java.util.Collection; import java.util.EnumSet; import org.apache.commons.logging.Log; @@ -38,7 +36,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; -import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; @@ -46,8 +43,6 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Daemon; -import com.google.common.collect.Lists; - /** * The class provides utilities for {@link Balancer} to access a NameNode */ @@ -56,7 +51,7 @@ class NameNodeConnector { private static final Log LOG = Balancer.LOG; private static final Path BALANCER_ID_PATH = new Path("/system/balancer.id"); - final InetSocketAddress namenodeAddress; + final URI nameNodeUri; final String blockpoolID; final NamenodeProtocol namenode; @@ -70,10 +65,9 @@ class NameNodeConnector { private BlockTokenSecretManager blockTokenSecretManager; private Daemon keyupdaterthread; // AccessKeyUpdater thread - NameNodeConnector(Collection haNNs, + NameNodeConnector(URI nameNodeUri, Configuration conf) throws IOException { - this.namenodeAddress = Lists.newArrayList(haNNs).get(0); - URI nameNodeUri = NameNode.getUri(this.namenodeAddress); + this.nameNodeUri = nameNodeUri; this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri, NamenodeProtocol.class) @@ -186,7 +180,7 @@ void close() { @Override public String toString() { - return getClass().getSimpleName() + "[namenodeAddress=" + namenodeAddress + return getClass().getSimpleName() + "[namenodeUri=" + nameNodeUri + ", id=" + blockpoolID + "]"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index b62f0d5d9e..d0e657baab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -526,21 +526,21 @@ public NameNode(Configuration conf) throws IOException { protected NameNode(Configuration conf, NamenodeRole role) throws IOException { - this.conf = conf; + this.conf = new Configuration(conf); this.role = role; - String nsId = getNameServiceId(conf); - String namenodeId = HAUtil.getNameNodeId(conf, nsId); - this.haEnabled = HAUtil.isHAEnabled(conf, nsId); + String nsId = getNameServiceId(this.conf); + String namenodeId = HAUtil.getNameNodeId(this.conf, nsId); + this.haEnabled = HAUtil.isHAEnabled(this.conf, nsId); if (!haEnabled) { state = ACTIVE_STATE; } 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index e49bb107e2..a9b62c3aea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -25,6 +25,8 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Iterator;
@@ -41,6 +43,8 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 public class TestDFSUtil {
@@ -233,11 +237,12 @@ public void checkNameServiceId(Configuration conf, String addr,
    * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
    */
   @Test
-  public void testSingleNamenode() {
+  public void testSingleNamenode() throws URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DEFAULT_ADDRESS);
 
     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
@@ -247,6 +252,10 @@ public void testSingleNamenode() {
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertFalse(isDefault);
+
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(1, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://" + DEFAULT_ADDRESS)));
   }
 
   /** Tests to ensure default namenode is used as fallback */
@@ -407,13 +416,14 @@ public void testGetServerInfo() {
   }
 
   @Test
-  public void testHANameNodesWithFederation() {
+  public void testHANameNodesWithFederation() throws URISyntaxException {
     HdfsConfiguration conf = new HdfsConfiguration();
 
     final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
     final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
     final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
     final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
 
     // Two nameservices, each with two NNs.
     conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
@@ -460,6 +470,11 @@ public void testHANameNodesWithFederation() {
     // Ditto for nameservice IDs, if multiple are defined
     assertEquals(null, DFSUtil.getNamenodeNameServiceId(conf));
     assertEquals(null, DFSUtil.getSecondaryNameServiceId(conf));
+
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(2, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://ns1")));
+    assertTrue(uris.contains(new URI("hdfs://ns2")));
   }
 
   @Test
@@ -509,4 +524,34 @@ public void testSubstituteForWildcardAddress() throws IOException {
     assertEquals("127.0.0.1:12345",
         DFSUtil.substituteForWildcardAddress("127.0.0.1:12345", "foo"));
   }
+
+  @Test
+  public void testGetNNUris() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+
+    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
+    final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
+    final String NS2_NN_HOST = "ns2-nn.example.com:8020";
+    final String NN_HOST = "nn.example.com:8020";
+
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
+
+    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"),
+        NS2_NN_HOST);
+
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN_HOST);
+
+    Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+
+    assertEquals(3, uris.size());
+    assertTrue(uris.contains(new URI("hdfs://ns1")));
+    assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_HOST)));
+    assertTrue(uris.contains(new URI("hdfs://" + NN_HOST)));
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 2dddb1b6e0..81b03a568e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
-import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
@@ -338,8 +338,7 @@ private void runBalancer(Configuration conf,
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
 
     // start rebalancing
-    Map<String, Map<String, InetSocketAddress>> namenodes =
-        DFSUtil.getNNServiceRpcAddresses(conf);
+    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
     assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index e064534da4..9a0001fd09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -18,9 +18,10 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
-import java.net.InetSocketAddress;
-import java.util.Map;
+import java.net.URI;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -67,12 +68,12 @@ public void testBalancerWithHANameNodes() throws Exception {
     int numOfDatanodes = capacities.length;
     NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
     nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
-    MiniDFSNNTopology simpleHATopology = new MiniDFSNNTopology()
-        .addNameservice(new MiniDFSNNTopology.NSConf(null).addNN(nn1Conf)
-            .addNN(new MiniDFSNNTopology.NNConf("nn2")));
-    cluster = new MiniDFSCluster.Builder(conf).nnTopology(simpleHATopology)
-        .numDataNodes(capacities.length).racks(racks).simulatedCapacities(
-            capacities).build();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(capacities.length)
+        .racks(racks)
+        .simulatedCapacities(capacities)
+        .build();
     HATestUtil.setFailoverConfigurations(cluster, conf);
     try {
       cluster.waitActive();
@@ -89,14 +90,12 @@ public void testBalancerWithHANameNodes() throws Exception {
       // start up an empty node with the same capacity and on the same rack
       cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
           new long[] { newNodeCapacity });
-
-      HATestUtil.setFailoverConfigurations(cluster, conf, NameNode.getUri(
-          cluster.getNameNode(0).getNameNodeAddress()).getHost());
       totalCapacity += newNodeCapacity;
       TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client,
           cluster);
-      Map<String, Map<String, InetSocketAddress>> namenodes = DFSUtil
-          .getNNServiceRpcAddresses(conf);
+      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      assertEquals(1, namenodes.size());
+      assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
       final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
       assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
       TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index 0245615a48..333d23ad9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -18,10 +18,10 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;
-import java.util.Map;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -40,8 +40,8 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -157,8 +157,7 @@ static void runBalancer(Suite s,
     LOG.info("BALANCER 1");
 
     // start rebalancing
-    final Map<String, Map<String, InetSocketAddress>> namenodes =
-        DFSUtil.getNNServiceRpcAddresses(s.conf);
+    final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, s.conf);
     Assert.assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index 42b5612571..bf919cea7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
@@ -188,6 +189,12 @@ public static String getLogicalHostname(MiniDFSCluster cluster) {
     return String.format(LOGICAL_HOSTNAME, cluster.getInstanceId());
   }
 
+  public static URI getLogicalUri(MiniDFSCluster cluster)
+      throws URISyntaxException {
+    return new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
+        getLogicalHostname(cluster));
+  }
+
   public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
       List<Integer> txids) throws InterruptedException {
     long start = System.currentTimeMillis();
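
Note: with this change the balancer addresses an HA nameservice the same way any HDFS client does, through the logical URI. For reference, a minimal client-side sketch; it assumes the ns1 nameservice and a failover proxy provider are configured in hdfs-site.xml, and the nameservice id is illustrative:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class LogicalUriClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // The URI names the nameservice, not a NameNode host; failover between
    // the NNs of ns1 is handled underneath the FileSystem handle.
    FileSystem fs = FileSystem.get(new URI("hdfs://ns1"), conf);
    System.out.println(fs.getUri());
  }
}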