diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index d267530106..f9b2e8d7c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -366,7 +366,7 @@ static Map<String, Map<String, InetSocketAddress>> getAddressesForNsIds(
   static Map<String, InetSocketAddress> getAddressesForNameserviceId(
       Configuration conf, String nsId, String defaultValue, String... keys) {
     Collection<String> nnIds = getNameNodeIds(conf, nsId);
-    Map<String, InetSocketAddress> ret = Maps.newHashMap();
+    Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
     for (String nnId : emptyAsSingletonNull(nnIds)) {
       String suffix = concatSuffixes(nsId, nnId);
       String address = getConfValue(defaultValue, suffix, conf, keys);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 6f8c661a5b..1a388061ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -264,6 +264,8 @@ interface Failover {
     String  CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY =
         PREFIX + "connection.retries.on.timeouts";
     int     CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
+    String  RANDOM_ORDER = PREFIX + "random.order";
+    boolean RANDOM_ORDER_DEFAULT = false;
   }
 
   /** dfs.client.write configuration properties */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index c2d4d91626..0e8fa44880 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -23,6 +23,7 @@
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -43,9 +44,10 @@
 import com.google.common.base.Preconditions;
 
 /**
- * A FailoverProxyProvider implementation which allows one to configure two URIs
- * to connect to during fail-over. The first configured address is tried first,
- * and on a fail-over event the other address is tried.
+ * A FailoverProxyProvider implementation which allows one to configure
+ * multiple URIs to connect to during fail-over. A random configured address is
+ * tried first, and on a fail-over event the other addresses are tried
+ * sequentially in a random order.
  */
 public class ConfiguredFailoverProxyProvider<T> extends
     AbstractNNFailoverProxyProvider<T> {
@@ -124,6 +126,13 @@ public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
       for (InetSocketAddress address : addressesOfNns) {
         proxies.add(new AddressRpcProxyPair<T>(address));
       }
+      // Randomize the list to prevent all clients pointing to the same one
+      boolean randomized = conf.getBoolean(
+          HdfsClientConfigKeys.Failover.RANDOM_ORDER,
+          HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+      if (randomized) {
+        Collections.shuffle(proxies);
+      }
 
       // The client may have a delegation token set for the logical
       // URI of the cluster. Clone this token to apply to each of the
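
Note on the DFSUtilClient hunk above (commentary, not part of the patch): Maps.newLinkedHashMap() makes the nnId-to-address map iterate in insertion order, so the proxy list is always built in the order the NameNodes appear in the configuration, and the only source of reordering is the explicit, opt-in shuffle. A minimal standalone sketch of the distinction, using hypothetical nnIds and addresses:

import java.util.LinkedHashMap;
import java.util.Map;

public class LinkedHashMapOrderSketch {
  public static void main(String[] args) {
    // LinkedHashMap preserves insertion order on iteration; a plain
    // HashMap's iteration order is unspecified, so a proxy list built
    // from ret.values() would carry no ordering guarantee.
    Map<String, String> ret = new LinkedHashMap<>();
    ret.put("nn1", "machine1.example.com:8020"); // hypothetical addresses
    ret.put("nn2", "machine2.example.com:8020");
    // Always prints [machine1.example.com:8020, machine2.example.com:8020]
    System.out.println(ret.values());
  }
}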
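
A hedged sketch of how a client opts in once this lands, assuming only the key added above and a stock Configuration; the NameNode host names are hypothetical, and the snippet mirrors the shuffle logic from the constructor hunk rather than the provider's actual code path:

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class RandomFailoverOrderSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Expands to "dfs.client.failover.random.order" via the Failover PREFIX.
    conf.setBoolean(HdfsClientConfigKeys.Failover.RANDOM_ORDER, true);

    // Hypothetical stand-ins for the addresses the provider resolves from
    // dfs.namenode.rpc-address.<nameservice>.<nnId>.
    List<InetSocketAddress> proxies = new ArrayList<>();
    proxies.add(InetSocketAddress.createUnresolved("nn1.example.com", 8020));
    proxies.add(InetSocketAddress.createUnresolved("nn2.example.com", 8020));

    // Same opt-in check as the constructor hunk above: shuffle only when
    // the flag is set, otherwise keep the configured order.
    if (conf.getBoolean(HdfsClientConfigKeys.Failover.RANDOM_ORDER,
        HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT)) {
      Collections.shuffle(proxies);
    }
    System.out.println(proxies);
  }
}

Equivalently, setting dfs.client.failover.random.order to true in hdfs-site.xml enables the shuffle cluster-wide; the default of false preserves the old deterministic ordering.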