diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 79275b0cdc..aebc28aa79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -57,10 +57,14 @@
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 public class HAUtil {
-  
+
+  public static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(HAUtil.class.getName());
+
   private static final String[] HA_SPECIAL_INDEPENDENT_KEYS = new String[]{
     DFS_NAMENODE_RPC_ADDRESS_KEY,
     DFS_NAMENODE_RPC_BIND_HOST_KEY,
@@ -273,8 +277,13 @@ public static InetSocketAddress getAddressOfActive(FileSystem fs)
       List<ClientProtocol> namenodes =
           getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
       for (ClientProtocol proxy : namenodes) {
-        if (proxy.getHAServiceState().equals(HAServiceState.ACTIVE)) {
-          inAddr = RPC.getServerAddress(proxy);
+        try {
+          if (proxy.getHAServiceState().equals(HAServiceState.ACTIVE)) {
+            inAddr = RPC.getServerAddress(proxy);
+          }
+        } catch (Exception e) {
+          //Ignore the exception while connecting to a namenode.
+          LOG.debug("Error while connecting to namenode", e);
         }
       }
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
index cc8ead1687..46ebb8f104 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
@@ -75,7 +75,12 @@ public void testHaFsck() throws Exception {
 
       cluster.transitionToStandby(0);
       cluster.transitionToActive(1);
-      
+
+      runFsck(conf);
+      // Stop one standby namenode, FSCK should still be successful, since there
+      // is one Active namenode available
+      cluster.getNameNode(0).stop();
+      runFsck(conf);
     } finally {
       if (fs != null) {