diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 74757e563a..7c2026c105 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -384,6 +385,7 @@ public long getProtocolVersion(String protocol,
    */
   @Deprecated
   public static final int DEFAULT_PORT = DFS_NAMENODE_RPC_PORT_DEFAULT;
+  public static final String FS_HDFS_IMPL_KEY = "fs.hdfs.impl";
   public static final Logger LOG =
       LoggerFactory.getLogger(NameNode.class.getName());
   public static final Logger stateChangeLog =
@@ -725,6 +727,11 @@ protected void initialize(Configuration conf) throws IOException {
             intervals);
       }
     }
+    // Currently NN uses FileSystem.get to initialize DFS in startTrashEmptier.
+    // If fs.hdfs.impl was overridden by core-site.xml, we may get other
+    // filesystem. To make sure we get DFS, we are setting fs.hdfs.impl to DFS.
+    // HDFS-15450
+    conf.set(FS_HDFS_IMPL_KEY, DistributedFileSystem.class.getName());
 
     UserGroupInformation.setConfiguration(conf);
     loginAsNameNodeUser(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java
new file mode 100644
index 0000000000..9d394c0049
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests that the NN startup is successful with ViewFSOverloadScheme.
+ */
+public class TestNNStartupWhenViewFSOverloadSchemeEnabled {
+  private MiniDFSCluster cluster;
+  private static final String FS_IMPL_PATTERN_KEY = "fs.%s.impl";
+  private static final String HDFS_SCHEME = "hdfs";
+  private static final Configuration CONF = new Configuration();
+
+  @BeforeClass
+  public static void setUp() {
+    CONF.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    CONF.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    CONF.setInt(
+        CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
+    CONF.set(String.format(FS_IMPL_PATTERN_KEY, HDFS_SCHEME),
+        ViewFileSystemOverloadScheme.class.getName());
+    CONF.set(String
+        .format(FsConstants.FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN,
+            HDFS_SCHEME), DistributedFileSystem.class.getName());
+    // By default trash interval is 0. To trigger TrashEmptier, let's set it to
+    // >0 value.
+    CONF.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 100);
+  }
+
+  /**
+   * Tests that the HA mode NameNode startup is successful when
+   * ViewFSOverloadScheme configured.
+   */
+  @Test(timeout = 30000)
+  public void testHANameNodeAndDataNodeStartup() throws Exception {
+    cluster = new MiniDFSCluster.Builder(CONF)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1)
+        .waitSafeMode(false).build();
+    cluster.waitActive();
+    cluster.transitionToActive(0);
+  }
+
+  /**
+   * Tests that the NameNode startup is successful when ViewFSOverloadScheme
+   * configured.
+   */
+  @Test(timeout = 30000)
+  public void testNameNodeAndDataNodeStartup() throws Exception {
+    cluster =
+        new MiniDFSCluster.Builder(CONF).numDataNodes(1).waitSafeMode(false)
+            .build();
+    cluster.waitActive();
+  }
+
+  @After
+  public void shutdownCluster() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+}
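For context, and not as part of the patch above: the inline comment in NameNode#initialize refers to the trash-emptier startup path, where the NameNode obtains a FileSystem via FileSystem.get(conf). The sketch below is a minimal, standalone illustration (the class name FsHdfsImplOverrideExample is hypothetical) of how an fs.hdfs.impl override, such as the one the new test installs, changes which FileSystem class Hadoop resolves for the hdfs scheme, and how pinning the key back to DistributedFileSystem restores the type the trash emptier expects.

// Illustrative sketch only; not part of the change above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FsHdfsImplOverrideExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // A core-site.xml like the one the test simulates maps the hdfs scheme
    // to the ViewFS overload wrapper instead of DistributedFileSystem.
    conf.set("fs.hdfs.impl", ViewFileSystemOverloadScheme.class.getName());
    System.out.println(FileSystem.getFileSystemClass("hdfs", conf));
    // prints: class org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme

    // The patched NameNode#initialize pins the key back before
    // startTrashEmptier runs, so resolution yields DistributedFileSystem.
    conf.set("fs.hdfs.impl", DistributedFileSystem.class.getName());
    System.out.println(FileSystem.getFileSystemClass("hdfs", conf));
    // prints: class org.apache.hadoop.hdfs.DistributedFileSystem
  }
}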