HDFS-12797. Add test for NFS mount of unsupported filesystems (e.g. file:///). Contributed by Mukul Kumar Singh.

This commit is contained in:
Jitendra Pandey 2017-11-09 23:53:17 -08:00
parent 3c6adda291
commit 8a1bd9a4f4

View File

@ -20,6 +20,7 @@
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.nio.file.FileSystemException;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@ -33,9 +34,14 @@
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.nfs.mount.Mountd;
import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
public class TestExportsTable {
@Rule
public ExpectedException exception = ExpectedException.none();
@Test
public void testHdfsExportPoint() throws IOException {
@ -70,7 +76,7 @@ public void testHdfsExportPoint() throws IOException {
}
@Test
public void testViewFsExportPoint() throws IOException {
public void testViewFsMultipleExportPoint() throws IOException {
NfsConfiguration config = new NfsConfiguration();
MiniDFSCluster cluster = null;
String clusterName = RandomStringUtils.randomAlphabetic(10);
@ -182,6 +188,56 @@ public void testViewFsInternalExportPoint() throws IOException {
}
}
@Test
public void testViewFsRootExportPoint() throws IOException {
  // Exporting "/" of a viewfs namespace must be rejected: the NFS gateway
  // only supports HDFS as the underlying filesystem.
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  String clusterName = RandomStringUtils.randomAlphabetic(10);

  String exportPoint = "/";
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
  config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      FsConstants.VIEWFS_SCHEME + "://" + clusterName);
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  config.set("nfs.http.address", "0.0.0.0:0");

  try {
    // Two federated namenodes back the two viewfs links created below.
    cluster =
        new MiniDFSCluster.Builder(config).nnTopology(
            MiniDFSNNTopology.simpleFederatedTopology(2))
            .numDataNodes(2)
            .build();
    cluster.waitActive();
    DistributedFileSystem hdfs1 = cluster.getFileSystem(0);
    DistributedFileSystem hdfs2 = cluster.getFileSystem(1);

    Path base1 = new Path("/user1");
    Path base2 = new Path("/user2");
    hdfs1.delete(base1, true);
    hdfs2.delete(base2, true);
    hdfs1.mkdirs(base1);
    hdfs2.mkdirs(base2);
    ConfigUtil.addLink(config, clusterName, "/hdfs1",
        hdfs1.makeQualified(base1).toUri());
    ConfigUtil.addLink(config, clusterName, "/hdfs2",
        hdfs2.makeQualified(base2).toUri());

    // The gateway is expected to refuse the viewfs scheme at startup.
    exception.expect(FileSystemException.class);
    exception.
        expectMessage("Only HDFS is supported as underlyingFileSystem, "
            + "fs scheme:viewfs");
    // Start nfs
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
@Test
public void testHdfsInternalExportPoint() throws IOException {
NfsConfiguration config = new NfsConfiguration();
@ -219,4 +275,34 @@ public void testHdfsInternalExportPoint() throws IOException {
}
}
}
/**
 * Starting the NFS gateway with a non-HDFS default filesystem
 * (file:///) must fail with a {@link FileSystemException}.
 */
@Test
public void testInvalidFsExport() throws IOException {
  NfsConfiguration nfsConfig = new NfsConfiguration();

  // Ephemeral ports so parallel test runs do not collide.
  nfsConfig.setInt("nfs3.mountd.port", 0);
  nfsConfig.setInt("nfs3.server.port", 0);
  nfsConfig.set("nfs.http.address", "0.0.0.0:0");

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(nfsConfig).numDataNodes(1).build();
    cluster.waitActive();

    // Repoint the default filesystem at the local FS, which the
    // gateway does not support as an underlying filesystem.
    nfsConfig.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
        FsConstants.LOCAL_FS_URI.toString());

    exception.expect(FileSystemException.class);
    exception.expectMessage("Only HDFS is supported as underlyingFileSystem, "
        + "fs scheme:file");

    // Gateway startup should throw before serving anything.
    final Nfs3 gateway = new Nfs3(nfsConfig);
    gateway.startServiceInternal(false);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
}