From 51e520c68aafb73b784bf690a8a42de3af0f229c Mon Sep 17 00:00:00 2001
From: Aaron Myers
Date: Fri, 4 May 2012 22:14:10 +0000
Subject: [PATCH] HADOOP-8349. ViewFS doesn't work when the root of a file
 system is mounted. Contributed by Aaron T. Myers.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1334231 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-common/CHANGES.txt                 |  2 +
 .../apache/hadoop/fs/AbstractFileSystem.java  |  2 +-
 .../main/java/org/apache/hadoop/fs/Path.java  |  7 ++
 .../hadoop/fs/viewfs/ChRootedFileSystem.java  |  5 +-
 .../apache/hadoop/fs/viewfs/ChRootedFs.java   |  6 +-
 .../fs/viewfs/ViewFileSystemBaseTest.java     | 30 ++++--
 .../hadoop/fs/viewfs/ViewFsBaseTest.java      | 39 ++++++--
 .../hdfs/server/namenode/FSNamesystem.java    |  6 +-
 .../viewfs/TestViewFileSystemAtHdfsRoot.java  | 93 +++++++++++++++++++
 .../fs/viewfs/TestViewFileSystemHdfs.java     |  6 +-
 .../fs/viewfs/TestViewFsAtHdfsRoot.java       | 93 +++++++++++++++++++
 .../hadoop/fs/viewfs/TestViewFsHdfs.java      | 28 +-----
 12 files changed, 266 insertions(+), 51 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8bb39d508b..558c9b8962 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -423,6 +423,8 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8355. SPNEGO filter throws/logs exception when authentication fails
     (tucu)
 
+    HADOOP-8349. ViewFS doesn't work when the root of a file system is mounted. (atm)
+
   BREAKDOWN OF HADOOP-7454 SUBTASKS
 
     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 86974734b5..cbcce217b6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -346,7 +346,7 @@ public void checkPath(Path path) {
             path);
       } else {
         throw new InvalidPathException(
-            "Path without scheme with non-null autorhrity:" + path);
+            "Path without scheme with non-null authority:" + path);
       }
     }
     String thisScheme = this.getUri().getScheme();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 2fbed2a2bb..3d193dfad2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -223,6 +223,13 @@ public boolean isAbsolute() {
     return isUriPathAbsolute();
   }
 
+  /**
+   * @return true if and only if this path represents the root of a file system
+   */
+  public boolean isRoot() {
+    return getParent() == null;
+  }
+
   /** Returns the final component of this path.*/
   public String getName() {
     String path = uri.getPath();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 209fd216d1..85426fa4ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -75,7 +75,8 @@ protected FileSystem getMyFs() {
   protected Path fullPath(final Path path) {
     super.checkPath(path);
     return path.isAbsolute() ?
-      new Path(chRootPathPartString + path.toUri().getPath()) :
+      new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+          + path.toUri().getPath()) :
       new Path(chRootPathPartString + workingDir.toUri().getPath(), path);
   }
 
@@ -127,7 +128,7 @@ String stripOutRoot(final Path p) throws IOException {
     }
     String pathPart = p.toUri().getPath();
     return (pathPart.length() == chRootPathPartString.length()) ? "" : pathPart
-      .substring(chRootPathPartString.length() + 1);
+      .substring(chRootPathPartString.length() + (chRootPathPart.isRoot() ? 0 : 1));
   }
 
   @Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index 063d0d04fa..f6e27d2815 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -79,7 +79,8 @@ protected AbstractFileSystem getMyFs() {
    */
   protected Path fullPath(final Path path) {
     super.checkPath(path);
-    return new Path(chRootPathPartString + path.toUri().getPath());
+    return new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+        + path.toUri().getPath());
   }
 
   public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
@@ -127,7 +128,8 @@ public String stripOutRoot(final Path p) {
     }
     String pathPart = p.toUri().getPath();
     return (pathPart.length() == chRootPathPartString.length()) ?
-        "" : pathPart.substring(chRootPathPartString.length() + 1);
+        "" : pathPart.substring(chRootPathPartString.length() +
+            (chRootPathPart.isRoot() ? 0 : 1));
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 1de434e3a9..d4740a41fc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -71,11 +71,8 @@ public class ViewFileSystemBaseTest {
 
   @Before
   public void setUp() throws Exception {
-    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
-    // In case previous test was killed before cleanup
-    fsTarget.delete(targetTestRoot, true);
+    initializeTargetTestRoot();
 
-    fsTarget.mkdirs(targetTestRoot);
     // Make user and data dirs - we creates links to them in the mount table
     fsTarget.mkdirs(new Path(targetTestRoot,"user"));
     fsTarget.mkdirs(new Path(targetTestRoot,"data"));
@@ -99,7 +96,16 @@ public void tearDown() throws Exception {
     fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
   }
 
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+    // In case previous test was killed before cleanup
+    fsTarget.delete(targetTestRoot, true);
+
+    fsTarget.mkdirs(targetTestRoot);
+  }
+
   void setupMountPoints() {
+    ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
     ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri());
@@ -121,7 +127,7 @@ public void testGetMountPoints() {
   }
 
   int getExpectedMountPoints() {
-    return 7;
+    return 8;
   }
 
   /**
@@ -166,7 +172,7 @@ public void testGetDelegationTokensWithCredentials() throws IOException {
         }
       }
     }
-    Assert.assertEquals(expectedTokenCount / 2, delTokens.size());
+    Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens.size());
   }
 
   int getExpectedDelegationTokenCountWithCredentials() {
@@ -309,6 +315,16 @@ public void testOperationsThroughMountLinks() throws IOException {
     Assert.assertTrue("Renamed dest should exist as dir in target",
         fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
 
+    // Make a directory under a directory that's mounted from the root of another FS
+    fsView.mkdirs(new Path("/targetRoot/dirFoo"));
+    Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
+    boolean dirFooPresent = false;
+    for (FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) {
+      if (fileStatus.getPath().getName().equals("dirFoo")) {
+        dirFooPresent = true;
+      }
+    }
+    Assert.assertTrue(dirFooPresent);
   }
 
   // rename across mount points that point to same target also fail
@@ -418,7 +434,7 @@ public void testListOnInternalDirsOfMountTable() throws IOException {
   }
 
   int getExpectedDirPaths() {
-    return 6;
+    return 7;
   }
 
   @Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index 8622f02ff6..7f731de23e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.FileContextTestHelper.fileType;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
@@ -77,12 +78,8 @@ public class ViewFsBaseTest {
 
   @Before
   public void setUp() throws Exception {
-
-    targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
-    // In case previous test was killed before cleanup
-    fcTarget.delete(targetTestRoot, true);
+    initializeTargetTestRoot();
 
-    fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
     // Make user and data dirs - we creates links to them in the mount table
     fcTarget.mkdir(new Path(targetTestRoot,"user"),
         FileContext.DEFAULT_PERM, true);
@@ -100,6 +97,7 @@ public void setUp() throws Exception {
 
     // Set up the defaultMT in the config with our mount point links
     conf = new Configuration();
+    ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
     ConfigUtil.addLink(conf, "/user",
         new Path(targetTestRoot,"user").toUri());
     ConfigUtil.addLink(conf, "/user2",
@@ -118,6 +116,14 @@ public void setUp() throws Exception {
     fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
     // Also try viewfs://default/ - note authority is name of mount table
   }
+
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
+    // In case previous test was killed before cleanup
+    fcTarget.delete(targetTestRoot, true);
+
+    fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
+  }
 
   @After
   public void tearDown() throws Exception {
@@ -128,7 +134,11 @@ public void tearDown() throws Exception {
   public void testGetMountPoints() {
     ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem();
     MountPoint[] mountPoints = viewfs.getMountPoints();
-    Assert.assertEquals(7, mountPoints.length);
+    Assert.assertEquals(8, mountPoints.length);
+  }
+
+  int getExpectedDelegationTokenCount() {
+    return 0;
   }
 
   /**
@@ -140,7 +150,7 @@ public void testGetMountPoints() {
   public void testGetDelegationTokens() throws IOException {
     List<Token<?>> delTokens =
         fcView.getDelegationTokens(new Path("/"), "sanjay");
-    Assert.assertEquals(0, delTokens.size());
+    Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
   }
 
@@ -281,6 +291,19 @@ public void testOperationsThroughMountLinks() throws IOException {
     Assert.assertTrue("Renamed dest should exist as dir in target",
         isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
 
+    // Make a directory under a directory that's mounted from the root of another FS
+    fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
+    Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
+    boolean dirFooPresent = false;
+    RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path(
+        "/targetRoot/"));
+    while (dirContents.hasNext()) {
+      FileStatus fileStatus = dirContents.next();
+      if (fileStatus.getPath().getName().equals("dirFoo")) {
+        dirFooPresent = true;
+      }
+    }
+    Assert.assertTrue(dirFooPresent);
   }
 
   // rename across mount points that point to same target also fail
@@ -358,7 +381,7 @@ public void testListOnInternalDirsOfMountTable() throws IOException {
 
     FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
     FileStatus fs;
-    Assert.assertEquals(6, dirPaths.length);
+    Assert.assertEquals(7, dirPaths.length);
     fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths);
       Assert.assertNotNull(fs);
       Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b1686d0eb4..35800c1663 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4556,7 +4556,7 @@ void unprotectedChangeLease(String src, String dst, HdfsFileStatus dinfo) {
     if (destinationExisted && dinfo.isDir()) {
       Path spath = new Path(src);
       Path parent = spath.getParent();
-      if (isRoot(parent)) {
+      if (parent.isRoot()) {
         overwrite = parent.toString();
       } else {
         overwrite = parent.toString() + Path.SEPARATOR;
@@ -4569,10 +4569,6 @@ void unprotectedChangeLease(String src, String dst, HdfsFileStatus dinfo) {
 
     leaseManager.changeLease(src, dst, overwrite, replaceBy);
   }
-
-  private boolean isRoot(Path path) {
-    return path.getParent() == null;
-  }
 
   /**
    * Serializes leases.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
new file mode 100644
index 0000000000..9cc74e3270
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+/**
+ * Make sure that ViewFileSystem works when the root of an FS is mounted to a
+ * ViewFileSystem mount point.
+ */
+public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
+
+  private static MiniDFSCluster cluster;
+  private static Configuration CONF = new Configuration();
+  private static FileSystem fHdfs;
+
+  @BeforeClass
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    SupportsBlocks = true;
+    CONF.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+    cluster = new MiniDFSCluster.Builder(CONF)
+      .numDataNodes(2)
+      .build();
+    cluster.waitClusterUp();
+
+    fHdfs = cluster.getFileSystem();
+  }
+
+  @AfterClass
+  public static void clusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    fsTarget = fHdfs;
+    super.setUp();
+  }
+
+  /**
+   * Override this so that we don't set the targetTestRoot to any path under the
+   * root of the FS, and so that we don't try to delete the test dir, but rather
+   * only its contents.
+   */
+  @Override
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = fHdfs.makeQualified(new Path("/"));
+    for (FileStatus status : fHdfs.listStatus(targetTestRoot)) {
+      fHdfs.delete(status.getPath(), true);
+    }
+  }
+
+  @Override
+  int getExpectedDelegationTokenCount() {
+    return 8;
+  }
+
+  @Override
+  int getExpectedDelegationTokenCountWithCredentials() {
+    return 1;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index 7ad56c0e93..9f71d85f05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -105,17 +105,17 @@ void setupMountPoints() {
   // additional mount.
   @Override
   int getExpectedDirPaths() {
-    return 7;
+    return 8;
   }
 
   @Override
   int getExpectedMountPoints() {
-    return 8;
+    return 9;
   }
 
   @Override
   int getExpectedDelegationTokenCount() {
-    return 8;
+    return 9;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
new file mode 100644
index 0000000000..449689242d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+/**
+ * Make sure that ViewFs works when the root of an FS is mounted to a ViewFs
+ * mount point.
+ */
+public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
+
+  private static MiniDFSCluster cluster;
+  private static HdfsConfiguration CONF = new HdfsConfiguration();
+  private static FileContext fc;
+
+  @BeforeClass
+  public static void clusterSetupAtBegining() throws IOException,
+      LoginException, URISyntaxException {
+    SupportsBlocks = true;
+    CONF.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
+    cluster.waitClusterUp();
+    fc = FileContext.getFileContext(cluster.getURI(0), CONF);
+  }
+
+
+  @AfterClass
+  public static void ClusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // create the test root on local_fs
+    fcTarget = fc;
+    super.setUp();
+  }
+
+  /**
+   * Override this so that we don't set the targetTestRoot to any path under the
+   * root of the FS, and so that we don't try to delete the test dir, but rather
+   * only its contents.
+   */
+  @Override
+  void initializeTargetTestRoot() throws IOException {
+    targetTestRoot = fc.makeQualified(new Path("/"));
+    RemoteIterator<FileStatus> dirContents = fc.listStatus(targetTestRoot);
+    while (dirContents.hasNext()) {
+      fc.delete(dirContents.next().getPath(), true);
+    }
+  }
+
+  /**
+   * This overrides the default implementation since hdfs does have delegation
+   * tokens.
+   */
+  @Override
+  int getExpectedDelegationTokenCount() {
+    return 8;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
index 0e94b4eb3d..dc7110cfaf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
@@ -20,7 +20,6 @@
 
 import java.io.IOException;
 import java.net.URISyntaxException;
-import java.util.List;
 
 import javax.security.auth.login.LoginException;
 
@@ -30,20 +29,13 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Test;
-
 
 public class TestViewFsHdfs extends ViewFsBaseTest {
 
   private static MiniDFSCluster cluster;
-  private static Path defaultWorkingDirectory;
   private static HdfsConfiguration CONF = new HdfsConfiguration();
   private static FileContext fc;
 
@@ -57,7 +49,7 @@ public static void clusterSetupAtBegining() throws IOException,
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(0), CONF);
-    defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
+    Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
         UserGroupInformation.getCurrentUser().getShortUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
@@ -73,25 +65,15 @@ public void setUp() throws Exception {
     // create the test root on local_fs
     fcTarget = fc;
     super.setUp();
-
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
   }
-
-  /*
-   * This overides the default implementation since hdfs does have delegation
+
+  /**
+   * This overrides the default implementation since hdfs does have delegation
    * tokens.
    */
   @Override
-  @Test
-  public void testGetDelegationTokens() throws IOException {
-    List<Token<?>> delTokens =
-        fcView.getDelegationTokens(new Path("/"), "sanjay");
-    Assert.assertEquals(7, delTokens.size());
+  int getExpectedDelegationTokenCount() {
+    return 8;
   }
 
 }
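
Reviewer note, not part of the patch: the sketch below is a standalone illustration of the path handling that the ChRootedFileSystem/ChRootedFs hunks above change. The class and method names (ChRootPathJoinSketch, fullPath, stripOutRoot) are hypothetical stand-ins that mirror the patched logic with plain strings; they are not Hadoop APIs. With a mount target of "/", the old code produced "//dir" when joining and, because it always skipped one separator character, mangled the relative path when stripping; the isRoot() check introduced by the patch special-cases the bare root.

// Standalone Java illustration (assumed names, not Hadoop code).
public class ChRootPathJoinSketch {

  // Mirrors the patched fullPath(): skip the chroot prefix entirely when it is "/".
  static String fullPath(String chRoot, String absolutePath) {
    boolean chRootIsRoot = chRoot.equals("/");
    return (chRootIsRoot ? "" : chRoot) + absolutePath;
  }

  // Mirrors the patched stripOutRoot(): only skip a separator character when
  // the chroot is not the bare root.
  static String stripOutRoot(String chRoot, String fullPath) {
    boolean chRootIsRoot = chRoot.equals("/");
    return fullPath.length() == chRoot.length()
        ? ""
        : fullPath.substring(chRoot.length() + (chRootIsRoot ? 0 : 1));
  }

  public static void main(String[] args) {
    // Root mount: the pre-patch logic yielded "//user" and "ser" here.
    System.out.println(fullPath("/", "/user"));            // /user
    System.out.println(stripOutRoot("/", "/user"));        // user
    // Non-root mount behaves the same as before the patch.
    System.out.println(fullPath("/nn1", "/user"));         // /nn1/user
    System.out.println(stripOutRoot("/nn1", "/nn1/user")); // user
  }
}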