diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
index 5aa5ec934c..6432bb0e8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
@@ -61,8 +61,10 @@ public interface FileSubclusterResolver {
    * cache.
    *
    * @param path Path to get the mount points under.
-   * @return List of mount points present at this path or zero-length list if
-   *         none are found.
+   * @return List of mount points present at this path. Return zero-length
+   *         list if the path is a mount point but there are no mount points
+   *         under the path. Return null if the path is not a mount point
+   *         and there are no mount points under the path.
    * @throws IOException Throws exception if the data is not available.
    */
   List<String> getMountPoints(String path) throws IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index d603947894..2e0713fba2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -726,6 +726,9 @@ public class RouterClientProtocol implements ClientProtocol {
           date = dates.get(src);
         }
         ret = getMountPointStatus(src, children.size(), date);
+      } else if (children != null) {
+        // The src is a mount point, but there are no files or directories
+        ret = getMountPointStatus(src, 0, 0);
       }
     }
 
@@ -1734,13 +1737,26 @@
     FsPermission permission = FsPermission.getDirDefault();
     String owner = this.superUser;
     String group = this.superGroup;
-    try {
-      // TODO support users, it should be the user for the pointed folder
-      UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
-      owner = ugi.getUserName();
-      group = ugi.getPrimaryGroupName();
-    } catch (IOException e) {
-      LOG.error("Cannot get the remote user: {}", e.getMessage());
+    if (subclusterResolver instanceof MountTableResolver) {
+      try {
+        MountTableResolver mountTable = (MountTableResolver) subclusterResolver;
+        MountTable entry = mountTable.getMountPoint(name);
+        if (entry != null) {
+          permission = entry.getMode();
+          owner = entry.getOwnerName();
+          group = entry.getGroupName();
+        }
+      } catch (IOException e) {
+        LOG.error("Cannot get mount point: {}", e.getMessage());
+      }
+    } else {
+      try {
+        UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
+        owner = ugi.getUserName();
+        group = ugi.getPrimaryGroupName();
+      } catch (IOException e) {
+        LOG.error("Cannot get remote user: {}", e.getMessage());
+      }
     }
     long inodeId = 0;
     return new HdfsFileStatus.Builder()
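The javadoc change above turns getMountPoints() into a three-way contract: a non-empty list for a mount point with entries below it, an empty list for a mount point with nothing below it, and null when the path is not a mount point at all; the new else-if branch in RouterClientProtocol relies on that distinction. A minimal standalone Java sketch of the contract, using made-up names (MountPointContractSketch and a local stub) rather than the real RBF classes:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

/** Illustrative only: mirrors the null / empty / non-empty contract. */
public class MountPointContractSketch {

  /** Hypothetical stand-in for FileSubclusterResolver#getMountPoints. */
  static List<String> getMountPoints(String path) {
    if (path.equals("/")) {
      return Arrays.asList("testdir1", "testdir2"); // mount point with children
    } else if (path.equals("/testdir1")) {
      return Collections.emptyList();               // mount point, no children
    }
    return null;                                    // not a mount point
  }

  static String describe(String path) {
    List<String> children = getMountPoints(path);
    if (children == null) {
      return path + ": not a mount point";
    } else if (children.isEmpty()) {
      return path + ": mount point with no mount points under it";
    }
    return path + ": mount point with " + children.size() + " mount points";
  }

  public static void main(String[] args) {
    System.out.println(describe("/"));
    System.out.println(describe("/testdir1"));
    System.out.println(describe("/nomount"));
  }
}
```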
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
index 4813b535e3..9bfd705efb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
@@ -87,11 +87,12 @@ public class RouterQuotaUpdateService extends PeriodicService {
 
         QuotaUsage currentQuotaUsage = null;
 
-        // Check whether destination path exists in filesystem. If destination
-        // is not present, reset the usage. For other mount entry get current
-        // quota usage
+        // Check whether the destination path exists in the filesystem. If the
+        // mtime is zero, the destination is not present and the usage is
+        // reset; the mtime is zero because the mount table does not have one.
+        // For other mount entries, get the current quota usage.
         HdfsFileStatus ret = this.rpcServer.getFileInfo(src);
-        if (ret == null) {
+        if (ret == null || ret.getModificationTime() == 0) {
           currentQuotaUsage = new RouterQuotaUsage.Builder()
               .fileAndDirectoryCount(0)
               .quota(nsQuota)
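The comment and condition above reduce to one predicate: treat the destination as absent when getFileInfo() returns nothing, or when it returns the synthetic mount-point status whose modification time is zero. A standalone sketch of that predicate with a hypothetical Status stand-in (not the real HdfsFileStatus):

```java
/** Illustrative only: the reset condition used by the quota update hunk above. */
public class QuotaResetSketch {

  /** Hypothetical stand-in for HdfsFileStatus with only the field we need. */
  static final class Status {
    final long modificationTime;

    Status(long modificationTime) {
      this.modificationTime = modificationTime;
    }
  }

  /** Reset cached usage when the destination is missing or is only a mount point. */
  static boolean shouldResetUsage(Status ret) {
    return ret == null || ret.modificationTime == 0;
  }

  public static void main(String[] args) {
    System.out.println(shouldResetUsage(null));              // true: no destination at all
    System.out.println(shouldResetUsage(new Status(0)));     // true: mount point only, mtime is 0
    System.out.println(shouldResetUsage(new Status(1000L))); // false: real directory exists
  }
}
```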
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index f5636ceccd..9bff00732e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -303,15 +303,16 @@ public class MockResolver
 
   @Override
   public List<String> getMountPoints(String path) throws IOException {
+    // Mounts only supported under root level
+    if (!path.equals("/")) {
+      return null;
+    }
     List<String> mounts = new ArrayList<>();
-    if (path.equals("/")) {
-      // Mounts only supported under root level
-      for (String mount : this.locations.keySet()) {
-        if (mount.length() > 1) {
-          // Remove leading slash, this is the behavior of the mount tree,
-          // return only names.
-          mounts.add(mount.replace("/", ""));
-        }
+    for (String mount : this.locations.keySet()) {
+      if (mount.length() > 1) {
+        // Remove leading slash, this is the behavior of the mount tree,
+        // return only names.
+        mounts.add(mount.replace("/", ""));
       }
     }
     return mounts;
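The TestRouterMountTable changes below exercise the owner/group/mode selection added to RouterClientProtocol#getMountPointStatus earlier in this patch: when a mount table entry is found its attributes are reported, otherwise the code falls back to the remote user and the router defaults. A simplified standalone sketch of the entry-or-default part of that logic, with a hypothetical Entry type rather than the real MountTable record:

```java
/** Illustrative only: attribute selection for a mount point status. */
public class MountPointStatusSketch {

  /** Hypothetical stand-in for a mount table entry. */
  static final class Entry {
    final String owner;
    final String group;
    final short mode;

    Entry(String owner, String group, short mode) {
      this.owner = owner;
      this.group = group;
      this.mode = mode;
    }
  }

  /** Prefer the entry's attributes; fall back to the router defaults. */
  static String describeStatus(Entry entry, String superUser, String superGroup) {
    String owner = superUser;
    String group = superGroup;
    short mode = (short) 0755; // stand-in default; the patch uses FsPermission.getDirDefault()
    if (entry != null) {
      owner = entry.owner;
      group = entry.group;
      mode = entry.mode;
    }
    return owner + ":" + group + " " + Integer.toOctalString(mode);
  }

  public static void main(String[] args) {
    Entry entry = new Entry("owner1", "group1", (short) 0775);
    System.out.println(describeStatus(entry, "hdfs", "supergroup")); // owner1:group1 775
    System.out.println(describeStatus(null, "hdfs", "supergroup"));  // hdfs:supergroup 755
  }
}
```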
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index 4d8ffe10fc..d2b78d34ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -43,8 +44,12 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.util.Time;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -59,9 +64,11 @@ public class TestRouterMountTable {
   private static RouterContext routerContext;
   private static MountTableResolver mountTable;
   private static ClientProtocol routerProtocol;
+  private static long startTime;
 
   @BeforeClass
   public static void globalSetUp() throws Exception {
+    startTime = Time.now();
 
     // Build and start a federated cluster
     cluster = new StateStoreDFSCluster(false, 1);
@@ -92,6 +99,21 @@
     }
   }
 
+  @After
+  public void clearMountTable() throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    GetMountTableEntriesRequest req1 =
+        GetMountTableEntriesRequest.newInstance("/");
+    GetMountTableEntriesResponse response =
+        mountTableManager.getMountTableEntries(req1);
+    for (MountTable entry : response.getEntries()) {
+      RemoveMountTableEntryRequest req2 =
+          RemoveMountTableEntryRequest.newInstance(entry.getSourcePath());
+      mountTableManager.removeMountTableEntry(req2);
+    }
+  }
+
   @Test
   public void testReadOnly() throws Exception {
 
@@ -157,7 +179,6 @@
    */
   @Test
   public void testListFilesTime() throws Exception {
-    Long beforeCreatingTime = Time.now();
     // Add mount table entry
     MountTable addEntry = MountTable.newInstance(
         "/testdir", Collections.singletonMap("ns0", "/testdir"));
@@ -211,10 +232,40 @@
       Long expectedTime = pathModTime.get(currentFile);
 
       assertEquals(currentFile, fileName);
-      assertTrue(currentTime > beforeCreatingTime);
+      assertTrue(currentTime > startTime);
       assertEquals(currentTime, expectedTime);
     }
     // Verify the total number of results found/matched
     assertEquals(pathModTime.size(), listing.getPartialListing().length);
   }
+
+  /**
+   * Verify that the mount point status has the correct permission, owner and group.
+   */
+  @Test
+  public void testMountTablePermissions() throws Exception {
+    // Add mount table entries
+    MountTable addEntry = MountTable.newInstance(
+        "/testdir1", Collections.singletonMap("ns0", "/testdir1"));
+    addEntry.setGroupName("group1");
+    addEntry.setOwnerName("owner1");
+    addEntry.setMode(FsPermission.createImmutable((short)0775));
+    assertTrue(addMountTable(addEntry));
+    addEntry = MountTable.newInstance(
+        "/testdir2", Collections.singletonMap("ns0", "/testdir2"));
+    addEntry.setGroupName("group2");
+    addEntry.setOwnerName("owner2");
+    addEntry.setMode(FsPermission.createImmutable((short)0755));
+    assertTrue(addMountTable(addEntry));
+
+    HdfsFileStatus fs = routerProtocol.getFileInfo("/testdir1");
+    assertEquals("group1", fs.getGroup());
+    assertEquals("owner1", fs.getOwner());
+    assertEquals((short) 0775, fs.getPermission().toShort());
+
+    fs = routerProtocol.getFileInfo("/testdir2");
+    assertEquals("group2", fs.getGroup());
+    assertEquals("owner2", fs.getOwner());
+    assertEquals((short) 0755, fs.getPermission().toShort());
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index 7e0976016b..94b712f534 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -123,8 +123,9 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
     RouterContext rc = getRouterContext();
     Router router = rc.getRouter();
     FileSubclusterResolver subclusterResolver = router.getSubclusterResolver();
-    for (String mount : subclusterResolver.getMountPoints(path)) {
-      requiredPaths.add(mount);
+    List<String> mountList = subclusterResolver.getMountPoints(path);
+    if (mountList != null) {
+      requiredPaths.addAll(mountList);
     }
 
     // Get files/dirs from the Namenodes
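The TestRouterRpcMultiDestination hunk above is the caller-side consequence of the new contract: getMountPoints() may now return null, and a for-each over a null list throws NullPointerException, so the result has to be guarded. A standalone sketch of that pattern using plain collections rather than the RBF test harness:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

/** Illustrative only: null-safe handling of a getMountPoints()-style result. */
public class NullSafeMountPointsSketch {

  /** Add the mounts to the required paths only when the path was a mount point. */
  static void addMounts(Set<String> requiredPaths, List<String> mountList) {
    if (mountList != null) {
      requiredPaths.addAll(mountList);
    }
  }

  public static void main(String[] args) {
    Set<String> requiredPaths = new TreeSet<>();

    // Simulated resolver results under the new contract.
    List<String> underRoot = new ArrayList<>(Arrays.asList("testdir1", "testdir2"));
    List<String> underRegularDir = null; // not a mount point

    addMounts(requiredPaths, underRoot);
    addMounts(requiredPaths, underRegularDir);

    System.out.println(requiredPaths); // [testdir1, testdir2]
  }
}
```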