diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 795ddfb379..95aad12d92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -1003,7 +1004,7 @@ public static Path makePathFromFileId(long fileId) {
    * @param ugi {@link UserGroupInformation} of current user.
    * @return the home directory of current user.
    */
-  public static Path getHomeDirectory(Configuration conf,
+  public static String getHomeDirectory(Configuration conf,
       UserGroupInformation ugi) {
     String userHomePrefix = HdfsClientConfigKeys
         .DFS_USER_HOME_DIR_PREFIX_DEFAULT;
@@ -1012,6 +1013,31 @@ public static Path getHomeDirectory(Configuration conf,
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
     }
-    return new Path(userHomePrefix + "/" + ugi.getShortUserName());
+    return userHomePrefix + Path.SEPARATOR + ugi.getShortUserName();
+  }
+
+  /**
+   * Returns trash root in non-encryption zone.
+   * @param conf configuration.
+   * @param ugi user of trash owner.
+   * @return unqualified path of trash root.
+   */
+  public static String getTrashRoot(Configuration conf,
+      UserGroupInformation ugi) {
+    return getHomeDirectory(conf, ugi)
+        + Path.SEPARATOR + FileSystem.TRASH_PREFIX;
+  }
+
+  /**
+   * Returns trash root in encryption zone.
+   * @param ez encryption zone.
+   * @param ugi user of trash owner.
+   * @return unqualified path of trash root.
+   */
+  public static String getEZTrashRoot(EncryptionZone ez,
+      UserGroupInformation ugi) {
+    String ezpath = ez.getPath();
+    return (ezpath.equals("/") ? ezpath : ezpath + Path.SEPARATOR)
+        + FileSystem.TRASH_PREFIX + Path.SEPARATOR + ugi.getShortUserName();
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 41f176318a..9628f4c9a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -220,7 +220,8 @@ public void setWorkingDirectory(Path dir) {
 
   @Override
   public Path getHomeDirectory() {
-    return makeQualified(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi));
+    return makeQualified(
+        new Path(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi)));
   }
 
   /**
@@ -3236,8 +3237,7 @@ public Path getTrashRoot(Path path) {
       EncryptionZone ez = dfs.getEZForPath(parentSrc);
       if ((ez != null)) {
         return this.makeQualified(
-            new Path(new Path(ez.getPath(), FileSystem.TRASH_PREFIX),
-                dfs.ugi.getShortUserName()));
+            new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi)));
       }
     } catch (IOException e) {
       DFSClient.LOG.warn("Exception in checking the encryption zone for the " +
@@ -3264,7 +3264,8 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) {
       // Get EZ Trash roots
       final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
       while (it.hasNext()) {
-        Path ezTrashRoot = new Path(it.next().getPath(),
+        EncryptionZone ez = it.next();
+        Path ezTrashRoot = new Path(ez.getPath(),
             FileSystem.TRASH_PREFIX);
         if (!exists(ezTrashRoot)) {
           continue;
@@ -3276,7 +3277,7 @@ public Collection<FileStatus> getTrashRoots(boolean allUsers) {
           }
         }
       } else {
-        Path userTrash = new Path(ezTrashRoot, dfs.ugi.getShortUserName());
+        Path userTrash = new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi));
         try {
           ret.add(getFileStatus(userTrash));
         } catch (FileNotFoundException ignored) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 78eba410bd..2423a037c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -65,7 +65,6 @@
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
@@ -81,6 +80,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1244,7 +1244,7 @@ protected Response get(
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETHOMEDIRECTORY: {
-      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi).toString();
+      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi);
       final String js = JsonUtil.toJsonString("Path", userHome);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1285,7 +1285,7 @@ protected Response get(
       return Response.ok().build();
     }
     case GETTRASHROOT: {
-      final String trashPath = getTrashRoot(fullpath, conf);
+      final String trashPath = getTrashRoot(conf, fullpath);
       final String jsonStr = JsonUtil.toJsonString("Path", trashPath);
       return Response.ok(jsonStr).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1345,11 +1345,39 @@ protected Response get(
     }
   }
 
-  private static String getTrashRoot(String fullPath,
-      Configuration conf) throws IOException {
-    FileSystem fs = FileSystem.get(conf != null ? conf : new Configuration());
-    return fs.getTrashRoot(
-        new org.apache.hadoop.fs.Path(fullPath)).toUri().getPath();
+  private String getTrashRoot(Configuration conf, String fullPath)
+      throws IOException {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    String parentSrc = getParent(fullPath);
+    EncryptionZone ez = getRpcClientProtocol().getEZForPath(
+        parentSrc != null ? parentSrc : fullPath);
+    String trashRoot;
+    if (ez != null) {
+      trashRoot = DFSUtilClient.getEZTrashRoot(ez, ugi);
+    } else {
+      trashRoot = DFSUtilClient.getTrashRoot(conf, ugi);
+    }
+    return trashRoot;
+  }
+
+  /**
+   * Returns the parent of a path in the same way as Path#getParent.
+   * @return the parent of a path or null if at root
+   */
+  public String getParent(String path) {
+    int lastSlash = path.lastIndexOf('/');
+    int start = 0;
+    if ((path.length() == start) || // empty path
+        (lastSlash == start && path.length() == start + 1)) { // at root
+      return null;
+    }
+    String parent;
+    if (lastSlash == -1) {
+      parent = org.apache.hadoop.fs.Path.CUR_DIR;
+    } else {
+      parent = path.substring(0, lastSlash == start ? start + 1 : lastSlash);
+    }
+    return parent;
   }
 
   private static DirectoryListing getDirectoryListing(final ClientProtocol cp,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 6493b1e334..2b9b51fb5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -34,6 +34,7 @@
 import static org.junit.Assert.fail;
 
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -48,6 +49,7 @@
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Random;
@@ -62,11 +64,13 @@
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -85,6 +89,8 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -1535,6 +1541,53 @@ public void testGetTrashRoot() throws Exception {
     assertEquals(expectedPath.toUri().getPath(),
         trashPath.toUri().getPath());
   }
+
+  @Test
+  public void testGetEZTrashRoot() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
+    File testRootDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        "jceks://file" + new Path(testRootDir.toString(), "test.jks").toUri());
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+        conf, WebHdfsConstants.WEBHDFS_SCHEME);
+    HdfsAdmin dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    dfs.getClient().setKeyProvider(
+        cluster.getNameNode().getNamesystem().getProvider());
+    final String testkey = "test_key";
+    DFSTestUtil.createKey(testkey, cluster, conf);
+
+    final Path zone1 = new Path("/zone1");
+    dfs.mkdirs(zone1, new FsPermission((short) 0700));
+    dfsAdmin.createEncryptionZone(zone1, testkey,
+        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
+
+    final Path insideEZ = new Path(zone1, "insideEZ");
+    dfs.mkdirs(insideEZ, new FsPermission((short) 0700));
+    assertEquals(
+        dfs.getTrashRoot(insideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(insideEZ).toUri().getPath());
+
+    final Path outsideEZ = new Path("/outsideEZ");
+    dfs.mkdirs(outsideEZ, new FsPermission((short) 0755));
+    assertEquals(
+        dfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath());
+
+    final Path root = new Path("/");
+    assertEquals(
+        dfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(root).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+  }
 
   @Test
   public void testStoragePolicy() throws Exception {
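
Note (illustrative, not part of the patch): the sketch below shows how the reworked server-side GETTRASHROOT resolution surfaces through the public FileSystem API. The NameNode address (localhost:9870, the default HTTP port in Hadoop 3.x) and the /zone1 encryption zone are assumptions matching testGetEZTrashRoot above; calling FileSystem#getTrashRoot on a webhdfs:// URI is what issues the GETTRASHROOT request handled by NamenodeWebHdfsMethods.

// Illustrative client-side sketch only; assumes a running cluster with an
// encryption zone at /zone1 provisioned with a trash directory.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TrashRootExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A webhdfs:// URI resolves to WebHdfsFileSystem, whose getTrashRoot
    // sends GETTRASHROOT to the NameNode instead of computing it locally.
    FileSystem webhdfs = FileSystem.get(
        URI.create("webhdfs://localhost:9870"), conf);
    // Inside an encryption zone the trash root is per-zone:
    // /zone1/.Trash/<user> (DFSUtilClient.getEZTrashRoot).
    System.out.println(webhdfs.getTrashRoot(new Path("/zone1/file")));
    // Outside any encryption zone it is the per-user default:
    // /user/<user>/.Trash (DFSUtilClient.getTrashRoot).
    System.out.println(webhdfs.getTrashRoot(new Path("/outsideEZ/file")));
  }
}

Run against a cluster like the one the test spins up, the two calls should print /zone1/.Trash/<user> and /user/<user>/.Trash respectively, mirroring what DistributedFileSystem#getTrashRoot returns for the same paths.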