diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 09fb493a06..65b49cc9cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -148,6 +148,7 @@ public class HttpFSFileSystem extends FileSystem
   public static final String EC_POLICY_NAME_PARAM = "ecpolicy";
   public static final String OFFSET_PARAM = "offset";
   public static final String LENGTH_PARAM = "length";
+  public static final String ALLUSERS_PARAM = "allusers";
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final String ACLSPEC_DEFAULT = "";
@@ -287,6 +288,7 @@ public enum Operation {
     GETSTATUS(HTTP_GET),
     GETECPOLICIES(HTTP_GET),
     GETECCODECS(HTTP_GET),
+    GETTRASHROOTS(HTTP_GET),
     GET_BLOCK_LOCATIONS(HTTP_GET);
 
     private String httpMethod;
@@ -1798,6 +1800,22 @@ public Map<String, String> getAllErasureCodingCodecs() throws IOException {
     return JsonUtilClient.getErasureCodeCodecs(json);
   }
 
+  public Collection<FileStatus> getTrashRoots(boolean allUsers) {
+    Map<String, String> params = new HashMap<>();
+    params.put(OP_PARAM, Operation.GETTRASHROOTS.toString());
+    params.put(ALLUSERS_PARAM, Boolean.toString(allUsers));
+    Path path = new Path(getUri().toString(), "/");
+    try {
+      HttpURLConnection conn = getConnection(Operation.GETTRASHROOTS.getMethod(),
+          params, path, true);
+      HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+      JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+      return JsonUtilClient.getTrashRoots(json);
+    } catch (IOException e) {
+      return super.getTrashRoots(allUsers);
+    }
+  }
+
   @VisibleForTesting
   static BlockLocation[] toBlockLocations(JSONObject json) throws IOException {
     ObjectMapper mapper = new ObjectMapper();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 7261504820..348b0bd1ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -2396,4 +2396,32 @@ public Map execute(FileSystem fs) throws IOException {
       return ecCodecs;
     }
   }
+
+  /**
+   * Executor that performs a FSGetTrashRoots operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSGetTrashRoots
+      implements FileSystemAccess.FileSystemExecutor<Map> {
+    private final boolean allUsers;
+
+    public FSGetTrashRoots(boolean allUsers) {
+      this.allUsers = allUsers;
+    }
+
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      Map<String, Collection<FileStatus>> paths = new HashMap<>();
+      if (fs instanceof DistributedFileSystem) {
+        DistributedFileSystem dfs = (DistributedFileSystem) fs;
+        paths.put("Paths", dfs.getTrashRoots(allUsers));
+      } else {
+        throw new UnsupportedOperationException("getTrashRoots is " +
+            "not supported for HttpFs on " + fs.getClass() +
+            ". Please check your fs.defaultFS configuration");
+      }
+      HttpFSServerWebApp.get().getMetrics().incrOpsTrashRoots();
+      return paths;
+    }
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 87f2d4d3da..3f1058da2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -132,6 +132,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.GETSTATUS, new Class[]{});
     PARAMS_DEF.put(Operation.GETECPOLICIES, new Class[]{});
     PARAMS_DEF.put(Operation.GETECCODECS, new Class[]{});
+    PARAMS_DEF.put(Operation.GETTRASHROOTS, new Class[]{AllUsersParam.class});
     PARAMS_DEF.put(Operation.GET_BLOCK_LOCATIONS,
         new Class[] {OffsetParam.class, LenParam.class});
   }
@@ -765,4 +766,22 @@ public ECPolicyParam() {
       super(NAME, null);
     }
   }
+
+  /**
+   * Class for allusers parameter.
+   */
+  @InterfaceAudience.Private
+  public static class AllUsersParam extends BooleanParam {
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.ALLUSERS_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public AllUsersParam() {
+      super(NAME, false);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index c9276aae95..57a79a1847 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrNameParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrSetFlagParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrValueParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AllUsersParam;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.http.JettyUtils;
@@ -576,6 +577,14 @@ public InputStream run() throws Exception {
       response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETTRASHROOTS: {
+      Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
+      FSOperations.FSGetTrashRoots command = new FSOperations.FSGetTrashRoots(allUsers);
+      Map json = fsExecute(user, command);
+      AUDIT_LOG.info("allUsers [{}]", allUsers);
+      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     default: {
       throw new IOException(
           MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
index 6e01c5d279..8314cd7353 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
@@ -67,6 +67,7 @@ public class HttpFSServerMetrics {
   private @Metric MutableCounterLong opsStatus;
   private @Metric MutableCounterLong opsAllECPolicies;
   private @Metric MutableCounterLong opsECCodecs;
+  private @Metric MutableCounterLong opsTrashRoots;
 
   private final MetricsRegistry registry = new MetricsRegistry("httpfsserver");
   private final String name;
@@ -175,4 +176,8 @@ public void incrOpsAllECPolicies() {
   public void incrOpsECCodecs() {
     opsECCodecs.incr();
   }
+
+  public void incrOpsTrashRoots() {
+    opsTrashRoots.incr();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 0283d1d4ad..4c1d2d176b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -1219,7 +1219,7 @@ protected enum Operation {
     FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST,
     GET_SNAPSHOT_LIST, GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY,
     SATISFYSTORAGEPOLICY, GET_SNAPSHOT_DIFF_LISTING, GETFILEBLOCKLOCATIONS,
-    GETFILELINKSTATUS, GETSTATUS, GETECPOLICIES, GETECCODECS
+    GETFILELINKSTATUS, GETSTATUS, GETECPOLICIES, GETECCODECS, GETTRASHROOTS
   }
   @SuppressWarnings("methodlength")
   private void operation(Operation op) throws Exception {
@@ -1374,6 +1374,9 @@ private void operation(Operation op) throws Exception {
     case GETECCODECS:
      testGetECCodecs();
      break;
+    case GETTRASHROOTS:
+      testGetTrashRoots();
+      break;
     }
   }
 
@@ -2201,6 +2204,45 @@ private void testGetECCodecs() throws Exception {
     }
   }
 
+  private void testGetTrashRoots() throws Exception {
+    if (isLocalFS()) {
+      // do not test getTrashRoots for local FS
+      return;
+    }
+    final Path path = new Path("/");
+    FileSystem fs = FileSystem.get(path.toUri(), this.getProxiedFSConf());
+    if (fs instanceof DistributedFileSystem) {
+      DistributedFileSystem dfs =
+          (DistributedFileSystem) FileSystem.get(path.toUri(), this.getProxiedFSConf());
+      FileSystem httpFs = this.getHttpFSFileSystem();
+
+      // Create trash root for user0
+      UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user0");
+      String user0HomeStr = DFSUtilClient.getHomeDirectory(this.getProxiedFSConf(), ugi);
+      Path user0Trash = new Path(user0HomeStr, FileSystem.TRASH_PREFIX);
+      dfs.mkdirs(user0Trash);
+
+      Collection<FileStatus> dfsTrashRoots = dfs.getTrashRoots(true);
+      Collection<FileStatus> diffTrashRoots = null;
+
+      if (httpFs instanceof HttpFSFileSystem) {
+        HttpFSFileSystem httpFS = (HttpFSFileSystem) httpFs;
+        diffTrashRoots = httpFS.getTrashRoots(true);
+      } else if (httpFs instanceof WebHdfsFileSystem) {
+        WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) httpFs;
+        diffTrashRoots = webHdfsFileSystem.getTrashRoots(true);
+      } else {
+        Assert.fail(fs.getClass().getSimpleName() +
+            " is not of type HttpFSFileSystem or WebHdfsFileSystem");
+      }
+
+      // Validate getTrashRoots are the same as DistributedFileSystem
+      assertEquals(dfsTrashRoots.size(), diffTrashRoots.size());
+    } else {
+      Assert.fail(fs.getClass().getSimpleName() + " is not of type DistributedFileSystem.");
+    }
+  }
+
   private void assertHttpFsReportListingWithDfsClient(SnapshotDiffReportListing diffReportListing,
       SnapshotDiffReportListing dfsDiffReportListing) {
     Assert.assertEquals(diffReportListing.getCreateList().size(),
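
Reviewer note: below is a minimal standalone sketch for smoke-testing the new operation against a running HttpFS gateway, not part of the patch. The gateway address (localhost:14000 is the conventional HttpFS port), pseudo authentication, and the class name GetTrashRootsSmokeTest are assumptions for illustration. Binding the webhdfs:// scheme to HttpFSFileSystem via fs.webhdfs.impl mirrors what BaseTestHttpFSWith#getHttpFSFileSystem does, so the call goes through the client-side getTrashRoots added above and reaches the server's GETTRASHROOTS handler.

import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;

public class GetTrashRootsSmokeTest {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Route webhdfs:// through HttpFSFileSystem instead of the default
    // WebHdfsFileSystem so the client method added in this patch is exercised.
    conf.set("fs.webhdfs.impl", HttpFSFileSystem.class.getName());
    try (FileSystem fs = FileSystem.newInstance(
        URI.create("webhdfs://localhost:14000"), conf)) {
      // Sends GET /webhdfs/v1/?op=GETTRASHROOTS&allusers=true to the gateway;
      // the server wraps the trash-root FileStatus entries under the "Paths"
      // key, and the opsTrashRoots metric should increment once per call.
      Collection<FileStatus> roots = fs.getTrashRoots(true);
      for (FileStatus root : roots) {
        System.out.println(root.getPath());
      }
    }
  }
}

If the gateway's fs.defaultFS does not point at a DistributedFileSystem, the request is expected to fail with the UnsupportedOperationException raised by FSGetTrashRoots.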