diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 3904a87a6b..d80c276d16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtilClient;
 import org.apache.hadoop.lib.wsrs.EnumSetParam;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -234,7 +235,8 @@ public enum Operation {
     SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST),
     ALLOWSNAPSHOT(HTTP_PUT), DISALLOWSNAPSHOT(HTTP_PUT),
     CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
-    RENAMESNAPSHOT(HTTP_PUT), GETSNAPSHOTDIFF(HTTP_GET);
+    RENAMESNAPSHOT(HTTP_PUT), GETSNAPSHOTDIFF(HTTP_GET),
+    GETSNAPSHOTTABLEDIRECTORYLIST(HTTP_GET);
 
     private String httpMethod;
 
@@ -1482,4 +1484,16 @@ public SnapshotDiffReport getSnapshotDiffReport(Path path,
     return JsonUtilClient.toSnapshotDiffReport(json);
   }
 
+  public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
+      throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.GETSNAPSHOTTABLEDIRECTORYLIST.toString());
+    HttpURLConnection conn = getConnection(
+        Operation.GETSNAPSHOTTABLEDIRECTORYLIST.getMethod(),
+        params, new Path(getUri().toString(), "/"), true);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return JsonUtilClient.toSnapshottableDirectoryList(json);
+  }
+
 }
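The client method above mirrors DistributedFileSystem#getSnapshottableDirListing(). A minimal usage sketch, assuming an HttpFS server on localhost:14000 (the default port) and binding the webhdfs:// scheme to the HttpFS client the same way the tests below do; the host, port, and class name ListSnapshottableDirs are illustrative:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public class ListSnapshottableDirs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Route webhdfs:// through the HttpFS client implementation.
    conf.set("fs.webhdfs.impl", HttpFSFileSystem.class.getName());
    try (FileSystem fs = FileSystem.get(
        URI.create("webhdfs://localhost:14000"), conf)) {
      SnapshottableDirectoryStatus[] dirs =
          ((HttpFSFileSystem) fs).getSnapshottableDirectoryList();
      for (SnapshottableDirectoryStatus status : dirs) {
        System.out.println(status.getFullPath());
      }
    }
  }
}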
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index ed7628feb0..7989895d3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.lib.service.FileSystemAccess;
@@ -1701,4 +1702,38 @@ public String execute(FileSystem fs) throws IOException {
       }
     }
   }
+
+  /**
+   * Executor that performs a getSnapshottableDirListing operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSGetSnapshottableDirListing implements
+      FileSystemAccess.FileSystemExecutor<String> {
+
+    /**
+     * Creates a getSnapshottableDirListing executor.
+     */
+    public FSGetSnapshottableDirListing() {
+    }
+
+    /**
+     * Executes the filesystem operation.
+     * @param fs filesystem instance to use.
+     * @return A JSON string of all snapshottable directories.
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public String execute(FileSystem fs) throws IOException {
+      SnapshottableDirectoryStatus[] sds = null;
+      if (fs instanceof DistributedFileSystem) {
+        DistributedFileSystem dfs = (DistributedFileSystem) fs;
+        sds = dfs.getSnapshottableDirListing();
+      } else {
+        throw new UnsupportedOperationException("getSnapshottableDirListing is "
+            + "not supported for HttpFs on " + fs.getClass()
+            + ". Please check your fs.defaultFS configuration");
+      }
+      return JsonUtil.toJsonString(sds);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 5301527e50..754ae2b288 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -113,6 +113,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.GETSNAPSHOTDIFF,
         new Class[] {OldSnapshotNameParam.class,
             SnapshotNameParam.class});
+    PARAMS_DEF.put(Operation.GETSNAPSHOTTABLEDIRECTORYLIST, new Class[] {});
   }
 
   public HttpFSParametersProvider() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index f51006125b..2895210497 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -379,6 +379,14 @@ public InputStream run() throws Exception {
       response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETSNAPSHOTTABLEDIRECTORYLIST: {
+      FSOperations.FSGetSnapshottableDirListing command =
+          new FSOperations.FSGetSnapshottableDirListing();
+      String js = fsExecute(user, command);
+      AUDIT_LOG.info("[{}]", "/");
+      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     default: {
       throw new IOException(
           MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
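With the executor, the parameter definition, and the GET dispatch above in place, the operation is reachable through the standard HttpFS REST endpoint. A raw round-trip sketch in plain Java; the host, port, and user name are illustrative, and the single-line JSON body is whatever JsonUtil.toJsonString(SnapshottableDirectoryStatus[]) produced on the server:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class RawSnapshottableDirListRequest {
  public static void main(String[] args) throws Exception {
    // Equivalent to:
    // curl 'http://localhost:14000/webhdfs/v1/?op=GETSNAPSHOTTABLEDIRECTORYLIST&user.name=hdfs'
    URL url = new URL("http://localhost:14000/webhdfs/v1/"
        + "?op=GETSNAPSHOTTABLEDIRECTORYLIST&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      System.out.println(in.readLine()); // one-line JSON response
    }
  }
}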
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index cd9d3b91e2..a23ca7abbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -41,6 +41,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1071,7 +1073,7 @@ protected enum Operation {
     GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING,
     CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT,
     ALLOW_SNAPSHOT, DISALLOW_SNAPSHOT, DISALLOW_SNAPSHOT_EXCEPTION,
-    FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF
+    FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST
   }
 
   private void operation(Operation op) throws Exception {
@@ -1185,6 +1187,9 @@ private void operation(Operation op) throws Exception {
       testGetSnapshotDiff();
       testGetSnapshotDiffIllegalParam();
       break;
+    case GET_SNAPSHOTTABLE_DIRECTORY_LIST:
+      testGetSnapshottableDirListing();
+      break;
     }
   }
 
@@ -1529,4 +1534,51 @@ private void testGetSnapshotDiffIllegalParam() throws Exception {
       fs.delete(path, true);
     }
   }
+
+  private void verifyGetSnapshottableDirListing(
+      FileSystem fs, DistributedFileSystem dfs) throws Exception {
+    // Get snapshottable directory list
+    SnapshottableDirectoryStatus[] sds = null;
+    if (fs instanceof HttpFSFileSystem) {
+      HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
+      sds = httpFS.getSnapshottableDirectoryList();
+    } else if (fs instanceof WebHdfsFileSystem) {
+      WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
+      sds = webHdfsFileSystem.getSnapshottableDirectoryList();
+    } else {
+      Assert.fail(fs.getClass().getSimpleName()
+          + " doesn't support getSnapshottableDirListing");
+    }
+    // Verify result with DFS
+    SnapshottableDirectoryStatus[] dfssds = dfs.getSnapshottableDirListing();
+    Assert.assertEquals(JsonUtil.toJsonString(sds),
+        JsonUtil.toJsonString(dfssds));
+  }
+
+  private void testGetSnapshottableDirListing() throws Exception {
+    if (!this.isLocalFS()) {
+      FileSystem fs = this.getHttpFSFileSystem();
+      // Create directories with snapshot allowed
+      Path path1 = new Path("/tmp/tmp-snap-dirlist-test-1");
+      DistributedFileSystem dfs = (DistributedFileSystem)
+          FileSystem.get(path1.toUri(), this.getProxiedFSConf());
+      // Verify response when there is no snapshottable directory
+      verifyGetSnapshottableDirListing(fs, dfs);
+      createSnapshotTestsPreconditions(path1);
+      Assert.assertTrue(fs.getFileStatus(path1).isSnapshotEnabled());
+      // Verify response when there is one snapshottable directory
+      verifyGetSnapshottableDirListing(fs, dfs);
+      Path path2 = new Path("/tmp/tmp-snap-dirlist-test-2");
+      createSnapshotTestsPreconditions(path2);
+      Assert.assertTrue(fs.getFileStatus(path2).isSnapshotEnabled());
+      // Verify response when there are two snapshottable directories
+      verifyGetSnapshottableDirListing(fs, dfs);
+
+      // Clean up and verify
+      fs.delete(path2, true);
+      verifyGetSnapshottableDirListing(fs, dfs);
+      fs.delete(path1, true);
+      verifyGetSnapshottableDirListing(fs, dfs);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index f024c95fc5..8f7662fc32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -20,6 +20,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
@@ -1400,4 +1401,56 @@ public void testGetSnapshotDiffIllegalParam() throws Exception {
     // Clean up
     dfs.delete(path, true);
   }
+
+  private void verifyGetSnapshottableDirectoryList(DistributedFileSystem dfs)
+      throws Exception {
+    // Send a request
+    HttpURLConnection conn = sendRequestToHttpFSServer("/",
+        "GETSNAPSHOTTABLEDIRECTORYLIST", "");
+    // Should return HTTP_OK
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    // Verify the response
+    BufferedReader reader =
+        new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    // The response should be a one-line JSON string.
+    String dirLst = reader.readLine();
+    // Verify the content of the directory list with the DFS API.
+    SnapshottableDirectoryStatus[] dfsDirLst = dfs.getSnapshottableDirListing();
+    Assert.assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testGetSnapshottableDirectoryList() throws Exception {
+    createHttpFSServer(false, false);
+    // Create test directories
+    String pathStr1 = "/tmp/tmp-snap-dirlist-test-1";
+    createDirWithHttp(pathStr1, "700", null);
+    Path path1 = new Path(pathStr1);
+    String pathStr2 = "/tmp/tmp-snap-dirlist-test-2";
+    createDirWithHttp(pathStr2, "700", null);
+    Path path2 = new Path(pathStr2);
+    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
+        path1.toUri(), TestHdfsHelper.getHdfsConf());
+    // Verify response when there is no snapshottable directory
+    verifyGetSnapshottableDirectoryList(dfs);
+    // Enable snapshot for path1
+    dfs.allowSnapshot(path1);
+    Assert.assertTrue(dfs.getFileStatus(path1).isSnapshotEnabled());
+    // Verify response when there is one snapshottable directory
+    verifyGetSnapshottableDirectoryList(dfs);
+    // Enable snapshot for path2
+    dfs.allowSnapshot(path2);
+    Assert.assertTrue(dfs.getFileStatus(path2).isSnapshotEnabled());
+    // Verify response when there are two snapshottable directories
+    verifyGetSnapshottableDirectoryList(dfs);
+
+    // Clean up and verify
+    dfs.delete(path2, true);
+    verifyGetSnapshottableDirectoryList(dfs);
+    dfs.delete(path1, true);
+    verifyGetSnapshottableDirectoryList(dfs);
+  }
 }
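For reference, the one-line body that verifyGetSnapshottableDirectoryList() reads back is the serialization produced by JsonUtil.toJsonString(SnapshottableDirectoryStatus[]). Going by the WebHDFS documentation for this operation, a listing with one snapshottable directory should look roughly like the sketch below (field names assumed from those docs, values illustrative, the full dirStatus object elided):

{
  "SnapshottableDirectoryList": [
    {
      "dirStatus": { "pathSuffix": "tmp-snap-dirlist-test-1", "type": "DIRECTORY", ... },
      "parentFullPath": "/tmp",
      "snapshotNumber": 0,
      "snapshotQuota": 65536
    }
  ]
}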