From 0c77629849c4ea8f40f3cc14fb1c6f5e79ba384b Mon Sep 17 00:00:00 2001
From: zhtttylz
Date: Sat, 13 May 2023 06:19:13 +0800
Subject: [PATCH] HDFS-17001. Support getStatus API in WebHDFS (#5628).
 Contributed by Hualong Zhang.

Reviewed-by: Shilun Fan
Signed-off-by: Ayush Saxena
---
 .../org/apache/hadoop/hdfs/DFSClient.java     |  2 +-
 .../hadoop/hdfs/web/JsonUtilClient.java       | 13 +++++++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    | 14 ++++++++
 .../hadoop/hdfs/web/resources/GetOpParam.java |  1 +
 .../router/RouterWebHdfsMethods.java          |  1 +
 .../web/resources/NamenodeWebHdfsMethods.java | 12 +++++++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java  | 16 +++++++++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md  | 34 ++++++++++++++++++
 .../apache/hadoop/hdfs/web/TestWebHDFS.java   | 36 +++++++++++++++++++
 9 files changed, 128 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8faeebe8e8..231b232661 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2056,7 +2056,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  private long getStateAtIndex(long[] states, int index) {
+  public static long getStateAtIndex(long[] states, int index) {
    return states.length > index ? states[index] : -1;
  }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index c3dd556ba5..c1ab686457 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
@@ -822,6 +823,18 @@ public class JsonUtilClient {
         diffList);
   }
+  public static FsStatus toFsStatus(Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+    Map<?, ?> m =
+        (Map<?, ?>) json.get(FsStatus.class.getSimpleName());
+    long capacity = getLong(m, "capacity", Long.MAX_VALUE);
+    long used = getLong(m, "used", 0);
+    long remaining = getLong(m, "remaining", Long.MAX_VALUE);
+    return new FsStatus(capacity, used, remaining);
+  }
+
   private static List<DiffReportListingEntry> toDiffList(
       List<?> objs) {
     if (objs == null) {
       return null;
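
A quick illustration of the decode path just added to JsonUtilClient (a sketch with made-up numbers, not part of the patch): toFsStatus expects the payload keyed by FsStatus.class.getSimpleName(), and getLong falls back to Long.MAX_VALUE for a missing capacity/remaining and to 0 for a missing used.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.fs.FsStatus;
    import org.apache.hadoop.hdfs.web.JsonUtilClient;

    public class FsStatusDecodeSketch {
      public static void main(String[] args) {
        // Same shape the NameNode returns:
        // {"FsStatus": {"capacity": ..., "used": ..., "remaining": ...}}
        Map<String, Object> inner = new HashMap<>();
        inner.put("capacity", 322122547200L);
        inner.put("used", 29229154304L);
        inner.put("remaining", 292893392896L);
        Map<String, Object> json = new HashMap<>();
        json.put("FsStatus", inner);  // key matches FsStatus.class.getSimpleName()

        FsStatus status = JsonUtilClient.toFsStatus(json);
        System.out.println(status.getCapacity());   // 322122547200
        System.out.println(status.getUsed());       // 29229154304
        System.out.println(status.getRemaining());  // 292893392896
      }
    }
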
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index f5a54dd9be..105d208680 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.MultipartUploaderBuilder;
@@ -2178,6 +2179,19 @@ public class WebHdfsFileSystem extends FileSystem
     return status.makeQualified(getUri(), f);
   }
 
+  @Override
+  public FsStatus getStatus(Path path) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STATUS);
+    final GetOpParam.Op op = GetOpParam.Op.GETSTATUS;
+    return new FsPathResponseRunner<FsStatus>(op, path) {
+      @Override
+      FsStatus decodeResponse(Map<?, ?> json) {
+        return JsonUtilClient.toFsStatus(json);
+      }
+    }.run();
+  }
+
   @VisibleForTesting
   InetSocketAddress[] getResolvedNNAddr() {
     return nnAddrs;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index c1dcd76b15..832028fcca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -66,6 +66,7 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETSNAPSHOTTABLEDIRECTORYLIST(false, HttpURLConnection.HTTP_OK),
     GETLINKTARGET(false, HttpURLConnection.HTTP_OK),
     GETFILELINKSTATUS(false, HttpURLConnection.HTTP_OK),
+    GETSTATUS(false, HttpURLConnection.HTTP_OK),
     GETSNAPSHOTLIST(false, HttpURLConnection.HTTP_OK);
 
     final boolean redirect;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
index 888def5e63..42962290da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
@@ -387,6 +387,7 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
     case CHECKACCESS:
     case GETLINKTARGET:
     case GETFILELINKSTATUS:
+    case GETSTATUS:
     {
       return super.get(ugi, delegation, username, doAsUser, fullpath, op,
           offset, length, renewer, bufferSize, xattrNames, xattrEncoding,
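
The two hunks above are wiring: GetOpParam names the operation on the query string, and the router case simply falls through to the NameNode handler that follows. A small sketch (illustrative, not part of the patch) of what the new enum constant encodes:

    import org.apache.hadoop.hdfs.web.resources.GetOpParam;

    public class GetStatusOpSketch {
      public static void main(String[] args) {
        GetOpParam.Op op = GetOpParam.Op.GETSTATUS;
        System.out.println(op.getType());                     // GET
        System.out.println(op.getRedirect());                 // false: answered by the NameNode, no DataNode redirect
        System.out.println(op.getExpectedHttpResponseCode()); // 200, i.e. HttpURLConnection.HTTP_OK
        System.out.println(op.toQueryString());               // op=GETSTATUS
      }
    }

Because the redirect flag is false, the router's delegation to super.get(...) lands directly in the NamenodeWebHdfsMethods handler added below.
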
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index cd69f4ebd6..2b4cb0b452 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -73,9 +73,11 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -1396,6 +1398,15 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(status, true);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETSTATUS: {
+      long[] states = cp.getStats();
+      FsStatus status = new FsStatus(
+          DFSClient.getStateAtIndex(states, 0),
+          DFSClient.getStateAtIndex(states, 1),
+          DFSClient.getStateAtIndex(states, 2));
+      final String js = JsonUtil.toJsonString(status);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
@@ -1535,6 +1546,7 @@ public class NamenodeWebHdfsMethods {
     };
   }
 
+
   /** Handle HTTP DELETE request for the root. */
   @DELETE
   @Path("/")
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index b91399e399..4ef54383de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
@@ -725,4 +726,19 @@ public class JsonUtil {
     m.put(BlockLocation.class.getSimpleName(), blockLocations);
     return m;
   }
+
+  public static String toJsonString(FsStatus status) {
+    return toJsonString(FsStatus.class, toJsonMap(status));
+  }
+
+  public static Map<String, Object> toJsonMap(FsStatus status) {
+    if (status == null) {
+      return null;
+    }
+    final Map<String, Object> m = new HashMap<>();
+    m.put("capacity", status.getCapacity());
+    m.put("used", status.getUsed());
+    m.put("remaining", status.getRemaining());
+    return m;
+  }
 }
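
Encode and decode are intentionally symmetric: JsonUtil.toJsonString(FsStatus) on the NameNode produces exactly the map that JsonUtilClient.toFsStatus consumes. A round-trip sketch (not part of the patch; assumes Jackson, already a Hadoop dependency, for the parse step):

    import java.util.Map;

    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.hadoop.fs.FsStatus;
    import org.apache.hadoop.hdfs.web.JsonUtil;
    import org.apache.hadoop.hdfs.web.JsonUtilClient;

    public class FsStatusRoundTripSketch {
      public static void main(String[] args) throws Exception {
        FsStatus in = new FsStatus(322122547200L, 29229154304L, 292893392896L);
        // Server side: {"FsStatus":{"capacity":...,"used":...,"remaining":...}}
        String json = JsonUtil.toJsonString(in);

        // Client side: parse the body back into a Map and decode it.
        Map<?, ?> parsed = new ObjectMapper().readValue(json, Map.class);
        FsStatus out = JsonUtilClient.toFsStatus(parsed);
        System.out.println(out.getCapacity() == in.getCapacity());   // true
        System.out.println(out.getUsed() == in.getUsed());           // true
        System.out.println(out.getRemaining() == in.getRemaining()); // true
      }
    }
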
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 3d22876946..b353f132cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -60,6 +60,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
     * [`GETSERVERDEFAULTS`](#Get_Server_Defaults) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getServerDefaults)
     * [`GETLINKTARGET`](#Get_Link_Target) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getLinkTarget)
     * [`GETFILELINKSTATUS`](#Get_File_Link_Status) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileLinkStatus)
+    * [`GETSTATUS`](#Get_Status) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStatus)
 * HTTP PUT
     * [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
     * [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1190,6 +1191,28 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getLinkTa
 
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileLinkInfo
 
+### Get Status
+
+* Submit a HTTP GET request.
+
+        curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETSTATUS"
+
+    The client receives a response with a [`FsStatus` JSON object](#FsStatus_JSON_Schema):
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+        Transfer-Encoding: chunked
+
+        {
+          "FsStatus": {
+            "used": 29229154304,
+            "remaining": 292893392896,
+            "capacity": 322122547200
+          }
+        }
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStatus
+
 Storage Policy Operations
 -------------------------
 
@@ -3141,6 +3164,17 @@ var blockLocationProperties =
   }
 }
 ```
+### FsStatus JSON Schema
+
+```json
+{
+  "FsStatus": {
+    "used": 29229154304,
+    "remaining": 292893392896,
+    "capacity": 322122547200
+  }
+}
+```
 
 HTTP Query Parameter Dictionary
 -------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index aec7545fab..d74b180f4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -88,6 +88,7 @@ import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -2255,6 +2256,41 @@ public class TestWebHDFS {
     }
   }
 
+  @Test
+  public void testFsStatus() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+
+      final WebHdfsFileSystem webHdfs =
+          WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+              WebHdfsConstants.WEBHDFS_SCHEME);
+
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+
+      final String path = "/foo";
+      try (OutputStream os = webHdfs.create(new Path(path))) {
+        os.write(new byte[1024]);
+      }
+
+      FsStatus webHdfsFsStatus = webHdfs.getStatus(new Path("/"));
+      Assert.assertNotNull(webHdfsFsStatus);
+
+      FsStatus dfsFsStatus = dfs.getStatus(new Path("/"));
+      Assert.assertNotNull(dfsFsStatus);
+
+      // Validate used, remaining and capacity are the same as DistributedFileSystem
+      Assert.assertEquals(webHdfsFsStatus.getUsed(), dfsFsStatus.getUsed());
+      Assert.assertEquals(webHdfsFsStatus.getRemaining(),
+          dfsFsStatus.getRemaining());
+      Assert.assertEquals(webHdfsFsStatus.getCapacity(),
+          dfsFsStatus.getCapacity());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Get FileStatus JSONObject from ListStatus response.
    */
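
End to end, the new operation is reachable through the ordinary FileSystem facade; a minimal client sketch (hostname and port are hypothetical; 9870 is the default NameNode HTTP port in Hadoop 3.x, and WebHDFS must be enabled):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsStatus;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsGetStatusSketch {
      public static void main(String[] args) throws Exception {
        // The webhdfs:// scheme resolves to WebHdfsFileSystem, whose new
        // getStatus(Path) override issues GET /webhdfs/v1/?op=GETSTATUS.
        try (FileSystem fs = FileSystem.get(
            URI.create("webhdfs://localhost:9870"), new Configuration())) {
          FsStatus status = fs.getStatus(new Path("/"));
          System.out.println("capacity  = " + status.getCapacity());
          System.out.println("used      = " + status.getUsed());
          System.out.println("remaining = " + status.getRemaining());
        }
      }
    }

These are the same numbers `hdfs dfs -df` reports, which is why the test above can use DistributedFileSystem.getStatus as its oracle.
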