From 99bf1dc9eb18f9b4d0338986d1b8fd2232f1232f Mon Sep 17 00:00:00 2001
From: Siyao Meng <50227127+smengcl@users.noreply.github.com>
Date: Thu, 1 Aug 2019 17:14:07 -0700
Subject: [PATCH] HDFS-14683. WebHDFS: Add erasureCodingPolicy field to
 GETCONTENTSUMMARY response (#1189)

Contributed by Siyao Meng.
---
 .../org/apache/hadoop/hdfs/web/JsonUtil.java  |  1 +
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md  |  1 +
 .../apache/hadoop/hdfs/web/TestJsonUtil.java  | 21 +++++++++++++++++++
 3 files changed, 23 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 58a18d29e3..0e8638d4ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -354,6 +354,7 @@ public static String toJsonString(final ContentSummary contentsummary) {
     m.put("length", contentsummary.getLength());
     m.put("fileCount", contentsummary.getFileCount());
     m.put("directoryCount", contentsummary.getDirectoryCount());
+    m.put("ecPolicy", contentsummary.getErasureCodingPolicy());
     // For ContentSummary we don't need this since we already have
     // separate count for file and directory.
     m.putAll(toJsonMap(contentsummary, false));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 00bb704945..8afb7af933 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -761,6 +761,7 @@ Other File System Operations
       "ContentSummary":
       {
         "directoryCount": 2,
+        "ecPolicy"      : "RS-6-3-1024k",
         "fileCount"     : 1,
         "length"        : 24930,
         "quota"         : -1,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index e1dc2716ab..3ffc35fe6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -256,6 +257,26 @@ public void testToJsonFromAclStatus() {
         JsonUtil.toJsonString(aclStatusBuilder.build()));
   }
+
+  @Test
+  public void testToJsonFromContentSummary() {
+    String jsonString =
+        "{\"ContentSummary\":{\"directoryCount\":33333,\"ecPolicy\":\"RS-6-3-1024k\",\"fileCount\":22222,\"length\":11111,\"quota\":44444,\"spaceConsumed\":55555,\"spaceQuota\":66666,\"typeQuota\":{}}}";
+    long length = 11111;
+    long fileCount = 22222;
+    long directoryCount = 33333;
+    long quota = 44444;
+    long spaceConsumed = 55555;
+    long spaceQuota = 66666;
+    String ecPolicy = "RS-6-3-1024k";
+
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).
+        erasureCodingPolicy(ecPolicy).build();
+
+    Assert.assertEquals(jsonString, JsonUtil.toJsonString(contentSummary));
+  }
 
   @Test
   public void testToJsonFromXAttrs() throws IOException {
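
Note (not part of the patch): below is a minimal client-side sketch of how the new field can be observed end to end, assuming the WebHDFS client's JSON parsing also surfaces ecPolicy (that code path is not shown in this diff). The NameNode host, port, and directory are placeholders; ContentSummary.getErasureCodingPolicy() is the same accessor JsonUtil uses above.

    // Illustrative sketch only; host, port and path are placeholders.
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class EcPolicySummaryExample {
      public static void main(String[] args) throws Exception {
        // Talk to the NameNode over the WebHDFS REST endpoint.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:9870"), new Configuration());

        // GETCONTENTSUMMARY on a directory, e.g. one with an erasure coding policy set.
        ContentSummary summary = fs.getContentSummary(new Path("/data/ec"));

        System.out.println("length         = " + summary.getLength());
        System.out.println("fileCount      = " + summary.getFileCount());
        System.out.println("directoryCount = " + summary.getDirectoryCount());
        // Policy name carried by the new ecPolicy field, e.g. "RS-6-3-1024k".
        System.out.println("ecPolicy       = " + summary.getErasureCodingPolicy());
      }
    }

The same information is available from the raw REST endpoint documented in WebHDFS.md, e.g. curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETCONTENTSUMMARY", whose JSON response now carries the ecPolicy key shown in the WebHDFS.md hunk above.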