From 41d3f8899d8b96568f56331eaf598bb356ecdae0 Mon Sep 17 00:00:00 2001
From: Xiaoyu Yao
Date: Mon, 9 Nov 2015 09:57:56 -0800
Subject: [PATCH] HDFS-9234. WebHdfs: getContentSummary() should give quota
 for storage types. Contributed by Surendra Singh Lilhore.

---
 .../hadoop/hdfs/web/JsonUtilClient.java       | 24 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../org/apache/hadoop/hdfs/web/JsonUtil.java  | 15 ++++
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md  | 84 ++++++++++++++++++-
 .../apache/hadoop/hdfs/web/TestWebHDFS.java   | 24 ++++++
 5 files changed, 142 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index d40adad6c0..756f2aa21c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -20,11 +20,13 @@
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -316,14 +318,22 @@ static ContentSummary toContentSummary(final Map<?, ?> json) {
     final long quota = ((Number) m.get("quota")).longValue();
     final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
     final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
+    final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
 
-    return new ContentSummary.Builder()
-        .length(length)
-        .fileCount(fileCount)
-        .directoryCount(directoryCount)
-        .quota(quota)
-        .spaceConsumed(spaceConsumed)
-        .spaceQuota(spaceQuota).build();
+    Builder contentSummaryBuilder = new ContentSummary.Builder().length(length)
+        .fileCount(fileCount).directoryCount(directoryCount).quota(quota)
+        .spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+    if (typem != null) {
+      for (StorageType t : StorageType.getTypesSupportingQuota()) {
+        Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());
+        if (type != null) {
+          contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
+              ((Number) type.get("quota")).longValue()).typeConsumed(t,
+              ((Number) type.get("consumed")).longValue());
+        }
+      }
+    }
+    return contentSummaryBuilder.build();
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
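The client-side decoder above simply replays the parsed JSON through
ContentSummary.Builder. As a stand-alone sketch of that round trip (not part
of the patch: the class name is invented, but every builder and getter call
is one the patch itself relies on), a summary carrying one storage-type
quota looks like this:

    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.StorageType;

    // Illustration only: builds the same kind of object that
    // toContentSummary() returns once it has read the "typeQuota" object.
    public class TypeQuotaRoundTrip {
      public static void main(String[] args) {
        ContentSummary summary = new ContentSummary.Builder()
            .length(24930).fileCount(1).directoryCount(2)
            .quota(-1).spaceConsumed(24930).spaceQuota(-1)
            .typeQuota(StorageType.SSD, 10000)
            .typeConsumed(StorageType.SSD, 500)
            .build();
        System.out.println(summary.getTypeQuota(StorageType.SSD));    // 10000
        System.out.println(summary.getTypeConsumed(StorageType.SSD)); // 500
      }
    }

Note that the serializer below only emits a type whose quota differs from
HdfsConstants.QUOTA_RESET, so types without a quota never appear in the JSON
and the if (typem != null) / if (type != null) guards above are required.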
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fbaff1dd4b..bf05c4e5db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -198,6 +198,9 @@ Trunk (Unreleased)
     HADOOP-11684. S3a to use thread pool that blocks clients. (Thomas Demoor
     and Aaron Fabbri via lei)
 
+    HDFS-9234. WebHdfs: getContentSummary() should give quota for storage types.
+    (Surendra Singh Lilhore via xyao)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 4b0e63ec81..f107e66a4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -253,6 +253,21 @@ public static String toJsonString(final ContentSummary contentsummary) {
     m.put("quota", contentsummary.getQuota());
     m.put("spaceConsumed", contentsummary.getSpaceConsumed());
     m.put("spaceQuota", contentsummary.getSpaceQuota());
+    final Map<String, Map<String, Long>> typeQuota =
+        new TreeMap<String, Map<String, Long>>();
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      long tQuota = contentsummary.getTypeQuota(t);
+      if (tQuota != HdfsConstants.QUOTA_RESET) {
+        Map<String, Long> type = typeQuota.get(t.toString());
+        if (type == null) {
+          type = new TreeMap<String, Long>();
+          typeQuota.put(t.toString(), type);
+        }
+        type.put("quota", contentsummary.getTypeQuota(t));
+        type.put("consumed", contentsummary.getTypeConsumed(t));
+      }
+    }
+    m.put("typeQuota", typeQuota);
     return toJsonString(ContentSummary.class, m);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index d0a0fe08c7..2b7a493fe5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -535,7 +535,25 @@ Other File System Operations
         "length"        : 24930,
         "quota"         : -1,
         "spaceConsumed" : 24930,
-        "spaceQuota"    : -1
+        "spaceQuota"    : -1,
+        "typeQuota":
+        {
+          "ARCHIVE":
+          {
+            "consumed": 500,
+            "quota": 10000
+          },
+          "DISK":
+          {
+            "consumed": 500,
+            "quota": 10000
+          },
+          "SSD":
+          {
+            "consumed": 500,
+            "quota": 10000
+          }
+        }
       }
     }
 
@@ -1260,6 +1278,70 @@ See also: [`MKDIRS`](#Make_a_Directory), [`RENAME`](#Rename_a_FileDirectory), [`
           "description": "The disk space quota.",
           "type"       : "integer",
           "required"   : true
+        },
+        "typeQuota":
+        {
+          "type"      : "object",
+          "properties":
+          {
+            "ARCHIVE":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            },
+            "DISK":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            },
+            "SSD":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            }
+          }
+        }
       }
     }
   }
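On the wire, the new object rides along in the response of the documented
GETCONTENTSUMMARY operation, whose ContentSummary JSON is shown in the first
WebHDFS.md hunk above. A minimal sketch of fetching the raw JSON (not part of
the patch; the class name is invented, and the NameNode HTTP address
localhost:50070 and the /QuotaDir path are assumptions matching the branch-2
default port and the test below):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    // Illustration only: prints the ContentSummary JSON, which after this
    // patch includes "typeQuota" whenever per-type quotas are set.
    public class PrintContentSummaryJson {
      public static void main(String[] args) throws Exception {
        URL url = new URL(
            "http://localhost:50070/webhdfs/v1/QuotaDir?op=GETCONTENTSUMMARY");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
          String line;
          while ((line = reader.readLine()) != null) {
            System.out.println(line);
          }
        } finally {
          conn.disconnect();
        }
      }
    }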
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 00b6856a59..89a7822597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -39,10 +39,12 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -642,6 +644,28 @@ public void testWebHdfsOffsetAndLength() throws Exception{
     }
   }
 
+  @Test
+  public void testContentSummary() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path path = new Path("/QuotaDir");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      dfs.mkdirs(path);
+      dfs.setQuotaByStorageType(path, StorageType.DISK, 100000);
+      ContentSummary contentSummary = webHdfs.getContentSummary(path);
+      Assert.assertEquals(100000,
+          contentSummary.getTypeQuota(StorageType.DISK));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testWebHdfsPread() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();