diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 3889326e82..34ad50f8a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -426,30 +427,66 @@ static ContentSummary toContentSummary(final Map<?, ?> json) {
       return null;
     }
 
-    final Map<?, ?> m = (Map<?, ?>)json.get(
-        ContentSummary.class.getSimpleName());
+    final Map<?, ?> m = (Map<?, ?>)
+        json.get(ContentSummary.class.getSimpleName());
     final long length = ((Number) m.get("length")).longValue();
     final long fileCount = ((Number) m.get("fileCount")).longValue();
     final long directoryCount = ((Number) m.get("directoryCount")).longValue();
+    ContentSummary.Builder builder = new ContentSummary.Builder()
+        .length(length)
+        .fileCount(fileCount)
+        .directoryCount(directoryCount);
+    builder = buildQuotaUsage(builder, m, ContentSummary.Builder.class);
+    return builder.build();
+  }
+
+  /** Convert a JSON map to a QuotaUsage. */
+  static QuotaUsage toQuotaUsage(final Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = (Map<?, ?>) json.get(QuotaUsage.class.getSimpleName());
+    QuotaUsage.Builder builder = new QuotaUsage.Builder();
+    builder = buildQuotaUsage(builder, m, QuotaUsage.Builder.class);
+    return builder.build();
+  }
+
+  /**
+   * Given a builder for QuotaUsage, parse the provided map and
+   * construct the relevant fields. Return the updated builder.
+   */
+  private static <T extends QuotaUsage.Builder> T buildQuotaUsage(
+      T builder, Map<?, ?> m, Class<T> type) {
     final long quota = ((Number) m.get("quota")).longValue();
     final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
     final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
     final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
-    ContentSummary.Builder contentSummaryBuilder =new ContentSummary.Builder()
-        .length(length).fileCount(fileCount).directoryCount(directoryCount)
-        .quota(quota).spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+    T result = type.cast(builder
+        .quota(quota)
+        .spaceConsumed(spaceConsumed)
+        .spaceQuota(spaceQuota));
+
+    // ContentSummary doesn't set this so check before using it
+    if (m.get("fileAndDirectoryCount") != null) {
+      final long fileAndDirectoryCount =
+          ((Number) m.get("fileAndDirectoryCount")).longValue();
+      result = type.cast(result.fileAndDirectoryCount(fileAndDirectoryCount));
+    }
+
     if (typem != null) {
       for (StorageType t : StorageType.getTypesSupportingQuota()) {
-        Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());
-        if (type != null) {
-          contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
-              ((Number) type.get("quota")).longValue()).typeConsumed(t,
-              ((Number) type.get("consumed")).longValue());
+        Map<?, ?> typeQuota = (Map<?, ?>) typem.get(t.toString());
+        if (typeQuota != null) {
+          result = type.cast(result.typeQuota(t,
+              ((Number) typeQuota.get("quota")).longValue()).typeConsumed(t,
+              ((Number) typeQuota.get("consumed")).longValue()));
         }
       }
     }
-    return contentSummaryBuilder.build();
+
+    return result;
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index e8049e9b9f..8cce9787c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -75,6 +75,7 @@
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
@@ -1868,6 +1869,20 @@ ContentSummary decodeResponse(Map<?, ?> json) {
     }.run();
   }
 
+  @Override
+  public QuotaUsage getQuotaUsage(final Path p) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_QUOTA_USAGE);
+
+    final HttpOpParam.Op op = GetOpParam.Op.GETQUOTAUSAGE;
+    return new FsPathResponseRunner<QuotaUsage>(op, p) {
+      @Override
+      QuotaUsage decodeResponse(Map<?, ?> json) {
+        return JsonUtilClient.toQuotaUsage(json);
+      }
+    }.run();
+  }
+
   @Override
   public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
       ) throws IOException {
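A note on the `Class<T>` token threaded through `buildQuotaUsage` above: inside a method bounded by `<T extends QuotaUsage.Builder>`, each chained fluent call is statically typed as the base builder, so `type.cast(...)` is what hands the caller back its concrete builder type. Below is a minimal, self-contained sketch of the pattern; `BaseBuilder`, `SubBuilder`, and `applyQuota` are hypothetical stand-ins, not part of this patch.

```java
// Sketch of the Class-token builder pattern used by buildQuotaUsage.
class BaseBuilder {
  long quota;

  BaseBuilder quota(long q) { // fluent setter returns the BASE type
    this.quota = q;
    return this;
  }
}

class SubBuilder extends BaseBuilder {
  long length;

  SubBuilder length(long l) {
    this.length = l;
    return this;
  }
}

final class BuilderDemo {
  // The Class<T> token lets a shared helper return the caller's concrete
  // builder even though quota() is declared to return BaseBuilder.
  static <T extends BaseBuilder> T applyQuota(T builder, long q, Class<T> type) {
    return type.cast(builder.quota(q));
  }

  public static void main(String[] args) {
    // Without the cast, quota() would yield a plain BaseBuilder and the
    // SubBuilder-only setter length() would be unreachable.
    SubBuilder b = applyQuota(new SubBuilder(), 100L, SubBuilder.class);
    b.length(42L);
    System.out.println(b.quota + " " + b.length); // prints: 100 42
  }
}
```

A self-typed (curiously recurring) builder would avoid the token entirely, but the cast keeps the two existing builder classes untouched.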
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index cefa12fabc..f9a5fa6de4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -28,6 +28,7 @@ public enum Op implements HttpOpParam.Op {
     GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
     LISTSTATUS(false, HttpURLConnection.HTTP_OK),
     GETCONTENTSUMMARY(false, HttpURLConnection.HTTP_OK),
+    GETQUOTAUSAGE(false, HttpURLConnection.HTTP_OK),
     GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK),
 
     GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index d80c276d16..1c1b93b3e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
@@ -191,9 +192,16 @@ public static FILE_TYPE getType(FileStatus fileStatus) {
   public static final String CONTENT_SUMMARY_DIRECTORY_COUNT_JSON = "directoryCount";
   public static final String CONTENT_SUMMARY_FILE_COUNT_JSON = "fileCount";
   public static final String CONTENT_SUMMARY_LENGTH_JSON = "length";
-  public static final String CONTENT_SUMMARY_QUOTA_JSON = "quota";
-  public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed";
-  public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota";
+
+  public static final String QUOTA_USAGE_JSON = "QuotaUsage";
+  public static final String QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON =
+      "fileAndDirectoryCount";
+  public static final String QUOTA_USAGE_QUOTA_JSON = "quota";
+  public static final String QUOTA_USAGE_SPACE_CONSUMED_JSON = "spaceConsumed";
+  public static final String QUOTA_USAGE_SPACE_QUOTA_JSON = "spaceQuota";
+  public static final String QUOTA_USAGE_CONSUMED_JSON = "consumed";
+  public static final String QUOTA_USAGE_TYPE_QUOTA_JSON = "typeQuota";
 
   public static final String ACL_STATUS_JSON = "AclStatus";
   public static final String ACL_STICKY_BIT_JSON = "stickyBit";
@@ -222,8 +230,9 @@ public static FILE_TYPE getType(FileStatus fileStatus) {
   public enum Operation {
     OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
     GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
-    GETFILECHECKSUM(HTTP_GET), GETFILEBLOCKLOCATIONS(HTTP_GET),
-    INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET), GETTRASHROOT(HTTP_GET),
+    GETQUOTAUSAGE(HTTP_GET), GETFILECHECKSUM(HTTP_GET),
+    GETFILEBLOCKLOCATIONS(HTTP_GET), INSTRUMENTATION(HTTP_GET),
+    GETACLSTATUS(HTTP_GET), GETTRASHROOT(HTTP_GET),
     APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
     CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
     SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
@@ -1124,14 +1133,65 @@ public ContentSummary getContentSummary(Path f) throws IOException {
         getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
     HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) ((JSONObject)
-        HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
-    return new ContentSummary.Builder().
-        length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON)).
-        fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON)).
-        directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON)).
-        quota((Long) json.get(CONTENT_SUMMARY_QUOTA_JSON)).
-        spaceConsumed((Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON)).
-        spaceQuota((Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)).build();
+        HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
+    ContentSummary.Builder builder = new ContentSummary.Builder()
+        .length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON))
+        .fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON))
+        .directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON));
+    builder = buildQuotaUsage(builder, json, ContentSummary.Builder.class);
+    return builder.build();
+  }
+
+  @Override
+  public QuotaUsage getQuotaUsage(Path f) throws IOException {
+    Map<String, String> params = new HashMap<>();
+    params.put(OP_PARAM, Operation.GETQUOTAUSAGE.toString());
+    HttpURLConnection conn =
+        getConnection(Operation.GETQUOTAUSAGE.getMethod(), params, f, true);
+    JSONObject json = (JSONObject) ((JSONObject)
+        HttpFSUtils.jsonParse(conn)).get(QUOTA_USAGE_JSON);
+    QuotaUsage.Builder builder = new QuotaUsage.Builder();
+    builder = buildQuotaUsage(builder, json, QuotaUsage.Builder.class);
+    return builder.build();
+  }
+
+  /**
+   * Given a builder for QuotaUsage, parse the provided JSON object and
+   * construct the relevant fields. Return the updated builder.
+   */
+  private static <T extends QuotaUsage.Builder> T buildQuotaUsage(
+      T builder, JSONObject json, Class<T> type) {
+    long quota = (Long) json.get(QUOTA_USAGE_QUOTA_JSON);
+    long spaceConsumed = (Long) json.get(QUOTA_USAGE_SPACE_CONSUMED_JSON);
+    long spaceQuota = (Long) json.get(QUOTA_USAGE_SPACE_QUOTA_JSON);
+    JSONObject typeJson = (JSONObject) json.get(QUOTA_USAGE_TYPE_QUOTA_JSON);
+
+    builder = type.cast(builder
+        .quota(quota)
+        .spaceConsumed(spaceConsumed)
+        .spaceQuota(spaceQuota)
+    );
+
+    // ContentSummary doesn't set this so check before using it
+    if (json.get(QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON) != null) {
+      long fileAndDirectoryCount = (Long)
+          json.get(QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON);
+      builder = type.cast(builder.fileAndDirectoryCount(fileAndDirectoryCount));
+    }
+
+    if (typeJson != null) {
+      for (StorageType t : StorageType.getTypesSupportingQuota()) {
+        JSONObject typeQuota = (JSONObject) typeJson.get(t.toString());
+        if (typeQuota != null) {
+          builder = type.cast(builder
+              .typeQuota(t, ((Long) typeQuota.get(QUOTA_USAGE_QUOTA_JSON)))
+              .typeConsumed(t, ((Long) typeQuota.get(QUOTA_USAGE_CONSUMED_JSON))
+          ));
+        }
+      }
+    }
+
+    return builder;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 7989895d3c..7f0b5d2642 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.fs.GlobFilter;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -36,6 +37,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -56,6 +58,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.TreeMap;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
@@ -249,17 +252,66 @@ private static Map xAttrNamesToJSON(List<String> names) throws IOException {
   @SuppressWarnings({"unchecked"})
   private static Map contentSummaryToJSON(ContentSummary contentSummary) {
     Map json = new LinkedHashMap();
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON, contentSummary.getDirectoryCount());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON, contentSummary.getFileCount());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON, contentSummary.getLength());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_QUOTA_JSON, contentSummary.getQuota());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_CONSUMED_JSON, contentSummary.getSpaceConsumed());
-    json.put(HttpFSFileSystem.CONTENT_SUMMARY_SPACE_QUOTA_JSON, contentSummary.getSpaceQuota());
+    json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON,
+        contentSummary.getDirectoryCount());
+    json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON,
+        contentSummary.getFileCount());
+    json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON,
+        contentSummary.getLength());
+    Map<String, Object> quotaUsageMap = quotaUsageToMap(contentSummary);
+    for (Map.Entry<String, Object> e : quotaUsageMap.entrySet()) {
+      // For ContentSummary we don't need this since we already have
+      // separate count for file and directory.
+      if (!e.getKey().equals(
+          HttpFSFileSystem.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON)) {
+        json.put(e.getKey(), e.getValue());
+      }
+    }
     Map response = new LinkedHashMap();
     response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
     return response;
   }
 
+  /**
+   * Converts a QuotaUsage object into a JSON object.
+   */
+  @SuppressWarnings({"unchecked"})
+  private static Map quotaUsageToJSON(QuotaUsage quotaUsage) {
+    Map response = new LinkedHashMap();
+    Map<String, Object> quotaUsageMap = quotaUsageToMap(quotaUsage);
+    response.put(HttpFSFileSystem.QUOTA_USAGE_JSON, quotaUsageMap);
+    return response;
+  }
+
+  private static Map<String, Object> quotaUsageToMap(QuotaUsage quotaUsage) {
+    Map<String, Object> result = new LinkedHashMap<>();
+    result.put(HttpFSFileSystem.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON,
+        quotaUsage.getFileAndDirectoryCount());
+    result.put(HttpFSFileSystem.QUOTA_USAGE_QUOTA_JSON, quotaUsage.getQuota());
+    result.put(HttpFSFileSystem.QUOTA_USAGE_SPACE_CONSUMED_JSON,
+        quotaUsage.getSpaceConsumed());
+    result.put(HttpFSFileSystem.QUOTA_USAGE_SPACE_QUOTA_JSON,
+        quotaUsage.getSpaceQuota());
+    Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      long tQuota = quotaUsage.getTypeQuota(t);
+      if (tQuota != HdfsConstants.QUOTA_RESET) {
+        Map<String, Long> type = typeQuota.get(t.toString());
+        if (type == null) {
+          type = new TreeMap<>();
+          typeQuota.put(t.toString(), type);
+        }
+        type.put(HttpFSFileSystem.QUOTA_USAGE_QUOTA_JSON,
+            quotaUsage.getTypeQuota(t));
+        type.put(HttpFSFileSystem.QUOTA_USAGE_CONSUMED_JSON,
+            quotaUsage.getTypeConsumed(t));
+      }
+    }
+    result.put(HttpFSFileSystem.QUOTA_USAGE_TYPE_QUOTA_JSON, typeQuota);
+    return result;
+  }
+
   /**
    * Converts an object into a Json Map with one key-value entry.
    *

@@ -473,6 +525,26 @@ public Map execute(FileSystem fs) throws IOException {
   }
 
+  /**
+   * Executor that performs a quota-usage FileSystemAccess files system
+   * operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSQuotaUsage
+      implements FileSystemAccess.FileSystemExecutor<Map> {
+    private Path path;
+
+    public FSQuotaUsage(String path) {
+      this.path = new Path(path);
+    }
+
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      QuotaUsage quotaUsage = fs.getQuotaUsage(path);
+      return quotaUsageToJSON(quotaUsage);
+    }
+  }
+
   /**
    * Executor that performs a create FileSystemAccess files system operation.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index f5450797eb..857ec94fa1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -57,6 +57,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.LISTSTATUS, new Class[]{FilterParam.class});
     PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{});
     PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{});
+    PARAMS_DEF.put(Operation.GETQUOTAUSAGE, new Class[]{});
     PARAMS_DEF.put(Operation.GETFILECHECKSUM,
         new Class[]{NoRedirectParam.class});
     PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS, new Class[]{});
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 08cd29c1e6..f2ef811dfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -304,6 +304,14 @@ public InputStream run() throws Exception {
       response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETQUOTAUSAGE: {
+      FSOperations.FSQuotaUsage command =
+          new FSOperations.FSQuotaUsage(path);
+      Map json = fsExecute(user, command);
+      AUDIT_LOG.info("[{}]", path);
+      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     case GETFILECHECKSUM: {
       FSOperations.FSFileChecksum command =
           new FSOperations.FSFileChecksum(path);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 4514739953..5743f7af95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -22,12 +22,15 @@
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -673,17 +676,56 @@ private void testContentSummary() throws Exception {
     fs = getHttpFSFileSystem();
     ContentSummary httpContentSummary = fs.getContentSummary(path);
     fs.close();
-    assertEquals(httpContentSummary.getDirectoryCount(),
-        hdfsContentSummary.getDirectoryCount());
-    assertEquals(httpContentSummary.getFileCount(),
-        hdfsContentSummary.getFileCount());
-    assertEquals(httpContentSummary.getLength(),
-        hdfsContentSummary.getLength());
-    assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
-    assertEquals(httpContentSummary.getSpaceConsumed(),
-        hdfsContentSummary.getSpaceConsumed());
-    assertEquals(httpContentSummary.getSpaceQuota(),
-        hdfsContentSummary.getSpaceQuota());
+    assertEquals(hdfsContentSummary.getDirectoryCount(),
+        httpContentSummary.getDirectoryCount());
+    assertEquals(hdfsContentSummary.getFileCount(),
+        httpContentSummary.getFileCount());
+    assertEquals(hdfsContentSummary.getLength(),
+        httpContentSummary.getLength());
+    assertEquals(hdfsContentSummary.getQuota(), httpContentSummary.getQuota());
+    assertEquals(hdfsContentSummary.getSpaceConsumed(),
+        httpContentSummary.getSpaceConsumed());
+    assertEquals(hdfsContentSummary.getSpaceQuota(),
+        httpContentSummary.getSpaceQuota());
+  }
+
+  private void testQuotaUsage() throws Exception {
+    if (isLocalFS()) {
+      // LocalFS doesn't support setQuota so skip here
+      return;
+    }
+
+    DistributedFileSystem dfs =
+        (DistributedFileSystem) FileSystem.get(getProxiedFSConf());
+    Path path = new Path(getProxiedFSTestDir(), "foo");
+    dfs.mkdirs(path);
+    dfs.setQuota(path, 20, 600 * 1024 * 1024);
+    for (int i = 0; i < 10; i++) {
+      dfs.createNewFile(new Path(path, "test_file_" + i));
+    }
+    FSDataOutputStream out = dfs.create(new Path(path, "test_file"));
+    out.writeUTF("Hello World");
+    out.close();
+
+    dfs.setQuotaByStorageType(path, StorageType.SSD, 100000);
+    dfs.setQuotaByStorageType(path, StorageType.DISK, 200000);
+
+    QuotaUsage hdfsQuotaUsage = dfs.getQuotaUsage(path);
+    dfs.close();
+    FileSystem fs = getHttpFSFileSystem();
+    QuotaUsage httpQuotaUsage = fs.getQuotaUsage(path);
+    fs.close();
+    assertEquals(hdfsQuotaUsage.getFileAndDirectoryCount(),
+        httpQuotaUsage.getFileAndDirectoryCount());
+    assertEquals(hdfsQuotaUsage.getQuota(), httpQuotaUsage.getQuota());
+    assertEquals(hdfsQuotaUsage.getSpaceConsumed(),
+        httpQuotaUsage.getSpaceConsumed());
+    assertEquals(hdfsQuotaUsage.getSpaceQuota(),
+        httpQuotaUsage.getSpaceQuota());
+    assertEquals(hdfsQuotaUsage.getTypeQuota(StorageType.SSD),
+        httpQuotaUsage.getTypeQuota(StorageType.SSD));
+    assertEquals(hdfsQuotaUsage.getTypeQuota(StorageType.DISK),
+        httpQuotaUsage.getTypeQuota(StorageType.DISK));
   }
 
   /** Set xattr */
@@ -1078,9 +1120,9 @@ private void testStoragePolicy() throws Exception {
   protected enum Operation {
     GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS,
     WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
-    SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
-    GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION, LIST_STATUS_BATCH,
-    GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING,
+    SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, QUOTA_USAGE, FILEACLS, DIRACLS,
+    SET_XATTR, GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION,
+    LIST_STATUS_BATCH, GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING,
     CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT, ALLOW_SNAPSHOT,
     DISALLOW_SNAPSHOT, DISALLOW_SNAPSHOT_EXCEPTION, FILE_STATUS_ATTR,
     GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST
@@ -1139,6 +1181,9 @@ private void operation(Operation op) throws Exception {
     case CONTENT_SUMMARY:
       testContentSummary();
       break;
+    case QUOTA_USAGE:
+      testQuotaUsage();
+      break;
     case FILEACLS:
       testFileAclsCustomizedUserAndGroupNames();
       testFileAcls();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index ddaf0e9c6f..b14c3a6fc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -56,6 +56,7 @@
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
 
+import org.apache.hadoop.fs.QuotaUsage;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -1171,6 +1172,12 @@ protected Response get(
       final String js = JsonUtil.toJsonString(contentsummary);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETQUOTAUSAGE:
+    {
+      final QuotaUsage quotaUsage = cp.getQuotaUsage(fullpath);
+      final String js = JsonUtil.toJsonString(quotaUsage);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     case GETFILECHECKSUM:
     {
       final NameNode namenode = (NameNode)context.getAttribute("name.node");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index f96664095a..58a18d29e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrCodec;
@@ -353,25 +354,44 @@ public static String toJsonString(final ContentSummary contentsummary) {
     m.put("length", contentsummary.getLength());
     m.put("fileCount", contentsummary.getFileCount());
     m.put("directoryCount", contentsummary.getDirectoryCount());
-    m.put("quota", contentsummary.getQuota());
-    m.put("spaceConsumed", contentsummary.getSpaceConsumed());
-    m.put("spaceQuota", contentsummary.getSpaceQuota());
-    final Map<String, Map<String, Long>> typeQuota =
-        new TreeMap<String, Map<String, Long>>();
+    // For ContentSummary we don't need this since we already have
+    // separate count for file and directory.
+    m.putAll(toJsonMap(contentsummary, false));
+    return toJsonString(ContentSummary.class, m);
+  }
+
+  /** Convert a QuotaUsage to a JSON string. */
+  public static String toJsonString(final QuotaUsage quotaUsage) {
+    if (quotaUsage == null) {
+      return null;
+    }
+    return toJsonString(QuotaUsage.class, toJsonMap(quotaUsage, true));
+  }
+
+  private static Map<String, Object> toJsonMap(
+      final QuotaUsage quotaUsage, boolean includeFileAndDirectoryCount) {
+    final Map<String, Object> m = new TreeMap<>();
+    if (includeFileAndDirectoryCount) {
+      m.put("fileAndDirectoryCount", quotaUsage.getFileAndDirectoryCount());
+    }
+    m.put("quota", quotaUsage.getQuota());
+    m.put("spaceConsumed", quotaUsage.getSpaceConsumed());
+    m.put("spaceQuota", quotaUsage.getSpaceQuota());
+    final Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
     for (StorageType t : StorageType.getTypesSupportingQuota()) {
-      long tQuota = contentsummary.getTypeQuota(t);
+      long tQuota = quotaUsage.getTypeQuota(t);
       if (tQuota != HdfsConstants.QUOTA_RESET) {
         Map<String, Long> type = typeQuota.get(t.toString());
         if (type == null) {
-          type = new TreeMap<String, Long>();
+          type = new TreeMap<>();
           typeQuota.put(t.toString(), type);
         }
-        type.put("quota", contentsummary.getTypeQuota(t));
-        type.put("consumed", contentsummary.getTypeConsumed(t));
+        type.put("quota", quotaUsage.getTypeQuota(t));
+        type.put("consumed", quotaUsage.getTypeConsumed(t));
       }
     }
     m.put("typeQuota", typeQuota);
-    return toJsonString(ContentSummary.class, m);
+    return m;
   }
 
   /** Convert a MD5MD5CRC32FileChecksum to a Json string. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 0e224645cc..00bb704945 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -39,6 +39,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html)/[FileContext](../../api/org/apache/hadoop/fs/FileContext.html) interface for HDFS.
 * [`LISTSTATUS`](#List_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus)
 * [`LISTSTATUS_BATCH`](#Iteratively_List_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatusIterator)
 * [`GETCONTENTSUMMARY`](#Get_Content_Summary_of_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getContentSummary)
+* [`GETQUOTAUSAGE`](#Get_Quota_Usage_of_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage)
 * [`GETFILECHECKSUM`](#Get_File_Checksum) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileChecksum)
 * [`GETHOMEDIRECTORY`](#Get_Home_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getHomeDirectory)
 * [`GETDELEGATIONTOKEN`](#Get_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationToken)
@@ -788,6 +789,48 @@ Other File System Operations
 
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getContentSummary
 
+### Get Quota Usage of a Directory
+
+* Submit a HTTP GET request.
+
+ + curl -i "http://:/webhdfs/v1/?op=GETQUOTAUSAGE" + + The client receives a response with a [`QuotaUsage` JSON object](#QuotaUsage_JSON_Schema): + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + + { + "QuotaUsage": + { + "fileAndDirectoryCount": 1, + "quota" : 100, + "spaceConsumed" : 24930, + "spaceQuota" : 100000, + "typeQuota": + { + "ARCHIVE": + { + "consumed": 500, + "quota": 10000 + }, + "DISK": + { + "consumed": 500, + "quota": 10000 + }, + "SSD": + { + "consumed": 500, + "quota": 10000 + } + } + } + } + +See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage + ### Get File Checksum * Submit a HTTP GET request. @@ -1935,6 +1978,114 @@ See also: [`MKDIRS`](#Make_a_Directory), [`RENAME`](#Rename_a_FileDirectory), [` See also: [`GETCONTENTSUMMARY`](#Get_Content_Summary_of_a_Directory) +### QuotaUsage JSON Schema + +```json +{ + "name" : "QuotaUsage", + "properties": + { + "QuotaUsage": + { + "type" : "object", + "properties": + { + "fileAndDirectoryCount": + { + "description": "The number of files and directories.", + "type" : "integer", + "required" : true + }, + "quota": + { + "description": "The namespace quota of this directory.", + "type" : "integer", + "required" : true + }, + "spaceConsumed": + { + "description": "The disk space consumed by the content.", + "type" : "integer", + "required" : true + }, + "spaceQuota": + { + "description": "The disk space quota.", + "type" : "integer", + "required" : true + }, + "typeQuota": + { + "type" : "object", + "properties": + { + "ARCHIVE": + { + "type" : "object", + "properties": + { + "consumed": + { + "description": "The storage type space consumed.", + "type" : "integer", + "required" : true + }, + "quota": + { + "description": "The storage type quota.", + "type" : "integer", + "required" : true + } + } + }, + "DISK": + { + "type" : "object", + "properties": + { + "consumed": + { + "description": "The storage type space consumed.", + "type" : "integer", + "required" : true + }, + "quota": + { + "description": "The storage type quota.", + "type" : "integer", + "required" : true + } + } + }, + "SSD": + { + "type" : "object", + "properties": + { + "consumed": + { + "description": "The storage type space consumed.", + "type" : "integer", + "required" : true + }, + "quota": + { + "description": "The storage type quota.", + "type" : "integer", + "required" : true + } + } + } + } + } + } + } + } +} +``` + +See also: [`GETQUOTAUSAGE`](#Get_Quota_Usage_of_a_Directory) + ### FileChecksum JSON Schema diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index 0f4d441900..719da7ea4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -54,6 +54,7 @@ import com.google.common.collect.ImmutableList; import org.apache.commons.io.IOUtils; +import org.apache.hadoop.fs.QuotaUsage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -1038,6 +1039,47 @@ public void testContentSummary() throws Exception { } } + @Test + public void testQuotaUsage() throws Exception { + MiniDFSCluster cluster = null; + final Configuration conf = WebHdfsTestUtil.createConf(); + final Path path = new Path("/TestDir"); + try { + cluster = new 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 0f4d441900..719da7ea4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -54,6 +54,7 @@
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -1038,6 +1039,47 @@ public void testContentSummary() throws Exception {
     }
   }
 
+  @Test
+  public void testQuotaUsage() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path path = new Path("/TestDir");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+
+      final long nsQuota = 100;
+      final long spaceQuota = 600 * 1024 * 1024;
+      final long diskQuota = 100000;
+      final byte[] bytes = {0x0, 0x1, 0x2, 0x3};
+
+      dfs.mkdirs(path);
+      dfs.setQuota(path, nsQuota, spaceQuota);
+      for (int i = 0; i < 10; i++) {
+        dfs.createNewFile(new Path(path, "test_file_" + i));
+      }
+      FSDataOutputStream out = dfs.create(new Path(path, "test_file"));
+      out.write(bytes);
+      out.close();
+
+      dfs.setQuotaByStorageType(path, StorageType.DISK, diskQuota);
+
+      QuotaUsage quotaUsage = webHdfs.getQuotaUsage(path);
+      assertEquals(12, quotaUsage.getFileAndDirectoryCount());
+      assertEquals(nsQuota, quotaUsage.getQuota());
+      assertEquals(bytes.length * dfs.getDefaultReplication(),
+          quotaUsage.getSpaceConsumed());
+      assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
+      assertEquals(diskQuota, quotaUsage.getTypeQuota(StorageType.DISK));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testWebHdfsPread() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
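Finally, a sketch of how an application would consume the new operation through the public `FileSystem` API once the patch is deployed. The `webhdfs` URI authority and the path below are placeholders; with this patch in place the call is served by the new `GETQUOTAUSAGE` handler instead of falling back to `FileSystem`'s `getContentSummary`-based default.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;

public class QuotaUsageExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder endpoint; an HttpFS gateway is addressed the same way.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:9870"), conf);
    try {
      QuotaUsage qu = fs.getQuotaUsage(new Path("/user/alice"));
      System.out.println("files+dirs : " + qu.getFileAndDirectoryCount());
      System.out.println("ns quota   : " + qu.getQuota());
      System.out.println("space used : " + qu.getSpaceConsumed()
          + " of " + qu.getSpaceQuota());
      System.out.println("SSD quota  : " + qu.getTypeQuota(StorageType.SSD));
    } finally {
      fs.close();
    }
  }
}
```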