diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 6276dda2ad..66137d02c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -21,6 +21,7 @@
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
@@ -36,17 +37,106 @@ public class ContentSummary implements Writable{
   private long quota;
   private long spaceConsumed;
   private long spaceQuota;
-  
+  private long typeConsumed[];
+  private long typeQuota[];
 
-  /** Constructor */
+  public static class Builder{
+    public Builder() {
+      this.quota = -1;
+      this.spaceQuota = -1;
+
+      typeConsumed = new long[StorageType.values().length];
+      typeQuota = new long[StorageType.values().length];
+      for (int i = 0; i < typeQuota.length; i++) {
+        typeQuota[i] = -1;
+      }
+    }
+
+    public Builder length(long length) {
+      this.length = length;
+      return this;
+    }
+
+    public Builder fileCount(long fileCount) {
+      this.fileCount = fileCount;
+      return this;
+    }
+
+    public Builder directoryCount(long directoryCount) {
+      this.directoryCount = directoryCount;
+      return this;
+    }
+
+    public Builder quota(long quota){
+      this.quota = quota;
+      return this;
+    }
+
+    public Builder spaceConsumed(long spaceConsumed) {
+      this.spaceConsumed = spaceConsumed;
+      return this;
+    }
+
+    public Builder spaceQuota(long spaceQuota) {
+      this.spaceQuota = spaceQuota;
+      return this;
+    }
+
+    public Builder typeConsumed(long typeConsumed[]) {
+      for (int i = 0; i < typeConsumed.length; i++) {
+        this.typeConsumed[i] = typeConsumed[i];
+      }
+      return this;
+    }
+
+    public Builder typeQuota(StorageType type, long quota) {
+      this.typeQuota[type.ordinal()] = quota;
+      return this;
+    }
+
+    public Builder typeConsumed(StorageType type, long consumed) {
+      this.typeConsumed[type.ordinal()] = consumed;
+      return this;
+    }
+
+    public Builder typeQuota(long typeQuota[]) {
+      for (int i = 0; i < typeQuota.length; i++) {
+        this.typeQuota[i] = typeQuota[i];
+      }
+      return this;
+    }
+
+    public ContentSummary build() {
+      return new ContentSummary(length, fileCount, directoryCount, quota,
+          spaceConsumed, spaceQuota, typeConsumed, typeQuota);
+    }
+
+    private long length;
+    private long fileCount;
+    private long directoryCount;
+    private long quota;
+    private long spaceConsumed;
+    private long spaceQuota;
+    private long typeConsumed[];
+    private long typeQuota[];
+  }
+
+  /** Constructor, deprecated by ContentSummary.Builder */
+  @Deprecated
   public ContentSummary() {}
 
-  /** Constructor */
+  /** Constructor, deprecated by ContentSummary.Builder.
+   *  This constructor implicitly sets spaceConsumed to the same value as
+   *  length. When using ContentSummary.Builder, spaceConsumed and length
+   *  must each be set explicitly.
+   */
+  @Deprecated
   public ContentSummary(long length, long fileCount, long directoryCount) {
     this(length, fileCount, directoryCount, -1L, length, -1L);
   }
 
-  /** Constructor */
+  /** Constructor, deprecated by ContentSummary.Builder */
+  @Deprecated
   public ContentSummary(
       long length, long fileCount, long directoryCount, long quota,
       long spaceConsumed, long spaceQuota) {
@@ -58,6 +148,21 @@ public ContentSummary(
     this.spaceQuota = spaceQuota;
   }
 
+  /** Constructor for ContentSummary.Builder */
+  private ContentSummary(
+      long length, long fileCount, long directoryCount, long quota,
+      long spaceConsumed, long spaceQuota, long typeConsumed[],
+      long typeQuota[]) {
+    this.length = length;
+    this.fileCount = fileCount;
+    this.directoryCount = directoryCount;
+    this.quota = quota;
+    this.spaceConsumed = spaceConsumed;
+    this.spaceQuota = spaceQuota;
+    this.typeConsumed = typeConsumed;
+    this.typeQuota = typeQuota;
+  }
+
   /** @return the length */
   public long getLength() {return length;}
 
@@ -70,12 +175,48 @@ public ContentSummary(
   /** Return the directory quota */
   public long getQuota() {return quota;}
 
-  /** Retuns (disk) space consumed */
+  /** Returns storage space consumed */
   public long getSpaceConsumed() {return spaceConsumed;}
 
-  /** Returns (disk) space quota */
+  /** Returns storage space quota */
   public long getSpaceQuota() {return spaceQuota;}
-  
+
+  /** Returns storage type quota */
+  public long getTypeQuota(StorageType type) {
+    return (typeQuota != null) ? typeQuota[type.ordinal()] : -1;
+  }
+
+  /** Returns storage type consumed */
+  public long getTypeConsumed(StorageType type) {
+    return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0;
+  }
+
+  /** Returns true if any storage type quota has been set */
+  public boolean isTypeQuotaSet() {
+    if (typeQuota == null) {
+      return false;
+    }
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      if (typeQuota[t.ordinal()] > 0) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /** Returns true if any storage type consumption information is available */
+  public boolean isTypeConsumedAvailable() {
+    if (typeConsumed == null) {
+      return false;
+    }
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      if (typeConsumed[t.ordinal()] > 0) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   @Override
   @InterfaceAudience.Private
   public void write(DataOutput out) throws IOException {
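
Usage sketch (illustrative, not part of the patch): building a summary with
per-type quota and usage through the new Builder; the values are hypothetical.

  ContentSummary cs = new ContentSummary.Builder().
      length(1024).fileCount(1).directoryCount(0).
      spaceConsumed(3 * 1024).              // e.g. replication factor 3
      typeQuota(StorageType.SSD, 4096).     // per-type quota in bytes
      typeConsumed(StorageType.SSD, 1024).  // per-type usage in bytes
      build();
  assert cs.isTypeQuotaSet();
  assert cs.getTypeConsumed(StorageType.SSD) == 1024;
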
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 27131449e7..aad8be9e2c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -1644,20 +1644,27 @@ public ContentSummary getContentSummary(Path f)
         UnsupportedFileSystemException, IOException {
       FileStatus status = FileContext.this.getFileStatus(f);
       if (status.isFile()) {
-        return new ContentSummary(status.getLen(), 1, 0);
+        long length = status.getLen();
+        return new ContentSummary.Builder().length(length).
+            fileCount(1).directoryCount(0).spaceConsumed(length).
+            build();
       }
       long[] summary = {0, 0, 1};
-      RemoteIterator<FileStatus> statusIterator = 
+      RemoteIterator<FileStatus> statusIterator =
           FileContext.this.listStatus(f);
       while(statusIterator.hasNext()) {
         FileStatus s = statusIterator.next();
+        long length = s.getLen();
         ContentSummary c = s.isDirectory() ?
             getContentSummary(s.getPath()) :
-            new ContentSummary(s.getLen(), 1, 0);
+            new ContentSummary.Builder().length(length).fileCount(1).
+                directoryCount(0).spaceConsumed(length).build();
         summary[0] += c.getLength();
         summary[1] += c.getFileCount();
         summary[2] += c.getDirectoryCount();
       }
-      return new ContentSummary(summary[0], summary[1], summary[2]);
+      return new ContentSummary.Builder().length(summary[0]).
+          fileCount(summary[1]).directoryCount(summary[2]).
+          spaceConsumed(summary[0]).build();
     }
 
     /**
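
Note (illustrative sketch): the deprecated three-argument constructor set
spaceConsumed equal to length implicitly; with the Builder that choice is
stated explicitly, as the hunk above does for plain file systems without
replication information.

  // new ContentSummary(len, 1, 0) is equivalent to:
  ContentSummary cs = new ContentSummary.Builder().
      length(len).fileCount(1).directoryCount(0).
      spaceConsumed(len).  // previously implied by the 3-arg constructor
      build();
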
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 2ca8813ee1..305fef2d60 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1467,18 +1467,24 @@ public ContentSummary getContentSummary(Path f) throws IOException {
     FileStatus status = getFileStatus(f);
     if (status.isFile()) {
       // f is a file
-      return new ContentSummary(status.getLen(), 1, 0);
+      long length = status.getLen();
+      return new ContentSummary.Builder().length(length).
+          fileCount(1).directoryCount(0).spaceConsumed(length).build();
     }
     // f is a directory
     long[] summary = {0, 0, 1};
     for(FileStatus s : listStatus(f)) {
+      long length = s.getLen();
       ContentSummary c = s.isDirectory() ?
           getContentSummary(s.getPath()) :
-          new ContentSummary(s.getLen(), 1, 0);
+          new ContentSummary.Builder().length(length).
+              fileCount(1).directoryCount(0).spaceConsumed(length).build();
       summary[0] += c.getLength();
       summary[1] += c.getFileCount();
       summary[2] += c.getDirectoryCount();
     }
-    return new ContentSummary(summary[0], summary[1], summary[2]);
+    return new ContentSummary.Builder().length(summary[0]).
+        fileCount(summary[1]).directoryCount(summary[2]).
+ spaceConsumed(summary[0]).build(); } final private static PathFilter DEFAULT_FILTER = new PathFilter() { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java index 5db0de3394..7cc7ae4094 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java @@ -32,13 +32,13 @@ public class TestContentSummary { // check the empty constructor correctly initialises the object @Test public void testConstructorEmpty() { - ContentSummary contentSummary = new ContentSummary(); + ContentSummary contentSummary = new ContentSummary.Builder().build(); assertEquals("getLength", 0, contentSummary.getLength()); assertEquals("getFileCount", 0, contentSummary.getFileCount()); assertEquals("getDirectoryCount", 0, contentSummary.getDirectoryCount()); - assertEquals("getQuota", 0, contentSummary.getQuota()); + assertEquals("getQuota", -1, contentSummary.getQuota()); assertEquals("getSpaceConsumed", 0, contentSummary.getSpaceConsumed()); - assertEquals("getSpaceQuota", 0, contentSummary.getSpaceQuota()); + assertEquals("getSpaceQuota", -1, contentSummary.getSpaceQuota()); } // check the full constructor with quota information @@ -51,8 +51,9 @@ public void testConstructorWithQuota() { long spaceConsumed = 55555; long spaceQuota = 66666; - ContentSummary contentSummary = new ContentSummary(length, fileCount, - directoryCount, quota, spaceConsumed, spaceQuota); + ContentSummary contentSummary = new ContentSummary.Builder().length(length). + fileCount(fileCount).directoryCount(directoryCount).quota(quota). + spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build(); assertEquals("getLength", length, contentSummary.getLength()); assertEquals("getFileCount", fileCount, contentSummary.getFileCount()); assertEquals("getDirectoryCount", directoryCount, @@ -70,8 +71,9 @@ public void testConstructorNoQuota() { long fileCount = 22222; long directoryCount = 33333; - ContentSummary contentSummary = new ContentSummary(length, fileCount, - directoryCount); + ContentSummary contentSummary = new ContentSummary.Builder().length(length). + fileCount(fileCount).directoryCount(directoryCount). + spaceConsumed(length).build(); assertEquals("getLength", length, contentSummary.getLength()); assertEquals("getFileCount", fileCount, contentSummary.getFileCount()); assertEquals("getDirectoryCount", directoryCount, @@ -91,8 +93,9 @@ public void testWrite() throws IOException { long spaceConsumed = 55555; long spaceQuota = 66666; - ContentSummary contentSummary = new ContentSummary(length, fileCount, - directoryCount, quota, spaceConsumed, spaceQuota); + ContentSummary contentSummary = new ContentSummary.Builder().length(length). + fileCount(fileCount).directoryCount(directoryCount).quota(quota). 
+ spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build(); DataOutput out = mock(DataOutput.class); InOrder inOrder = inOrder(out); @@ -116,7 +119,7 @@ public void testReadFields() throws IOException { long spaceConsumed = 55555; long spaceQuota = 66666; - ContentSummary contentSummary = new ContentSummary(); + ContentSummary contentSummary = new ContentSummary.Builder().build(); DataInput in = mock(DataInput.class); when(in.readLong()).thenReturn(length).thenReturn(fileCount) @@ -159,8 +162,9 @@ public void testToStringWithQuota() { long spaceConsumed = 55555; long spaceQuota = 66665; - ContentSummary contentSummary = new ContentSummary(length, fileCount, - directoryCount, quota, spaceConsumed, spaceQuota); + ContentSummary contentSummary = new ContentSummary.Builder().length(length). + fileCount(fileCount).directoryCount(directoryCount).quota(quota). + spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build(); String expected = " 44444 -11111 66665 11110" + " 33333 22222 11111 "; assertEquals(expected, contentSummary.toString(true)); @@ -173,8 +177,8 @@ public void testToStringNoQuota() { long fileCount = 22222; long directoryCount = 33333; - ContentSummary contentSummary = new ContentSummary(length, fileCount, - directoryCount); + ContentSummary contentSummary = new ContentSummary.Builder().length(length). + fileCount(fileCount).directoryCount(directoryCount).build(); String expected = " none inf none" + " inf 33333 22222 11111 "; assertEquals(expected, contentSummary.toString(true)); @@ -190,8 +194,9 @@ public void testToStringNoShowQuota() { long spaceConsumed = 55555; long spaceQuota = 66665; - ContentSummary contentSummary = new ContentSummary(length, fileCount, - directoryCount, quota, spaceConsumed, spaceQuota); + ContentSummary contentSummary = new ContentSummary.Builder().length(length). + fileCount(fileCount).directoryCount(directoryCount).quota(quota). + spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build(); String expected = " 33333 22222 11111 "; assertEquals(expected, contentSummary.toString(false)); } @@ -206,8 +211,9 @@ public void testToString() { long spaceConsumed = 55555; long spaceQuota = 66665; - ContentSummary contentSummary = new ContentSummary(length, fileCount, - directoryCount, quota, spaceConsumed, spaceQuota); + ContentSummary contentSummary = new ContentSummary.Builder().length(length). + fileCount(fileCount).directoryCount(directoryCount).quota(quota). + spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build(); String expected = " 44444 -11111 66665" + " 11110 33333 22222 11111 "; assertEquals(expected, contentSummary.toString()); @@ -223,8 +229,9 @@ public void testToStringHumanWithQuota() { long spaceConsumed = 1073741825; long spaceQuota = 1; - ContentSummary contentSummary = new ContentSummary(length, fileCount, - directoryCount, quota, spaceConsumed, spaceQuota); + ContentSummary contentSummary = new ContentSummary.Builder().length(length). + fileCount(fileCount).directoryCount(directoryCount).quota(quota). + spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build(); String expected = " 212.0 M 1023 1 " + " -1 G 32.6 K 211.9 M 8.0 E "; assertEquals(expected, contentSummary.toString(true, true)); @@ -240,8 +247,9 @@ public void testToStringHumanNoShowQuota() { long spaceConsumed = 55555; long spaceQuota = Long.MAX_VALUE; - ContentSummary contentSummary = new ContentSummary(length, fileCount, - directoryCount, quota, spaceConsumed, spaceQuota); + ContentSummary contentSummary = new ContentSummary.Builder().length(length). 
+ fileCount(fileCount).directoryCount(directoryCount).quota(quota). + spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build(); String expected = " 32.6 K 211.9 M 8.0 E "; assertEquals(expected, contentSummary.toString(false, true)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java index 1f2f2d4804..d5f097d386 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java @@ -315,6 +315,8 @@ public void getDescription() { // mock content system static class MockContentSummary extends ContentSummary { + @SuppressWarnings("deprecation") + // suppress warning on the usage of deprecated ContentSummary constructor public MockContentSummary() { } diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index 20b212e60e..e797d1207d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -1013,13 +1013,13 @@ public ContentSummary getContentSummary(Path f) throws IOException { HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK); JSONObject json = (JSONObject) ((JSONObject) HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON); - return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON), - (Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON), - (Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON), - (Long) json.get(CONTENT_SUMMARY_QUOTA_JSON), - (Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON), - (Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON) - ); + return new ContentSummary.Builder(). + length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON)). + fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON)). + directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON)). + quota((Long) json.get(CONTENT_SUMMARY_QUOTA_JSON)). + spaceConsumed((Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON)). + spaceQuota((Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)).build(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 51842ff41e..e16348a5cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1300,6 +1300,9 @@ Release 2.7.0 - UNRELEASED HDFS-7806. Refactor: move StorageType from hadoop-hdfs to hadoop-common. (Xiaoyu Yao via Arpit Agarwal) + HDFS-7824. GetContentSummary API and its namenode implementation for + Storage Type Quota/Usage. 
(Xiaoyu Yao via Arpit Agarwal) + Release 2.6.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index b841850646..9446b705b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -1728,21 +1728,49 @@ public static CorruptFileBlocksProto convert(CorruptFileBlocks c) { public static ContentSummary convert(ContentSummaryProto cs) { if (cs == null) return null; - return new ContentSummary( - cs.getLength(), cs.getFileCount(), cs.getDirectoryCount(), cs.getQuota(), - cs.getSpaceConsumed(), cs.getSpaceQuota()); + ContentSummary.Builder builder = new ContentSummary.Builder(); + builder.length(cs.getLength()). + fileCount(cs.getFileCount()). + directoryCount(cs.getDirectoryCount()). + quota(cs.getQuota()). + spaceConsumed(cs.getSpaceConsumed()). + spaceQuota(cs.getSpaceQuota()); + if (cs.hasTypeQuotaInfos()) { + for (HdfsProtos.StorageTypeQuotaInfoProto info : + cs.getTypeQuotaInfos().getTypeQuotaInfoList()) { + StorageType type = PBHelper.convertStorageType(info.getType()); + builder.typeConsumed(type, info.getConsumed()); + builder.typeQuota(type, info.getQuota()); + } + } + return builder.build(); } public static ContentSummaryProto convert(ContentSummary cs) { if (cs == null) return null; - return ContentSummaryProto.newBuilder(). - setLength(cs.getLength()). + ContentSummaryProto.Builder builder = ContentSummaryProto.newBuilder(); + builder.setLength(cs.getLength()). setFileCount(cs.getFileCount()). setDirectoryCount(cs.getDirectoryCount()). setQuota(cs.getQuota()). setSpaceConsumed(cs.getSpaceConsumed()). - setSpaceQuota(cs.getSpaceQuota()). - build(); + setSpaceQuota(cs.getSpaceQuota()); + + if (cs.isTypeQuotaSet() || cs.isTypeConsumedAvailable()) { + HdfsProtos.StorageTypeQuotaInfosProto.Builder isb = + HdfsProtos.StorageTypeQuotaInfosProto.newBuilder(); + for (StorageType t: StorageType.getTypesSupportingQuota()) { + HdfsProtos.StorageTypeQuotaInfoProto info = + HdfsProtos.StorageTypeQuotaInfoProto.newBuilder(). + setType(convertStorageType(t)). + setConsumed(cs.getTypeConsumed(t)). + setQuota(cs.getTypeQuota(t)). + build(); + isb.addTypeQuotaInfo(info); + } + builder.setTypeQuotaInfos(isb); + } + return builder.build(); } public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java index 15476118b9..e9baf8535b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java @@ -36,7 +36,7 @@ public interface BlockCollection { /** * Get content summary. 
   */
-  public ContentSummary computeContentSummary();
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
 
   /**
    * @return the number of blocks
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 91cfead36a..ad40782b3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -745,7 +745,7 @@ public LocatedBlock convertLastBlockToUnderConstruction(
         // always decrement total blocks
         -1);
 
-    final long fileLength = bc.computeContentSummary().getLength();
+    final long fileLength = bc.computeContentSummary(getStoragePolicySuite()).getLength();
     final long pos = fileLength - ucBlock.getNumBytes();
     return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
   }
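
The new ContentCounts class added below pairs an EnumCounters<Content> with an
EnumCounters<StorageType>. A minimal sketch of the intended pattern, using only
the API introduced in this patch (values are hypothetical):

  ContentCounts counts = new ContentCounts.Builder().
      file(10).directory(2).length(1 << 20).storagespace(3 << 20).build();
  counts.addTypeSpace(StorageType.SSD, 1 << 20);  // charge one replica to SSD
  long ssdUsed = counts.getTypeSpace(StorageType.SSD);
  long[] allTypes = counts.getTypeSpaces();       // defensive copy
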
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java
new file mode 100644
index 0000000000..16f0771f1a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.util.EnumCounters;
+
+/**
+ * The counters to be computed for content types such as file, directory
+ * and symlink, and the storage type usage such as SSD, DISK, ARCHIVE.
+ */
+public class ContentCounts {
+  private EnumCounters<Content> contents;
+  private EnumCounters<StorageType> types;
+
+  public static class Builder {
+    private EnumCounters<Content> contents;
+    // storage spaces used by corresponding storage types
+    private EnumCounters<StorageType> types;
+
+    public Builder() {
+      contents = new EnumCounters<Content>(Content.class);
+      types = new EnumCounters<StorageType>(StorageType.class);
+    }
+
+    public Builder file(long file) {
+      contents.set(Content.FILE, file);
+      return this;
+    }
+
+    public Builder directory(long directory) {
+      contents.set(Content.DIRECTORY, directory);
+      return this;
+    }
+
+    public Builder symlink(long symlink) {
+      contents.set(Content.SYMLINK, symlink);
+      return this;
+    }
+
+    public Builder length(long length) {
+      contents.set(Content.LENGTH, length);
+      return this;
+    }
+
+    public Builder storagespace(long storagespace) {
+      contents.set(Content.DISKSPACE, storagespace);
+      return this;
+    }
+
+    public Builder snapshot(long snapshot) {
+      contents.set(Content.SNAPSHOT, snapshot);
+      return this;
+    }
+
+    public Builder snapshotable_directory(long snapshotable_directory) {
+      contents.set(Content.SNAPSHOTTABLE_DIRECTORY, snapshotable_directory);
+      return this;
+    }
+
+    public ContentCounts build() {
+      return new ContentCounts(contents, types);
+    }
+  }
+
+  private ContentCounts(EnumCounters<Content> contents,
+      EnumCounters<StorageType> types) {
+    this.contents = contents;
+    this.types = types;
+  }
+
+  // Get the number of files.
+  public long getFileCount() {
+    return contents.get(Content.FILE);
+  }
+
+  // Get the number of directories.
+  public long getDirectoryCount() {
+    return contents.get(Content.DIRECTORY);
+  }
+
+  // Get the number of symlinks.
+  public long getSymlinkCount() {
+    return contents.get(Content.SYMLINK);
+  }
+
+  // Get the total of file length in bytes.
+  public long getLength() {
+    return contents.get(Content.LENGTH);
+  }
+
+  // Get the total of storage space usage in bytes including replication.
+  public long getStoragespace() {
+    return contents.get(Content.DISKSPACE);
+  }
+
+  // Get the number of snapshots
+  public long getSnapshotCount() {
+    return contents.get(Content.SNAPSHOT);
+  }
+
+  // Get the number of snapshottable directories.
+ public long getSnapshotableDirectoryCount() { + return contents.get(Content.SNAPSHOTTABLE_DIRECTORY); + } + + public long[] getTypeSpaces() { + return types.asArray(); + } + + public long getTypeSpace(StorageType t) { + return types.get(t); + } + + public void addContent(Content c, long val) { + contents.add(c, val); + } + + public void addContents(ContentCounts that) { + contents.add(that.contents); + types.add(that.types); + } + + public void addTypeSpace(StorageType t, long val) { + types.add(t, val); + } + + public void addTypeSpaces(EnumCounters that) { + this.types.add(that); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java index 63fa8c1eb8..31f34b9133 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; @@ -26,7 +27,8 @@ public class ContentSummaryComputationContext { private FSDirectory dir = null; private FSNamesystem fsn = null; - private Content.Counts counts = null; + private BlockStoragePolicySuite bsps = null; + private ContentCounts counts = null; private long nextCountLimit = 0; private long limitPerRun = 0; private long yieldCount = 0; @@ -46,12 +48,13 @@ public ContentSummaryComputationContext(FSDirectory dir, this.fsn = fsn; this.limitPerRun = limitPerRun; this.nextCountLimit = limitPerRun; - this.counts = Content.Counts.newInstance(); + this.counts = new ContentCounts.Builder().build(); } /** Constructor for blocking computation. */ - public ContentSummaryComputationContext() { + public ContentSummaryComputationContext(BlockStoragePolicySuite bsps) { this(null, null, 0); + this.bsps = bsps; } /** Return current yield count */ @@ -73,10 +76,10 @@ public boolean yield() { } // Have we reached the limit? - long currentCount = counts.get(Content.FILE) + - counts.get(Content.SYMLINK) + - counts.get(Content.DIRECTORY) + - counts.get(Content.SNAPSHOTTABLE_DIRECTORY); + long currentCount = counts.getFileCount() + + counts.getSymlinkCount() + + counts.getDirectoryCount() + + counts.getSnapshotableDirectoryCount(); if (currentCount <= nextCountLimit) { return false; } @@ -114,11 +117,15 @@ public boolean yield() { } /** Get the content counts */ - public Content.Counts getCounts() { + public ContentCounts getCounts() { return counts; } public BlockStoragePolicySuite getBlockStoragePolicySuite() { - return fsn.getBlockManager().getStoragePolicySuite(); + Preconditions.checkState((bsps != null || fsn != null), + "BlockStoragePolicySuite must be either initialized or available via" + + " FSNameSystem"); + return (bsps != null) ? 
bsps: + fsn.getBlockManager().getStoragePolicySuite(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java index 01eb22fe40..31b45ad758 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java @@ -126,12 +126,12 @@ QuotaCounts AddCurrentSpaceUsage(QuotaCounts counts) { ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir, final ContentSummaryComputationContext summary) { - final long original = summary.getCounts().get(Content.DISKSPACE); + final long original = summary.getCounts().getStoragespace(); long oldYieldCount = summary.getYieldCount(); dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID); // Check only when the content has not changed in the middle. if (oldYieldCount == summary.getYieldCount()) { - checkStoragespace(dir, summary.getCounts().get(Content.DISKSPACE) - original); + checkStoragespace(dir, summary.getCounts().getStoragespace() - original); } return summary; } @@ -277,4 +277,4 @@ public String toString() { return "Quota[" + namespaceString() + ", " + storagespaceString() + ", " + typeSpaceString() + "]"; } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 8c4e466e2d..586cce4b1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -432,9 +432,9 @@ public abstract void destroyAndCollectBlocks( BlocksMapUpdateInfo collectedBlocks, List removedINodes); /** Compute {@link ContentSummary}. Blocking call */ - public final ContentSummary computeContentSummary() { + public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) { return computeAndConvertContentSummary( - new ContentSummaryComputationContext()); + new ContentSummaryComputationContext(bsps)); } /** @@ -442,17 +442,22 @@ public final ContentSummary computeContentSummary() { */ public final ContentSummary computeAndConvertContentSummary( ContentSummaryComputationContext summary) { - Content.Counts counts = computeContentSummary(summary).getCounts(); + ContentCounts counts = computeContentSummary(summary).getCounts(); final QuotaCounts q = getQuotaCounts(); - return new ContentSummary(counts.get(Content.LENGTH), - counts.get(Content.FILE) + counts.get(Content.SYMLINK), - counts.get(Content.DIRECTORY), q.getNameSpace(), - counts.get(Content.DISKSPACE), q.getStorageSpace()); - // TODO: storage type quota reporting HDFS-7701. + return new ContentSummary.Builder(). + length(counts.getLength()). + fileCount(counts.getFileCount() + counts.getSymlinkCount()). + directoryCount(counts.getDirectoryCount()). + quota(q.getNameSpace()). + spaceConsumed(counts.getStoragespace()). + spaceQuota(q.getStorageSpace()). + typeConsumed(counts.getTypeSpaces()). + typeQuota(q.getTypeSpaces().asArray()). + build(); } /** - * Count subtree content summary with a {@link Content.Counts}. 
+ * Count subtree content summary with a {@link ContentCounts}. * * @param summary the context object holding counts for the subtree. * @return The same objects as summary. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index ebb8ae4552..dadb8c7065 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -664,7 +664,7 @@ protected ContentSummaryComputationContext computeDirectoryContentSummary( } // Increment the directory count for this directory. - summary.getCounts().add(Content.DIRECTORY, 1); + summary.getCounts().addContent(Content.DIRECTORY, 1); // Relinquish and reacquire locks if necessary. summary.yield(); return summary; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index ae554febf5..a6f07f99bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -599,22 +599,36 @@ public final QuotaCounts computeQuotaUsage( @Override public final ContentSummaryComputationContext computeContentSummary( final ContentSummaryComputationContext summary) { - final Content.Counts counts = summary.getCounts(); + final ContentCounts counts = summary.getCounts(); FileWithSnapshotFeature sf = getFileWithSnapshotFeature(); + long fileLen = 0; if (sf == null) { - counts.add(Content.LENGTH, computeFileSize()); - counts.add(Content.FILE, 1); + fileLen = computeFileSize(); + counts.addContent(Content.FILE, 1); } else { final FileDiffList diffs = sf.getDiffs(); final int n = diffs.asList().size(); - counts.add(Content.FILE, n); + counts.addContent(Content.FILE, n); if (n > 0 && sf.isCurrentFileDeleted()) { - counts.add(Content.LENGTH, diffs.getLast().getFileSize()); + fileLen = diffs.getLast().getFileSize(); } else { - counts.add(Content.LENGTH, computeFileSize()); + fileLen = computeFileSize(); + } + } + counts.addContent(Content.LENGTH, fileLen); + counts.addContent(Content.DISKSPACE, storagespaceConsumed()); + + if (getStoragePolicyID() != BlockStoragePolicySuite.ID_UNSPECIFIED){ + BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite(). 
+          getPolicy(getStoragePolicyID());
+      List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
+      for (StorageType t : storageTypes) {
+        if (!t.supportTypeQuota()) {
+          continue;
+        }
+        counts.addTypeSpace(t, fileLen);
       }
     }
-    counts.add(Content.DISKSPACE, storagespaceConsumed());
     return summary;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 911279a5fb..eee50a5291 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -23,6 +23,7 @@
 import java.util.Comparator;
 import java.util.List;
 
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -512,7 +513,8 @@ public final ContentSummaryComputationContext computeContentSummary(
       //only count storagespace for WithName
       final QuotaCounts q = new QuotaCounts.Builder().build();
       computeQuotaUsage(summary.getBlockStoragePolicySuite(), q, false, lastSnapshotId);
-      summary.getCounts().add(Content.DISKSPACE, q.getStorageSpace());
+      summary.getCounts().addContent(Content.DISKSPACE, q.getStorageSpace());
+      summary.getCounts().addTypeSpaces(q.getTypeSpaces());
       return summary;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index fe75687dff..120d0dcd76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -102,7 +102,7 @@ public QuotaCounts computeQuotaUsage(
   @Override
   public ContentSummaryComputationContext computeContentSummary(
       final ContentSummaryComputationContext summary) {
-    summary.getCounts().add(Content.SYMLINK, 1);
+    summary.getCounts().addContent(Content.SYMLINK, 1);
     return summary;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 5168f0b2b9..fa1bf94f56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -237,8 +237,8 @@ public ContentSummaryComputationContext computeContentSummary(
       final INodeDirectory snapshotRoot,
       final ContentSummaryComputationContext summary) {
     snapshotRoot.computeContentSummary(summary);
-    summary.getCounts().add(Content.SNAPSHOT, snapshotsByNames.size());
-    summary.getCounts().add(Content.SNAPSHOTTABLE_DIRECTORY, 1);
+    summary.getCounts().addContent(Content.SNAPSHOT, snapshotsByNames.size());
+    summary.getCounts().addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
     return summary;
   }
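
The DirectoryWithSnapshotFeature change below aggregates counts from deleted
subtrees through the new ContentCounts API. A hedged sketch of the merge
pattern, standalone and with hypothetical values:

  ContentCounts total = new ContentCounts.Builder().build();
  ContentCounts deleted = new ContentCounts.Builder().
      file(2).length(2048).build();          // counts from a deleted subtree
  total.addContents(deleted);                // merges content and type counters
  total.addContent(Content.DIRECTORY, 1);    // count the deleted directory itself
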
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 07ff744f5e..d55332f544 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.AclStorage;
 import org.apache.hadoop.hdfs.server.namenode.Content;
+import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -650,19 +651,19 @@ public QuotaCounts computeQuotaUsage4CurrentDirectory(
   }
 
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final Content.Counts counts) {
+      final ContentCounts counts) {
     // Create a new blank summary context for blocking processing of subtree.
     ContentSummaryComputationContext summary =
-        new ContentSummaryComputationContext();
+        new ContentSummaryComputationContext(bsps);
     for(DirectoryDiff d : diffs) {
       for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
         deleted.computeContentSummary(summary);
       }
     }
     // Add the counts from deleted trees.
-    counts.add(summary.getCounts());
+    counts.addContents(summary.getCounts());
     // Add the deleted directory count.
-    counts.add(Content.DIRECTORY, diffs.asList().size());
+    counts.addContent(Content.DIRECTORY, diffs.asList().size());
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
index 18f4bd6969..86ba341e7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
@@ -21,6 +21,7 @@
 import java.util.HashMap;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.lang.ArrayUtils;
 
 /**
  * Counters for an enum type.
@@ -64,6 +65,11 @@ public final long get(final E e) {
     return counters[e.ordinal()];
   }
 
+  /** @return the values of the counters as a shallow copy of the array. */
+  public long[] asArray() {
+    return ArrayUtils.clone(counters);
+  }
+
   /** Negate all counters.
*/ public final void negation() { for(int i = 0; i < counters.length; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index ae9612f3ec..d53bc318cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -553,8 +553,9 @@ public static ContentSummary toContentSummary(final Map json) { final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue(); final long spaceQuota = ((Number) m.get("spaceQuota")).longValue(); - return new ContentSummary(length, fileCount, directoryCount, - quota, spaceConsumed, spaceQuota); + return new ContentSummary.Builder().length(length).fileCount(fileCount). + directoryCount(directoryCount).quota(quota).spaceConsumed(spaceConsumed). + spaceQuota(spaceQuota).build(); } /** Convert a MD5MD5CRC32FileChecksum to a Json string. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 2966e51721..7d94f04845 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -134,6 +134,20 @@ message ContentSummaryProto { required uint64 quota = 4; required uint64 spaceConsumed = 5; required uint64 spaceQuota = 6; + optional StorageTypeQuotaInfosProto typeQuotaInfos = 7; +} + +/** + * Storage type quota and usage information of a file or directory + */ +message StorageTypeQuotaInfosProto { + repeated StorageTypeQuotaInfoProto typeQuotaInfo = 1; +} + +message StorageTypeQuotaInfoProto { + required StorageTypeProto type = 1; + required uint64 quota = 2; + required uint64 consumed = 3; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 485cb9b4c5..32fae45a09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -1221,7 +1221,7 @@ public void testAddStoredBlockDoesNotCauseSkippedReplication() when(mbc.isUnderConstruction()).thenReturn(true); ContentSummary cs = mock(ContentSummary.class); when(cs.getLength()).thenReturn((long)1); - when(mbc.computeContentSummary()).thenReturn(cs); + when(mbc.computeContentSummary(bm.getStoragePolicySuite())).thenReturn(cs); info.setBlockCollection(mbc); bm.addBlockCollection(info, mbc); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java index aee756f074..6d3893791d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java @@ -24,6 +24,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; + import org.apache.hadoop.fs.ContentSummary; 
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -156,6 +157,11 @@ public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception { ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature() .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD); assertEquals(file1Len, ssdConsumed); + + ContentSummary cs = dfs.getContentSummary(foo); + assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION); + assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len); + assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2); } @Test(timeout = 60000) @@ -192,6 +198,11 @@ public void testQuotaByStorageTypeWithFileCreateDelete() throws Exception { fnode.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts, true); assertEquals(fnode.dumpTreeRecursively().toString(), 0, counts.getTypeSpaces().get(StorageType.SSD)); + + ContentSummary cs = dfs.getContentSummary(foo); + assertEquals(cs.getSpaceConsumed(), 0); + assertEquals(cs.getTypeConsumed(StorageType.SSD), 0); + assertEquals(cs.getTypeConsumed(StorageType.DISK), 0); } @Test(timeout = 60000) @@ -233,6 +244,11 @@ public void testQuotaByStorageTypeWithFileCreateRename() throws Exception { } catch (Throwable t) { LOG.info("Got expected exception ", t); } + + ContentSummary cs = dfs.getContentSummary(foo); + assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION); + assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len); + assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2); } /** @@ -554,6 +570,11 @@ public void testQuotaByStorageTypeWithSnapshot() throws Exception { assertEquals(sub1Node.dumpTreeRecursively().toString(), file1Len, counts1.getTypeSpaces().get(StorageType.SSD)); + ContentSummary cs1 = dfs.getContentSummary(sub1); + assertEquals(cs1.getSpaceConsumed(), file1Len * REPLICATION); + assertEquals(cs1.getTypeConsumed(StorageType.SSD), file1Len); + assertEquals(cs1.getTypeConsumed(StorageType.DISK), file1Len * 2); + // Delete the snapshot s1 dfs.deleteSnapshot(sub1, "s1"); @@ -566,6 +587,11 @@ public void testQuotaByStorageTypeWithSnapshot() throws Exception { sub1Node.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts2, true); assertEquals(sub1Node.dumpTreeRecursively().toString(), 0, counts2.getTypeSpaces().get(StorageType.SSD)); + + ContentSummary cs2 = dfs.getContentSummary(sub1); + assertEquals(cs2.getSpaceConsumed(), 0); + assertEquals(cs2.getTypeConsumed(StorageType.SSD), 0); + assertEquals(cs2.getTypeConsumed(StorageType.DISK), 0); } @Test(timeout = 60000) @@ -601,6 +627,11 @@ public void testQuotaByStorageTypeWithFileCreateTruncate() throws Exception { ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature() .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD); assertEquals(newFile1Len, ssdConsumed); + + ContentSummary cs = dfs.getContentSummary(foo); + assertEquals(cs.getSpaceConsumed(), newFile1Len * REPLICATION); + assertEquals(cs.getTypeConsumed(StorageType.SSD), newFile1Len); + assertEquals(cs.getTypeConsumed(StorageType.DISK), newFile1Len * 2); } @Test @@ -701,6 +732,55 @@ public void testQuotaByStorageTypePersistenceInFsImage() throws IOException { .getDirectoryWithQuotaFeature() .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD); assertEquals(file1Len, ssdConsumedAfterNNRestart); + } + @Test(timeout = 60000) + public void testContentSummaryWithoutQuotaByStorageType() throws Exception { + final Path foo = new Path(dir, "foo"); + Path createdFile1 = new 
Path(foo, "created_file1.data"); + dfs.mkdirs(foo); + + // set storage policy on directory "foo" to ONESSD + dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME); + + INode fnode = fsdir.getINode4Write(foo.toString()); + assertTrue(fnode.isDirectory()); + assertTrue(!fnode.isQuotaSet()); + + // Create file of size 2 * BLOCKSIZE under directory "foo" + long file1Len = BLOCKSIZE * 2; + int bufLen = BLOCKSIZE / 16; + DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed); + + // Verify getContentSummary without any quota set + ContentSummary cs = dfs.getContentSummary(foo); + assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION); + assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len); + assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2); + } + + @Test(timeout = 60000) + public void testContentSummaryWithoutStoragePolicy() throws Exception { + final Path foo = new Path(dir, "foo"); + Path createdFile1 = new Path(foo, "created_file1.data"); + dfs.mkdirs(foo); + + INode fnode = fsdir.getINode4Write(foo.toString()); + assertTrue(fnode.isDirectory()); + assertTrue(!fnode.isQuotaSet()); + + // Create file of size 2 * BLOCKSIZE under directory "foo" + long file1Len = BLOCKSIZE * 2; + int bufLen = BLOCKSIZE / 16; + DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed); + + // Verify getContentSummary without any quota set + // Expect no type quota and usage information available + ContentSummary cs = dfs.getContentSummary(foo); + assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION); + for (StorageType t : StorageType.values()) { + assertEquals(cs.getTypeConsumed(t), 0); + assertEquals(cs.getTypeQuota(t), -1); + } } } \ No newline at end of file