diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
index 2b5e4f1c5c..b17864a222 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
@@ -34,12 +34,13 @@
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public enum StorageType {
+  // sorted by the speed of the storage types, from fast to slow
   RAM_DISK(true, true),
+  NVDIMM(false, true),
   SSD(false, false),
   DISK(false, false),
   ARCHIVE(false, false),
-  PROVIDED(false, false),
-  NVDIMM(false, true);
+  PROVIDED(false, false);
 
   private final boolean isTransient;
   private final boolean isRAM;
@@ -121,4 +122,4 @@ public static String getConf(Configuration conf,
       StorageType t, String name) {
     return conf.get(CONF_KEY_HEADER + t.toString() + "." + name);
   }
-}
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
index 19516b8c40..618cb0190a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
@@ -283,10 +283,10 @@ public void processPathWithQuotasByStorageTypesHeader() throws Exception {
     count.processOptions(options);
     String withStorageTypeHeader =
         // <----13---> <-------17------> <----13-----> <------17------->
+        " NVDIMM_QUOTA REM_NVDIMM_QUOTA " +
         " SSD_QUOTA REM_SSD_QUOTA DISK_QUOTA REM_DISK_QUOTA " +
         // <----13---> <-------17------>
         "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA PROVIDED_QUOTA REM_PROVIDED_QUOTA " +
-        " NVDIMM_QUOTA REM_NVDIMM_QUOTA " +
         "PATHNAME";
     verify(out).println(withStorageTypeHeader);
     verifyNoMoreInteractions(out);
@@ -338,11 +338,11 @@ public void processPathWithQuotasByQTVH() throws Exception {
     count.processOptions(options);
     String withStorageTypeHeader =
         // <----13---> <-------17------>
+        " NVDIMM_QUOTA REM_NVDIMM_QUOTA " +
         " SSD_QUOTA REM_SSD_QUOTA " +
         " DISK_QUOTA REM_DISK_QUOTA " +
         "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
         "PROVIDED_QUOTA REM_PROVIDED_QUOTA " +
-        " NVDIMM_QUOTA REM_NVDIMM_QUOTA " +
         "PATHNAME";
     verify(out).println(withStorageTypeHeader);
     verifyNoMoreInteractions(out);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
index b69004198e..551ae8a8e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
@@ -414,13 +414,13 @@ public void testStorageTypeQuota() throws Exception {
     QuotaUsage usage = client.getQuotaUsage("/type0");
     assertEquals(HdfsConstants.QUOTA_RESET, usage.getQuota());
     assertEquals(HdfsConstants.QUOTA_RESET, usage.getSpaceQuota());
-    verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota * 2, -1, -1, -1},
-        null, usage);
+    verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota * 2, -1, -1}, null,
+        usage);
     // Verify /type1 quota on NN1.
     usage = client.getQuotaUsage("/type1");
     assertEquals(HdfsConstants.QUOTA_RESET, usage.getQuota());
     assertEquals(HdfsConstants.QUOTA_RESET, usage.getSpaceQuota());
-    verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota, -1, -1, -1}, null,
+    verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota, -1, -1}, null,
         usage);
 
     FileSystem routerFs = routerContext.getFileSystem();
@@ -431,15 +431,15 @@ public void testStorageTypeQuota() throws Exception {
     assertEquals(2, u1.getFileAndDirectoryCount());
     assertEquals(HdfsConstants.QUOTA_RESET, u1.getSpaceQuota());
     assertEquals(fileSize * 3, u1.getSpaceConsumed());
-    verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota, -1, -1, -1},
-        new long[] {0, 0, fileSize * 3, 0, 0, 0}, u1);
+    verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota, -1, -1},
+        new long[] {0, 0, 0, fileSize * 3, 0, 0}, u1);
     // Verify /type0 storage type quota usage on Router.
     assertEquals(HdfsConstants.QUOTA_RESET, u0.getQuota());
     assertEquals(4, u0.getFileAndDirectoryCount());
     assertEquals(HdfsConstants.QUOTA_RESET, u0.getSpaceQuota());
     assertEquals(fileSize * 3 * 2, u0.getSpaceConsumed());
-    verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota * 2, -1, -1, -1},
-        new long[] {0, 0, fileSize * 3 * 2, 0, 0, 0}, u0);
+    verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota * 2, -1, -1},
+        new long[] {0, 0, 0, fileSize * 3 * 2, 0, 0}, u0);
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index e559515696..926c4e6482 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2429,10 +2429,6 @@ private void checkStoragePolicyEnabled(final String operationNameReadable,
    * @throws IOException
    */
   void setStoragePolicy(String src, String policyName) throws IOException {
-    if (policyName.equalsIgnoreCase(
-        HdfsConstants.ALLNVDIMM_STORAGE_POLICY_NAME)) {
-      requireEffectiveLayoutVersionForFeature(Feature.NVDIMM_SUPPORT);
-    }
     final String operationName = "setStoragePolicy";
     checkOperation(OperationCategory.WRITE);
     checkStoragePolicyEnabled("set storage policy", true);
@@ -3575,9 +3571,6 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
     if (type != null) {
       requireEffectiveLayoutVersionForFeature(Feature.QUOTA_BY_STORAGE_TYPE);
     }
-    if (type == StorageType.NVDIMM) {
-      requireEffectiveLayoutVersionForFeature(Feature.NVDIMM_SUPPORT);
-    }
     checkOperation(OperationCategory.WRITE);
     final String operationName = getQuotaCommand(nsQuota, ssQuota);
     final FSPermissionChecker pc = getPermissionChecker();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
index b2477466be..297ca74c5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
@@ -89,8 +89,7 @@ public enum Feature implements LayoutFeature {
     APPEND_NEW_BLOCK(-62, -61, "Support appending to new block"),
     QUOTA_BY_STORAGE_TYPE(-63, -61, "Support quota for specific storage types"),
     ERASURE_CODING(-64, -61,
         "Support erasure coding"),
-    EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage"),
-    NVDIMM_SUPPORT(-66, -61, "Support NVDIMM storage type");
+    EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage");
 
     private final FeatureInfo info;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index ef116957fb..efb4b4df35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -1421,29 +1421,29 @@ public void testStorageType() {
     final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
 
     //put storage type is reversed order
-    map.put(StorageType.NVDIMM, 1);
     map.put(StorageType.ARCHIVE, 1);
     map.put(StorageType.DISK, 1);
     map.put(StorageType.SSD, 1);
     map.put(StorageType.RAM_DISK, 1);
+    map.put(StorageType.NVDIMM, 1);
 
     {
       final Iterator<StorageType> i = map.keySet().iterator();
       Assert.assertEquals(StorageType.RAM_DISK, i.next());
+      Assert.assertEquals(StorageType.NVDIMM, i.next());
       Assert.assertEquals(StorageType.SSD, i.next());
       Assert.assertEquals(StorageType.DISK, i.next());
       Assert.assertEquals(StorageType.ARCHIVE, i.next());
-      Assert.assertEquals(StorageType.NVDIMM, i.next());
     }
 
     {
       final Iterator<Map.Entry<StorageType, Integer>> i
           = map.entrySet().iterator();
       Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.NVDIMM, i.next().getKey());
       Assert.assertEquals(StorageType.SSD, i.next().getKey());
       Assert.assertEquals(StorageType.DISK, i.next().getKey());
       Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
-      Assert.assertEquals(StorageType.NVDIMM, i.next().getKey());
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
index 316480961a..2c9905d6fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
@@ -128,8 +128,7 @@ public void testNameNodeFeatureMinimumCompatibleLayoutVersions() {
         NameNodeLayoutVersion.Feature.APPEND_NEW_BLOCK,
         NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE,
         NameNodeLayoutVersion.Feature.ERASURE_CODING,
-        NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE,
-        NameNodeLayoutVersion.Feature.NVDIMM_SUPPORT);
+        NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE);
     for (LayoutFeature f : compatibleFeatures) {
       assertEquals(String.format("Expected minimum compatible layout version " +
           "%d for feature %s.", baseLV, f), baseLV,