Revert "HDFS-15624. fix the function of setting quota by storage type (#2377)"
This reverts commit 394b9f7a5c.
Ref: HDFS-15995.
Had to revert this commit so that we can commit HDFS-15566, which fixes a critical bug that prevents rolling upgrade to Hadoop 3.3.
Will re-work this fix later.
parent 88a550bc3a
commit 90c6caf650
@@ -34,12 +34,13 @@
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public enum StorageType {
+  // sorted by the speed of the storage types, from fast to slow
   RAM_DISK(true, true),
+  NVDIMM(false, true),
   SSD(false, false),
   DISK(false, false),
   ARCHIVE(false, false),
-  PROVIDED(false, false),
-  NVDIMM(false, true);
+  PROVIDED(false, false);
 
   private final boolean isTransient;
   private final boolean isRAM;
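Per-storage-type quota bookkeeping in the tests further down is carried in long[] arrays whose slots follow the declaration order of StorageType, so moving NVDIMM from the tail of the enum to the slot right after RAM_DISK changes every ordinal behind it. A minimal sketch of that effect, assuming only that hadoop-common is on the classpath (the array and the DISK quota value are illustrative, not HDFS internals):

import org.apache.hadoop.fs.StorageType;

// Minimal sketch (not HDFS internals): shows why per-type arrays that are
// indexed by StorageType.ordinal() shift meaning when the enum is reordered,
// which is exactly what the test expectations below account for.
public class StorageTypeOrdinalSketch {
  public static void main(String[] args) {
    long[] typeQuota = new long[StorageType.values().length];
    typeQuota[StorageType.DISK.ordinal()] = 8192L; // hypothetical DISK quota
    for (StorageType t : StorageType.values()) {
      // With NVDIMM declared right after RAM_DISK, DISK lands at ordinal 3;
      // with NVDIMM declared last, DISK would land at ordinal 2.
      System.out.println(t.ordinal() + " " + t + " quota=" + typeQuota[t.ordinal()]);
    }
  }
}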
@@ -121,4 +122,4 @@ public static String getConf(Configuration conf,
       StorageType t, String name) {
     return conf.get(CONF_KEY_HEADER + t.toString() + "." + name);
   }
 }
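The context lines above also show the public StorageType.getConf(Configuration, StorageType, String) helper, which builds a per-storage-type key from CONF_KEY_HEADER, the type name, and a caller-supplied suffix. A usage sketch, where the "reserved" suffix is purely hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;

// Usage sketch for the getConf helper shown above. "reserved" is a
// hypothetical suffix picked for illustration; real keys depend on
// CONF_KEY_HEADER and on what callers pass as the name argument.
public class StorageTypeConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    String v = StorageType.getConf(conf, StorageType.SSD, "reserved");
    System.out.println(v); // null unless such a key is set in the config
  }
}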
@@ -283,10 +283,10 @@ public void processPathWithQuotasByStorageTypesHeader() throws Exception {
     count.processOptions(options);
     String withStorageTypeHeader =
         // <----13---> <-------17------> <----13-----> <------17------->
+        " NVDIMM_QUOTA REM_NVDIMM_QUOTA " +
         " SSD_QUOTA REM_SSD_QUOTA DISK_QUOTA REM_DISK_QUOTA " +
         // <----13---> <-------17------>
         "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA PROVIDED_QUOTA REM_PROVIDED_QUOTA " +
-        " NVDIMM_QUOTA REM_NVDIMM_QUOTA " +
         "PATHNAME";
     verify(out).println(withStorageTypeHeader);
     verifyNoMoreInteractions(out);
@@ -338,11 +338,11 @@ public void processPathWithQuotasByQTVH() throws Exception {
     count.processOptions(options);
     String withStorageTypeHeader =
         // <----13---> <-------17------>
+        " NVDIMM_QUOTA REM_NVDIMM_QUOTA " +
         " SSD_QUOTA REM_SSD_QUOTA " +
         " DISK_QUOTA REM_DISK_QUOTA " +
         "ARCHIVE_QUOTA REM_ARCHIVE_QUOTA " +
         "PROVIDED_QUOTA REM_PROVIDED_QUOTA " +
-        " NVDIMM_QUOTA REM_NVDIMM_QUOTA " +
         "PATHNAME";
     verify(out).println(withStorageTypeHeader);
     verifyNoMoreInteractions(out);
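Both header hunks move the NVDIMM_QUOTA / REM_NVDIMM_QUOTA columns from the end of the expected -count header to a position before the SSD columns, mirroring the new enum order; the "<----13---> <-------17------>" comments record the column widths. An illustration only (not the actual shell Count formatter) of how cells with those widths line up:

// Illustration only: the 13/17 widths noted in the test comments correspond
// to right-aligned quota and remaining-quota columns like these.
public class HeaderWidthSketch {
  public static void main(String[] args) {
    System.out.println(String.format("%13s %17s ", "NVDIMM_QUOTA", "REM_NVDIMM_QUOTA"));
    // prints " NVDIMM_QUOTA  REM_NVDIMM_QUOTA " -> 13- and 17-wide cells
  }
}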
@@ -414,13 +414,13 @@ public void testStorageTypeQuota() throws Exception {
     QuotaUsage usage = client.getQuotaUsage("/type0");
     assertEquals(HdfsConstants.QUOTA_RESET, usage.getQuota());
     assertEquals(HdfsConstants.QUOTA_RESET, usage.getSpaceQuota());
-    verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota * 2, -1, -1, -1},
-        null, usage);
+    verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota * 2, -1, -1}, null,
+        usage);
     // Verify /type1 quota on NN1.
     usage = client.getQuotaUsage("/type1");
     assertEquals(HdfsConstants.QUOTA_RESET, usage.getQuota());
     assertEquals(HdfsConstants.QUOTA_RESET, usage.getSpaceQuota());
-    verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota, -1, -1, -1}, null,
+    verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota, -1, -1}, null,
         usage);
 
     FileSystem routerFs = routerContext.getFileSystem();
@@ -431,15 +431,15 @@ public void testStorageTypeQuota() throws Exception {
     assertEquals(2, u1.getFileAndDirectoryCount());
     assertEquals(HdfsConstants.QUOTA_RESET, u1.getSpaceQuota());
     assertEquals(fileSize * 3, u1.getSpaceConsumed());
-    verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota, -1, -1, -1},
-        new long[] {0, 0, fileSize * 3, 0, 0, 0}, u1);
+    verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota, -1, -1},
+        new long[] {0, 0, 0, fileSize * 3, 0, 0}, u1);
     // Verify /type0 storage type quota usage on Router.
     assertEquals(HdfsConstants.QUOTA_RESET, u0.getQuota());
     assertEquals(4, u0.getFileAndDirectoryCount());
     assertEquals(HdfsConstants.QUOTA_RESET, u0.getSpaceQuota());
     assertEquals(fileSize * 3 * 2, u0.getSpaceConsumed());
-    verifyTypeQuotaAndConsume(new long[] {-1, -1, ssQuota * 2, -1, -1, -1},
-        new long[] {0, 0, fileSize * 3 * 2, 0, 0, 0}, u0);
+    verifyTypeQuotaAndConsume(new long[] {-1, -1, -1, ssQuota * 2, -1, -1},
+        new long[] {0, 0, 0, fileSize * 3 * 2, 0, 0}, u0);
   }
 
   @Test
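The long[] arguments to verifyTypeQuotaAndConsume are positional, one slot per StorageType in declaration order, which is why the ssQuota and consumed-bytes entries shift from index 2 to index 3 once NVDIMM occupies ordinal 1. Code that wants to stay independent of enum ordering can read per-type values by type instead; a sketch assuming the standard QuotaUsage per-type getters and a running HDFS client:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: the per-type getters avoid any dependence on StorageType ordinals,
// unlike the positional long[] arrays used by the test helper above.
public class TypeQuotaReadSketch {
  static void printDiskQuota(DistributedFileSystem dfs, Path dir) throws Exception {
    QuotaUsage usage = dfs.getQuotaUsage(dir);
    System.out.println("DISK quota    = " + usage.getTypeQuota(StorageType.DISK));
    System.out.println("DISK consumed = " + usage.getTypeConsumed(StorageType.DISK));
  }
}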
@@ -2429,10 +2429,6 @@ private void checkStoragePolicyEnabled(final String operationNameReadable,
    * @throws IOException
    */
   void setStoragePolicy(String src, String policyName) throws IOException {
-    if (policyName.equalsIgnoreCase(
-        HdfsConstants.ALLNVDIMM_STORAGE_POLICY_NAME)) {
-      requireEffectiveLayoutVersionForFeature(Feature.NVDIMM_SUPPORT);
-    }
     final String operationName = "setStoragePolicy";
     checkOperation(OperationCategory.WRITE);
     checkStoragePolicyEnabled("set storage policy", true);
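The removed block had gated the ALL_NVDIMM storage policy behind the NVDIMM_SUPPORT layout feature; the policy itself is untouched by the revert and is still applied through the ordinary client API. A client-side sketch (the FileSystem instance and directory are placeholders):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

// Sketch: the ALL_NVDIMM policy named in the removed check is applied from a
// client through FileSystem#setStoragePolicy; only the NameNode-side
// layout-version gate goes away in this revert.
public class NvdimmPolicySketch {
  static void applyNvdimmPolicy(FileSystem fs, Path dir) throws IOException {
    fs.setStoragePolicy(dir, HdfsConstants.ALLNVDIMM_STORAGE_POLICY_NAME);
  }
}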
@@ -3575,9 +3571,6 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
     if (type != null) {
       requireEffectiveLayoutVersionForFeature(Feature.QUOTA_BY_STORAGE_TYPE);
     }
-    if (type == StorageType.NVDIMM) {
-      requireEffectiveLayoutVersionForFeature(Feature.NVDIMM_SUPPORT);
-    }
     checkOperation(OperationCategory.WRITE);
     final String operationName = getQuotaCommand(nsQuota, ssQuota);
     final FSPermissionChecker pc = getPermissionChecker();
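The surviving check still gates quota-by-storage-type operations behind the QUOTA_BY_STORAGE_TYPE layout feature; only the extra NVDIMM-specific gate is dropped. Clients reach this setQuota path through setQuotaByStorageType (the dfsadmin equivalent is roughly hdfs dfsadmin -setSpaceQuota <size> -storageType <type> <dir>). A sketch with an arbitrary 10 GB SSD limit:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Client-side sketch of the call that reaches the setQuota path above with a
// non-null storage type; the quota size is an arbitrary example.
public class SetTypeQuotaSketch {
  static void limitSsdUsage(DistributedFileSystem dfs, Path dir) throws IOException {
    dfs.setQuotaByStorageType(dir, StorageType.SSD, 10L * 1024 * 1024 * 1024); // 10 GB
  }
}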
@@ -89,8 +89,7 @@ public enum Feature implements LayoutFeature {
     APPEND_NEW_BLOCK(-62, -61, "Support appending to new block"),
     QUOTA_BY_STORAGE_TYPE(-63, -61, "Support quota for specific storage types"),
     ERASURE_CODING(-64, -61, "Support erasure coding"),
-    EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage"),
-    NVDIMM_SUPPORT(-66, -61, "Support NVDIMM storage type");
+    EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage");
 
     private final FeatureInfo info;
 
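Each Feature constant pairs its own layout version with a minimum compatible layout version (-61 throughout this block), which is what bounds how far back a downgrade or rolling upgrade can go; dropping NVDIMM_SUPPORT leaves EXPANDED_STRING_TABLE(-65, ...) as the final entry. A toy mirror of the constructor pattern, not the real NameNodeLayoutVersion class:

// Toy mirror of the pattern above: each feature carries
// (layoutVersion, minimumCompatibleLayoutVersion, description).
enum FeatureSketch {
  ERASURE_CODING(-64, -61, "Support erasure coding"),
  EXPANDED_STRING_TABLE(-65, -61, "Support expanded string table in fsimage");

  final int lv;
  final int minCompatLv;
  final String description;

  FeatureSketch(int lv, int minCompatLv, String description) {
    this.lv = lv;
    this.minCompatLv = minCompatLv;
    this.description = description;
  }
}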
@@ -1421,29 +1421,29 @@ public void testStorageType() {
     final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
 
     //put storage type is reversed order
-    map.put(StorageType.NVDIMM, 1);
     map.put(StorageType.ARCHIVE, 1);
     map.put(StorageType.DISK, 1);
     map.put(StorageType.SSD, 1);
     map.put(StorageType.RAM_DISK, 1);
+    map.put(StorageType.NVDIMM, 1);
 
     {
       final Iterator<StorageType> i = map.keySet().iterator();
       Assert.assertEquals(StorageType.RAM_DISK, i.next());
+      Assert.assertEquals(StorageType.NVDIMM, i.next());
       Assert.assertEquals(StorageType.SSD, i.next());
       Assert.assertEquals(StorageType.DISK, i.next());
       Assert.assertEquals(StorageType.ARCHIVE, i.next());
-      Assert.assertEquals(StorageType.NVDIMM, i.next());
     }
 
     {
       final Iterator<Map.Entry<StorageType, Integer>> i
           = map.entrySet().iterator();
       Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.NVDIMM, i.next().getKey());
       Assert.assertEquals(StorageType.SSD, i.next().getKey());
       Assert.assertEquals(StorageType.DISK, i.next().getKey());
       Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
-      Assert.assertEquals(StorageType.NVDIMM, i.next().getKey());
     }
   }
 
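The test relies on EnumMap iterating its keys in enum declaration order regardless of insertion order, so after this change NVDIMM is expected immediately after RAM_DISK. A standalone sketch of that property:

import java.util.EnumMap;
import org.apache.hadoop.fs.StorageType;

// Standalone sketch: EnumMap iteration follows enum declaration order, not
// insertion order - the property the test above asserts for StorageType.
public class EnumMapOrderSketch {
  public static void main(String[] args) {
    EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
    map.put(StorageType.ARCHIVE, 1);
    map.put(StorageType.RAM_DISK, 1);
    map.put(StorageType.NVDIMM, 1);
    System.out.println(map.keySet()); // [RAM_DISK, NVDIMM, ARCHIVE]
  }
}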
@@ -128,8 +128,7 @@ public void testNameNodeFeatureMinimumCompatibleLayoutVersions() {
         NameNodeLayoutVersion.Feature.APPEND_NEW_BLOCK,
         NameNodeLayoutVersion.Feature.QUOTA_BY_STORAGE_TYPE,
         NameNodeLayoutVersion.Feature.ERASURE_CODING,
-        NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE,
-        NameNodeLayoutVersion.Feature.NVDIMM_SUPPORT);
+        NameNodeLayoutVersion.Feature.EXPANDED_STRING_TABLE);
     for (LayoutFeature f : compatibleFeatures) {
       assertEquals(String.format("Expected minimum compatible layout version " +
           "%d for feature %s.", baseLV, f), baseLV,