From 728ed10a7c9d224727580f9ed4f754af5ab9f8f1 Mon Sep 17 00:00:00 2001
From: daimin
Date: Mon, 24 Jan 2022 14:34:26 +0800
Subject: [PATCH] HDFS-16430. Add validation to maximum blocks in EC group
 when adding an EC policy (#3899). Contributed by daimin.

Reviewed-by: tomscut
Signed-off-by: Ayush Saxena

(cherry picked from commit 5ef335da1ed49e06cc8973412952e09ed08bb9c0)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
---
 .../hdfs/server/namenode/ErasureCodingPolicyManager.java | 7 +++++++
 .../apache/hadoop/hdfs/TestErasureCodingPolicies.java    | 9 +++++++++
 2 files changed, 16 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index d5fdba8463..a653ff459c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -304,6 +305,12 @@ public synchronized ErasureCodingPolicy addPolicy(
           + policy.getCodecName() + " is not supported");
     }
 
+    int blocksInGroup = policy.getNumDataUnits() + policy.getNumParityUnits();
+    if (blocksInGroup > HdfsServerConstants.MAX_BLOCKS_IN_GROUP) {
+      throw new HadoopIllegalArgumentException("Number of data and parity blocks in an EC group "
+          + blocksInGroup + " should not exceed maximum " + HdfsServerConstants.MAX_BLOCKS_IN_GROUP);
+    }
+
     if (policy.getCellSize() > maxCellSize) {
       throw new HadoopIllegalArgumentException("Cell size " +
           policy.getCellSize() + " should not exceed maximum " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 835d18f3a0..1fd909c949 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -747,6 +747,15 @@ public void testAddErasureCodingPolicies() throws Exception {
     assertEquals(1, responses.length);
     assertFalse(responses[0].isSucceed());
 
+    // Test numDataUnits + numParityUnits > 16
+    toAddSchema = new ECSchema("rs", 14, 4);
+    newPolicy =
+        new ErasureCodingPolicy(toAddSchema, 128 * 1024 * 1024);
+    policyArray = new ErasureCodingPolicy[]{newPolicy};
+    responses = fs.addErasureCodingPolicies(policyArray);
+    assertEquals(1, responses.length);
+    assertFalse(responses[0].isSucceed());
+
     // Test too big cell size
     toAddSchema = new ECSchema("rs", 3, 2);
     newPolicy =
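
A minimal standalone sketch (not part of the patch) of the behavior the new check enforces: adding an RS(14,4) policy, whose 18 total blocks exceed HdfsServerConstants.MAX_BLOCKS_IN_GROUP (16), should now be rejected at add time. The demo class name and the zero-datanode MiniDFSCluster setup are illustrative assumptions; the client-side calls (ECSchema, ErasureCodingPolicy, addErasureCodingPolicies) are the same ones the test above uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;

// Hypothetical demo class, not part of HDFS-16430.
public class MaxBlocksInGroupDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // No datanodes needed: the policy is validated in the NameNode,
    // before any data is written with it.
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();

      // RS(14,4): 14 data + 4 parity = 18 blocks per group, which
      // exceeds HdfsServerConstants.MAX_BLOCKS_IN_GROUP (16).
      ECSchema schema = new ECSchema("rs", 14, 4);
      ErasureCodingPolicy policy =
          new ErasureCodingPolicy(schema, 128 * 1024 * 1024);

      AddErasureCodingPolicyResponse[] responses =
          fs.addErasureCodingPolicies(new ErasureCodingPolicy[]{policy});

      // With this patch applied, the response reports failure instead
      // of silently registering an unusable policy.
      System.out.println("succeeded: " + responses[0].isSucceed());
      System.out.println("error:     " + responses[0].getErrorMsg());
    } finally {
      cluster.shutdown();
    }
  }
}
```

Before this change, such a policy could be added successfully and would only fail later, once files actually tried to use it, since a striped block group cannot hold more than 16 blocks.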