diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java index 251193a958..62f197246f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java @@ -167,8 +167,6 @@ private static synchronized MiniDFSCluster startMiniHdfs(Configuration conf) thr new Path(helper.getTestRootDir(), "test.jks").toUri(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, jceksPath); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ERASURE_CODING_POLICY.getName()); MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf); int totalDataNodes = ERASURE_CODING_POLICY.getNumDataUnits() + ERASURE_CODING_POLICY.getNumParityUnits(); @@ -178,6 +176,7 @@ private static synchronized MiniDFSCluster startMiniHdfs(Configuration conf) thr DFSTestUtil.createKey(testkey, miniHdfs, conf); DistributedFileSystem fileSystem = miniHdfs.getFileSystem(); + fileSystem.enableErasureCodingPolicy(ERASURE_CODING_POLICY.getName()); fileSystem.getClient().setKeyProvider(miniHdfs.getNameNode() .getNamesystem().getProvider()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 322cae4eca..d6fcd5bfbc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -563,8 +563,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT = "10m"; - public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_KEY = 
"dfs.namenode.ec.policies.enabled"; - public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = ""; public static final String DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY = "dfs.namenode.ec.policies.max.cellsize"; public static final int DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT = 4 * 1024 * 1024; public static final String DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java index 4c75709730..77deb856c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.ArrayUtils; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -101,15 +100,10 @@ public static ErasureCodingPolicyManager getInstance() { private ErasureCodingPolicyManager() {} public void init(Configuration conf) { - // Populate the list of enabled policies from configuration - final String[] enablePolicyNames = conf.getTrimmedStrings( - DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT); + // Load erasure coding default policy final String defaultPolicyName = conf.getTrimmed( DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY, DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT); - final String[] policyNames = - (String[]) ArrayUtils.add(enablePolicyNames, defaultPolicyName); this.policiesByName = new TreeMap<>(); this.policiesByID = new 
TreeMap<>(); this.enabledPoliciesByName = new TreeMap<>(); @@ -129,11 +123,8 @@ public void init(Configuration conf) { policiesByID.put(policy.getId(), policy); } - for (String policyName : policyNames) { - if (policyName.trim().isEmpty()) { - continue; - } - ErasureCodingPolicy ecPolicy = policiesByName.get(policyName); + if (!defaultPolicyName.trim().isEmpty()) { + ErasureCodingPolicy ecPolicy = policiesByName.get(defaultPolicyName); if (ecPolicy == null) { String names = policiesByName.values() .stream().map(ErasureCodingPolicy::getName) @@ -141,8 +132,8 @@ public void init(Configuration conf) { String msg = String.format("EC policy '%s' specified at %s is not a " + "valid policy. Please choose from list of available " + "policies: [%s]", - policyName, - DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + defaultPolicyName, + DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY, names); throw new HadoopIllegalArgumentException(msg); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index deb03afd85..381a871f60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -24,7 +24,6 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; @@ -80,11 +79,10 @@ static ErasureCodingPolicy getErasureCodingPolicyByName( .collect(Collectors.joining(", ")); final String message = String.format("Policy '%s' does not match any 
" + "enabled erasure" + - " coding policies: [%s]. The set of enabled erasure coding " + - "policies can be configured at '%s'.", + " coding policies: [%s]. An erasure coding policy can be" + + " enabled by enableErasureCodingPolicy API.", ecPolicyName, - sysPolicies, - DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY + sysPolicies ); throw new HadoopIllegalArgumentException(message); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 15ee9bbc55..21773170bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -3016,16 +3016,6 @@ - - dfs.namenode.ec.policies.enabled - - Comma-delimited list of enabled erasure coding policies. - The NameNode will enforce this when setting an erasure coding policy - on a directory. By default, none of the built-in erasure coding - policies are enabled. - - - dfs.namenode.ec.system.default.policy RS-6-3-1024k diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md index 0a88a5f9a6..5fca3ebb3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md @@ -69,7 +69,7 @@ Architecture `REPLICATION` is a special policy. It can only be set on directory, to force the directory to adopt 3x replication scheme, instead of inheriting its ancestor's erasure coding policy. This policy makes it possible to interleave 3x replication scheme directory with erasure coding directory. - `REPLICATION` policy is always enabled. For other built-in policies, unless they are configured in `dfs.namenode.ec.policies.enabled` property, otherwise they are disabled by default. + `REPLICATION` policy is always enabled. 
For other built-in policies, they are disabled by default. Similar to HDFS storage policies, erasure coding policies are set on a directory. When a file is created, it inherits the EC policy of its nearest ancestor directory. @@ -110,11 +110,8 @@ Deployment ### Configuration keys - The set of enabled erasure coding policies can be configured on the NameNode via `dfs.namenode.ec.policies.enabled` configuration. This restricts - what EC policies can be set by clients. It does not affect the behavior of already set file or directory-level EC policies. - - By default, all built-in erasure coding policies are disabled. Typically, the cluster administrator will enable set of policies by including them - in the `dfs.namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance, + By default, all built-in erasure coding policies are disabled, except the one defined in `dfs.namenode.ec.system.default.policy` which is enabled by default. + The cluster administrator can enable a set of policies through the `hdfs ec [-enablePolicy -policy <policyName>]` command based on the size of the cluster and the desired fault-tolerance properties. For instance, for a cluster with 9 racks, a policy like `RS-10-4-1024k` will not preserve rack-level fault-tolerance, and `RS-6-3-1024k` or `RS-3-2-1024k` might be more appropriate. If the administrator only cares about node-level fault-tolerance, `RS-10-4-1024k` would still be appropriate as long as there are at least 14 DataNodes in the cluster. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java index 60f4f561a1..566755db99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java @@ -21,8 +21,8 @@ import org.apache.hadoop.cli.util.CLICommand; import org.apache.hadoop.cli.util.CLICommandErasureCodingCli; import org.apache.hadoop.cli.util.CommandExecutor.Result; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.After; import org.junit.Before; @@ -34,7 +34,7 @@ public class TestErasureCodingCLI extends CLITestHelper { private final int NUM_OF_DATANODES = 3; private MiniDFSCluster dfsCluster = null; - private FileSystem fs = null; + private DistributedFileSystem fs = null; private String namenode = null; @Rule @@ -44,10 +44,6 @@ public class TestErasureCodingCLI extends CLITestHelper { @Override public void setUp() throws Exception { super.setUp(); - - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - "RS-6-3-1024k,RS-3-2-1024k,XOR-2-1-1024k"); - dfsCluster = new MiniDFSCluster.Builder(conf) .numDataNodes(NUM_OF_DATANODES).build(); dfsCluster.waitClusterUp(); @@ -56,6 +52,9 @@ public void setUp() throws Exception { username = System.getProperty("user.name"); fs = dfsCluster.getFileSystem(); + fs.enableErasureCodingPolicy("RS-6-3-1024k"); + fs.enableErasureCodingPolicy("RS-3-2-1024k"); + fs.enableErasureCodingPolicy("XOR-2-1-1024k"); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 
b00eff2e18..de1a8ad68e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -71,7 +71,6 @@ import java.util.UUID; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; import com.google.common.base.Charsets; import com.google.common.base.Joiner; @@ -289,12 +288,13 @@ public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) { Whitebox.setInternalState(fsn.getFSDirectory(), "editLog", newLog); } - public static void enableAllECPolicies(Configuration conf) { - // Enable all the available EC policies - String policies = SystemErasureCodingPolicies.getPolicies().stream() - .map(ErasureCodingPolicy::getName) - .collect(Collectors.joining(",")); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, policies); + public static void enableAllECPolicies(DistributedFileSystem fs) + throws IOException { + // Enable all available EC policies + for (ErasureCodingPolicy ecPolicy : + SystemErasureCodingPolicies.getPolicies()) { + fs.enableErasureCodingPolicy(ecPolicy.getName()); + } } /** class MyFile contains enough information to recreate the contents of diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java index 4202969ee4..7057010663 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java @@ -81,11 +81,11 @@ public static MiniDFSCluster initializeCluster() throws IOException { 0); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); - 
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); MiniDFSCluster myCluster = new MiniDFSCluster.Builder(conf) .numDataNodes(NUM_DATANODES) .build(); + myCluster.getFileSystem().enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); myCluster.getFileSystem().getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); return myCluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java index 4f67a0a1a2..f94b7abeee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java @@ -93,8 +93,6 @@ public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - getEcPolicy().getName()); if (ErasureCodeNative.isNativeCodeLoaded()) { conf.set( CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, @@ -108,6 +106,7 @@ public void setup() throws IOException { DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true); } fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy(getEcPolicy().getName()); fs.mkdirs(dirPath); fs.getClient() .setErasureCodingPolicy(dirPath.toString(), ecPolicy.getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java index c0cfea2200..3714542411 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java @@ -94,11 +94,10 @@ public void setup() throws IOException { CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, NativeRSRawErasureCoderFactory.CODER_NAME); } - DFSTestUtil.enableAllECPolicies(conf); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); - cluster.getFileSystem().getClient().setErasureCodingPolicy("/", ecPolicy - .getName()); fs = cluster.getFileSystem(); + DFSTestUtil.enableAllECPolicies(fs); + fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java index 6b94a08fdd..231f260c69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java @@ -216,10 +216,10 @@ private void setup(Configuration conf) throws IOException { CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, NativeRSRawErasureCoderFactory.CODER_NAME); } - DFSTestUtil.enableAllECPolicies(conf); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); dfs = cluster.getFileSystem(); + DFSTestUtil.enableAllECPolicies(dfs); dfs.mkdirs(dir); dfs.setErasureCodingPolicy(dir, ecPolicy.getName()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java index bb394feb7c..7bd85b4989 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java @@ -131,8 +131,6 @@ public void setup() throws IOException { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); numDNs = dataBlocks + parityBlocks + 2; cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); @@ -142,6 +140,8 @@ public void setup() throws IOException { bm = fsn.getBlockManager(); client = getDfsClient(cluster.getNameNode(0), conf); + dfs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); dfs.mkdirs(ecDir); dfs.setErasureCodingPolicy(ecDir, StripedFileTestUtil.getDefaultECPolicy().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java index a6a3a80ba3..d4e01b72d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java @@ -59,11 +59,11 @@ public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); fileContext = FileContext.getFileContext(cluster.getURI(0), conf); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); fs.mkdirs(new Path("/ec")); 
cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec", StripedFileTestUtil.getDefaultECPolicy().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java index be962dc4cd..da3407d2fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java @@ -48,11 +48,11 @@ public static void setup() throws IOException { conf = new HdfsConfiguration(); int numDN = ErasureCodeBenchmarkThroughput.getEcPolicy().getNumDataUnits() + ErasureCodeBenchmarkThroughput.getEcPolicy().getNumParityUnits(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ErasureCodeBenchmarkThroughput.getEcPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDN).build(); cluster.waitActive(); fs = cluster.getFileSystem(); + ((DistributedFileSystem)fs).enableErasureCodingPolicy( + ErasureCodeBenchmarkThroughput.getEcPolicy().getName()); } @AfterClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java index e095602c6e..3c549b185e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java @@ -77,13 +77,13 @@ public void setupCluster() throws IOException { ecPolicy = getEcPolicy(); conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); - DFSTestUtil.enableAllECPolicies(conf); cluster = new MiniDFSCluster.Builder(conf). 
numDataNodes(ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()). build(); cluster.waitActive(); fs = cluster.getFileSystem(); namesystem = cluster.getNamesystem(); + DFSTestUtil.enableAllECPolicies(fs); } @After @@ -206,8 +206,6 @@ public void testBasicSetECPolicy() // Verify that policies are successfully loaded even when policies // are disabled - cluster.getConfiguration(0).set( - DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, ""); cluster.restartNameNodes(); cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java index fbeada67dc..6ab018bbea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java @@ -53,11 +53,10 @@ public void setupCluster() throws IOException { groupSize = (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()); conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build(); cluster.waitActive(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy(ecPolicy.getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java index b804523b3c..9d6687c6c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java @@ -77,8 +77,6 @@ public void setup() throws IOException { 
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); Path ecPath = new Path(ecDir); cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault()); @@ -86,7 +84,8 @@ public void setup() throws IOException { StripedFileTestUtil.getDefaultECPolicy().getName()); fs = cluster.getFileSystem(); client = fs.getClient(); - + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); bytesPerCRC = conf.getInt( HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java index e04f957325..077cf3a115 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java @@ -45,13 +45,13 @@ public class TestFileStatusWithECPolicy { @Before public void before() throws IOException { HdfsConfiguration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs = cluster.getFileSystem(); client = fs.getClient(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java index 86b1aadf6e..2846dbf7f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java @@ -88,12 +88,11 @@ public void setup() throws IOException { false); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); final int numDNs = dataBlocks + parityBlocks; cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); dfs = cluster.getFileSystem(); + dfs.enableErasureCodingPolicy(ecPolicy.getName()); dfs.mkdirs(dir); dfs.setErasureCodingPolicy(dir, ecPolicy.getName()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java index 34cba92ad2..f3b8dd84f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java @@ -58,12 +58,11 @@ public class TestReadStripedFileWithMissingBlocks { public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient().setErasureCodingPolicy( "/", ecPolicy.getName()); fs = cluster.getFileSystem(); + 
fs.enableErasureCodingPolicy(ecPolicy.getName()); } public void tearDown() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 7cd34c2acd..8ae5e6472a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -105,12 +105,12 @@ public void setup() throws IOException { CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, NativeRSRawErasureCoderFactory.CODER_NAME); } - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNum).build(); cluster.waitActive(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); fs.getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java index edecbf27a6..3d3ec9c6c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java @@ -61,12 +61,12 @@ public void setup() throws IOException { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new 
MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); cluster.waitActive(); + cluster.getFileSystem().enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java index 50d7b2756f..497d450de2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java @@ -110,13 +110,13 @@ public void testSetRepWithStoragePolicyOnEmptyFile() throws Exception { public void testSetRepOnECFile() throws Exception { ClientProtocol client; Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) .build(); cluster.waitActive(); client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy(); + client.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); client.setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java index 5371e205ac..529a110c0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java @@ 
-70,11 +70,11 @@ public void setup() throws IOException { CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY, NativeRSRawErasureCoderFactory.CODER_NAME); } - DFSTestUtil.enableAllECPolicies(conf); cluster = new MiniDFSCluster.Builder(conf).numDataNodes( dataBlocks + parityBlocks).build(); cluster.waitActive(); fs = cluster.getFileSystem(); + DFSTestUtil.enableAllECPolicies(fs); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java index 9b14df14c3..f27c9786db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java @@ -76,10 +76,10 @@ public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); fs.mkdirs(new Path("/ec")); cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec", StripedFileTestUtil.getDefaultECPolicy().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index ec9c39a622..a900ad191d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -1981,8 +1981,6 @@ 
private void doTestBalancerWithStripedFile(Configuration conf) throws Exception for (int i = 0; i < numOfDatanodes; i++) { racks[i] = "/rack" + (i % numOfRacks); } - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numOfDatanodes) .racks(racks) @@ -1993,6 +1991,8 @@ private void doTestBalancerWithStripedFile(Configuration conf) throws Exception cluster.waitActive(); client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy(); + client.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); client.setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index 4092e5ef33..4c1ea7b763 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -1368,8 +1368,6 @@ public void testPlacementPolicySatisfied() throws Exception { Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf) @@ -1382,6 +1380,8 @@ public void testPlacementPolicySatisfied() throws Exception { final Path ecDir = new Path("/ec"); final Path testFileUnsatisfied = new Path(ecDir, "test1"); final Path testFileSatisfied = new Path(ecDir, "test2"); 
+ dfs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster.getFileSystem().getClient().mkdirs(ecDir.toString(), null, true); cluster.getFileSystem().getClient() .setErasureCodingPolicy(ecDir.toString(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java index 54f28053f6..7627cf5c6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java @@ -55,8 +55,6 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS { private Configuration getConf() { Configuration conf = super.getConf(numDNs); conf.setInt("io.bytes.per.checksum", cellSize); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); return conf; } @@ -85,6 +83,8 @@ public void testRead() throws Exception { .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100)) .numDataNodes(numDNs) .build(); + cluster.getFileSystem().enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); try { @@ -116,8 +116,6 @@ public void testAppend() throws Exception { public void testEnd2End() throws Exception { Configuration conf = new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); new TestBalancer().integrationTestWithStripedFile(conf); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java index e7bd3d231d..cf4299b501 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java @@ -24,7 +24,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -70,8 +69,6 @@ public void setup() throws Exception { ecPolicy = SystemErasureCodingPolicies.getByID( SystemErasureCodingPolicies.XOR_2_1_POLICY_ID); conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES) .build(); cluster.waitActive(); @@ -84,6 +81,7 @@ public void setup() throws Exception { // Create a striped file Path ecDir = new Path("/ec"); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy(ecPolicy.getName()); fs.mkdirs(ecDir); fs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName()); ecFile = new Path(ecDir, "ec-file"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java index aaa48997ea..7d16017c0d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java @@ -145,13 +145,12 @@ private DataNode getDataNode(String host) { public void testReconstructForNotEnoughRacks() throws Exception { LOG.info("cluster hosts: {}, racks: {}", Arrays.asList(hosts), Arrays.asList(racks)); - - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts) .numDataNodes(hosts.length).build(); cluster.waitActive(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); fs.setErasureCodingPolicy(new Path("/"), StripedFileTestUtil.getDefaultECPolicy().getName()); FSNamesystem fsn = cluster.getNamesystem(); @@ -219,12 +218,12 @@ public void testReconstructForNotEnoughRacks() throws Exception { @Test public void testChooseExcessReplicasToDelete() throws Exception { - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts) .numDataNodes(hosts.length).build(); cluster.waitActive(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); fs.setErasureCodingPolicy(new Path("/"), StripedFileTestUtil.getDefaultECPolicy().getName()); @@ -271,8 +270,6 @@ public void testChooseExcessReplicasToDelete() throws Exception { */ @Test public void testReconstructionWithDecommission() throws Exception { - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); final String[] rackNames = getRacks(dataBlocks + parityBlocks + 2, dataBlocks); final 
String[] hostNames = getHosts(dataBlocks + parityBlocks + 2); @@ -281,6 +278,8 @@ public void testReconstructionWithDecommission() throws Exception { .numDataNodes(hostNames.length).build(); cluster.waitActive(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); fs.setErasureCodingPolicy(new Path("/"), StripedFileTestUtil.getDefaultECPolicy().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java index c5066a04a2..241c2dcf99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java @@ -31,10 +31,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.StripedFileTestUtil; @@ -72,7 +72,7 @@ public class TestSequentialBlockGroupId { private final int fileLen = blockSize * dataBlocks * blockGrpCount; private MiniDFSCluster cluster; - private FileSystem fs; + private DistributedFileSystem fs; private SequentialBlockGroupIdGenerator blockGrpIdGenerator; private Path ecDir = new Path("/ecDir"); @@ -81,12 +81,12 @@ public void setup() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); 
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); blockGrpIdGenerator = cluster.getNamesystem().getBlockManager() .getBlockIdManager().getBlockGroupIdGenerator(); fs.mkdirs(ecDir); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java index 8ababfacf6..7194385090 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java @@ -72,13 +72,13 @@ public void setup() throws IOException { conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 707d46fd3c..8ff660fb8e 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -508,8 +508,6 @@ public void testMoverWithStripedFile() throws Exception { capacities[i][j]=capacity; } } - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numOfDatanodes) .storagesPerDatanode(storagesPerDatanode) @@ -529,6 +527,8 @@ public void testMoverWithStripedFile() throws Exception { try { cluster.waitActive(); + cluster.getFileSystem().enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); // set "/bar" directory with HOT storage policy. ClientProtocol client = NameNodeProxies.createProxy(conf, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java index ecbf99d804..aad8e9b96a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java @@ -76,12 +76,11 @@ public void setup() throws IOException { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); SimulatedFSDataset.setFactory(conf); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); fs = cluster.getFileSystem(); + 
fs.enableErasureCodingPolicy(ecPolicy.getName()); fs.mkdirs(dirPath); fs.getClient().setErasureCodingPolicy(dirPath.toString(), ecPolicy.getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java index a4f470b34d..45e98ea30e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java @@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -62,11 +61,11 @@ public class TestAddStripedBlockInFBR { @Before public void setup() throws IOException { Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build(); cluster.waitActive(); dfs = cluster.getFileSystem(); + dfs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index 623c444f71..ec13b448e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -19,7 +19,6 @@ import 
org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSStripedOutputStream; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -85,11 +84,10 @@ public class TestAddStripedBlocks { @Before public void setup() throws IOException { HdfsConfiguration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build(); cluster.waitActive(); dfs = cluster.getFileSystem(); + dfs.enableErasureCodingPolicy(ecPolicy.getName()); dfs.getClient().setErasureCodingPolicy("/", ecPolicy.getName()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java index d769f8bc6b..63bfa27b4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java @@ -28,10 +28,8 @@ import org.junit.Test; import org.junit.rules.Timeout; -import java.util.Arrays; import java.util.HashSet; import java.util.Set; -import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; @@ -47,7 +45,7 @@ public class TestEnabledECPolicies { private void expectInvalidPolicy(String value) { HdfsConfiguration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY, value); try { ErasureCodingPolicyManager.getInstance().init(conf); @@ -60,11 +58,10 @@ private void expectInvalidPolicy(String value) { private void expectValidPolicy(String 
value, final int numEnabled) throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - value); ErasureCodingPolicyManager manager = ErasureCodingPolicyManager.getInstance(); manager.init(conf); + manager.enablePolicy(value); assertEquals("Incorrect number of enabled policies", numEnabled, manager.getEnabledPolicies().length); } @@ -73,8 +70,8 @@ private void expectValidPolicy(String value, final int numEnabled) throws public void testDefaultPolicy() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); String defaultECPolicies = conf.get( - DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT); + DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY, + DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT); expectValidPolicy(defaultECPolicies, 1); } @@ -97,11 +94,6 @@ public void testInvalid() throws Exception { public void testValid() throws Exception { String ecPolicyName = StripedFileTestUtil.getDefaultECPolicy().getName(); expectValidPolicy(ecPolicyName, 1); - expectValidPolicy(ecPolicyName + ", ", 1); - expectValidPolicy(",", 1); - expectValidPolicy(", " + ecPolicyName, 1); - expectValidPolicy(" ", 1); - expectValidPolicy(" , ", 1); } @Test @@ -128,13 +120,12 @@ public void testGetPolicies() throws Exception { private void testGetPolicies(ErasureCodingPolicy[] enabledPolicies) throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - Arrays.asList(enabledPolicies).stream() - .map(ErasureCodingPolicy::getName) - .collect(Collectors.joining(", "))); ErasureCodingPolicyManager manager = ErasureCodingPolicyManager.getInstance(); manager.init(conf); + for (ErasureCodingPolicy p : enabledPolicies) { + manager.enablePolicy(p.getName()); + } // Check that returned values are unique Set found = new HashSet<>(); for (ErasureCodingPolicy p : 
manager.getEnabledPolicies()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index 4467dc1068..ae62dab092 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -458,8 +458,6 @@ public void testFSEditLogOpCodes() throws IOException { public void testAddNewStripedBlock() throws IOException{ // start a cluster Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - testECPolicy.getName()); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9) @@ -467,6 +465,7 @@ public void testAddNewStripedBlock() throws IOException{ cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); FSNamesystem fns = cluster.getNamesystem(); + fs.enableErasureCodingPolicy(testECPolicy.getName()); String testDir = "/ec"; String testFile = "testfile_001"; @@ -533,8 +532,6 @@ public void testAddNewStripedBlock() throws IOException{ public void testUpdateStripedBlocks() throws IOException{ // start a cluster Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - testECPolicy.getName()); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9) @@ -542,6 +539,7 @@ public void testUpdateStripedBlocks() throws IOException{ cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); FSNamesystem fns = cluster.getNamesystem(); + fs.enableErasureCodingPolicy(testECPolicy.getName()); String testDir = "/ec"; String testFile = "testfile_002"; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index 9256056b4e..16f625891b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -241,11 +241,11 @@ private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration con @Test public void testSaveAndLoadStripedINodeFile() throws IOException{ Configuration conf = new Configuration(); - DFSTestUtil.enableAllECPolicies(conf); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); + DFSTestUtil.enableAllECPolicies(cluster.getFileSystem()); testSaveAndLoadStripedINodeFile(cluster.getNamesystem(), conf, false); } finally { if (cluster != null) { @@ -262,11 +262,11 @@ public void testSaveAndLoadStripedINodeFile() throws IOException{ public void testSaveAndLoadStripedINodeFileUC() throws IOException { // construct a INode with StripedBlock for saving and loading Configuration conf = new Configuration(); - DFSTestUtil.enableAllECPolicies(conf); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); + DFSTestUtil.enableAllECPolicies(cluster.getFileSystem()); testSaveAndLoadStripedINodeFile(cluster.getNamesystem(), conf, true); } finally { if (cluster != null) { @@ -462,13 +462,13 @@ public void testSupportBlockGroup() throws Exception { final int BLOCK_SIZE = 8 * 1024 * 1024; Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); - DFSTestUtil.enableAllECPolicies(conf); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE) .build(); cluster.waitActive(); DistributedFileSystem fs 
= cluster.getFileSystem(); + DFSTestUtil.enableAllECPolicies(fs); Path parentDir = new Path("/ec-10-4"); Path childDir = new Path(parentDir, "ec-3-2"); ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies @@ -732,13 +732,13 @@ public void testBlockTypeProtoDefaultsToContiguous() throws Exception { public void testSaveAndLoadFileUnderReplicationPolicyDir() throws IOException { Configuration conf = new Configuration(); - DFSTestUtil.enableAllECPolicies(conf); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); FSNamesystem fsn = cluster.getNamesystem(); DistributedFileSystem fs = cluster.getFileSystem(); + DFSTestUtil.enableAllECPolicies(fs); ErasureCodingPolicy replicaPolicy = SystemErasureCodingPolicies.getReplicationPolicy(); ErasureCodingPolicy defaultEcPolicy = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 7cdbde21d0..558e337770 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -685,13 +685,12 @@ public void testFsckOpenECFiles() throws Exception { final int numAllUnits = dataBlocks + ecPolicy.getNumParityUnits(); int blockSize = 2 * cellSize; conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes( numAllUnits + 1).build(); String topDir = "/myDir"; cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy(ecPolicy.getName()); util.createFiles(fs, topDir); // set topDir to EC when it has replicated files cluster.getFileSystem().getClient().setErasureCodingPolicy( @@ 
-1999,19 +1998,19 @@ public Boolean get() { @Test public void testECFsck() throws Exception { - FileSystem fs = null; + DistributedFileSystem fs = null; final long precision = 1L; conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits(); int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); int totalSize = dataBlocks + parityBlocks; cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); // create a contiguous file Path replDirPath = new Path("/replicated"); @@ -2301,11 +2300,11 @@ public void testFsckCorruptECFile() throws Exception { StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize(); int totalSize = dataBlocks + parityBlocks; - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(totalSize).build(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); Map dnIndices = new HashMap<>(); ArrayList dnList = cluster.getDataNodes(); for (int i = 0; i < totalSize; i++) { @@ -2372,11 +2371,11 @@ public void testFsckMissingECFile() throws Exception { StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize(); int totalSize = dataBlocks + parityBlocks; - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new 
MiniDFSCluster.Builder(conf) .numDataNodes(totalSize).build(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); // create file Path ecDirPath = new Path("/striped"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 95a391bb1f..d21b275c65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -729,8 +729,6 @@ public void testVerifyMissingBlockGroupsMetrics() throws Exception { DistributedFileSystem fs = null; try { Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits(); int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); @@ -739,6 +737,8 @@ public void testVerifyMissingBlockGroupsMetrics() throws Exception { cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(totalSize).build(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); // create file Path ecDirPath = new Path("/striped"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java index f97492b7e0..9995393e67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java @@ -65,13 +65,12 @@ public class TestQuotaWithStripedBlocks { public void setUp() throws IOException { final Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build(); cluster.waitActive(); dir = cluster.getNamesystem().getFSDirectory(); dfs = cluster.getFileSystem(); + dfs.enableErasureCodingPolicy(ecPolicy.getName()); dfs.mkdirs(ecDir); dfs.getClient() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java index 02075f045d..46907fd64e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java @@ -110,13 +110,12 @@ private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy) throws Exception { Configuration conf = new HdfsConfiguration(); initConf(conf); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1) .build(); - try { cluster.waitActive(); + cluster.getFileSystem().enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); final int numBlocks = 4; DFSTestUtil.createStripedFile(cluster, filePath, dirPath, numBlocks, 1, true); @@ -203,14 +202,14 @@ public void test2RecoveryTasksForSameBlockGroup() throws Exception { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 
1000); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2) .build(); try { cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); BlockManager bm = cluster.getNamesystem().getBlockManager(); + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); fs.getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); int fileLen = dataBlocks * blockSize; @@ -280,13 +279,12 @@ public void testCountLiveReplicas() throws Exception { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2) .build(); cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); - + fs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); try { fs.mkdirs(dirPath); fs.setErasureCodingPolicy(dirPath, @@ -383,8 +381,6 @@ public void testReconstructionWork() throws Exception { ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByID( SystemErasureCodingPolicies.XOR_2_1_POLICY_ID); - conf.setStrings(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - policy.getName()); Path ecDir = new Path("/ec"); Path ecFilePath = new Path(ecDir, "ec-file"); int blockGroups = 2; @@ -396,6 +392,7 @@ public void testReconstructionWork() throws Exception { try { // create an EC file with 2 block groups final DistributedFileSystem fs = dfsCluster.getFileSystem(); + fs.enableErasureCodingPolicy(policy.getName()); fs.mkdirs(ecDir); fs.setErasureCodingPolicy(ecDir, policy.getName()); 
DFSTestUtil.createStripedFile(dfsCluster, ecFilePath, ecDir, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 94172bbe69..d5f548736f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -577,7 +577,6 @@ public void testCorruptImageFallbackLostECPolicy() throws IOException { .getDefaultECPolicy(); final String policy = defaultPolicy.getName(); final Path f1 = new Path("/f1"); - config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, policy); MiniDFSCluster cluster = new MiniDFSCluster.Builder(config) .numDataNodes(0) @@ -586,6 +585,7 @@ public void testCorruptImageFallbackLostECPolicy() throws IOException { try { cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy(policy); // set root directory to use the default ec policy Path srcECDir = new Path("/"); fs.setErasureCodingPolicy(srcECDir, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java index c71d049243..468e47fd18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java @@ -318,8 +318,6 @@ public void testDeleteOp() throws Exception { final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() + testECPolicy.getNumParityUnits()); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2); - 
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE) .build(); @@ -327,6 +325,8 @@ public void testDeleteOp() throws Exception { FSNamesystem fsn = cluster.getNamesystem(); dfs = cluster.getFileSystem(); + dfs.enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); dfs.mkdirs(ecDir); // set erasure coding policy @@ -395,8 +395,6 @@ public void testUnsuitableStoragePoliciesWithECStripedMode() 1L); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); // start 10 datanodes int numOfDatanodes = 10; @@ -426,6 +424,8 @@ public void testUnsuitableStoragePoliciesWithECStripedMode() try { cluster.waitActive(); + cluster.getFileSystem().enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); // set "/foo" directory with ONE_SSD storage policy. 
ClientProtocol client = NameNodeProxies.createProxy(conf, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index e046b50d6a..d716f04b1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -137,9 +137,6 @@ public class TestNameNodeMetrics { // Enable stale DataNodes checking CONF.setBoolean( DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true); - // Enable erasure coding - CONF.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - EC_POLICY.getName()); GenericTestUtils.setLogLevel(LogFactory.getLog(MetricsAsserts.class), Level.DEBUG); } @@ -166,6 +163,7 @@ public void setUp() throws Exception { namesystem = cluster.getNamesystem(); bm = namesystem.getBlockManager(); fs = cluster.getFileSystem(); + fs.enableErasureCodingPolicy(EC_POLICY.getName()); ecDir = getTestPath("/ec"); fs.mkdirs(ecDir); fs.setErasureCodingPolicy(ecDir, EC_POLICY.getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index c515df36ca..6a01de2189 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -534,8 +534,6 @@ public void testReportCommand() throws Exception { final Configuration dfsConf = new HdfsConfiguration(); ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID( SystemErasureCodingPolicies.XOR_2_1_POLICY_ID); - 
dfsConf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); dfsConf.setInt( DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); dfsConf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1); @@ -565,6 +563,7 @@ public void testReportCommand() throws Exception { final long fileLength = 512L; final DistributedFileSystem fs = miniCluster.getFileSystem(); final Path file = new Path(baseDir, "/corrupted"); + fs.enableErasureCodingPolicy(ecPolicy.getName()); DFSTestUtil.createFile(fs, file, fileLength, replFactor, 12345L); DFSTestUtil.waitReplication(fs, file, replFactor); final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index 46f194107c..b32b308958 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -137,11 +137,10 @@ public static void createOriginalFSImage() throws IOException { conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT"); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); DistributedFileSystem hdfs = cluster.getFileSystem(); + hdfs.enableErasureCodingPolicy(ecPolicy.getName()); // Create a reasonable namespace for (int i = 0; i < NUM_DIRS; i++, dirCount++) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java index d04ef99d63..187b297b42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java @@ -61,13 +61,13 @@ public void setup() throws IOException { int numDNs = dataBlocks + parityBlocks + 2; Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); + cluster.getFileSystem().enableErasureCodingPolicy( + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); fs = cluster.getFileSystem(); Path eczone = new Path("/eczone"); fs.mkdirs(eczone); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index 60d90fb37b..77e5be1e90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -519,13 +519,12 @@ public void testWebHdfsAllowandDisallowSnapshots() throws Exception { public void testWebHdfsErasureCodingFiles() throws Exception { MiniDFSCluster cluster = null; final Configuration conf = WebHdfsTestUtil.createConf(); -
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - SystemErasureCodingPolicies.getByID( - SystemErasureCodingPolicies.XOR_2_1_POLICY_ID).getName()); try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); + dfs.enableErasureCodingPolicy(SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.XOR_2_1_POLICY_ID).getName()); final WebHdfsFileSystem webHdfs = WebHdfsTestUtil .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java index f626de4ee4..9ccddd122e 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java @@ -25,7 +25,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.tools.ECAdmin; @@ -60,12 +59,11 @@ public class TestDistCpUtils { @BeforeClass public static void create() throws IOException { - config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - "XOR-2-1-1024k"); cluster = new MiniDFSCluster.Builder(config) .numDataNodes(2) .format(true) - .build(); + .build(); + cluster.getFileSystem().enableErasureCodingPolicy("XOR-2-1-1024k"); } @AfterClass