From 7515e75103c06ce7139b305dd04d4fb2e94b12ad Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 14 Mar 2017 11:47:25 -0700 Subject: [PATCH] HDFS-11505. Do not enable any erasure coding policies by default. Contributed by Manoj Govindassamy. --- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 +- .../namenode/ErasureCodingPolicyManager.java | 5 ++++- .../src/main/resources/hdfs-default.xml | 5 +++-- .../src/site/markdown/HDFSErasureCoding.md | 13 +++++++++---- .../hadoop/hdfs/TestDecommissionWithStriped.java | 2 ++ .../hdfs/TestErasureCodeBenchmarkThroughput.java | 2 ++ .../hdfs/TestErasureCodingPolicyWithSnapshot.java | 2 ++ .../org/apache/hadoop/hdfs/TestFileChecksum.java | 2 ++ .../hadoop/hdfs/TestFileStatusWithECPolicy.java | 5 ++++- .../hadoop/hdfs/TestLeaseRecoveryStriped.java | 2 ++ .../hdfs/TestReadStripedFileWithDecoding.java | 2 ++ .../TestReadStripedFileWithMissingBlocks.java | 2 ++ .../hadoop/hdfs/TestReconstructStripedFile.java | 2 ++ .../hadoop/hdfs/TestSafeModeWithStripedFile.java | 2 ++ .../hadoop/hdfs/TestWriteReadStripedFile.java | 2 ++ .../hadoop/hdfs/server/balancer/TestBalancer.java | 2 ++ .../TestBlockTokenWithDFSStriped.java | 4 ++++ ...ReconstructStripedBlocksWithRackAwareness.java | 6 ++++++ .../TestSequentialBlockGroupId.java | 2 ++ .../TestDataNodeErasureCodingMetrics.java | 2 ++ .../hadoop/hdfs/server/mover/TestMover.java | 2 ++ .../TestAddOverReplicatedStripedBlocks.java | 4 +++- .../server/namenode/TestAddStripedBlockInFBR.java | 3 +++ .../server/namenode/TestAddStripedBlocks.java | 10 ++++++---- .../server/namenode/TestEnabledECPolicies.java | 15 +++++++++++++-- .../hdfs/server/namenode/TestFSEditLogLoader.java | 4 ++++ .../hadoop/hdfs/server/namenode/TestFsck.java | 8 ++++++++ .../hdfs/server/namenode/TestNameNodeMXBean.java | 2 ++ .../namenode/TestQuotaWithStripedBlocks.java | 2 ++ .../namenode/TestReconstructStripedBlocks.java | 6 ++++++ .../server/namenode/TestStripedINodeFile.java | 4 ++++ 
.../TestOfflineImageViewerWithStripedBlocks.java | 2 ++ 32 files changed, 112 insertions(+), 16 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 3fc4980648..06b33f977d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -563,7 +563,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "10m"; public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_KEY = "dfs.namenode.ec.policies.enabled"; - public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = "RS-6-3-64k"; + public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = ""; public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = "dfs.datanode.ec.reconstruction.stripedread.threads"; public static final int DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20; public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = "dfs.datanode.ec.reconstruction.stripedread.buffer.size"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java index 29af207299..c23b034ac9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java @@ -98,12 +98,15 @@ public final class ErasureCodingPolicyManager { DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT); this.enabledPoliciesByName = new TreeMap<>(); for (String policyName : policyNames) { + if 
(policyName.trim().isEmpty()) { + continue; + } ErasureCodingPolicy ecPolicy = SYSTEM_POLICIES_BY_NAME.get(policyName); if (ecPolicy == null) { String sysPolicies = Arrays.asList(SYS_POLICIES).stream() .map(ErasureCodingPolicy::getName) .collect(Collectors.joining(", ")); - String msg = String.format("EC policy %s specified at %s is not a " + + String msg = String.format("EC policy '%s' specified at %s is not a " + "valid policy. Please choose from list of available policies: " + "[%s]", policyName, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index fa5d814eaf..2b1495196f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -2930,10 +2930,11 @@ dfs.namenode.ec.policies.enabled - RS-6-3-64k + Comma-delimited list of enabled erasure coding policies. The NameNode will enforce this when setting an erasure coding policy - on a directory. + on a directory. By default, none of the built-in erasure coding + policies are enabled. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md index 9d9f2ce3c6..04acdce6ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md @@ -66,7 +66,7 @@ Architecture Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell size*. Currently, five built-in policies are supported: `RS-3-2-64k`, `RS-6-3-64k`, `RS-10-4-64k`, `RS-LEGACY-6-3-64k`, and `XOR-2-1-64k`. - By default, only `RS-6-3-64k` is enabled. + By default, all built-in erasure coding policies are disabled. Similar to HDFS storage policies, erasure coding policies are set on a directory. 
When a file is created, it inherits the EC policy of its nearest ancestor directory. @@ -91,15 +91,20 @@ Deployment Network bisection bandwidth is thus very important. For rack fault-tolerance, it is also important to have at least as many racks as the configured EC stripe width. - For the default EC policy of RS (6,3), this means minimally 9 racks, and ideally 10 or 11 to handle planned and unplanned outages. + For EC policy RS (6,3), this means minimally 9 racks, and ideally 10 or 11 to handle planned and unplanned outages. For clusters with fewer racks than the stripe width, HDFS cannot maintain rack fault-tolerance, but will still attempt to spread a striped file across multiple nodes to preserve node-level fault-tolerance. ### Configuration keys - The set of enabled erasure coding policies can be configured on the NameNode via `dfs.namenode.ec.policies.enabled`. This restricts what EC policies can be set by clients. It does not affect the behavior of already set file or directory-level EC policies. + The set of enabled erasure coding policies can be configured on the NameNode via `dfs.namenode.ec.policies.enabled` configuration. This restricts + what EC policies can be set by clients. It does not affect the behavior of already set file or directory-level EC policies. - By default, only the `RS-6-3-64k` policy is enabled. Typically, the cluster administrator will configure the set of enabled policies based on the size of the cluster and the desired fault-tolerance properties. For instance, for a cluster with 9 racks, a policy like `RS-10-4-64k` will not preserve rack-level fault-tolerance, and `RS-6-3-64k` or `RS-3-2-64k` might be more appropriate. If the administrator only cares about node-level fault-tolerance, `RS-10-4-64k` would still be appropriate as long as there are at least 14 DataNodes in the cluster. + By default, all built-in erasure coding policies are disabled. 
Typically, the cluster administrator will enable a set of policies by including them + in the `dfs.namenode.ec.policies.enabled` configuration based on the size of the cluster and the desired fault-tolerance properties. For instance, + for a cluster with 9 racks, a policy like `RS-10-4-64k` will not preserve rack-level fault-tolerance, and `RS-6-3-64k` or `RS-3-2-64k` might + be more appropriate. If the administrator only cares about node-level fault-tolerance, `RS-10-4-64k` would still be appropriate as long as + there are at least 14 DataNodes in the cluster. The codec implementation for Reed-Solomon and XOR can be configured with the following client and DataNode configuration keys: `io.erasurecode.codec.rs.rawcoder` for the default RS codec, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java index a5df85fcb9..bb394feb7c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java @@ -131,6 +131,8 @@ public void setup() throws IOException { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); numDNs = dataBlocks + parityBlocks + 2; cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java index 383e53a243..be962dc4cd 100644 ---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodeBenchmarkThroughput.java @@ -48,6 +48,8 @@ public static void setup() throws IOException { conf = new HdfsConfiguration(); int numDN = ErasureCodeBenchmarkThroughput.getEcPolicy().getNumDataUnits() + ErasureCodeBenchmarkThroughput.getEcPolicy().getNumParityUnits(); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + ErasureCodeBenchmarkThroughput.getEcPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDN).build(); cluster.waitActive(); fs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java index 4833c24d2e..c5651ee89e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java @@ -48,6 +48,8 @@ public class TestErasureCodingPolicyWithSnapshot { @Before public void setupCluster() throws IOException { conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + sysDefaultPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build(); cluster.waitActive(); fs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java index 7f63e18abd..b804523b3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java @@ -77,6 +77,8 @@ public void setup() throws IOException { conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); Path ecPath = new Path(ecDir); cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java index 3be0e8d585..f033ab75b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java @@ -43,8 +43,11 @@ public class TestFileStatusWithECPolicy { @Before public void before() throws IOException { + HdfsConfiguration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = - new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build(); + new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs = cluster.getFileSystem(); client = fs.getClient(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java index 2ba5aede85..86b1aadf6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java @@ -88,6 +88,8 @@ public void setup() throws IOException { false); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + ecPolicy.getName()); final int numDNs = dataBlocks + parityBlocks; cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java index 4c6ad99c88..cb1640c0b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java @@ -100,6 +100,8 @@ public void setup() throws IOException { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, 0); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java index 930986f076..34cba92ad2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java @@ -58,6 +58,8 @@ public class TestReadStripedFileWithMissingBlocks { public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient().setErasureCodingPolicy( "/", ecPolicy.getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index 88edcb40d8..affb541891 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -102,6 +102,8 @@ public void setup() throws IOException { CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY, NativeRSRawErasureCoderFactory.class.getCanonicalName()); } + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNum).build(); cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java index 85538ba916..edecbf27a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java @@ -61,6 +61,8 @@ public void setup() throws IOException { conf = new 
HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java index 9aac65b411..9b14df14c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java @@ -76,6 +76,8 @@ public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); fs = cluster.getFileSystem(); fs.mkdirs(new Path("/ec")); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index d9a8e5b165..d2e8f4485e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -1929,6 +1929,8 @@ private void doTestBalancerWithStripedFile(Configuration conf) throws Exception for (int i = 0; i < numOfDatanodes; i++) { racks[i] = 
"/rack" + (i % numOfRacks); } + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numOfDatanodes) .racks(racks) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java index 82c9fde1d5..54f28053f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java @@ -55,6 +55,8 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS { private Configuration getConf() { Configuration conf = super.getConf(numDNs); conf.setInt("io.bytes.per.checksum", cellSize); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); return conf; } @@ -114,6 +116,8 @@ public void testAppend() throws Exception { public void testEnd2End() throws Exception { Configuration conf = new Configuration(); conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); new TestBalancer().integrationTestWithStripedFile(conf); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java index 9b7c7cd81e..4ecfd50a30 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java @@ -146,6 +146,8 @@ public void testReconstructForNotEnoughRacks() throws Exception { LOG.info("cluster hosts: {}, racks: {}", Arrays.asList(hosts), Arrays.asList(racks)); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts) .numDataNodes(hosts.length).build(); cluster.waitActive(); @@ -217,6 +219,8 @@ public void testReconstructForNotEnoughRacks() throws Exception { @Test public void testChooseExcessReplicasToDelete() throws Exception { + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts) .numDataNodes(hosts.length).build(); cluster.waitActive(); @@ -267,6 +271,8 @@ public void testChooseExcessReplicasToDelete() throws Exception { */ @Test public void testReconstructionWithDecommission() throws Exception { + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); final String[] rackNames = getRacks(dataBlocks + parityBlocks + 2, dataBlocks); final String[] hostNames = getHosts(dataBlocks + parityBlocks + 2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java index 3c18112b56..c5066a04a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java @@ -81,6 +81,8 @@ public void setup() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java index d36db233f5..ee2afbbd8b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java @@ -72,6 +72,8 @@ public void setup() throws IOException { conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 0fcb275595..9af61aedba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -508,6 +508,8 @@ public void testMoverWithStripedFile() throws Exception { capacities[i][j]=capacity; } } + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numOfDatanodes) .storagesPerDatanode(storagesPerDatanode) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java index 6a4cd32a58..ecbf99d804 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java @@ -76,13 +76,15 @@ public void setup() throws IOException { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + ecPolicy.getName()); SimulatedFSDataset.setFactory(conf); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); fs = cluster.getFileSystem(); fs.mkdirs(dirPath); fs.getClient().setErasureCodingPolicy(dirPath.toString(), - StripedFileTestUtil.getDefaultECPolicy().getName()); + ecPolicy.getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java index a3afa93e3a..a4f470b34d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -61,6 +62,8 @@ public class TestAddStripedBlockInFBR { @Before public void setup() throws IOException { Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build(); cluster.waitActive(); dfs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java index 2eb23b9f82..555c2fac81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java @@ -19,6 +19,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSStripedOutputStream; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -83,12 +84,13 @@ public class TestAddStripedBlocks { @Before public void setup() throws IOException { - cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()) - .numDataNodes(groupSize).build(); + HdfsConfiguration conf = new HdfsConfiguration(); + 
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + ecPolicy.getName()); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build(); cluster.waitActive(); dfs = cluster.getFileSystem(); - dfs.getClient().setErasureCodingPolicy("/", - StripedFileTestUtil.getDefaultECPolicy().getName()); + dfs.getClient().setErasureCodingPolicy("/", ecPolicy.getName()); } @After diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java index 7f18d18d73..4b4b196a06 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java @@ -69,6 +69,15 @@ private void expectValidPolicy(String value, final int numEnabled) throws numEnabled, manager.getEnabledPolicies().length); } + @Test + public void testDefaultPolicy() throws Exception { + HdfsConfiguration conf = new HdfsConfiguration(); + String defaultECPolicies = conf.get( + DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT); + expectValidPolicy(defaultECPolicies, 0); + } + @Test public void testInvalid() throws Exception { // Test first with an invalid policy @@ -90,14 +99,16 @@ public void testValid() throws Exception { expectValidPolicy(ecPolicyName, 1); expectValidPolicy(ecPolicyName + ", ", 1); expectValidPolicy(",", 0); + expectValidPolicy(", " + ecPolicyName, 1); + expectValidPolicy(" ", 0); + expectValidPolicy(" , ", 0); } @Test public void testGetPolicies() throws Exception { ErasureCodingPolicy[] enabledPolicies; // Enable no policies - enabledPolicies = new ErasureCodingPolicy[] - {SYSTEM_POLICIES[1], SYSTEM_POLICIES[2]}; + enabledPolicies = new ErasureCodingPolicy[] {}; 
testGetPolicies(enabledPolicies); // Enable one policy diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index 104727d83a..4467dc1068 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -458,6 +458,8 @@ public void testFSEditLogOpCodes() throws IOException { public void testAddNewStripedBlock() throws IOException{ // start a cluster Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + testECPolicy.getName()); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9) @@ -531,6 +533,8 @@ public void testAddNewStripedBlock() throws IOException{ public void testUpdateStripedBlocks() throws IOException{ // start a cluster Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + testECPolicy.getName()); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index b94e6108c4..e061c5cfe0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -682,6 +682,8 @@ public void testFsckOpenECFiles() throws Exception { final int numAllUnits = dataBlocks + ecPolicy.getNumParityUnits(); int blockSize = 2 * cellSize; 
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes( numAllUnits + 1).build(); String topDir = "/myDir"; @@ -1997,6 +1999,8 @@ public void testECFsck() throws Exception { conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits(); int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); @@ -2292,6 +2296,8 @@ public void testFsckCorruptECFile() throws Exception { StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize(); int totalSize = dataBlocks + parityBlocks; + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(totalSize).build(); fs = cluster.getFileSystem(); @@ -2361,6 +2367,8 @@ public void testFsckMissingECFile() throws Exception { StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize(); int totalSize = dataBlocks + parityBlocks; + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(totalSize).build(); fs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 231f75fc90..ed9ed3a213 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -725,6 +725,8 @@ public void testVerifyMissingBlockGroupsMetrics() throws Exception { DistributedFileSystem fs = null; try { Configuration conf = new HdfsConfiguration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits(); int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java index 1e836dc09b..f97492b7e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java @@ -65,6 +65,8 @@ public class TestQuotaWithStripedBlocks { public void setUp() throws IOException { final Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + ecPolicy.getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build(); cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java index 01a8e08bb1..e9ff2e360b 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java @@ -108,6 +108,8 @@ private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy) throws Exception { Configuration conf = new HdfsConfiguration(); initConf(conf); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1) .build(); @@ -195,6 +197,8 @@ public void test2RecoveryTasksForSameBlockGroup() throws Exception { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1000); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2) .build(); try { @@ -260,6 +264,8 @@ public void testCountLiveReplicas() throws Exception { conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2) .build(); cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java index f1610b1839..2c6390011a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java @@ -311,6 +311,8 @@ public void testDeleteOp() throws Exception { final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits() + testECPolicy.getNumParityUnits()); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE) .build(); @@ -386,6 +388,8 @@ public void testUnsuitableStoragePoliciesWithECStripedMode() 1L); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); // start 10 datanodes int numOfDatanodes = 10; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java index 0bfa054302..d04ef99d63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java @@ -61,6 +61,8 @@ public void setup() throws IOException { int numDNs = dataBlocks + parityBlocks + 2; Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); + conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, + StripedFileTestUtil.getDefaultECPolicy().getName()); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/",