From e8bdad7385ab63a122c1e8e8a6a73e0f1100e80b Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 7 Apr 2017 16:46:28 -0700 Subject: [PATCH] HDFS-11623. Move system erasure coding policies into hadoop-hdfs-client. --- .../hadoop/hdfs/protocol/HdfsConstants.java | 6 - .../protocol/SystemErasureCodingPolicies.java | 121 ++++++++++++++++++ .../apache/hadoop/test/TestHdfsHelper.java | 5 +- .../namenode/ErasureCodingPolicyManager.java | 75 +---------- .../server/namenode/FSDirErasureCodingOp.java | 6 +- .../server/namenode/FSImageFormatPBINode.java | 3 +- .../hdfs/server/namenode/INodeFile.java | 8 +- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 5 +- .../hadoop/hdfs/StripedFileTestUtil.java | 6 +- ...estDFSRSDefault10x4StripedInputStream.java | 7 +- ...stDFSRSDefault10x4StripedOutputStream.java | 7 +- ...ult10x4StripedOutputStreamWithFailure.java | 7 +- .../hdfs/TestDFSXORStripedInputStream.java | 7 +- .../hdfs/TestDFSXORStripedOutputStream.java | 7 +- ...tDFSXORStripedOutputStreamWithFailure.java | 7 +- .../hdfs/TestErasureCodingPolicies.java | 26 ++-- .../TestUnsetAndChangeDirectoryEcPolicy.java | 11 +- .../server/datanode/TestBlockRecovery.java | 4 +- .../namenode/TestEnabledECPolicies.java | 17 +-- .../hdfs/server/namenode/TestFSImage.java | 13 +- .../hdfs/server/namenode/TestINodeFile.java | 7 +- .../server/namenode/TestStripedINodeFile.java | 8 +- .../TestOfflineImageViewer.java | 11 +- .../hdfs/util/TestStripedBlockUtil.java | 4 +- .../apache/hadoop/hdfs/web/TestWebHDFS.java | 10 +- 25 files changed, 212 insertions(+), 176 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index d2209a4995..0d31bc40fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -144,12 +144,6 @@ public enum DatanodeReportType { ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE } - public static final byte RS_6_3_POLICY_ID = 1; - public static final byte RS_3_2_POLICY_ID = 2; - public static final byte RS_6_3_LEGACY_POLICY_ID = 3; - public static final byte XOR_2_1_POLICY_ID = 4; - public static final byte RS_10_4_POLICY_ID = 5; - /* Hidden constructor */ protected HdfsConstants() { } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java new file mode 100644 index 0000000000..2cd838b8fa --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SystemErasureCodingPolicies.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +/** + *
The set of built-in erasure coding policies.
+ *
Although this is a private class, EC policy IDs need to be treated like a + * stable interface. Adding, modifying, or removing built-in policies can cause + * inconsistencies with older clients.
+ */ +@InterfaceAudience.Private +@InterfaceStability.Stable +public final class SystemErasureCodingPolicies { + + // Private constructor, this is a utility class. + private SystemErasureCodingPolicies() {} + + // 64 KB + private static final int DEFAULT_CELLSIZE = 64 * 1024; + + public static final byte RS_6_3_POLICY_ID = 1; + private static final ErasureCodingPolicy SYS_POLICY1 = + new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_SCHEMA, + DEFAULT_CELLSIZE, RS_6_3_POLICY_ID); + + public static final byte RS_3_2_POLICY_ID = 2; + private static final ErasureCodingPolicy SYS_POLICY2 = + new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA, + DEFAULT_CELLSIZE, RS_3_2_POLICY_ID); + + public static final byte RS_6_3_LEGACY_POLICY_ID = 3; + private static final ErasureCodingPolicy SYS_POLICY3 = + new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA, + DEFAULT_CELLSIZE, RS_6_3_LEGACY_POLICY_ID); + + public static final byte XOR_2_1_POLICY_ID = 4; + private static final ErasureCodingPolicy SYS_POLICY4 = + new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA, + DEFAULT_CELLSIZE, XOR_2_1_POLICY_ID); + + public static final byte RS_10_4_POLICY_ID = 5; + private static final ErasureCodingPolicy SYS_POLICY5 = + new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA, + DEFAULT_CELLSIZE, RS_10_4_POLICY_ID); + + private static final List SYS_POLICIES = + Collections.unmodifiableList(Arrays.asList( + SYS_POLICY1, SYS_POLICY2, SYS_POLICY3, SYS_POLICY4, + SYS_POLICY5)); + + /** + * System policies sorted by name for fast querying. + */ + private static final Map SYSTEM_POLICIES_BY_NAME; + + /** + * System policies sorted by ID for fast querying. + */ + private static final Map SYSTEM_POLICIES_BY_ID; + + /** + * Populate the lookup maps in a static block. + */ + static { + SYSTEM_POLICIES_BY_NAME = new TreeMap<>(); + SYSTEM_POLICIES_BY_ID = new TreeMap<>(); + for (ErasureCodingPolicy policy : SYS_POLICIES) { + SYSTEM_POLICIES_BY_NAME.put(policy.getName(), policy); + SYSTEM_POLICIES_BY_ID.put(policy.getId(), policy); + } + } + + /** + * Get system defined policies. + * @return system policies + */ + public static List getPolicies() { + return SYS_POLICIES; + } + + /** + * Get a policy by policy ID. + * @return ecPolicy, or null if not found + */ + public static ErasureCodingPolicy getByID(byte id) { + return SYSTEM_POLICIES_BY_ID.get(id); + } + + /** + * Get a policy by policy name. 
+ * @return ecPolicy, or null if not found + */ + public static ErasureCodingPolicy getByName(String name) { + return SYSTEM_POLICIES_BY_NAME.get(name); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java index 258dde54f8..251193a958 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java @@ -31,9 +31,8 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.junit.Test; import org.junit.runners.model.FrameworkMethod; import org.junit.runners.model.Statement; @@ -142,7 +141,7 @@ public static Configuration getHdfsConf() { public static final Path ERASURE_CODING_DIR = new Path("/ec"); public static final Path ERASURE_CODING_FILE = new Path("/ec/ecfile"); public static final ErasureCodingPolicy ERASURE_CODING_POLICY = - ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID); + StripedFileTestUtil.getDefaultECPolicy(); private static MiniDFSCluster MINI_DFS = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java index 17b48f7ef3..177c0e09fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java @@ -20,11 +20,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.io.erasurecode.ErasureCodeConstants; -import java.util.Arrays; import java.util.Map; import java.util.TreeMap; import java.util.stream.Collectors; @@ -40,50 +39,12 @@ @InterfaceAudience.LimitedPrivate({"HDFS"}) public final class ErasureCodingPolicyManager { - /** - * TODO: HDFS-8095. 
- */ - private static final int DEFAULT_CELLSIZE = 64 * 1024; - private static final ErasureCodingPolicy SYS_POLICY1 = - new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_SCHEMA, - DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_POLICY_ID); - private static final ErasureCodingPolicy SYS_POLICY2 = - new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA, - DEFAULT_CELLSIZE, HdfsConstants.RS_3_2_POLICY_ID); - private static final ErasureCodingPolicy SYS_POLICY3 = - new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA, - DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID); - private static final ErasureCodingPolicy SYS_POLICY4 = - new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA, - DEFAULT_CELLSIZE, HdfsConstants.XOR_2_1_POLICY_ID); - private static final ErasureCodingPolicy SYS_POLICY5 = - new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA, - DEFAULT_CELLSIZE, HdfsConstants.RS_10_4_POLICY_ID); - - //We may add more later. - private static final ErasureCodingPolicy[] SYS_POLICIES = - new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3, - SYS_POLICY4, SYS_POLICY5}; - // Supported storage policies for striped EC files private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE = new byte[]{ HdfsConstants.HOT_STORAGE_POLICY_ID, HdfsConstants.COLD_STORAGE_POLICY_ID, HdfsConstants.ALLSSD_STORAGE_POLICY_ID}; - /** - * All supported policies maintained in NN memory for fast querying, - * identified and sorted by its name. - */ - private static final Map SYSTEM_POLICIES_BY_NAME; - - static { - // Create a hashmap of all available policies for quick lookup by name - SYSTEM_POLICIES_BY_NAME = new TreeMap<>(); - for (ErasureCodingPolicy policy : SYS_POLICIES) { - SYSTEM_POLICIES_BY_NAME.put(policy.getName(), policy); - } - } /** * All enabled policies maintained in NN memory for fast querying, @@ -101,9 +62,10 @@ public final class ErasureCodingPolicyManager { if (policyName.trim().isEmpty()) { continue; } - ErasureCodingPolicy ecPolicy = SYSTEM_POLICIES_BY_NAME.get(policyName); + ErasureCodingPolicy ecPolicy = + SystemErasureCodingPolicies.getByName(policyName); if (ecPolicy == null) { - String sysPolicies = Arrays.asList(SYS_POLICIES).stream() + String sysPolicies = SystemErasureCodingPolicies.getPolicies().stream() .map(ErasureCodingPolicy::getName) .collect(Collectors.joining(", ")); String msg = String.format("EC policy '%s' specified at %s is not a " + @@ -124,35 +86,6 @@ public final class ErasureCodingPolicyManager { */ } - /** - * Get system defined policies. - * @return system policies - */ - public static ErasureCodingPolicy[] getSystemPolicies() { - return SYS_POLICIES; - } - - /** - * Get a policy by policy ID. - * @return ecPolicy, or null if not found - */ - public static ErasureCodingPolicy getPolicyByID(byte id) { - for (ErasureCodingPolicy policy : SYS_POLICIES) { - if (policy.getId() == id) { - return policy; - } - } - return null; - } - - /** - * Get a policy by policy name. - * @return ecPolicy, or null if not found - */ - public static ErasureCodingPolicy getPolicyByName(String name) { - return SYSTEM_POLICIES_BY_NAME.get(name); - } - /** * Get the set of enabled policies. 
* @return all policies diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 50843c8706..aa9772d582 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.XAttrHelper; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; @@ -302,7 +303,7 @@ private static ErasureCodingPolicy getErasureCodingPolicyForPath( if (inode.isFile()) { byte id = inode.asFile().getErasureCodingPolicyID(); return id < 0 ? null : - ErasureCodingPolicyManager.getPolicyByID(id); + SystemErasureCodingPolicies.getByID(id); } // We don't allow setting EC policies on paths with a symlink. Thus // if a symlink is encountered, the dir shouldn't have EC policy. @@ -317,8 +318,7 @@ private static ErasureCodingPolicy getErasureCodingPolicyForPath( ByteArrayInputStream bIn = new ByteArrayInputStream(xattr.getValue()); DataInputStream dIn = new DataInputStream(bIn); String ecPolicyName = WritableUtils.readString(dIn); - return ErasureCodingPolicyManager - .getPolicyByName(ecPolicyName); + return SystemErasureCodingPolicies.getByName(ecPolicyName); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index eab728283b..9c89be1548 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; @@ -335,7 +336,7 @@ private INodeFile loadINodeFile(INodeSection.INode n) { assert ((!isStriped) || (isStriped && !f.hasReplication())); Short replication = (!isStriped ? (short) f.getReplication() : null); ErasureCodingPolicy ecPolicy = isStriped ? - ErasureCodingPolicyManager.getPolicyByID( + SystemErasureCodingPolicies.getByID( (byte) f.getErasureCodingPolicyID()) : null; Byte ecPolicyID = (isStriped ? 
ecPolicy.getId() : null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 3da6aa7c81..f35bf3ce5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; @@ -191,8 +192,8 @@ static long getBlockLayoutRedundancy(final BlockType blockType, if (blockType == STRIPED) { Preconditions.checkArgument(replication == null && erasureCodingPolicyID != null); - Preconditions.checkArgument(ErasureCodingPolicyManager - .getPolicyByID(erasureCodingPolicyID) != null, + Preconditions.checkArgument(SystemErasureCodingPolicies + .getByID(erasureCodingPolicyID) != null, "Could not find EC policy with ID 0x" + StringUtils .byteToHexString(erasureCodingPolicyID)); layoutRedundancy |= BLOCK_TYPE_MASK_STRIPED; @@ -516,8 +517,7 @@ public short getPreferredBlockReplication() { } ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getPolicyByID( - getErasureCodingPolicyID()); + SystemErasureCodingPolicies.getByID(getErasureCodingPolicyID()); Preconditions.checkNotNull(ecPolicy, "Could not find EC policy with ID 0x" + StringUtils.byteToHexString(getErasureCodingPolicyID())); return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 9b782f32f2..445e19dedc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -113,6 +113,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; @@ -138,7 +139,6 @@ import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSEditLog; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -283,8 +283,7 @@ public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) { public static void enableAllECPolicies(Configuration conf) { // Enable all the available EC policies - String policies = - Arrays.asList(ErasureCodingPolicyManager.getSystemPolicies()).stream() + String policies = 
SystemErasureCodingPolicies.getPolicies().stream() .map(ErasureCodingPolicy::getName) .collect(Collectors.joining(",")); conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, policies); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java index 8008ed3396..1bab5dbbd9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java @@ -27,13 +27,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.util.StripedBlockUtil; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream; import org.apache.hadoop.io.IOUtils; @@ -567,7 +566,6 @@ public static LocatedBlocks getLocatedBlocks(Path file, * @return ErasureCodingPolicy */ public static ErasureCodingPolicy getDefaultECPolicy() { - return ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.RS_6_3_POLICY_ID); + return SystemErasureCodingPolicies.getPolicies().get(0); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java index fa3e62cbfc..1d09a6c1c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.hdfs; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; /** * This tests read operation of DFS striped file with RS-10-4-64k @@ -29,7 +28,7 @@ public class TestDFSRSDefault10x4StripedInputStream extends TestDFSStripedInputStream { public ErasureCodingPolicy getEcPolicy() { - return ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.RS_10_4_POLICY_ID); + return SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.RS_10_4_POLICY_ID); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStream.java index f4dcdf735d..080033d364 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStream.java @@ -17,9 +17,8 @@ */ package 
org.apache.hadoop.hdfs; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; /** * This tests write operation of DFS striped file with RS-10-4-64k @@ -30,7 +29,7 @@ public class TestDFSRSDefault10x4StripedOutputStream @Override public ErasureCodingPolicy getEcPolicy() { - return ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.RS_10_4_POLICY_ID); + return SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.RS_10_4_POLICY_ID); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStreamWithFailure.java index 27d3ccfcc3..5de9dd3434 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStreamWithFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStreamWithFailure.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.hdfs; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; /** * This tests write operation of DFS striped file with RS-10-4-64k @@ -30,7 +29,7 @@ public class TestDFSRSDefault10x4StripedOutputStreamWithFailure @Override public ErasureCodingPolicy getEcPolicy() { - return ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.RS_10_4_POLICY_ID); + return SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.RS_10_4_POLICY_ID); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedInputStream.java index de0852810a..d19093da10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedInputStream.java @@ -16,9 +16,8 @@ * limitations under the License. 
*/ package org.apache.hadoop.hdfs; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; /** * This tests read operation of DFS striped file with XOR-2-1-64k erasure code @@ -27,7 +26,7 @@ public class TestDFSXORStripedInputStream extends TestDFSStripedInputStream{ public ErasureCodingPolicy getEcPolicy() { - return ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.XOR_2_1_POLICY_ID); + return SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.XOR_2_1_POLICY_ID); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStream.java index 658c9ba95d..ba620b8cc2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStream.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.hdfs; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; /** * This tests write operation of DFS striped file with XOR-2-1-64k erasure code @@ -29,7 +28,7 @@ public class TestDFSXORStripedOutputStream extends TestDFSStripedOutputStream{ @Override public ErasureCodingPolicy getEcPolicy() { - return ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.XOR_2_1_POLICY_ID); + return SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.XOR_2_1_POLICY_ID); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStreamWithFailure.java index c97644ecdc..d9ab3d5719 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStreamWithFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSXORStripedOutputStreamWithFailure.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.hdfs; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; /** * This tests write operation of DFS striped file with XOR-2-1-64k erasure code @@ -30,7 +29,7 @@ public class TestDFSXORStripedOutputStreamWithFailure @Override public ErasureCodingPolicy getEcPolicy() { - return ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.XOR_2_1_POLICY_ID); + return SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.XOR_2_1_POLICY_ID); } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java index 5ba4403d2e..c6f089093f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java @@ -23,9 +23,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.client.HdfsAdmin; @@ -44,8 +44,8 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.security.PrivilegedExceptionAction; -import java.util.Arrays; import java.util.Collection; +import java.util.List; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; import static org.junit.Assert.*; @@ -302,12 +302,12 @@ public void testGetErasureCodingPolicyWithSystemDefaultECPolicy() throws Excepti @Test public void testGetErasureCodingPolicy() throws Exception { - ErasureCodingPolicy[] sysECPolicies = - ErasureCodingPolicyManager.getSystemPolicies(); + List sysECPolicies = + SystemErasureCodingPolicies.getPolicies(); assertTrue("System ecPolicies should exist", - sysECPolicies.length > 0); + sysECPolicies.size() > 0); - ErasureCodingPolicy usingECPolicy = sysECPolicies[0]; + ErasureCodingPolicy usingECPolicy = sysECPolicies.get(0); String src = "/ec2"; final Path ecDir = new Path(src); fs.mkdir(ecDir, FsPermission.getDirDefault()); @@ -353,12 +353,10 @@ public void testSetInvalidPolicy() @Test public void testGetAllErasureCodingPolicies() throws Exception { - ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager - .getSystemPolicies(); Collection allECPolicies = fs .getAllErasureCodingPolicies(); assertTrue("All system policies should be enabled", - allECPolicies.containsAll(Arrays.asList(sysECPolicies))); + allECPolicies.containsAll(SystemErasureCodingPolicies.getPolicies())); } @Test @@ -383,9 +381,9 @@ public void testGetErasureCodingPolicyOnANonExistentFile() throws Exception { @Test public void testMultiplePoliciesCoExist() throws Exception { - ErasureCodingPolicy[] sysPolicies = - ErasureCodingPolicyManager.getSystemPolicies(); - if (sysPolicies.length > 1) { + List sysPolicies = + SystemErasureCodingPolicies.getPolicies(); + if (sysPolicies.size() > 1) { for (ErasureCodingPolicy policy : sysPolicies) { Path dir = new Path("/policy_" + policy.getId()); fs.mkdir(dir, FsPermission.getDefault()); @@ -425,8 +423,8 @@ public HdfsAdmin run() throws Exception { Path ecfile = new Path(ecdir, "ecfile"); fs.setPermission(new Path("/"), new FsPermission((short)0777)); userfs.mkdirs(ecdir); - final String ecPolicyName = - ErasureCodingPolicyManager.getSystemPolicies()[0].getName(); + final String ecPolicyName = StripedFileTestUtil.getDefaultECPolicy() + .getName(); useradmin.setErasureCodingPolicy(ecdir, ecPolicyName); assertEquals("Policy not present on dir", ecPolicyName, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java index 32f6bc8013..cc2453bbd3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java @@ -21,9 +21,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.io.erasurecode.CodecUtil; import org.apache.hadoop.io.erasurecode.ErasureCodeNative; import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory; @@ -138,8 +137,8 @@ public void testNestedEcPolicy() throws Exception { final Path ec63FilePath = new Path(childDir, "ec_6_3_file"); final Path ec32FilePath = new Path(childDir, "ec_3_2_file"); final Path ec63FilePath2 = new Path(childDir, "ec_6_3_file_2"); - final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager - .getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID); + final ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies + .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID); fs.mkdirs(parentDir); fs.setErasureCodingPolicy(parentDir, ecPolicy.getName()); @@ -236,8 +235,8 @@ public void testChangeRootDirEcPolicy() throws Exception { final Path rootPath = new Path("/"); final Path ec63FilePath = new Path(rootPath, "ec_6_3_file"); final Path ec32FilePath = new Path(rootPath, "ec_3_2_file"); - final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager - .getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID); + final ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies + .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID); fs.unsetErasureCodingPolicy(rootPath); fs.setErasureCodingPolicy(rootPath, ecPolicy.getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index e34837c7b7..579252bd38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -80,7 +80,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord; import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock; @@ -803,8 +802,7 @@ public void testRURReplicas() throws Exception { @Test(timeout=60000) public void testSafeLength() throws Exception { // hard coded policy to work with hard coded test suite - ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager - .getSystemPolicies()[0]; + ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy(); RecoveringStripedBlock rBlockStriped = new RecoveringStripedBlock(rBlock, new byte[9], ecPolicy); BlockRecoveryWorker recoveryWorker = new BlockRecoveryWorker(dn); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java index 4b4b196a06..e35fa119ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEnabledECPolicies.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.StripedFileTestUtil; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; @@ -41,9 +42,6 @@ */ public class TestEnabledECPolicies { - private static final ErasureCodingPolicy[] SYSTEM_POLICIES = - ErasureCodingPolicyManager.getSystemPolicies(); - @Rule public Timeout testTimeout = new Timeout(60000); @@ -112,13 +110,16 @@ public void testGetPolicies() throws Exception { testGetPolicies(enabledPolicies); // Enable one policy - enabledPolicies = new ErasureCodingPolicy[] - {SYSTEM_POLICIES[1]}; + enabledPolicies = new ErasureCodingPolicy[]{ + SystemErasureCodingPolicies.getPolicies().get(1) + }; testGetPolicies(enabledPolicies); // Enable two policies - enabledPolicies = new ErasureCodingPolicy[] - {SYSTEM_POLICIES[1], SYSTEM_POLICIES[2]}; + enabledPolicies = new ErasureCodingPolicy[]{ + SystemErasureCodingPolicies.getPolicies().get(1), + SystemErasureCodingPolicies.getPolicies().get(2) + }; testGetPolicies(enabledPolicies); } @@ -145,7 +146,7 @@ private void testGetPolicies(ErasureCodingPolicy[] enabledPolicies) } Assert.assertEquals(enabledPolicies.length, found.size()); // Check that getEnabledPolicyByName only returns enabled policies - for (ErasureCodingPolicy p: SYSTEM_POLICIES) { + for (ErasureCodingPolicy p: SystemErasureCodingPolicies.getPolicies()) { if (found.contains(p.getName())) { // Enabled policy should be present Assert.assertNotNull( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java index e1861643f3..22c40fb2e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; @@ -77,8 +78,8 @@ public class TestFSImage { private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ = "image-with-zero-block-size.tar.gz"; private static final ErasureCodingPolicy testECPolicy = - ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.RS_10_4_POLICY_ID); + SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.RS_10_4_POLICY_ID); @Test public void testPersist() throws IOException { @@ -470,8 +471,8 @@ public void testSupportBlockGroup() throws Exception { DistributedFileSystem fs = cluster.getFileSystem(); Path 
parentDir = new Path("/ec-10-4"); Path childDir = new Path(parentDir, "ec-3-2"); - ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager - .getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID); + ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies + .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID); // Create directories and files fs.mkdirs(parentDir); @@ -519,8 +520,8 @@ public void testSupportBlockGroup() throws Exception { // check the information of file_3_2 inode = fsn.dir.getINode(file_3_2.toString()).asFile(); assertTrue(inode.isStriped()); - assertEquals(ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.RS_3_2_POLICY_ID).getId(), + assertEquals(SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.RS_3_2_POLICY_ID).getId(), inode.getErasureCodingPolicyID()); blks = inode.getBlocks(); assertEquals(1, blks.length); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index ced6c7fc67..2246357343 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -107,7 +108,9 @@ INodeFile createINodeFile(short replication, long preferredBlockSize) { INodeFile createStripedINodeFile(long preferredBlockSize) { return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, - null, null, HdfsConstants.RS_6_3_POLICY_ID, preferredBlockSize, + null, null, + StripedFileTestUtil.getDefaultECPolicy().getId(), + preferredBlockSize, HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED); } @@ -140,7 +143,7 @@ public void testContiguousLayoutRedundancy() { try { new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, new Short((short) 3) /*replication*/, - HdfsConstants.RS_6_3_POLICY_ID /*ec policy*/, + StripedFileTestUtil.getDefaultECPolicy().getId() /*ec policy*/, preferredBlockSize, HdfsConstants.WARM_STORAGE_POLICY_ID, CONTIGUOUS); fail("INodeFile construction should fail when both replication and " + "ECPolicy requested!"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java index 2c6390011a..d2f41bec28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java @@ -74,14 +74,14 @@ public class TestStripedINodeFile { // use hard coded policy - see HDFS-9816 private static final ErasureCodingPolicy testECPolicy - = ErasureCodingPolicyManager.getSystemPolicies()[0]; + = StripedFileTestUtil.getDefaultECPolicy(); @Rule public Timeout globalTimeout = new Timeout(300000); private static INodeFile createStripedINodeFile() { return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, 
null, perm, 0L, 0L, - null, null, HdfsConstants.RS_6_3_POLICY_ID, 1024L, + null, null, StripedFileTestUtil.getDefaultECPolicy().getId(), 1024L, HdfsConstants.COLD_STORAGE_POLICY_ID, BlockType.STRIPED); } @@ -118,7 +118,7 @@ public void testStripedLayoutRedundancy() { try { new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, new Short((short) 3) /*replication*/, - HdfsConstants.RS_6_3_POLICY_ID /*ec policy*/, + StripedFileTestUtil.getDefaultECPolicy().getId() /*ec policy*/, 1024L, HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED); fail("INodeFile construction should fail when both replication and " + "ECPolicy requested!"); @@ -147,7 +147,7 @@ public void testStripedLayoutRedundancy() { LOG.info("Expected exception: ", iae); } - final Byte ecPolicyID = HdfsConstants.RS_6_3_POLICY_ID; + final Byte ecPolicyID = StripedFileTestUtil.getDefaultECPolicy().getId(); try { new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, null /*replication*/, ecPolicyID, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index 0656249cfc..7182071a41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -78,9 +78,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.BlockType; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; @@ -124,9 +123,8 @@ public static void createOriginalFSImage() throws IOException { tempDir = Files.createTempDir(); MiniDFSCluster cluster = null; try { - final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.XOR_2_1_POLICY_ID); + final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies + .getByID(SystemErasureCodingPolicies.XOR_2_1_POLICY_ID); Configuration conf = new Configuration(); conf.setLong( @@ -412,8 +410,7 @@ public void endElement(String uri, String localName, String qName) Assert.assertEquals("INode '" + currentInodeName + "' has unexpected EC Policy!", Byte.parseByte(currentECPolicy), - ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.XOR_2_1_POLICY_ID).getId()); + SystemErasureCodingPolicies.XOR_2_1_POLICY_ID); Assert.assertEquals("INode '" + currentInodeName + "' has unexpected replication!", currentRepl, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java index c1728ac97f..5de63eb4eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java @@ -21,6 
+21,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.StripedFileTestUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -28,7 +29,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager; import static org.apache.hadoop.hdfs.util.StripedBlockUtil.*; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.junit.Before; import org.junit.Rule; @@ -81,7 +81,7 @@ public class TestStripedBlockUtil { // use hard coded policy - see HDFS-9816 private final ErasureCodingPolicy ecPolicy = - ErasureCodingPolicyManager.getSystemPolicies()[0]; + StripedFileTestUtil.getDefaultECPolicy(); private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); private final short groupSize = (short) (dataBlocks + parityBlocks); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index 46a8a56f1c..60d90fb37b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -81,12 +81,12 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; -import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; @@ -520,8 +520,8 @@ public void testWebHdfsErasureCodingFiles() throws Exception { MiniDFSCluster cluster = null; final Configuration conf = WebHdfsTestUtil.createConf(); conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, - ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.XOR_2_1_POLICY_ID).getName()); + SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.XOR_2_1_POLICY_ID).getName()); try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); @@ -532,8 +532,8 @@ public void testWebHdfsErasureCodingFiles() throws Exception { final Path ecDir = new Path("/ec"); dfs.mkdirs(ecDir); dfs.setErasureCodingPolicy(ecDir, - ErasureCodingPolicyManager.getPolicyByID( - HdfsConstants.XOR_2_1_POLICY_ID).getName()); + SystemErasureCodingPolicies.getByID( + SystemErasureCodingPolicies.XOR_2_1_POLICY_ID).getName()); final Path ecFile = new Path(ecDir, "ec-file.log"); DFSTestUtil.createFile(dfs, ecFile, 1024 * 10, (short) 1, 0xFEED);
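
Usage note (not part of the patch): a minimal sketch of how callers can query the relocated policy table once this change is applied. It only exercises the accessors introduced in SystemErasureCodingPolicies (getPolicies, getByID, getByName) and the XOR_2_1_POLICY_ID constant; the wrapper class name and printed output are illustrative assumptions.

// Illustrative only; EcPolicyLookupExample is a hypothetical class name.
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

public class EcPolicyLookupExample {
  public static void main(String[] args) {
    // Enumerate every built-in policy now shipped in hadoop-hdfs-client.
    for (ErasureCodingPolicy policy : SystemErasureCodingPolicies.getPolicies()) {
      System.out.println(policy.getId() + " -> " + policy.getName());
    }

    // Look up a policy by its stable ID; getByID and getByName both
    // return null when no built-in policy matches.
    ErasureCodingPolicy xor = SystemErasureCodingPolicies.getByID(
        SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
    if (xor != null) {
      System.out.println("Found by name too: "
          + (SystemErasureCodingPolicies.getByName(xor.getName()) != null));
    }
  }
}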