HDFS-11623. Move system erasure coding policies into hadoop-hdfs-client.

Andrew Wang 2017-04-07 16:46:28 -07:00
parent 203edc026c
commit e8bdad7385
25 changed files with 212 additions and 176 deletions

View File

@@ -144,12 +144,6 @@ public enum DatanodeReportType {
     ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE
   }
 
-  public static final byte RS_6_3_POLICY_ID = 1;
-  public static final byte RS_3_2_POLICY_ID = 2;
-  public static final byte RS_6_3_LEGACY_POLICY_ID = 3;
-  public static final byte XOR_2_1_POLICY_ID = 4;
-  public static final byte RS_10_4_POLICY_ID = 5;
-
   /* Hidden constructor */
   protected HdfsConstants() {
   }

View File

@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * <p>The set of built-in erasure coding policies.</p>
+ *
+ * <p>Although this is a private class, EC policy IDs need to be treated like a
+ * stable interface. Adding, modifying, or removing built-in policies can cause
+ * inconsistencies with older clients.</p>
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public final class SystemErasureCodingPolicies {
+
+  // Private constructor, this is a utility class.
+  private SystemErasureCodingPolicies() {}
+
+  // 64 KB
+  private static final int DEFAULT_CELLSIZE = 64 * 1024;
+
+  public static final byte RS_6_3_POLICY_ID = 1;
+  private static final ErasureCodingPolicy SYS_POLICY1 =
+      new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_SCHEMA,
+          DEFAULT_CELLSIZE, RS_6_3_POLICY_ID);
+
+  public static final byte RS_3_2_POLICY_ID = 2;
+  private static final ErasureCodingPolicy SYS_POLICY2 =
+      new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA,
+          DEFAULT_CELLSIZE, RS_3_2_POLICY_ID);
+
+  public static final byte RS_6_3_LEGACY_POLICY_ID = 3;
+  private static final ErasureCodingPolicy SYS_POLICY3 =
+      new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
+          DEFAULT_CELLSIZE, RS_6_3_LEGACY_POLICY_ID);
+
+  public static final byte XOR_2_1_POLICY_ID = 4;
+  private static final ErasureCodingPolicy SYS_POLICY4 =
+      new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA,
+          DEFAULT_CELLSIZE, XOR_2_1_POLICY_ID);
+
+  public static final byte RS_10_4_POLICY_ID = 5;
+  private static final ErasureCodingPolicy SYS_POLICY5 =
+      new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA,
+          DEFAULT_CELLSIZE, RS_10_4_POLICY_ID);
+
+  private static final List<ErasureCodingPolicy> SYS_POLICIES =
+      Collections.unmodifiableList(Arrays.asList(
+          SYS_POLICY1, SYS_POLICY2, SYS_POLICY3, SYS_POLICY4,
+          SYS_POLICY5));
+
+  /**
+   * System policies sorted by name for fast querying.
+   */
+  private static final Map<String, ErasureCodingPolicy> SYSTEM_POLICIES_BY_NAME;
+
+  /**
+   * System policies sorted by ID for fast querying.
+   */
+  private static final Map<Byte, ErasureCodingPolicy> SYSTEM_POLICIES_BY_ID;
+
+  /**
+   * Populate the lookup maps in a static block.
+   */
+  static {
+    SYSTEM_POLICIES_BY_NAME = new TreeMap<>();
+    SYSTEM_POLICIES_BY_ID = new TreeMap<>();
+    for (ErasureCodingPolicy policy : SYS_POLICIES) {
+      SYSTEM_POLICIES_BY_NAME.put(policy.getName(), policy);
+      SYSTEM_POLICIES_BY_ID.put(policy.getId(), policy);
+    }
+  }
+
+  /**
+   * Get system defined policies.
+   * @return system policies
+   */
+  public static List<ErasureCodingPolicy> getPolicies() {
+    return SYS_POLICIES;
+  }
+
+  /**
+   * Get a policy by policy ID.
+   * @return ecPolicy, or null if not found
+   */
+  public static ErasureCodingPolicy getByID(byte id) {
+    return SYSTEM_POLICIES_BY_ID.get(id);
+  }
+
+  /**
+   * Get a policy by policy name.
+   * @return ecPolicy, or null if not found
+   */
+  public static ErasureCodingPolicy getByName(String name) {
+    return SYSTEM_POLICIES_BY_NAME.get(name);
+  }
+}
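
For reference, a minimal usage sketch of the lookup API defined above (EcPolicyLookupDemo is a hypothetical class name; it assumes hadoop-hdfs-client is on the classpath):

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

    public class EcPolicyLookupDemo {
      public static void main(String[] args) {
        // Enumerate the built-in policies with their stable IDs.
        for (ErasureCodingPolicy p : SystemErasureCodingPolicies.getPolicies()) {
          System.out.println(p.getId() + " -> " + p.getName());
        }
        // Point lookups by ID and by name; both return null on a miss.
        ErasureCodingPolicy rs63 = SystemErasureCodingPolicies.getByID(
            SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
        System.out.println("getByName round-trip: "
            + (SystemErasureCodingPolicies.getByName(rs63.getName()) == rs63));
      }
    }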

View File

@@ -31,9 +31,8 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.junit.Test;
 import org.junit.runners.model.FrameworkMethod;
 import org.junit.runners.model.Statement;
@@ -142,7 +141,7 @@ public static Configuration getHdfsConf() {
   public static final Path ERASURE_CODING_DIR = new Path("/ec");
   public static final Path ERASURE_CODING_FILE = new Path("/ec/ecfile");
   public static final ErasureCodingPolicy ERASURE_CODING_POLICY =
-      ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID);
+      StripedFileTestUtil.getDefaultECPolicy();
 
   private static MiniDFSCluster MINI_DFS = null;

View File

@@ -20,11 +20,10 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 
-import java.util.Arrays;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
@@ -40,50 +39,12 @@
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 public final class ErasureCodingPolicyManager {
 
-  /**
-   * TODO: HDFS-8095.
-   */
-  private static final int DEFAULT_CELLSIZE = 64 * 1024;
-  private static final ErasureCodingPolicy SYS_POLICY1 =
-      new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_SCHEMA,
-      DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_POLICY_ID);
-  private static final ErasureCodingPolicy SYS_POLICY2 =
-      new ErasureCodingPolicy(ErasureCodeConstants.RS_3_2_SCHEMA,
-      DEFAULT_CELLSIZE, HdfsConstants.RS_3_2_POLICY_ID);
-  private static final ErasureCodingPolicy SYS_POLICY3 =
-      new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
-      DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID);
-  private static final ErasureCodingPolicy SYS_POLICY4 =
-      new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA,
-      DEFAULT_CELLSIZE, HdfsConstants.XOR_2_1_POLICY_ID);
-  private static final ErasureCodingPolicy SYS_POLICY5 =
-      new ErasureCodingPolicy(ErasureCodeConstants.RS_10_4_SCHEMA,
-      DEFAULT_CELLSIZE, HdfsConstants.RS_10_4_POLICY_ID);
-
-  //We may add more later.
-  private static final ErasureCodingPolicy[] SYS_POLICIES =
-      new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3,
-          SYS_POLICY4, SYS_POLICY5};
-
   // Supported storage policies for striped EC files
   private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE =
       new byte[]{
           HdfsConstants.HOT_STORAGE_POLICY_ID,
           HdfsConstants.COLD_STORAGE_POLICY_ID,
           HdfsConstants.ALLSSD_STORAGE_POLICY_ID};
 
-  /**
-   * All supported policies maintained in NN memory for fast querying,
-   * identified and sorted by its name.
-   */
-  private static final Map<String, ErasureCodingPolicy> SYSTEM_POLICIES_BY_NAME;
-
-  static {
-    // Create a hashmap of all available policies for quick lookup by name
-    SYSTEM_POLICIES_BY_NAME = new TreeMap<>();
-    for (ErasureCodingPolicy policy : SYS_POLICIES) {
-      SYSTEM_POLICIES_BY_NAME.put(policy.getName(), policy);
-    }
-  }
-
   /**
    * All enabled policies maintained in NN memory for fast querying,
@@ -101,9 +62,10 @@ public final class ErasureCodingPolicyManager {
       if (policyName.trim().isEmpty()) {
         continue;
       }
-      ErasureCodingPolicy ecPolicy = SYSTEM_POLICIES_BY_NAME.get(policyName);
+      ErasureCodingPolicy ecPolicy =
+          SystemErasureCodingPolicies.getByName(policyName);
       if (ecPolicy == null) {
-        String sysPolicies = Arrays.asList(SYS_POLICIES).stream()
+        String sysPolicies = SystemErasureCodingPolicies.getPolicies().stream()
             .map(ErasureCodingPolicy::getName)
             .collect(Collectors.joining(", "));
         String msg = String.format("EC policy '%s' specified at %s is not a " +
@@ -124,35 +86,6 @@ public final class ErasureCodingPolicyManager {
    */
   }
 
-  /**
-   * Get system defined policies.
-   * @return system policies
-   */
-  public static ErasureCodingPolicy[] getSystemPolicies() {
-    return SYS_POLICIES;
-  }
-
-  /**
-   * Get a policy by policy ID.
-   * @return ecPolicy, or null if not found
-   */
-  public static ErasureCodingPolicy getPolicyByID(byte id) {
-    for (ErasureCodingPolicy policy : SYS_POLICIES) {
-      if (policy.getId() == id) {
-        return policy;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Get a policy by policy name.
-   * @return ecPolicy, or null if not found
-   */
-  public static ErasureCodingPolicy getPolicyByName(String name) {
-    return SYSTEM_POLICIES_BY_NAME.get(name);
-  }
-
   /**
    * Get the set of enabled policies.
    * @return all policies

View File

@@ -37,6 +37,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -302,7 +303,7 @@ private static ErasureCodingPolicy getErasureCodingPolicyForPath(
       if (inode.isFile()) {
         byte id = inode.asFile().getErasureCodingPolicyID();
         return id < 0 ? null :
-            ErasureCodingPolicyManager.getPolicyByID(id);
+            SystemErasureCodingPolicies.getByID(id);
       }
       // We don't allow setting EC policies on paths with a symlink. Thus
       // if a symlink is encountered, the dir shouldn't have EC policy.
@@ -317,8 +318,7 @@ private static ErasureCodingPolicy getErasureCodingPolicyForPath(
           ByteArrayInputStream bIn = new ByteArrayInputStream(xattr.getValue());
           DataInputStream dIn = new DataInputStream(bIn);
           String ecPolicyName = WritableUtils.readString(dIn);
-          return ErasureCodingPolicyManager
-              .getPolicyByName(ecPolicyName);
+          return SystemErasureCodingPolicies.getByName(ecPolicyName);
         }
       }
     }

View File

@@ -39,6 +39,7 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -335,7 +336,7 @@ private INodeFile loadINodeFile(INodeSection.INode n) {
       assert ((!isStriped) || (isStriped && !f.hasReplication()));
       Short replication = (!isStriped ? (short) f.getReplication() : null);
       ErasureCodingPolicy ecPolicy = isStriped ?
-          ErasureCodingPolicyManager.getPolicyByID(
+          SystemErasureCodingPolicies.getByID(
               (byte) f.getErasureCodingPolicyID()) : null;
       Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);

View File

@@ -37,6 +37,7 @@
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -191,8 +192,8 @@ static long getBlockLayoutRedundancy(final BlockType blockType,
     if (blockType == STRIPED) {
       Preconditions.checkArgument(replication == null &&
           erasureCodingPolicyID != null);
-      Preconditions.checkArgument(ErasureCodingPolicyManager
-          .getPolicyByID(erasureCodingPolicyID) != null,
+      Preconditions.checkArgument(SystemErasureCodingPolicies
+          .getByID(erasureCodingPolicyID) != null,
           "Could not find EC policy with ID 0x" + StringUtils
               .byteToHexString(erasureCodingPolicyID));
       layoutRedundancy |= BLOCK_TYPE_MASK_STRIPED;
@@ -516,8 +517,7 @@ public short getPreferredBlockReplication() {
     }
 
     ErasureCodingPolicy ecPolicy =
-        ErasureCodingPolicyManager.getPolicyByID(
-            getErasureCodingPolicyID());
+        SystemErasureCodingPolicies.getByID(getErasureCodingPolicyID());
     Preconditions.checkNotNull(ecPolicy, "Could not find EC policy with ID 0x"
         + StringUtils.byteToHexString(getErasureCodingPolicyID()));
     return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());

View File

@@ -113,6 +113,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -138,7 +139,6 @@
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -283,8 +283,7 @@ public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) {
   public static void enableAllECPolicies(Configuration conf) {
     // Enable all the available EC policies
-    String policies =
-        Arrays.asList(ErasureCodingPolicyManager.getSystemPolicies()).stream()
+    String policies = SystemErasureCodingPolicies.getPolicies().stream()
         .map(ErasureCodingPolicy::getName)
         .collect(Collectors.joining(","));
     conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, policies);
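
Standalone, the same pattern looks like this (a sketch; EnableAllPolicies is a hypothetical class name, and the comments describe only what the hunk above already does):

    import java.util.stream.Collectors;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

    public class EnableAllPolicies {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Join every built-in policy name into the comma-separated value
        // that the NameNode's enabled-policies key expects.
        String policies = SystemErasureCodingPolicies.getPolicies().stream()
            .map(ErasureCodingPolicy::getName)
            .collect(Collectors.joining(","));
        conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, policies);
        System.out.println("Enabled EC policies: " + policies);
      }
    }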

View File

@@ -27,13 +27,12 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream;
 import org.apache.hadoop.io.IOUtils;
@@ -567,7 +566,6 @@ public static LocatedBlocks getLocatedBlocks(Path file,
    * @return ErasureCodingPolicy
    */
   public static ErasureCodingPolicy getDefaultECPolicy() {
-    return ErasureCodingPolicyManager.getPolicyByID(
-        HdfsConstants.RS_6_3_POLICY_ID);
+    return SystemErasureCodingPolicies.getPolicies().get(0);
   }
 }

View File

@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 
 /**
  * This tests read operation of DFS striped file with RS-10-4-64k
@@ -29,7 +28,7 @@ public class TestDFSRSDefault10x4StripedInputStream extends
     TestDFSStripedInputStream {
 
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getPolicyByID(
-        HdfsConstants.RS_10_4_POLICY_ID);
+    return SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.RS_10_4_POLICY_ID);
   }
 }

View File

@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 
 /**
  * This tests write operation of DFS striped file with RS-10-4-64k
@@ -30,7 +29,7 @@ public class TestDFSRSDefault10x4StripedOutputStream
 
   @Override
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getPolicyByID(
-        HdfsConstants.RS_10_4_POLICY_ID);
+    return SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.RS_10_4_POLICY_ID);
   }
 }

View File

@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 
 /**
  * This tests write operation of DFS striped file with RS-10-4-64k
@@ -30,7 +29,7 @@ public class TestDFSRSDefault10x4StripedOutputStreamWithFailure
 
   @Override
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getPolicyByID(
-        HdfsConstants.RS_10_4_POLICY_ID);
+    return SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.RS_10_4_POLICY_ID);
   }
 }

View File

@@ -16,9 +16,8 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 
 /**
  * This tests read operation of DFS striped file with XOR-2-1-64k erasure code
@@ -27,7 +26,7 @@
 public class TestDFSXORStripedInputStream extends TestDFSStripedInputStream{
 
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getPolicyByID(
-        HdfsConstants.XOR_2_1_POLICY_ID);
+    return SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
   }
 }

View File

@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 
 /**
  * This tests write operation of DFS striped file with XOR-2-1-64k erasure code
@@ -29,7 +28,7 @@ public class TestDFSXORStripedOutputStream extends TestDFSStripedOutputStream{
 
   @Override
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getPolicyByID(
-        HdfsConstants.XOR_2_1_POLICY_ID);
+    return SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
   }
 }

View File

@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 
 /**
  * This tests write operation of DFS striped file with XOR-2-1-64k erasure code
@@ -30,7 +29,7 @@ public class TestDFSXORStripedOutputStreamWithFailure
 
   @Override
   public ErasureCodingPolicy getEcPolicy() {
-    return ErasureCodingPolicyManager.getPolicyByID(
-        HdfsConstants.XOR_2_1_POLICY_ID);
+    return SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
   }
 }

View File

@@ -23,9 +23,9 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
@@ -44,8 +44,8 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
 import java.util.Collection;
+import java.util.List;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.*;
@@ -302,12 +302,12 @@ public void testGetErasureCodingPolicyWithSystemDefaultECPolicy() throws Excepti
 
   @Test
   public void testGetErasureCodingPolicy() throws Exception {
-    ErasureCodingPolicy[] sysECPolicies =
-        ErasureCodingPolicyManager.getSystemPolicies();
+    List<ErasureCodingPolicy> sysECPolicies =
+        SystemErasureCodingPolicies.getPolicies();
     assertTrue("System ecPolicies should exist",
-        sysECPolicies.length > 0);
+        sysECPolicies.size() > 0);
 
-    ErasureCodingPolicy usingECPolicy = sysECPolicies[0];
+    ErasureCodingPolicy usingECPolicy = sysECPolicies.get(0);
     String src = "/ec2";
     final Path ecDir = new Path(src);
     fs.mkdir(ecDir, FsPermission.getDirDefault());
@@ -353,12 +353,10 @@ public void testSetInvalidPolicy()
 
   @Test
   public void testGetAllErasureCodingPolicies() throws Exception {
-    ErasureCodingPolicy[] sysECPolicies = ErasureCodingPolicyManager
-        .getSystemPolicies();
     Collection<ErasureCodingPolicy> allECPolicies = fs
         .getAllErasureCodingPolicies();
     assertTrue("All system policies should be enabled",
-        allECPolicies.containsAll(Arrays.asList(sysECPolicies)));
+        allECPolicies.containsAll(SystemErasureCodingPolicies.getPolicies()));
   }
 
   @Test
@@ -383,9 +381,9 @@ public void testGetErasureCodingPolicyOnANonExistentFile() throws Exception {
 
   @Test
   public void testMultiplePoliciesCoExist() throws Exception {
-    ErasureCodingPolicy[] sysPolicies =
-        ErasureCodingPolicyManager.getSystemPolicies();
-    if (sysPolicies.length > 1) {
+    List<ErasureCodingPolicy> sysPolicies =
+        SystemErasureCodingPolicies.getPolicies();
+    if (sysPolicies.size() > 1) {
       for (ErasureCodingPolicy policy : sysPolicies) {
         Path dir = new Path("/policy_" + policy.getId());
         fs.mkdir(dir, FsPermission.getDefault());
@@ -425,8 +423,8 @@ public HdfsAdmin run() throws Exception {
     Path ecfile = new Path(ecdir, "ecfile");
     fs.setPermission(new Path("/"), new FsPermission((short)0777));
     userfs.mkdirs(ecdir);
-    final String ecPolicyName =
-        ErasureCodingPolicyManager.getSystemPolicies()[0].getName();
+    final String ecPolicyName = StripedFileTestUtil.getDefaultECPolicy()
+        .getName();
     useradmin.setErasureCodingPolicy(ecdir, ecPolicyName);
     assertEquals("Policy not present on dir",
         ecPolicyName,

View File

@@ -21,9 +21,8 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
@@ -138,8 +137,8 @@ public void testNestedEcPolicy() throws Exception {
     final Path ec63FilePath = new Path(childDir, "ec_6_3_file");
     final Path ec32FilePath = new Path(childDir, "ec_3_2_file");
     final Path ec63FilePath2 = new Path(childDir, "ec_6_3_file_2");
-    final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager
-        .getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID);
+    final ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies
+        .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
 
     fs.mkdirs(parentDir);
     fs.setErasureCodingPolicy(parentDir, ecPolicy.getName());
@@ -236,8 +235,8 @@ public void testChangeRootDirEcPolicy() throws Exception {
     final Path rootPath = new Path("/");
     final Path ec63FilePath = new Path(rootPath, "ec_6_3_file");
     final Path ec32FilePath = new Path(rootPath, "ec_3_2_file");
-    final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager
-        .getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID);
+    final ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies
+        .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
 
     fs.unsetErasureCodingPolicy(rootPath);
     fs.setErasureCodingPolicy(rootPath, ecPolicy.getName());

View File

@@ -80,7 +80,6 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
@@ -803,8 +802,7 @@ public void testRURReplicas() throws Exception {
 
   @Test(timeout=60000)
   public void testSafeLength() throws Exception {
     // hard coded policy to work with hard coded test suite
-    ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager
-        .getSystemPolicies()[0];
+    ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();
     RecoveringStripedBlock rBlockStriped = new RecoveringStripedBlock(rBlock,
         new byte[9], ecPolicy);
     BlockRecoveryWorker recoveryWorker = new BlockRecoveryWorker(dn);

View File

@@ -20,6 +20,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -41,9 +42,6 @@
  */
 public class TestEnabledECPolicies {
 
-  private static final ErasureCodingPolicy[] SYSTEM_POLICIES =
-      ErasureCodingPolicyManager.getSystemPolicies();
-
   @Rule
   public Timeout testTimeout = new Timeout(60000);
@@ -112,13 +110,16 @@ public void testGetPolicies() throws Exception {
     testGetPolicies(enabledPolicies);
 
     // Enable one policy
-    enabledPolicies = new ErasureCodingPolicy[]
-        {SYSTEM_POLICIES[1]};
+    enabledPolicies = new ErasureCodingPolicy[]{
+        SystemErasureCodingPolicies.getPolicies().get(1)
+    };
     testGetPolicies(enabledPolicies);
 
     // Enable two policies
-    enabledPolicies = new ErasureCodingPolicy[]
-        {SYSTEM_POLICIES[1], SYSTEM_POLICIES[2]};
+    enabledPolicies = new ErasureCodingPolicy[]{
+        SystemErasureCodingPolicies.getPolicies().get(1),
+        SystemErasureCodingPolicies.getPolicies().get(2)
+    };
     testGetPolicies(enabledPolicies);
   }
@@ -145,7 +146,7 @@ private void testGetPolicies(ErasureCodingPolicy[] enabledPolicies)
     }
     Assert.assertEquals(enabledPolicies.length, found.size());
     // Check that getEnabledPolicyByName only returns enabled policies
-    for (ErasureCodingPolicy p: SYSTEM_POLICIES) {
+    for (ErasureCodingPolicy p: SystemErasureCodingPolicies.getPolicies()) {
       if (found.contains(p.getName())) {
         // Enabled policy should be present
         Assert.assertNotNull(

View File

@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -77,8 +78,8 @@ public class TestFSImage {
   private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ =
       "image-with-zero-block-size.tar.gz";
   private static final ErasureCodingPolicy testECPolicy =
-      ErasureCodingPolicyManager.getPolicyByID(
-          HdfsConstants.RS_10_4_POLICY_ID);
+      SystemErasureCodingPolicies.getByID(
+          SystemErasureCodingPolicies.RS_10_4_POLICY_ID);
 
   @Test
   public void testPersist() throws IOException {
@@ -470,8 +471,8 @@ public void testSupportBlockGroup() throws Exception {
     DistributedFileSystem fs = cluster.getFileSystem();
     Path parentDir = new Path("/ec-10-4");
     Path childDir = new Path(parentDir, "ec-3-2");
-    ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager
-        .getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID);
+    ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies
+        .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
 
     // Create directories and files
     fs.mkdirs(parentDir);
@@ -519,8 +520,8 @@ public void testSupportBlockGroup() throws Exception {
     // check the information of file_3_2
     inode = fsn.dir.getINode(file_3_2.toString()).asFile();
     assertTrue(inode.isStriped());
-    assertEquals(ErasureCodingPolicyManager.getPolicyByID(
-        HdfsConstants.RS_3_2_POLICY_ID).getId(),
+    assertEquals(SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.RS_3_2_POLICY_ID).getId(),
         inode.getErasureCodingPolicyID());
     blks = inode.getBlocks();
     assertEquals(1, blks.length);

View File

@@ -57,6 +57,7 @@
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -107,7 +108,9 @@ INodeFile createINodeFile(short replication, long preferredBlockSize) {
 
   INodeFile createStripedINodeFile(long preferredBlockSize) {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
-        null, null, HdfsConstants.RS_6_3_POLICY_ID, preferredBlockSize,
+        null, null,
+        StripedFileTestUtil.getDefaultECPolicy().getId(),
+        preferredBlockSize,
         HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED);
   }
@@ -140,7 +143,7 @@ public void testContiguousLayoutRedundancy() {
     try {
       new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
           null, perm, 0L, 0L, null, new Short((short) 3) /*replication*/,
-          HdfsConstants.RS_6_3_POLICY_ID /*ec policy*/,
+          StripedFileTestUtil.getDefaultECPolicy().getId() /*ec policy*/,
           preferredBlockSize, HdfsConstants.WARM_STORAGE_POLICY_ID, CONTIGUOUS);
       fail("INodeFile construction should fail when both replication and " +
           "ECPolicy requested!");

View File

@@ -74,14 +74,14 @@ public class TestStripedINodeFile {
 
   // use hard coded policy - see HDFS-9816
   private static final ErasureCodingPolicy testECPolicy
-      = ErasureCodingPolicyManager.getSystemPolicies()[0];
+      = StripedFileTestUtil.getDefaultECPolicy();
 
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
   private static INodeFile createStripedINodeFile() {
     return new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
-        null, null, HdfsConstants.RS_6_3_POLICY_ID, 1024L,
+        null, null, StripedFileTestUtil.getDefaultECPolicy().getId(), 1024L,
         HdfsConstants.COLD_STORAGE_POLICY_ID, BlockType.STRIPED);
   }
@@ -118,7 +118,7 @@ public void testStripedLayoutRedundancy() {
     try {
       new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
           null, perm, 0L, 0L, null, new Short((short) 3) /*replication*/,
-          HdfsConstants.RS_6_3_POLICY_ID /*ec policy*/,
+          StripedFileTestUtil.getDefaultECPolicy().getId() /*ec policy*/,
           1024L, HdfsConstants.WARM_STORAGE_POLICY_ID, STRIPED);
       fail("INodeFile construction should fail when both replication and " +
           "ECPolicy requested!");
@@ -147,7 +147,7 @@ public void testStripedLayoutRedundancy() {
       LOG.info("Expected exception: ", iae);
     }
 
-    final Byte ecPolicyID = HdfsConstants.RS_6_3_POLICY_ID;
+    final Byte ecPolicyID = StripedFileTestUtil.getDefaultECPolicy().getId();
     try {
       new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
           null, perm, 0L, 0L, null, null /*replication*/, ecPolicyID,

View File

@@ -78,9 +78,8 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
@@ -124,9 +123,8 @@ public static void createOriginalFSImage() throws IOException {
     tempDir = Files.createTempDir();
     MiniDFSCluster cluster = null;
     try {
-      final ErasureCodingPolicy ecPolicy =
-          ErasureCodingPolicyManager.getPolicyByID(
-              HdfsConstants.XOR_2_1_POLICY_ID);
+      final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
+          .getByID(SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
 
       Configuration conf = new Configuration();
       conf.setLong(
@@ -412,8 +410,7 @@ public void endElement(String uri, String localName, String qName)
           Assert.assertEquals("INode '"
               + currentInodeName + "' has unexpected EC Policy!",
               Byte.parseByte(currentECPolicy),
-              ErasureCodingPolicyManager.getPolicyByID(
-                  HdfsConstants.XOR_2_1_POLICY_ID).getId());
+              SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
           Assert.assertEquals("INode '"
               + currentInodeName + "' has unexpected replication!",
              currentRepl,

View File

@@ -21,6 +21,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -28,7 +29,6 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.*;
 
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.junit.Before;
 import org.junit.Rule;
@@ -81,7 +81,7 @@
 public class TestStripedBlockUtil {
   // use hard coded policy - see HDFS-9816
   private final ErasureCodingPolicy ecPolicy =
-      ErasureCodingPolicyManager.getSystemPolicies()[0];
+      StripedFileTestUtil.getDefaultECPolicy();
   private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
   private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
   private final short groupSize = (short) (dataBlocks + parityBlocks);

View File

@@ -81,12 +81,12 @@
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
-import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -520,8 +520,8 @@ public void testWebHdfsErasureCodingFiles() throws Exception {
     MiniDFSCluster cluster = null;
     final Configuration conf = WebHdfsTestUtil.createConf();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-        ErasureCodingPolicyManager.getPolicyByID(
-            HdfsConstants.XOR_2_1_POLICY_ID).getName());
+        SystemErasureCodingPolicies.getByID(
+            SystemErasureCodingPolicies.XOR_2_1_POLICY_ID).getName());
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       cluster.waitActive();
@@ -532,8 +532,8 @@ public void testWebHdfsErasureCodingFiles() throws Exception {
       final Path ecDir = new Path("/ec");
       dfs.mkdirs(ecDir);
       dfs.setErasureCodingPolicy(ecDir,
-          ErasureCodingPolicyManager.getPolicyByID(
-              HdfsConstants.XOR_2_1_POLICY_ID).getName());
+          SystemErasureCodingPolicies.getByID(
+              SystemErasureCodingPolicies.XOR_2_1_POLICY_ID).getName());
       final Path ecFile = new Path(ecDir, "ec-file.log");
       DFSTestUtil.createFile(dfs, ecFile, 1024 * 10, (short) 1, 0xFEED);