HDFS-10994. Support an XOR policy XOR-2-1-64k in HDFS. Contributed by Sammi Chen

This commit is contained in:
Kai Zheng 2016-11-30 15:52:56 +08:00
parent cfd8076f81
commit 51e6c1cc3f
11 changed files with 240 additions and 41 deletions

View File

@ -38,4 +38,7 @@ private ErasureCodeConstants() {
public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema( public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
RS_LEGACY_CODEC_NAME, 6, 3); RS_LEGACY_CODEC_NAME, 6, 3);
public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
XOR_CODEC_NAME, 2, 1);
} }

View File

@ -147,6 +147,7 @@ public enum DatanodeReportType {
public static final byte RS_6_3_POLICY_ID = 0; public static final byte RS_6_3_POLICY_ID = 0;
public static final byte RS_3_2_POLICY_ID = 1; public static final byte RS_3_2_POLICY_ID = 1;
public static final byte RS_6_3_LEGACY_POLICY_ID = 2; public static final byte RS_6_3_LEGACY_POLICY_ID = 2;
public static final byte XOR_2_1_POLICY_ID = 3;
/* Hidden constructor */ /* Hidden constructor */
protected HdfsConstants() { protected HdfsConstants() {

View File

@ -36,7 +36,7 @@
public final class ErasureCodingPolicyManager { public final class ErasureCodingPolicyManager {
/** /**
* TODO: HDFS-8095 * TODO: HDFS-8095.
*/ */
private static final int DEFAULT_CELLSIZE = 64 * 1024; private static final int DEFAULT_CELLSIZE = 64 * 1024;
private static final ErasureCodingPolicy SYS_POLICY1 = private static final ErasureCodingPolicy SYS_POLICY1 =
@ -48,10 +48,14 @@ public final class ErasureCodingPolicyManager {
private static final ErasureCodingPolicy SYS_POLICY3 = private static final ErasureCodingPolicy SYS_POLICY3 =
new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA, new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID); DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID);
private static final ErasureCodingPolicy SYS_POLICY4 =
new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA,
DEFAULT_CELLSIZE, HdfsConstants.XOR_2_1_POLICY_ID);
//We may add more later. //We may add more later.
private static final ErasureCodingPolicy[] SYS_POLICIES = private static final ErasureCodingPolicy[] SYS_POLICIES =
new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3}; new ErasureCodingPolicy[]{SYS_POLICY1, SYS_POLICY2, SYS_POLICY3,
SYS_POLICY4};
// Supported storage policies for striped EC files // Supported storage policies for striped EC files
private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE = new byte[] { private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE = new byte[] {
@ -96,6 +100,19 @@ public static ErasureCodingPolicy getSystemDefaultPolicy() {
return SYS_POLICY1; return SYS_POLICY1;
} }
/**
 * Look up a system-wide erasure coding policy by its policy ID.
 *
 * @param id the policy ID to search for
 * @return the matching system policy, or {@code null} if no system-wide
 *         policy has the given ID
 */
public static ErasureCodingPolicy getPolicyByPolicyID(byte id) {
  for (int i = 0; i < SYS_POLICIES.length; i++) {
    if (SYS_POLICIES[i].getId() == id) {
      return SYS_POLICIES[i];
    }
  }
  return null;
}
/** /**
* Get all policies that's available to use. * Get all policies that's available to use.
* @return all policies * @return all policies
@ -141,7 +158,7 @@ public static boolean checkStoragePolicySuitableForECStripedMode(
} }
/** /**
* Clear and clean up * Clear and clean up.
*/ */
public void clear() { public void clear() {
activePoliciesByName.clear(); activePoliciesByName.clear();

View File

@ -455,9 +455,13 @@ public short getPreferredBlockReplication() {
if(!isStriped()){ if(!isStriped()){
return max; return max;
} }
// TODO support more policies based on policyId
ErasureCodingPolicy ecPolicy = ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy(); ErasureCodingPolicyManager.getPolicyByPolicyID(
getErasureCodingPolicyID());
if (ecPolicy == null){
ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
}
return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits()); return (short) (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());
} }

View File

@ -1894,15 +1894,35 @@ public static StorageReceivedDeletedBlocks[] makeReportForReceivedBlock(
* @param numStripesPerBlk Number of striped cells in each block * @param numStripesPerBlk Number of striped cells in each block
* @param toMkdir * @param toMkdir
*/ */
public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir, public static void createStripedFile(MiniDFSCluster cluster, Path file,
int numBlocks, int numStripesPerBlk, boolean toMkdir) throws Exception { Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir)
throws Exception {
createStripedFile(cluster, file, dir, numBlocks, numStripesPerBlk,
toMkdir, null);
}
/**
* Creates the metadata of a file in striped layout. This method only
* manipulates the NameNode state without injecting data to DataNode.
* You should disable periodical heartbeat before using this.
* @param file Path of the file to create
* @param dir Parent path of the file
* @param numBlocks Number of striped block groups to add to the file
* @param numStripesPerBlk Number of striped cells in each block
* @param toMkdir
* @param ecPolicy the erasure coding policy to apply to the created file.
*                 A null value means using the default erasure coding policy.
*/
public static void createStripedFile(MiniDFSCluster cluster, Path file,
Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir,
ErasureCodingPolicy ecPolicy) throws Exception {
DistributedFileSystem dfs = cluster.getFileSystem(); DistributedFileSystem dfs = cluster.getFileSystem();
// If outer test already set EC policy, dir should be left as null // If outer test already set EC policy, dir should be left as null
if (toMkdir) { if (toMkdir) {
assert dir != null; assert dir != null;
dfs.mkdirs(dir); dfs.mkdirs(dir);
try { try {
dfs.getClient().setErasureCodingPolicy(dir.toString(), null); dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy);
} catch (IOException e) { } catch (IOException e) {
if (!e.getMessage().contains("non-empty directory")) { if (!e.getMessage().contains("non-empty directory")) {
throw e; throw e;

View File

@ -64,20 +64,34 @@ public class TestDFSStripedInputStream {
private DistributedFileSystem fs; private DistributedFileSystem fs;
private final Path dirPath = new Path("/striped"); private final Path dirPath = new Path("/striped");
private Path filePath = new Path(dirPath, "file"); private Path filePath = new Path(dirPath, "file");
private final ErasureCodingPolicy ecPolicy = private ErasureCodingPolicy ecPolicy;
ErasureCodingPolicyManager.getSystemDefaultPolicy(); private short dataBlocks;
private final short dataBlocks = (short) ecPolicy.getNumDataUnits(); private short parityBlocks;
private final short parityBlocks = (short) ecPolicy.getNumParityUnits(); private int cellSize;
private final int cellSize = ecPolicy.getCellSize();
private final int stripesPerBlock = 2; private final int stripesPerBlock = 2;
private final int blockSize = stripesPerBlock * cellSize; private int blockSize;
private final int blockGroupSize = dataBlocks * blockSize; private int blockGroupSize;
@Rule @Rule
public Timeout globalTimeout = new Timeout(300000); public Timeout globalTimeout = new Timeout(300000);
public ErasureCodingPolicy getEcPolicy() {
return ErasureCodingPolicyManager.getSystemDefaultPolicy();
}
@Before @Before
public void setup() throws IOException { public void setup() throws IOException {
/*
* Initialize erasure coding policy.
*/
ecPolicy = getEcPolicy();
dataBlocks = (short) ecPolicy.getNumDataUnits();
parityBlocks = (short) ecPolicy.getNumParityUnits();
cellSize = ecPolicy.getCellSize();
blockSize = stripesPerBlock * cellSize;
blockGroupSize = dataBlocks * blockSize;
System.out.println("EC policy = " + ecPolicy);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
if (ErasureCodeNative.isNativeCodeLoaded()) { if (ErasureCodeNative.isNativeCodeLoaded()) {
@ -94,7 +108,7 @@ public void setup() throws IOException {
} }
fs = cluster.getFileSystem(); fs = cluster.getFileSystem();
fs.mkdirs(dirPath); fs.mkdirs(dirPath);
fs.getClient().setErasureCodingPolicy(dirPath.toString(), null); fs.getClient().setErasureCodingPolicy(dirPath.toString(), ecPolicy);
} }
@After @After
@ -106,13 +120,13 @@ public void tearDown() {
} }
/** /**
* Test {@link DFSStripedInputStream#getBlockAt(long)} * Test {@link DFSStripedInputStream#getBlockAt(long)}.
*/ */
@Test @Test
public void testRefreshBlock() throws Exception { public void testRefreshBlock() throws Exception {
final int numBlocks = 4; final int numBlocks = 4;
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
stripesPerBlock, false); stripesPerBlock, false, ecPolicy);
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations( LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
filePath.toString(), 0, blockGroupSize * numBlocks); filePath.toString(), 0, blockGroupSize * numBlocks);
final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(), final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
@ -136,7 +150,7 @@ public void testRefreshBlock() throws Exception {
public void testPread() throws Exception { public void testPread() throws Exception {
final int numBlocks = 2; final int numBlocks = 2;
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
stripesPerBlock, false); stripesPerBlock, false, ecPolicy);
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations( LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
filePath.toString(), 0, blockGroupSize * numBlocks); filePath.toString(), 0, blockGroupSize * numBlocks);
int fileLen = blockGroupSize * numBlocks; int fileLen = blockGroupSize * numBlocks;
@ -154,7 +168,9 @@ public void testPread() throws Exception {
bg.getBlock().getBlockPoolId()); bg.getBlock().getBlockPoolId());
} }
/** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */ /**
* A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks
*/
for (int i = 0; i < stripesPerBlock; i++) { for (int i = 0; i < stripesPerBlock; i++) {
for (int j = 0; j < dataBlocks; j++) { for (int j = 0; j < dataBlocks; j++) {
for (int k = 0; k < cellSize; k++) { for (int k = 0; k < cellSize; k++) {
@ -194,7 +210,7 @@ public void testPreadWithDNFailure() throws Exception {
final int numBlocks = 4; final int numBlocks = 4;
final int failedDNIdx = dataBlocks - 1; final int failedDNIdx = dataBlocks - 1;
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
stripesPerBlock, false); stripesPerBlock, false, ecPolicy);
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations( LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
filePath.toString(), 0, blockGroupSize); filePath.toString(), 0, blockGroupSize);
@ -305,7 +321,7 @@ private void testStatefulRead(boolean useByteBuffer,
setup(); setup();
} }
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
stripesPerBlock, false); stripesPerBlock, false, ecPolicy);
LocatedBlocks lbs = fs.getClient().namenode. LocatedBlocks lbs = fs.getClient().namenode.
getBlockLocations(filePath.toString(), 0, fileSize); getBlockLocations(filePath.toString(), 0, fileSize);
@ -330,7 +346,9 @@ private void testStatefulRead(boolean useByteBuffer,
byte[] expected = new byte[fileSize]; byte[] expected = new byte[fileSize];
for (LocatedBlock bg : lbs.getLocatedBlocks()) { for (LocatedBlock bg : lbs.getLocatedBlocks()) {
/** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */ /**
* A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks
*/
for (int i = 0; i < stripesPerBlock; i++) { for (int i = 0; i < stripesPerBlock; i++) {
for (int j = 0; j < dataBlocks; j++) { for (int j = 0; j < dataBlocks; j++) {
for (int k = 0; k < cellSize; k++) { for (int k = 0; k < cellSize; k++) {
@ -371,7 +389,7 @@ public void testStatefulReadWithDNFailure() throws Exception {
final int numBlocks = 4; final int numBlocks = 4;
final int failedDNIdx = dataBlocks - 1; final int failedDNIdx = dataBlocks - 1;
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
stripesPerBlock, false); stripesPerBlock, false, ecPolicy);
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations( LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
filePath.toString(), 0, blockGroupSize); filePath.toString(), 0, blockGroupSize);

View File

@ -47,23 +47,36 @@ public class TestDFSStripedOutputStream {
GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.ALL); GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.ALL);
} }
private final ErasureCodingPolicy ecPolicy = private ErasureCodingPolicy ecPolicy;
ErasureCodingPolicyManager.getSystemDefaultPolicy(); private int dataBlocks;
private final int dataBlocks = ecPolicy.getNumDataUnits(); private int parityBlocks;
private final int parityBlocks = ecPolicy.getNumParityUnits();
private MiniDFSCluster cluster; private MiniDFSCluster cluster;
private DistributedFileSystem fs; private DistributedFileSystem fs;
private Configuration conf; private Configuration conf;
private final int cellSize = ecPolicy.getCellSize(); private int cellSize;
private final int stripesPerBlock = 4; private final int stripesPerBlock = 4;
private final int blockSize = cellSize * stripesPerBlock; private int blockSize;
@Rule @Rule
public Timeout globalTimeout = new Timeout(300000); public Timeout globalTimeout = new Timeout(300000);
public ErasureCodingPolicy getEcPolicy() {
return ErasureCodingPolicyManager.getSystemDefaultPolicy();
}
@Before @Before
public void setup() throws IOException { public void setup() throws IOException {
/*
* Initialize erasure coding policy.
*/
ecPolicy = getEcPolicy();
dataBlocks = (short) ecPolicy.getNumDataUnits();
parityBlocks = (short) ecPolicy.getNumParityUnits();
cellSize = ecPolicy.getCellSize();
blockSize = stripesPerBlock * cellSize;
System.out.println("EC policy = " + ecPolicy);
int numDNs = dataBlocks + parityBlocks + 2; int numDNs = dataBlocks + parityBlocks + 2;
conf = new Configuration(); conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@ -76,7 +89,7 @@ public void setup() throws IOException {
NativeRSRawErasureCoderFactory.class.getCanonicalName()); NativeRSRawErasureCoderFactory.class.getCanonicalName());
} }
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", ecPolicy);
fs = cluster.getFileSystem(); fs = cluster.getFileSystem();
} }

View File

@ -47,6 +47,7 @@
import org.apache.log4j.Level; import org.apache.log4j.Level;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Assume; import org.junit.Assume;
import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import java.io.IOException; import java.io.IOException;
@ -76,18 +77,36 @@ public class TestDFSStripedOutputStreamWithFailure {
.getLogger().setLevel(Level.ALL); .getLogger().setLevel(Level.ALL);
} }
private final ErasureCodingPolicy ecPolicy = private ErasureCodingPolicy ecPolicy;
ErasureCodingPolicyManager.getSystemDefaultPolicy(); private int dataBlocks;
private final int dataBlocks = ecPolicy.getNumDataUnits(); private int parityBlocks;
private final int parityBlocks = ecPolicy.getNumParityUnits(); private int cellSize;
private final int cellSize = ecPolicy.getCellSize();
private final int stripesPerBlock = 4; private final int stripesPerBlock = 4;
private final int blockSize = cellSize * stripesPerBlock; private int blockSize;
private final int blockGroupSize = blockSize * dataBlocks; private int blockGroupSize;
private static final int FLUSH_POS = private static final int FLUSH_POS =
9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1; 9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
public ErasureCodingPolicy getEcPolicy() {
return ErasureCodingPolicyManager.getSystemDefaultPolicy();
}
/*
* Initialize erasure coding policy.
*/
@Before
public void init(){
ecPolicy = getEcPolicy();
dataBlocks = ecPolicy.getNumDataUnits();
parityBlocks = ecPolicy.getNumParityUnits();
cellSize = ecPolicy.getCellSize();
blockSize = cellSize * stripesPerBlock;
blockGroupSize = blockSize * dataBlocks;
dnIndexSuite = getDnIndexSuite();
lengths = newLengths();
}
List<Integer> newLengths() { List<Integer> newLengths() {
final List<Integer> lens = new ArrayList<>(); final List<Integer> lens = new ArrayList<>();
lens.add(FLUSH_POS + 2); lens.add(FLUSH_POS + 2);
@ -104,7 +123,7 @@ List<Integer> newLengths() {
return lens; return lens;
} }
private final int[][] dnIndexSuite = getDnIndexSuite(); private int[][] dnIndexSuite;
private int[][] getDnIndexSuite() { private int[][] getDnIndexSuite() {
final int maxNumLevel = 2; final int maxNumLevel = 2;
@ -167,7 +186,7 @@ private int[] getKillPositions(int fileLen, int num) {
return positions; return positions;
} }
private final List<Integer> lengths = newLengths(); private List<Integer> lengths;
Integer getLength(int i) { Integer getLength(int i) {
return i >= 0 && i < lengths.size()? lengths.get(i): null; return i >= 0 && i < lengths.size()? lengths.get(i): null;

View File

@ -0,0 +1,33 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
/**
* This tests read operation of DFS striped file with XOR-2-1-64k erasure code
* policy.
*/
public class TestDFSXORStripedInputStream extends TestDFSStripedInputStream{
public ErasureCodingPolicy getEcPolicy() {
return ErasureCodingPolicyManager.getPolicyByPolicyID(
HdfsConstants.XOR_2_1_POLICY_ID);
}
}

View File

@ -0,0 +1,35 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
/**
 * Tests writing DFS striped files under the XOR-2-1-64k erasure coding
 * policy.
 */
public class TestDFSXORStripedOutputStream extends TestDFSStripedOutputStream {

  @Override
  public ErasureCodingPolicy getEcPolicy() {
    // Use the XOR-2-1-64k system policy rather than the default one.
    byte policyId = HdfsConstants.XOR_2_1_POLICY_ID;
    return ErasureCodingPolicyManager.getPolicyByPolicyID(policyId);
  }
}

View File

@ -0,0 +1,36 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
/**
 * Tests writing DFS striped files under the XOR-2-1-64k erasure coding
 * policy in the presence of data node failures.
 */
public class TestDFSXORStripedOutputStreamWithFailure
    extends TestDFSStripedOutputStreamWithFailure {

  @Override
  public ErasureCodingPolicy getEcPolicy() {
    // Use the XOR-2-1-64k system policy rather than the default one.
    byte policyId = HdfsConstants.XOR_2_1_POLICY_ID;
    return ErasureCodingPolicyManager.getPolicyByPolicyID(policyId);
  }
}