HDFS-9755. Erasure Coding: allow to use multiple EC policies in striping related tests [Part 2]. Contributed by Rui Li.
Change-Id: I2100bc27ad484f83c9cb2d2e5bb232f4f74fd286
This commit is contained in:
parent fa00d3e205
commit 0aa8c82894
@@ -927,6 +927,9 @@ Trunk (Unreleased)
     HDFS-9775. Erasure Coding : Rename BlockRecoveryWork to
     BlockReconstructionWork. (Rakesh R via zhz)
 
+    HDFS-9755. Erasure Coding: allow to use multiple EC policies in striping
+    related tests [Part 2]. (Rui Li via zhz)
+
 Release 2.9.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -68,13 +68,21 @@ public class TestReadStripedFileWithDecoding {
 
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
-  private final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  private static final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
+  private static final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
   private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
   private final int smallFileLength = blockSize * dataBlocks - 123;
   private final int largeFileLength = blockSize * dataBlocks + 123;
   private final int[] fileLengths = {smallFileLength, largeFileLength};
-  private final int[] dnFailureNums = {1, 2, 3};
+  private static final int[] dnFailureNums = getDnFailureNums();
+
+  private static int[] getDnFailureNums() {
+    int[] dnFailureNums = new int[parityBlocks];
+    for (int i = 0; i < dnFailureNums.length; i++) {
+      dnFailureNums[i] = i + 1;
+    }
+    return dnFailureNums;
+  }
 
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
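Note: the failure-count table was previously hard-coded to {1, 2, 3}, which only matches a 3-parity policy such as RS-6-3. The new helper derives the counts from parityBlocks, so the same test matrix holds for any EC schema. A standalone sketch of the computation (the policy values below are illustrative, not from the patch):

import java.util.Arrays;

public class DnFailureNumsSketch {
  // Mirrors getDnFailureNums(): failure counts 1..parityBlocks.
  static int[] dnFailureNums(int parityBlocks) {
    int[] nums = new int[parityBlocks];
    for (int i = 0; i < nums.length; i++) {
      nums[i] = i + 1;
    }
    return nums;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(dnFailureNums(3))); // e.g. RS-6-3  -> [1, 2, 3]
    System.out.println(Arrays.toString(dnFailureNums(4))); // e.g. RS-10-4 -> [1, 2, 3, 4]
  }
}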
@@ -132,8 +140,9 @@ public void testReadWithDNFailure() throws Exception {
   @Test(timeout=300000)
   public void testReadCorruptedData() throws IOException {
     for (int fileLength : fileLengths) {
-      for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) {
-        for (int parityDelNum = 0; (dataDelNum+parityDelNum) < 4; parityDelNum++) {
+      for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
+        for (int parityDelNum = 0; (dataDelNum + parityDelNum) <= parityBlocks;
+            parityDelNum++) {
           String src = "/corrupted_" + dataDelNum + "_" + parityDelNum;
           testReadWithBlockCorrupted(src, fileLength,
               dataDelNum, parityDelNum, false);
@@ -149,8 +158,9 @@ public void testReadCorruptedData() throws IOException {
   @Test(timeout=300000)
   public void testReadCorruptedDataByDeleting() throws IOException {
     for (int fileLength : fileLengths) {
-      for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) {
-        for (int parityDelNum = 0; (dataDelNum+parityDelNum) < 4; parityDelNum++) {
+      for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
+        for (int parityDelNum = 0; (dataDelNum + parityDelNum) <= parityBlocks;
+            parityDelNum++) {
           String src = "/deleted_" + dataDelNum + "_" + parityDelNum;
           testReadWithBlockCorrupted(src, fileLength,
               dataDelNum, parityDelNum, true);
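The old bound `< 4` baked in the RS-6-3 assumption of at most 3 lost blocks; the rewritten loops in testReadCorruptedData and testReadCorruptedDataByDeleting instead cap total deletions at parityBlocks, which is exactly what the code can still decode. A standalone enumeration of the pairs the loops now visit (parity count assumed for illustration):

public class DeletionPairs {
  public static void main(String[] args) {
    int parityBlocks = 3; // e.g. RS-6-3; any policy's parity count works
    for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
      for (int parityDelNum = 0;
           dataDelNum + parityDelNum <= parityBlocks; parityDelNum++) {
        // Every pair satisfies dataDelNum + parityDelNum <= parityBlocks,
        // so the striped file remains decodable in each case.
        System.out.println("(" + dataDelNum + ", " + parityDelNum + ")");
      }
    }
    // Prints (1,0) (1,1) (1,2) (2,0) (2,1) (3,0) for parityBlocks = 3.
  }
}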
@@ -21,18 +21,17 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.After;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.Rule;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
 
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
+import static org.apache.hadoop.hdfs.StripedFileTestUtil.TEST_EC_POLICY;
 import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
 import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;
 
@@ -53,16 +52,15 @@ public class TestReadStripedFileWithMissingBlocks {
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy(
+        "/", TEST_EC_POLICY);
     fs = cluster.getFileSystem();
   }
 
-  @After
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
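The old call passed null as the policy, implicitly selecting the system default; setup() now threads an explicit TEST_EC_POLICY through, so changing the policy in StripedFileTestUtil retargets every dependent test at once. Purely as a hedged sketch of what such a shared constant could look like (the actual definition lives in StripedFileTestUtil and may differ):

// Hypothetical sketch only, not the committed code:
public static final ErasureCodingPolicy TEST_EC_POLICY =
    ErasureCodingPolicyManager.getSystemDefaultPolicy();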
@@ -71,33 +69,19 @@ public void tearDown() throws IOException {
   }
 
   @Test
-  public void testReadFileWithMissingBlocks1() throws Exception {
-    readFileWithMissingBlocks(new Path("/foo"), fileLength, 1, 0);
-  }
-
-  @Test
-  public void testReadFileWithMissingBlocks2() throws Exception {
-    readFileWithMissingBlocks(new Path("/foo"), fileLength, 1, 1);
-  }
-
-  @Test
-  public void testReadFileWithMissingBlocks3() throws Exception {
-    readFileWithMissingBlocks(new Path("/foo"), fileLength, 1, 2);
-  }
-
-  @Test
-  public void testReadFileWithMissingBlocks4() throws Exception {
-    readFileWithMissingBlocks(new Path("/foo"), fileLength, 2, 0);
-  }
-
-  @Test
-  public void testReadFileWithMissingBlocks5() throws Exception {
-    readFileWithMissingBlocks(new Path("/foo"), fileLength, 2, 1);
-  }
-
-  @Test
-  public void testReadFileWithMissingBlocks6() throws Exception {
-    readFileWithMissingBlocks(new Path("/foo"), fileLength, 3, 0);
-  }
+  public void testReadFileWithMissingBlocks() throws Exception {
+    for (int missingData = 1; missingData <= NUM_PARITY_BLOCKS; missingData++) {
+      for (int missingParity = 0; missingParity <=
+          NUM_PARITY_BLOCKS - missingData; missingParity++) {
+        try {
+          setup();
+          readFileWithMissingBlocks(new Path("/foo"), fileLength,
+              missingData, missingParity);
+        } finally {
+          tearDown();
+        }
+      }
+    }
+  }
 
   private void readFileWithMissingBlocks(Path srcPath, int fileLength,
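With the six fixed-combination tests collapsed into one loop, the @Before/@After hooks were dropped (see the import hunk above) and setup()/tearDown() are driven by hand, so every (missingData, missingParity) combination gets a fresh cluster and teardown runs even when an iteration fails. The pattern in isolation, with hypothetical stand-in methods:

public class PerCaseLifecycleSketch {
  static void setup()    { System.out.println("cluster up"); }
  static void tearDown() { System.out.println("cluster down"); }
  static void runCase(int data, int parity) {
    System.out.println("missing data=" + data + " parity=" + parity);
  }

  public static void main(String[] args) {
    int parityBlocks = 3; // illustrative policy parity count
    for (int missingData = 1; missingData <= parityBlocks; missingData++) {
      for (int missingParity = 0;
           missingParity <= parityBlocks - missingData; missingParity++) {
        try {
          setup();                             // fresh fixture per combination
          runCase(missingData, missingParity);
        } finally {
          tearDown();                          // released even on failure
        }
      }
    }
  }
}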
@@ -113,14 +113,14 @@ public void tearDown() {
 
   @Test(timeout = 120000)
   public void testRecoverOneParityBlock() throws Exception {
-    int fileLen = 10 * blockSize + blockSize/10;
+    int fileLen = (dataBlkNum + 1) * blockSize + blockSize / 10;
     assertFileBlocksReconstruction("/testRecoverOneParityBlock", fileLen,
         ReconstructionType.ParityOnly, 1);
   }
 
   @Test(timeout = 120000)
   public void testRecoverOneParityBlock1() throws Exception {
-    int fileLen = cellSize + cellSize/10;
+    int fileLen = cellSize + cellSize / 10;
     assertFileBlocksReconstruction("/testRecoverOneParityBlock1", fileLen,
         ReconstructionType.ParityOnly, 1);
   }
@@ -134,35 +134,35 @@ public void testRecoverOneParityBlock2() throws Exception {
 
   @Test(timeout = 120000)
   public void testRecoverOneParityBlock3() throws Exception {
-    int fileLen = 3 * blockSize + blockSize/10;
+    int fileLen = (dataBlkNum - 1) * blockSize + blockSize / 10;
     assertFileBlocksReconstruction("/testRecoverOneParityBlock3", fileLen,
         ReconstructionType.ParityOnly, 1);
   }
 
   @Test(timeout = 120000)
-  public void testRecoverThreeParityBlocks() throws Exception {
-    int fileLen = 10 * blockSize + blockSize/10;
-    assertFileBlocksReconstruction("/testRecoverThreeParityBlocks", fileLen,
-        ReconstructionType.ParityOnly, 3);
+  public void testRecoverAllParityBlocks() throws Exception {
+    int fileLen = dataBlkNum * blockSize + blockSize / 10;
+    assertFileBlocksReconstruction("/testRecoverAllParityBlocks", fileLen,
+        ReconstructionType.ParityOnly, parityBlkNum);
   }
 
   @Test(timeout = 120000)
-  public void testRecoverThreeDataBlocks() throws Exception {
-    int fileLen = 10 * blockSize + blockSize/10;
-    assertFileBlocksReconstruction("/testRecoverThreeDataBlocks", fileLen,
-        ReconstructionType.DataOnly, 3);
+  public void testRecoverAllDataBlocks() throws Exception {
+    int fileLen = (dataBlkNum + parityBlkNum) * blockSize + blockSize / 10;
+    assertFileBlocksReconstruction("/testRecoverAllDataBlocks", fileLen,
+        ReconstructionType.DataOnly, parityBlkNum);
   }
 
   @Test(timeout = 120000)
-  public void testRecoverThreeDataBlocks1() throws Exception {
-    int fileLen = 3 * blockSize + blockSize/10;
-    assertFileBlocksReconstruction("/testRecoverThreeDataBlocks1", fileLen,
-        ReconstructionType.DataOnly, 3);
+  public void testRecoverAllDataBlocks1() throws Exception {
+    int fileLen = parityBlkNum * blockSize + blockSize / 10;
+    assertFileBlocksReconstruction("/testRecoverAllDataBlocks1", fileLen,
+        ReconstructionType.DataOnly, parityBlkNum);
   }
 
   @Test(timeout = 120000)
   public void testRecoverOneDataBlock() throws Exception {
-    int fileLen = 10 * blockSize + blockSize/10;
+    int fileLen = (dataBlkNum + 1) * blockSize + blockSize / 10;
     assertFileBlocksReconstruction("/testRecoverOneDataBlock", fileLen,
         ReconstructionType.DataOnly, 1);
   }
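The literal lengths (10 blocks, 3 blocks) encoded RS-6-3 geometry; rewriting them in terms of dataBlkNum and parityBlkNum keeps each scenario meaningful under any policy, e.g. (dataBlkNum + 1) * blockSize always spills a full block plus a remainder into a second block group. A quick check of the new expressions under an assumed 6+3 policy and an illustrative block size:

public class FileLenSketch {
  public static void main(String[] args) {
    int dataBlkNum = 6, parityBlkNum = 3;  // assumed RS-6-3, not from the patch
    int blockSize = 1024 * 1024;           // illustrative only
    System.out.println((dataBlkNum + 1) * blockSize + blockSize / 10); // spans 2 groups
    System.out.println((dataBlkNum - 1) * blockSize + blockSize / 10); // partial group
    System.out.println((dataBlkNum + parityBlkNum) * blockSize + blockSize / 10);
  }
}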
@@ -183,16 +183,16 @@ public void testRecoverOneDataBlock2() throws Exception {
 
   @Test(timeout = 120000)
   public void testRecoverAnyBlocks() throws Exception {
-    int fileLen = 3 * blockSize + blockSize/10;
+    int fileLen = parityBlkNum * blockSize + blockSize / 10;
     assertFileBlocksReconstruction("/testRecoverAnyBlocks", fileLen,
-        ReconstructionType.Any, 2);
+        ReconstructionType.Any, random.nextInt(parityBlkNum) + 1);
   }
 
   @Test(timeout = 120000)
   public void testRecoverAnyBlocks1() throws Exception {
-    int fileLen = 10 * blockSize + blockSize/10;
+    int fileLen = (dataBlkNum + parityBlkNum) * blockSize + blockSize / 10;
     assertFileBlocksReconstruction("/testRecoverAnyBlocks1", fileLen,
-        ReconstructionType.Any, 3);
+        ReconstructionType.Any, random.nextInt(parityBlkNum) + 1);
   }
 
   private int[] generateDeadDnIndices(ReconstructionType type, int deadNum,
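The "any blocks" tests now draw the recovery width at random rather than fixing it at 2 or 3; random.nextInt(parityBlkNum) + 1 is uniform over [1, parityBlkNum], so it always satisfies the precondition checked in assertFileBlocksReconstruction below. A minimal check of that range (policy value assumed):

import java.util.Random;

public class RandomWidthSketch {
  public static void main(String[] args) {
    Random random = new Random();
    int parityBlkNum = 3; // illustrative
    for (int i = 0; i < 100; i++) {
      int width = random.nextInt(parityBlkNum) + 1; // nextInt(n) is in [0, n)
      if (width < 1 || width > parityBlkNum) {
        throw new AssertionError("out of range: " + width);
      }
    }
    System.out.println("all widths in [1, " + parityBlkNum + "]");
  }
}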
@@ -259,6 +259,7 @@ private void assertFileBlocksReconstruction(String fileName, int fileLen,
     if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
       Assert.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
     }
+    assertTrue("File length must be positive.", fileLen > 0);
 
     Path file = new Path(fileName);
 
@@ -289,6 +290,7 @@ private void assertFileBlocksReconstruction(String fileName, int fileLen,
     int[] deadDnIndices = new int[toRecoverBlockNum];
     ExtendedBlock[] blocks = new ExtendedBlock[toRecoverBlockNum];
     File[] replicas = new File[toRecoverBlockNum];
+    long[] replicaLengths = new long[toRecoverBlockNum];
     File[] metadatas = new File[toRecoverBlockNum];
     byte[][] replicaContents = new byte[toRecoverBlockNum][];
     Map<ExtendedBlock, DataNode> errorMap = new HashMap<>(dead.length);
@@ -301,9 +303,10 @@ private void assertFileBlocksReconstruction(String fileName, int fileLen,
           lastBlock.getBlock(), cellSize, dataBlkNum, indices[dead[i]]);
       errorMap.put(blocks[i], cluster.getDataNodes().get(deadDnIndices[i]));
       replicas[i] = cluster.getBlockFile(deadDnIndices[i], blocks[i]);
+      replicaLengths[i] = replicas[i].length();
       metadatas[i] = cluster.getBlockMetadataFile(deadDnIndices[i], blocks[i]);
       // the block replica on the datanode should be the same as expected
-      assertEquals(replicas[i].length(),
+      assertEquals(replicaLengths[i],
           StripedBlockUtil.getInternalBlockLength(
               lastBlock.getBlockSize(), cellSize, dataBlkNum, indices[dead[i]]));
       assertTrue(metadatas[i].getName().
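replicaLengths[i] is captured before the error injection runs because generateErrors() can remove the block file outright, after which File.length() silently reports 0 rather than the original size; the cached value is what the post-reconstruction assertion compares against. The underlying java.io.File behavior, demonstrated:

import java.io.File;

public class MissingFileLengthSketch {
  public static void main(String[] args) {
    File gone = new File("/nonexistent/blk_123"); // hypothetical path
    System.out.println(gone.length()); // prints 0 for a missing file, no exception
  }
}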
@@ -312,8 +315,10 @@ private void assertFileBlocksReconstruction(String fileName, int fileLen,
       replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
     }
 
-    int cellsNum = (fileLen - 1) / cellSize + 1;
-    int groupSize = Math.min(cellsNum, dataBlkNum) + parityBlkNum;
+    int lastGroupDataLen = fileLen % (dataBlkNum * blockSize);
+    int lastGroupNumBlk = lastGroupDataLen == 0 ? dataBlkNum :
+        Math.min(dataBlkNum, ((lastGroupDataLen - 1) / cellSize + 1));
+    int groupSize = lastGroupNumBlk + parityBlkNum;
 
     // shutdown datanodes or generate corruption
     int stoppedDN = generateErrors(errorMap, type);
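The old groupSize counted cells over the whole file, which overestimates the width of the last block group once a file spans more than one group; the new math first isolates the data length that actually falls into the last group. Worked through with assumed RS-6-3 geometry and illustrative sizes:

public class GroupSizeSketch {
  public static void main(String[] args) {
    int dataBlkNum = 6, parityBlkNum = 3;              // assumed RS-6-3
    int blockSize = 1024 * 1024, cellSize = 64 * 1024; // illustrative sizes
    int fileLen = dataBlkNum * blockSize + 2 * cellSize; // 1 full group + 2 cells

    int lastGroupDataLen = fileLen % (dataBlkNum * blockSize);
    int lastGroupNumBlk = lastGroupDataLen == 0 ? dataBlkNum :
        Math.min(dataBlkNum, (lastGroupDataLen - 1) / cellSize + 1);
    System.out.println(lastGroupNumBlk + parityBlkNum); // 5: 2 data + 3 parity

    // The old formula, min(ceil(fileLen / cellSize), dataBlkNum) + parityBlkNum,
    // would have reported 9 here, treating the last group as full.
  }
}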
@@ -342,7 +347,7 @@ private void assertFileBlocksReconstruction(String fileName, int fileLen,
       LOG.info("replica after reconstruction " + replicaAfterReconstruction);
       File metadataAfterReconstruction =
           cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
-      assertEquals(replicaAfterReconstruction.length(), replicas[i].length());
+      assertEquals(replicaLengths[i], replicaAfterReconstruction.length());
       LOG.info("replica before " + replicas[i]);
       assertTrue(metadataAfterReconstruction.getName().
           endsWith(blocks[i].getGenerationStamp() + ".meta"));
@@ -23,7 +23,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -79,7 +78,8 @@ public void testStripedFile0() throws IOException {
 
   @Test
   public void testStripedFile1() throws IOException {
-    doTest(cellSize * 5, 5);
+    int numCell = DATA_BLK_NUM - 1;
+    doTest(cellSize * numCell, numCell);
   }
 
   /**
@@ -72,8 +72,9 @@ public void tearDown() throws IOException {
   @Test(timeout = 300000)
   public void testWriteStripedFileWithDNFailure() throws IOException {
     for (int fileLength : fileLengths) {
-      for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) {
-        for (int parityDelNum = 0; (dataDelNum+parityDelNum) < 4; parityDelNum++) {
+      for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
+        for (int parityDelNum = 0; (dataDelNum + parityDelNum) <= parityBlocks;
+            parityDelNum++) {
           try {
             // setup a new cluster with no dead datanode
             setup();
@@ -82,7 +83,7 @@ public void testWriteStripedFileWithDNFailure() throws IOException {
             String fileType = fileLength < (blockSize * dataBlocks) ?
                 "smallFile" : "largeFile";
             LOG.error("Failed to write file with DN failure:"
-                + " fileType = "+ fileType
+                + " fileType = " + fileType
                 + ", dataDelNum = " + dataDelNum
                 + ", parityDelNum = " + parityDelNum);
             throw ioe;