HDFS-13524. Occasional "All datanodes are bad" error in TestLargeBlock#testLargeBlockSize. Contributed by Siyao Meng.

This commit is contained in:
Wei-Chiu Chuang 2018-07-16 10:51:23 -07:00
parent 359ea4e181
commit 88b2794244

View File

@@ -50,6 +50,7 @@ public class TestLargeBlock {
// should we verify the data read back from the file? (slow)
static final boolean verifyData = true;
static final byte[] pattern = { 'D', 'E', 'A', 'D', 'B', 'E', 'E', 'F'};
static final int numDatanodes = 3;
// creates a file
static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl,
@@ -158,7 +159,7 @@ static void checkFullFile(FileSystem fs, Path name, final long fileSize)
* timeout here.
* @throws IOException in case of errors
*/
@Test (timeout = 900000)
@Test (timeout = 1800000)
public void testLargeBlockSize() throws IOException {
final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
runTest(blockSize);
@@ -175,7 +176,8 @@ public void runTest(final long blockSize) throws IOException {
final long fileSize = blockSize + 1L;
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes).build();
FileSystem fs = cluster.getFileSystem();
try {