HDFS-10642. TestLazyPersistReplicaRecovery#testDnRestartWithSavedReplicas fails intermittently. (Contributed by Mingliang Liu)

This commit is contained in:
Arpit Agarwal 2016-07-26 12:27:46 -07:00
parent da6adf5151
commit d2cf8b54c5
5 changed files with 40 additions and 17 deletions

View File

@@ -130,17 +130,33 @@ public void shutDownCluster() throws Exception {
public Timeout timeout = new Timeout(300000);
protected final LocatedBlocks ensureFileReplicasOnStorageType(
Path path, StorageType storageType) throws IOException {
Path path, StorageType storageType)
throws IOException, TimeoutException, InterruptedException {
// Ensure that returned block locations returned are correct!
LOG.info("Ensure path: " + path + " is on StorageType: " + storageType);
assertThat(fs.exists(path), is(true));
long fileLength = client.getFileInfo(path.toString()).getLen();
LocatedBlocks locatedBlocks =
client.getLocatedBlocks(path.toString(), 0, fileLength);
for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
assertThat(locatedBlock.getStorageTypes()[0], is(storageType));
}
return locatedBlocks;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
LocatedBlocks locatedBlocks =
client.getLocatedBlocks(path.toString(), 0, fileLength);
for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
if (locatedBlock.getStorageTypes()[0] != storageType) {
return false;
}
}
return true;
} catch (IOException ioe) {
LOG.warn("Exception got in ensureFileReplicasOnStorageType()", ioe);
return false;
}
}
}, 100, 30 * 1000);
return client.getLocatedBlocks(path.toString(), 0, fileLength);
}
/**

View File

@@ -29,6 +29,7 @@
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
@@ -89,7 +90,7 @@ public void testTruncateIsDenied() throws IOException {
*/
@Test
public void testCorruptFilesAreDiscarded()
throws IOException, InterruptedException {
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setRamDiskReplicaCapacity(2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
@@ -123,7 +124,7 @@ public void testCorruptFilesAreDiscarded()
@Test
public void testDisableLazyPersistFileScrubber()
throws IOException, InterruptedException {
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setRamDiskReplicaCapacity(2).disableScrubber().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
@@ -151,8 +152,8 @@ public void testDisableLazyPersistFileScrubber()
* If NN restarted then lazyPersist files should not deleted
*/
@Test
public void testFileShouldNotDiscardedIfNNRestarted() throws IOException,
InterruptedException {
public void testFileShouldNotDiscardedIfNNRestarted()
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().setRamDiskReplicaCapacity(2).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");

View File

@@ -53,7 +53,8 @@ public class TestLazyPersistLockedMemory extends LazyPersistTestCase {
* fall back to disk.
*/
@Test
public void testWithNoLockedMemory() throws IOException {
public void testWithNoLockedMemory()
throws IOException, TimeoutException, InterruptedException {
getClusterBuilder().setNumDatanodes(1)
.setMaxLockedMemory(0).build();

View File

@@ -26,6 +26,7 @@
import org.junit.Test;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.fs.StorageType.DEFAULT;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
@@ -35,7 +36,8 @@
public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
@Test
public void testPlacementOnRamDisk() throws IOException {
public void testPlacementOnRamDisk()
throws IOException, TimeoutException, InterruptedException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
@@ -45,7 +47,8 @@ public void testPlacementOnRamDisk() throws IOException {
}
@Test
public void testPlacementOnSizeLimitedRamDisk() throws IOException {
public void testPlacementOnSizeLimitedRamDisk()
throws IOException, TimeoutException, InterruptedException {
getClusterBuilder().setRamDiskReplicaCapacity(3).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
@@ -64,7 +67,8 @@ public void testPlacementOnSizeLimitedRamDisk() throws IOException {
* @throws IOException
*/
@Test
public void testFallbackToDisk() throws IOException {
public void testFallbackToDisk()
throws IOException, TimeoutException, InterruptedException {
getClusterBuilder().setHasTransientStorage(false).build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");

View File

@@ -23,6 +23,7 @@
import org.junit.Test;
import java.io.IOException;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.fs.StorageType.DEFAULT;
import static org.apache.hadoop.fs.StorageType.RAM_DISK;
@@ -30,7 +31,7 @@
public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
@Test
public void testDnRestartWithSavedReplicas()
throws IOException, InterruptedException {
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().build();
final String METHOD_NAME = GenericTestUtils.getMethodName();
@@ -55,7 +56,7 @@ public void testDnRestartWithSavedReplicas()
@Test
public void testDnRestartWithUnsavedReplicas()
throws IOException, InterruptedException {
throws IOException, InterruptedException, TimeoutException {
getClusterBuilder().build();
FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));