HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.

This commit is contained in:
Rakesh Radhakrishnan 2017-07-17 22:40:03 +05:30 committed by Uma Maheswara Rao Gangumalla
parent 68af4e199a
commit 9e82e5a86e
2 changed files with 13 additions and 12 deletions

View File

@ -1025,12 +1025,13 @@ public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
list.add(cluster.stopDataNode(0));
list.add(cluster.stopDataNode(0));
cluster.restartNameNodes();
cluster.restartDataNode(list.get(0), true);
cluster.restartDataNode(list.get(1), true);
cluster.restartDataNode(list.get(0), false);
cluster.restartDataNode(list.get(1), false);
cluster.waitActive();
fs.satisfyStoragePolicy(filePath);
Thread.sleep(3000 * 6);
cluster.restartDataNode(list.get(2), true);
DFSTestUtil.waitExpectedStorageType(filePath.toString(),
StorageType.ARCHIVE, 2, 30000, cluster.getFileSystem());
cluster.restartDataNode(list.get(2), false);
DFSTestUtil.waitExpectedStorageType(filePath.toString(),
StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
} finally {

View File

@ -308,8 +308,8 @@ public void testWhenOnlyFewTargetNodesAreAvailableToSatisfyStoragePolicy()
*/
@Test(timeout = 300000)
public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
// start 10 datanodes
int numOfDatanodes = 10;
// start 9 datanodes
int numOfDatanodes = 9;
int storagesPerDatanode = 2;
long capacity = 20 * defaultStripeBlockSize;
long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@ -338,7 +338,6 @@ public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
{StorageType.DISK, StorageType.ARCHIVE},
{StorageType.DISK, StorageType.ARCHIVE},
{StorageType.DISK, StorageType.ARCHIVE},
{StorageType.DISK, StorageType.ARCHIVE},
{StorageType.DISK, StorageType.ARCHIVE}})
.storageCapacities(capacities)
.build();
@ -366,15 +365,16 @@ public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
}
cluster.restartNameNodes();
// Restart half of the datanodes
for (int i = 0; i < numOfDatanodes / 2; i++) {
cluster.restartDataNode(list.get(i), true);
for (int i = 0; i < 5; i++) {
cluster.restartDataNode(list.get(i), false);
}
cluster.waitActive();
fs.satisfyStoragePolicy(fooFile);
Thread.sleep(3000 * 6);
DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
StorageType.ARCHIVE, 5, 30000, cluster.getFileSystem());
// Start remaining datanodes
for (int i = numOfDatanodes - 1; i > numOfDatanodes / 2; i--) {
cluster.restartDataNode(list.get(i), true);
for (int i = numOfDatanodes - 1; i >= 5; i--) {
cluster.restartDataNode(list.get(i), false);
}
// verify storage types and locations.
waitExpectedStorageType(cluster, fooFile.toString(), fileLen,