HDFS-7171. Fix Jenkins failures in HDFS-6581 branch. (Arpit Agarwal)

arp 2014-09-30 18:25:20 -07:00
parent 65ef35bbab
commit a45ad330fa
4 changed files with 6 additions and 55 deletions

View File

@@ -92,4 +92,5 @@
 HDFS-7129. Metrics to track usage of memory for writes. (Xiaoyu Yao
 via Arpit Agarwal)
+HDFS-7171. Fix Jenkins failures in HDFS-6581 branch. (Arpit Agarwal)

View File

@@ -2340,7 +2340,6 @@ private void setStoragePolicyInt(String src, final String policyName)
       }
       src = FSDirectory.resolvePath(src, pathComponents, dir);
-      INode inode = dir.getINode(src);
       // get the corresponding policy and make sure the policy name is valid
       BlockStoragePolicy policy = blockManager.getStoragePolicy(policyName);

View File

@@ -69,7 +69,7 @@ public class TestBlockStoragePolicy {
   static final byte COLD = (byte) 4;
   static final byte WARM = (byte) 8;
   static final byte HOT = (byte) 12;
+  static final byte LAZY_PERSIST = (byte) 15;

   @Test (timeout=300000)
   public void testConfigKeyEnabled() throws IOException {
@@ -116,6 +116,9 @@ public void testDefaultPolicies() {
     expectedPolicyStrings.put(HOT,
         "BlockStoragePolicy{HOT:12, storageTypes=[DISK], " +
         "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
+    expectedPolicyStrings.put(LAZY_PERSIST,
+        "BlockStoragePolicy{LAZY_PERSIST:15, storageTypes=[RAM_DISK, DISK], " +
+        "creationFallbacks=[DISK], replicationFallbacks=[DISK]}");
     for(byte i = 1; i < 16; i++) {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
@@ -1141,7 +1144,7 @@ public void testGetAllStoragePolicies() throws Exception {
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       BlockStoragePolicy[] policies = fs.getStoragePolicies();
-      Assert.assertEquals(3, policies.length);
+      Assert.assertEquals(4, policies.length);
       Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
           policies[0].toString());
       Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),
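
For orientation on the assertion change from 3 to 4 policies: the HDFS-6581 branch adds a LAZY_PERSIST policy (id 15, RAM_DISK with DISK fallback) to the default block storage policy suite alongside COLD, WARM and HOT. The following is a minimal client-side sketch, not part of this change, assuming an HDFS client from this branch or a later release where DistributedFileSystem#getStoragePolicies() returns BlockStoragePolicy[] and setStoragePolicy(Path, String) is available; the class name ListStoragePolicies and the path /tmp/lazy-writes are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

public class ListStoragePolicies {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a NameNode, e.g. hdfs://localhost:9000.
    Configuration conf = new Configuration();
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);

    // With LAZY_PERSIST in the default suite this lists four policies:
    // COLD(4), WARM(8), HOT(12) and LAZY_PERSIST(15).
    BlockStoragePolicy[] policies = fs.getStoragePolicies();
    for (BlockStoragePolicy policy : policies) {
      System.out.println(policy); // e.g. BlockStoragePolicy{LAZY_PERSIST:15, ...}
    }

    // Policies are assigned per path by name; here a (hypothetical) directory
    // is opted into RAM_DISK-first writes.
    Path dir = new Path("/tmp/lazy-writes");
    fs.mkdirs(dir);
    fs.setStoragePolicy(dir, "LAZY_PERSIST");
  }
}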

View File

@@ -793,56 +793,4 @@ public void testNoSpaceArchive() throws Exception {
       test.shutdownCluster();
     }
   }
-  /**
-   * Test blocks of lazy_persist file on RAM_DISK will not be moved to other
-   * storage types by the Storage Mover.
-   */
-  @Test
-  public void testRamDiskNotMoved() throws Exception {
-    LOG.info("testRamDiskNotMoved");
-    final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
-    final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
-    final long diskCapacity = 100 * BLOCK_SIZE;
-    final long archiveCapacity = (6 + HdfsConstants.MIN_BLOCKS_FOR_WRITE)
-        * BLOCK_SIZE;
-    final long ramDiskCapacity = 10 * BLOCK_SIZE;
-    final long[][] capacities = genCapacities(1, 0, 0, 1,
-        diskCapacity, archiveCapacity, ramDiskCapacity);
-    final int LAZY_WRITER_INTERVAL_SEC = 1;
-    final ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF,
-        1, (short)1, genStorageTypes(1, 0, 0, 1), capacities);
-    clusterScheme.conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
-        LAZY_WRITER_INTERVAL_SEC);
-    final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
-    try {
-      test.runBasicTest(false);
-      // test creating a hot RAM_DISK file
-      final int SEED = 0xFADED;
-      final Path foo_hot = new Path(pathPolicyMap.hot, "foo_hot");
-      DFSTestUtil.createFile(test.dfs, foo_hot, true, BLOCK_SIZE, BLOCK_SIZE,
-          BLOCK_SIZE, (short) 1, SEED, true);
-      Assert.assertTrue(DFSTestUtil.verifyFileReplicasOnStorageType(test.dfs,
-          test.dfs.getClient(), foo_hot, StorageType.RAM_DISK));
-      // Sleep for a short time to allow the lazy writer thread to do its job
-      Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);
-      // Verify policy related name change is allowed
-      final Path foo_hot_new = new Path(pathPolicyMap.warm, "foo_hot");
-      test.dfs.rename(foo_hot, pathPolicyMap.warm);
-      Assert.assertTrue(test.dfs.exists(foo_hot_new));
-      // Verify blocks on ram disk will not be moved to other storage types by
-      // policy based Storage Mover.
-      test.migrate();
-      Assert.assertTrue(DFSTestUtil.verifyFileReplicasOnStorageType(test.dfs,
-          test.dfs.getClient(), foo_hot_new, StorageType.RAM_DISK));
-    } finally {
-      test.shutdownCluster();
-    }
-  }
 }
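
The removed test above relies on the notion of a lazy_persist file: a file whose replicas are first written to RAM_DISK and only persisted to disk later by the DataNode's lazy writer. As a rough illustration of how such a file is created from a client, here is a sketch assuming the CreateFlag.LAZY_PERSIST flag introduced by HDFS-6581; the path, buffer size, replication and block size are made-up values.

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateLazyPersistFile {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    // LAZY_PERSIST asks the DataNode to place the replica on RAM_DISK and let
    // the lazy writer flush it to persistent storage asynchronously.
    Path file = new Path("/tmp/lazy_persist_demo"); // hypothetical path
    EnumSet<CreateFlag> flags =
        EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST);

    try (FSDataOutputStream out = fs.create(file, FsPermission.getFileDefault(),
        flags, 4096, (short) 1, 128 * 1024 * 1024, null)) {
      out.write(new byte[4096]); // demo payload
    }
  }
}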