HDFS-3630 Modify TestPersistBlocks to use both flush and hflush (sanjay)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1360991 13f79535-47bb-0310-9956-ffa450edef68
Sanjay Radia 2012-07-12 22:46:53 +00:00
parent 4a5ba3b7bd
commit 6a59f08a15
2 changed files with 24 additions and 4 deletions


@@ -107,6 +107,8 @@ Trunk (unreleased changes)
     HDFS-3190. Simple refactors in existing NN code to assist
     QuorumJournalManager extension. (todd)
 
+    HDFS-3630 Modify TestPersistBlocks to use both flush and hflush (sanjay)
+
   OPTIMIZATIONS
 
   BUG FIXES


@@ -72,10 +72,25 @@ public class TestPersistBlocks {
     rand.nextBytes(DATA_BEFORE_RESTART);
     rand.nextBytes(DATA_AFTER_RESTART);
   }
-  /** check if DFS remains in proper condition after a restart */
+  /** check if DFS remains in proper condition after a restart
+   **/
+  @Test
+  public void TestRestartDfsWithFlush() throws Exception {
+    testRestartDfs(true);
+  }
+
+  /** check if DFS remains in proper condition after a restart
+   **/
   @Test
-  public void testRestartDfs() throws Exception {
+  public void TestRestartDfsWithSync() throws Exception {
+    testRestartDfs(false);
+  }
+
+  /** check if DFS remains in proper condition after a restart
+   * @param useFlush - if true then flush is used instead of sync (ie hflush)
+   */
+  void testRestartDfs(boolean useFlush) throws Exception {
     final Configuration conf = new HdfsConfiguration();
     // Turn off persistent IPC, so that the DFSClient can survive NN restart
     conf.setInt(
@@ -92,7 +107,10 @@ public void testRestartDfs() throws Exception {
     // Creating a file with 4096 blockSize to write multiple blocks
     stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
     stream.write(DATA_BEFORE_RESTART);
-    stream.hflush();
+    if (useFlush)
+      stream.flush();
+    else
+      stream.hflush();
     // Wait for at least a few blocks to get through
     while (len <= BLOCK_SIZE) {
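
For readers skimming the diff: the new useFlush switch toggles between two calls with different guarantees. Below is a minimal, illustrative sketch (not part of this commit; the class name, path, and payload are made up, and it assumes a Hadoop 2.x+ client on the classpath) of the distinction the parametrized test now exercises: flush() is the plain java.io.OutputStream call and only drains the client-side buffer, while hflush() (the Syncable method that superseded sync()) does not return until the written data has reached every DataNode in the write pipeline.

// Illustrative sketch only; not part of HDFS-3630.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushVsHflushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);               // fs.defaultFS decides whether this targets HDFS
    Path path = new Path("/tmp/flush-vs-hflush-demo");  // hypothetical path

    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write("data before restart".getBytes("UTF-8"));

      // flush(): inherited from java.io.OutputStream. Empties the client-side
      // buffer but gives no promise that the bytes have reached any DataNode.
      out.flush();

      out.write("more data".getBytes("UTF-8"));

      // hflush(): from org.apache.hadoop.fs.Syncable. Returns only after the
      // data has been flushed out to all DataNodes in the write pipeline, so
      // new readers can see it.
      out.hflush();
    }
  }
}

Neither call forces the DataNodes to sync the data to disk; that stronger guarantee is what Syncable.hsync() provides.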