HDFS-3181. Fix a test case in TestLeaseRecovery2.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1331138 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2012-04-26 22:44:34 +00:00
parent 1a76c82a31
commit 3e835c47d1
3 changed files with 41 additions and 14 deletions

View File

@ -565,6 +565,8 @@ Release 2.0.0 - UNRELEASED
HDFS-3319. Change DFSOutputStream to not to start a thread in constructors. HDFS-3319. Change DFSOutputStream to not to start a thread in constructors.
(szetszwo) (szetszwo)
HDFS-3181. Fix a test case in TestLeaseRecovery2. (szetszwo)
BREAKDOWN OF HDFS-1623 SUBTASKS BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

View File

@ -36,6 +36,7 @@
import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@ -49,6 +50,7 @@
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level; import org.apache.log4j.Level;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
@ -90,7 +92,7 @@ public static void startUp() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
cluster.waitActive(); cluster.waitActive();
dfs = (DistributedFileSystem)cluster.getFileSystem(); dfs = cluster.getFileSystem();
} }
/** /**
@ -406,17 +408,26 @@ public void testSoftLeaseRecovery() throws Exception {
*/ */
@Test @Test
public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception { public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
hardLeaseRecoveryRestartHelper(false); hardLeaseRecoveryRestartHelper(false, -1);
}
@Test
public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
hardLeaseRecoveryRestartHelper(false, 1535);
} }
@Test @Test
public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart() public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
throws Exception { throws Exception {
hardLeaseRecoveryRestartHelper(true); hardLeaseRecoveryRestartHelper(true, -1);
} }
public void hardLeaseRecoveryRestartHelper(boolean doRename) public void hardLeaseRecoveryRestartHelper(boolean doRename, int size)
throws Exception { throws Exception {
if (size < 0) {
size = AppendTestUtil.nextInt(FILE_SIZE + 1);
}
//create a file //create a file
String fileStr = "/hardLeaseRecovery"; String fileStr = "/hardLeaseRecovery";
AppendTestUtil.LOG.info("filestr=" + fileStr); AppendTestUtil.LOG.info("filestr=" + fileStr);
@ -426,7 +437,6 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename)
assertTrue(dfs.dfs.exists(fileStr)); assertTrue(dfs.dfs.exists(fileStr));
// write bytes into the file. // write bytes into the file.
int size = AppendTestUtil.nextInt(FILE_SIZE);
AppendTestUtil.LOG.info("size=" + size); AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer, 0, size); stm.write(buffer, 0, size);
@ -440,6 +450,11 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename)
AppendTestUtil.LOG.info("hflush"); AppendTestUtil.LOG.info("hflush");
stm.hflush(); stm.hflush();
// check visible length
final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
Assert.assertEquals(size, in.getVisibleLength());
in.close();
if (doRename) { if (doRename) {
fileStr += ".renamed"; fileStr += ".renamed";
Path renamedPath = new Path(fileStr); Path renamedPath = new Path(fileStr);
@ -463,14 +478,11 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename)
// Make sure lease recovery begins. // Make sure lease recovery begins.
Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2); Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
assertEquals("lease holder should now be the NN", HdfsServerConstants.NAMENODE_LEASE_HOLDER, checkLease(fileStr, size);
NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
cluster.restartNameNode(false); cluster.restartNameNode(false);
assertEquals("lease holder should still be the NN after restart", checkLease(fileStr, size);
HdfsServerConstants.NAMENODE_LEASE_HOLDER,
NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
// Let the DNs send heartbeats again. // Let the DNs send heartbeats again.
for (DataNode dn : cluster.getDataNodes()) { for (DataNode dn : cluster.getDataNodes()) {
@ -492,12 +504,12 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename)
assertEquals(size, locatedBlocks.getFileLength()); assertEquals(size, locatedBlocks.getFileLength());
// make sure that the client can't write data anymore. // make sure that the client can't write data anymore.
stm.write('b');
try { try {
stm.write('b');
stm.hflush(); stm.hflush();
fail("Should not be able to flush after we've lost the lease"); fail("Should not be able to flush after we've lost the lease");
} catch (IOException e) { } catch (IOException e) {
LOG.info("Expected exception on hflush", e); LOG.info("Expected exception on write/hflush", e);
} }
try { try {
@ -512,4 +524,16 @@ public void hardLeaseRecoveryRestartHelper(boolean doRename)
"File size is good. Now validating sizes from datanodes..."); "File size is good. Now validating sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr); AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
} }
/**
 * Assert the expected lease holder for a path after hard lease recovery.
 *
 * @param f    path of the file whose lease is checked
 * @param size expected file size; a size of 0 signals the file has been
 *             closed, so no lease should remain. Otherwise the NameNode
 *             should hold the lease while recovery is in progress.
 */
static void checkLease(String f, int size) {
  final String holder = NameNodeAdapter.getLeaseHolderForPath(
      cluster.getNameNode(), f);
  if (size == 0) {
    // A closed file has no lease; getLeaseHolderForPath returns null.
    assertEquals("lease holder should be null, file is closed", null, holder);
  } else {
    // During hard lease recovery the NameNode takes over the lease.
    assertEquals("lease holder should now be the NN",
        HdfsServerConstants.NAMENODE_LEASE_HOLDER, holder);
  }
}
} }

View File

@ -126,7 +126,8 @@ public static void setLeasePeriod(final FSNamesystem namesystem, long soft, long
} }
public static String getLeaseHolderForPath(NameNode namenode, String path) { public static String getLeaseHolderForPath(NameNode namenode, String path) {
return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder(); Lease l = namenode.getNamesystem().leaseManager.getLeaseByPath(path);
return l == null? null: l.getHolder();
} }
/** /**