diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 419fbf9e25..348ade7607 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -536,6 +536,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-3809. Make BKJM use protobufs for all serialization with ZK.
     (Ivan Kelly via umamahesh)
 
+    HDFS-3804. TestHftpFileSystem fails intermittently with JDK7
+    (Trevor Robinson via daryn)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
index 6cb0ad1ce8..af62f3ca30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -42,18 +43,17 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.log4j.Level;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.*;
 
 public class TestHftpFileSystem {
   private static final Random RAN = new Random();
 
   private static Configuration config = null;
   private static MiniDFSCluster cluster = null;
-  private static FileSystem hdfs = null;
-  private static HftpFileSystem hftpFs = null;
   private static String blockPoolId = null;
+  private static String hftpUri = null;
+  private FileSystem hdfs = null;
+  private HftpFileSystem hftpFs = null;
 
   private static Path[] TEST_PATHS = new Path[] {
       // URI does not encode, Request#getPathInfo returns /foo
@@ -93,26 +93,33 @@ public static void setUp() throws IOException {
 
     config = new Configuration();
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
-    hdfs = cluster.getFileSystem();
     blockPoolId = cluster.getNamesystem().getBlockPoolId();
-    final String hftpUri =
+    hftpUri =
       "hftp://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(config);
   }
 
   @AfterClass
   public static void tearDown() throws IOException {
-    if (hdfs != null) {
-      hdfs.close();
-    }
-    if (hftpFs != null) {
-      hftpFs.close();
-    }
     if (cluster != null) {
       cluster.shutdown();
     }
   }
+
+  @Before
+  public void initFileSystems() throws IOException {
+    hdfs = cluster.getFileSystem();
+    hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(config);
+    // clear out the namespace
+    for (FileStatus stat : hdfs.listStatus(new Path("/"))) {
+      hdfs.delete(stat.getPath(), true);
+    }
+  }
+
+  @After
+  public void resetFileSystems() throws IOException {
+    FileSystem.closeAll();
+  }
 
   /**
    * Test file creation and access with file names that need encoding.
@@ -280,19 +287,8 @@ private void checkClosedStream(InputStream is) {
     assertEquals("Stream closed", ioe.getMessage());
   }
 
-  public void resetFileSystem() throws IOException {
-    // filesystem caching has a quirk/bug that it caches based on the user's
-    // given uri. the result is if a filesystem is instantiated with no port,
-    // it gets the default port. then if the default port is changed,
-    // and another filesystem is instantiated with no port, the prior fs
-    // is returned, not a new one using the changed port. so let's flush
-    // the cache between tests...
-    FileSystem.closeAll();
-  }
-
   @Test
   public void testHftpDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     URI uri = URI.create("hftp://localhost");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@@ -309,7 +305,6 @@ public void testHftpDefaultPorts() throws IOException {
 
   @Test
   public void testHftpCustomDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     conf.setInt("dfs.http.port", 123);
     conf.setInt("dfs.https.port", 456);
@@ -329,7 +324,6 @@ public void testHftpCustomDefaultPorts() throws IOException {
 
   @Test
   public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     URI uri = URI.create("hftp://localhost:123");
     HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
@@ -346,7 +340,6 @@ public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
 
   @Test
   public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     conf.setInt("dfs.http.port", 123);
     conf.setInt("dfs.https.port", 456);
@@ -368,7 +361,6 @@ public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
 
   @Test
   public void testHsftpDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     URI uri = URI.create("hsftp://localhost");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
@@ -385,7 +377,6 @@ public void testHsftpDefaultPorts() throws IOException {
 
   @Test
   public void testHsftpCustomDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     conf.setInt("dfs.http.port", 123);
     conf.setInt("dfs.https.port", 456);
@@ -405,7 +396,6 @@ public void testHsftpCustomDefaultPorts() throws IOException {
 
   @Test
   public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     URI uri = URI.create("hsftp://localhost:123");
     HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
@@ -422,7 +412,6 @@ public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
 
   @Test
   public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
-    resetFileSystem();
     Configuration conf = new Configuration();
     conf.setInt("dfs.http.port", 123);
     conf.setInt("dfs.https.port", 456);
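
Note, separate from the patch itself: the removed resetFileSystem() comment describes the FileSystem cache quirk that made these tests order-dependent under JDK7, where test methods no longer run in source order. The sketch below is illustrative only; the class name FsCacheDemo, the main() driver, and the printed messages are not part of the patch, and it assumes the hadoop-hdfs client classes from this branch are on the classpath. As in the port tests above, no cluster is contacted, because FileSystem.get() only builds a client-side instance.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsCacheDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // A URI with no port: the instance is created with whatever default port
    // is in effect right now and is stored in the FileSystem cache.
    URI uri = URI.create("hftp://localhost");
    FileSystem first = FileSystem.get(uri, conf);

    // Changing the default port afterwards does not touch the cached entry;
    // the same URI still returns the instance bound to the old port.
    conf.setInt("dfs.http.port", 123);
    FileSystem second = FileSystem.get(uri, conf);
    System.out.println("cached instance reused: " + (first == second));

    // FileSystem.closeAll() flushes the cache, so the next lookup builds a
    // fresh instance that sees the updated configuration. The patch performs
    // this flush once per test in the @After method instead of relying on
    // each test to call an ad-hoc resetFileSystem() helper.
    FileSystem.closeAll();
    FileSystem third = FileSystem.get(uri, conf);
    System.out.println("new instance after closeAll: " + (first != third));
  }
}

With the flush in @After and the per-test @Before re-acquiring hdfs and hftpFs, each test gets a fresh client regardless of the order in which JDK7 runs the methods.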