From 76ea55caeba77ec8b7d07ffa32115bc7477652e9 Mon Sep 17 00:00:00 2001
From: Chris Nauroth
Date: Fri, 11 Apr 2014 15:30:18 +0000
Subject: [PATCH] HDFS-6234. TestDatanodeConfig#testMemlockLimit fails on Windows due to invalid file path. Contributed by Chris Nauroth.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1586682 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../apache/hadoop/hdfs/TestDatanodeConfig.java  | 15 ++++++++++++---
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ecf918092f..5fbbb9d1a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -375,6 +375,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6231. DFSClient hangs infinitely if using hedged reads and all
     eligible datanodes die. (cnauroth)
 
+    HDFS-6234. TestDatanodeConfig#testMemlockLimit fails on Windows due to
+    invalid file path. (cnauroth)
+
 Release 2.4.0 - 2014-04-07
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
index 9a33106913..3507d6f354 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
@@ -86,9 +86,11 @@ public void testDataDirectories() throws IOException {
       fail();
     } catch(Exception e) {
       // expecting exception here
+    } finally {
+      if (dn != null) {
+        dn.shutdown();
+      }
     }
-    if(dn != null)
-      dn.shutdown();
     assertNull("Data-node startup should have failed.", dn);
 
     // 2. Test "file:" schema and no schema (path-only). Both should work.
@@ -121,17 +123,21 @@ public void testMemlockLimit() throws Exception {
     // Can't increase the memlock limit past the maximum.
     assumeTrue(memlockLimit != Long.MAX_VALUE);
 
+    File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
     Configuration conf = cluster.getConfiguration(0);
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+      makeURI("file", null, fileAsURI(dataDir).getPath()));
     long prevLimit = conf.
         getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
             DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
+    DataNode dn = null;
     try {
       // Try starting the DN with limit configured to the ulimit
       conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
           memlockLimit);
-      DataNode dn = null;
       dn = DataNode.createDataNode(new String[]{}, conf);
       dn.shutdown();
+      dn = null;
       // Try starting the DN with a limit > ulimit
       conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
           memlockLimit+1);
@@ -142,6 +148,9 @@ public void testMemlockLimit() throws Exception {
             "more than the datanode's available RLIMIT_MEMLOCK", e);
       }
     } finally {
+      if (dn != null) {
+        dn.shutdown();
+      }
       conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
           prevLimit);
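
The fix hinges on handing the DataNode a data directory expressed as a valid "file:" URI rather than a raw platform path, which is what breaks on Windows. The following is a minimal standalone sketch, not part of the commit: it uses only java.io.File and java.net.URI (rather than Hadoop's fileAsURI/makeURI helpers, whose exact behavior is assumed here) to show the difference between a raw Windows-style path and the URI form the patched test builds for dfs.datanode.data.dir.

```java
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

/**
 * Illustration only: why a raw absolute path can be an invalid "file:" URI
 * on Windows, and how converting through java.io.File yields a usable URI
 * string. The directory name below is an arbitrary example.
 */
public class FileUriSketch {
  public static void main(String[] args) throws IOException, URISyntaxException {
    // A test data directory; on Windows this canonicalizes to something like
    // C:\build\test\data, on Linux to /build/test/data.
    File dataDir = new File("build/test/data").getCanonicalFile();

    // Raw absolute path: on Windows it contains backslashes and a
    // drive-letter colon, so it does not parse cleanly as a URI.
    String rawPath = dataDir.getAbsolutePath();

    // File#toURI() escapes the path and prepends the "file:" scheme,
    // e.g. file:/C:/build/test/data, which is valid on every platform.
    URI fileUri = dataDir.toURI();

    // Rebuilding the URI from its path component mirrors the shape of the
    // patched test's makeURI("file", null, fileAsURI(dataDir).getPath()).
    URI rebuilt = new URI("file", null, fileUri.getPath(), null);

    System.out.println("raw path : " + rawPath);
    System.out.println("file URI : " + fileUri);
    System.out.println("rebuilt  : " + rebuilt);
  }
}
```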