diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 932b5001eb..2e51086d7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -752,6 +752,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8380. Always call addStoredBlock on blocks which have been shifted
     from one storage to another (cmccabe)
 
+    HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are
+    causing Release Audit Warnings. (Ruth Wisniewski via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index 1806d82de0..a93cc2a562 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -20,9 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.management.ManagementFactory;
+import java.io.File;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
@@ -126,7 +128,12 @@ public class TestHostsFiles {
       assertTrue("Live nodes should contain the decommissioned node",
           nodes.contains("Decommissioned"));
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      if (localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
     }
   }
 
@@ -167,6 +174,9 @@ public class TestHostsFiles {
       if (cluster != null) {
         cluster.shutdown();
       }
+      if (localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index c649621db7..681e8a4ed5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.commons.io.FileUtils;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -214,6 +215,8 @@ public class TestNameNodeMXBean {
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
     MiniDFSCluster cluster = null;
+    FileSystem localFileSys = null;
+    Path dir = null;
 
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -226,10 +229,9 @@ public class TestNameNodeMXBean {
           "Hadoop:service=NameNode,name=NameNodeInfo");
 
       // Define include file to generate deadNodes metrics
-      FileSystem localFileSys = FileSystem.getLocal(conf);
+      localFileSys = FileSystem.getLocal(conf);
       Path workingDir = localFileSys.getWorkingDirectory();
-      Path dir = new Path(workingDir,
-          "build/test/data/temp/TestNameNodeMXBean");
+      dir = new Path(workingDir, "build/test/data/temp/TestNameNodeMXBean");
       Path includeFile = new Path(dir, "include");
       assertTrue(localFileSys.mkdirs(dir));
       StringBuilder includeHosts = new StringBuilder();
@@ -258,8 +260,10 @@ public class TestNameNodeMXBean {
         assertTrue(deadNode.containsKey("decommissioned"));
         assertTrue(deadNode.containsKey("xferaddr"));
       }
-
     } finally {
+      if ((localFileSys != null) && localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
       if (cluster != null) {
         cluster.shutdown();
       }