HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are causing Release Audit Warnings. (Contributed by Ruth Wisniewski)
commit 54fa9b4217 (parent 4356e8a5ef)
CHANGES.txt
@@ -752,6 +752,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8380. Always call addStoredBlock on blocks which have been shifted
     from one storage to another (cmccabe)
 
+    HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are
+    causing Release Audit Warnings. (Ruth Wisniewski via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
TestHostsFiles.java
@@ -20,9 +20,11 @@
 import static org.junit.Assert.assertTrue;
 
 import java.lang.management.ManagementFactory;
+import java.io.File;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
@@ -126,7 +128,12 @@ public void testHostsExcludeInUI() throws Exception {
       assertTrue("Live nodes should contain the decommissioned node",
           nodes.contains("Decommissioned"));
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      if (localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
     }
   }
 
@@ -167,6 +174,9 @@ public void testHostsIncludeForDeadCount() throws Exception {
       if (cluster != null) {
         cluster.shutdown();
       }
+      if (localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
     }
   }
 }
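Both TestHostsFiles fixes follow the same shape: shut the MiniDFSCluster down if it was started, then quietly delete the temporary directory the test wrote under build/test/data, so no stray files trigger Release Audit (RAT) warnings on later builds. A minimal standalone sketch of that pattern (the class name and temp path below are illustrative, not from the patch):

    import java.io.File;

    import org.apache.commons.io.FileUtils;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TempDirCleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem localFileSys = FileSystem.getLocal(conf);
        // Hypothetical temp location, mirroring the build/test/data layout.
        Path dir = new Path(localFileSys.getWorkingDirectory(),
            "build/test/data/temp/TempDirCleanupSketch");
        try {
          localFileSys.mkdirs(dir);
          // ... test body would write include/exclude host files under dir ...
        } finally {
          // deleteQuietly swallows IO errors, so cleanup can never mask a
          // failure thrown from the try block.
          if (localFileSys.exists(dir)) {
            FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
          }
        }
      }
    }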
TestNameNodeMXBean.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.commons.io.FileUtils;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -214,6 +215,8 @@ public void testLastContactTime() throws Exception {
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
     MiniDFSCluster cluster = null;
+    FileSystem localFileSys = null;
+    Path dir = null;
 
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -226,10 +229,9 @@ public void testLastContactTime() throws Exception {
           "Hadoop:service=NameNode,name=NameNodeInfo");
 
       // Define include file to generate deadNodes metrics
-      FileSystem localFileSys = FileSystem.getLocal(conf);
+      localFileSys = FileSystem.getLocal(conf);
       Path workingDir = localFileSys.getWorkingDirectory();
-      Path dir = new Path(workingDir,
-          "build/test/data/temp/TestNameNodeMXBean");
+      dir = new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean");
       Path includeFile = new Path(dir, "include");
       assertTrue(localFileSys.mkdirs(dir));
       StringBuilder includeHosts = new StringBuilder();
@@ -258,8 +260,10 @@ public void testLastContactTime() throws Exception {
         assertTrue(deadNode.containsKey("decommissioned"));
         assertTrue(deadNode.containsKey("xferaddr"));
       }
-
     } finally {
+      if ((localFileSys != null) && localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
       if (cluster != null) {
         cluster.shutdown();
       }
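The TestNameNodeMXBean hunks also hoist localFileSys and dir out of the try block: a variable declared inside try is out of scope in finally, so the declarations move above the try (initialized to null) and the cleanup null-checks before touching the filesystem. A condensed sketch of that scoping pattern (same imports as the sketch above; the helper name and the extra dir null-check are added assumptions, not in the patch):

    static void runWithCleanup(Configuration conf) throws Exception {
      FileSystem localFileSys = null; // hoisted so the finally block can see them
      Path dir = null;
      try {
        localFileSys = FileSystem.getLocal(conf);
        dir = new Path(localFileSys.getWorkingDirectory(),
            "build/test/data/temp/TestNameNodeMXBean");
        localFileSys.mkdirs(dir);
        // ... test body: write an include file, start a MiniDFSCluster, assert ...
      } finally {
        // A failure before the assignments leaves these null, hence the guards.
        if (localFileSys != null && dir != null && localFileSys.exists(dir)) {
          FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
        }
      }
    }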