diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a7b436419b..7a85de2ca6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -506,6 +506,8 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5073. TestListCorruptFileBlocks fails intermittently. (Arpit Agarwal)
 
+    HDFS-1386. TestJMXGet fails in jdk7 (jeagles)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 4ed4244ac1..34d5db004f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -24,6 +24,8 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import javax.management.ObjectName;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -61,6 +63,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   private JournalNodeRpcServer rpcServer;
   private JournalNodeHttpServer httpServer;
   private Map<String, Journal> journalsById = Maps.newHashMap();
+  private ObjectName journalNodeInfoBeanName;
 
   private File localDir;
 
@@ -181,6 +184,11 @@ public void stop(int rc) {
     for (Journal j : journalsById.values()) {
       IOUtils.cleanup(LOG, j);
     }
+
+    if (journalNodeInfoBeanName != null) {
+      MBeans.unregister(journalNodeInfoBeanName);
+      journalNodeInfoBeanName = null;
+    }
   }
 
   /**
@@ -256,7 +264,7 @@ public boolean accept(File file) {
    * Register JournalNodeMXBean
    */
   private void registerJNMXBean() {
-    MBeans.register("JournalNode", "JournalNodeInfo", this);
+    journalNodeInfoBeanName = MBeans.register("JournalNode", "JournalNodeInfo", this);
   }
 
   private class ErrorReporter implements StorageErrorReporter {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 7529b9f37b..8b258e8269 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -96,6 +96,8 @@
 import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import javax.management.ObjectName;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
@@ -210,6 +212,7 @@ public static InetSocketAddress createSocketAddr(String target) {
   private boolean connectToDnViaHostname;
   ReadaheadPool readaheadPool;
   private final boolean getHdfsBlockLocationsEnabled;
+  private ObjectName dataNodeInfoBeanName;
 
   /**
    * Create the DataNode given a configuration and an array of dataDirs.
@@ -879,7 +882,7 @@ public static InetSocketAddress getInfoAddr(Configuration conf) {
   }
 
   private void registerMXBean() {
-    MBeans.register("DataNode", "DataNodeInfo", this);
+    dataNodeInfoBeanName = MBeans.register("DataNode", "DataNodeInfo", this);
   }
 
   @VisibleForTesting
@@ -1236,6 +1239,10 @@ public void shutdown() {
     if (metrics != null) {
       metrics.shutdown();
     }
+    if (dataNodeInfoBeanName != null) {
+      MBeans.unregister(dataNodeInfoBeanName);
+      dataNodeInfoBeanName = null;
+    }
   }
 
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 73a831544e..472b41a006 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5517,6 +5517,7 @@ public String getFSState() {
   }
 
   private ObjectName mbeanName;
+  private ObjectName mxbeanName;
 
   /**
    * Register the FSNamesystem MBean using the name
@@ -5540,6 +5541,11 @@ private void registerMBean() {
   void shutdown() {
     if (mbeanName != null) {
       MBeans.unregister(mbeanName);
+      mbeanName = null;
+    }
+    if (mxbeanName != null) {
+      MBeans.unregister(mxbeanName);
+      mxbeanName = null;
     }
     if (dir != null) {
       dir.shutdown();
@@ -6353,7 +6359,7 @@ void logFsckEvent(String src, InetAddress remoteAddress) throws IOException {
    * Register NameNodeMXBean
    */
   private void registerMXBean() {
-    MBeans.register("NameNode", "NameNodeInfo", this);
+    mxbeanName = MBeans.register("NameNode", "NameNodeInfo", this);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index d3e63837da..99e31c7a4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -26,6 +26,9 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+
+import javax.management.ObjectName;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -261,6 +264,7 @@ public long getProtocolVersion(String protocol,
   private NameNodeRpcServer rpcServer;
 
   private JvmPauseMonitor pauseMonitor;
+  private ObjectName nameNodeStatusBeanName;
 
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
@@ -745,6 +749,10 @@ public void stop() {
       if (namesystem != null) {
         namesystem.shutdown();
       }
+      if (nameNodeStatusBeanName != null) {
+        MBeans.unregister(nameNodeStatusBeanName);
+        nameNodeStatusBeanName = null;
+      }
     }
   }
 
@@ -1414,7 +1422,7 @@ synchronized HAServiceState getServiceState() {
    * Register NameNodeStatusMXBean
    */
   private void registerNNSMXBean() {
-    MBeans.register("NameNode", "NameNodeStatus", this);
+    nameNodeStatusBeanName = MBeans.register("NameNode", "NameNodeStatus", this);
   }
 
   @Override // NameNodeStatusMXBean
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index 9e5e97f342..eb439f420e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -28,7 +28,12 @@
 import java.io.PipedInputStream;
 import java.io.PipedOutputStream;
 import java.io.PrintStream;
+import java.lang.management.ManagementFactory;
 import java.util.Random;
+import java.util.Set;
+
+import javax.management.MBeanServerConnection;
+import javax.management.ObjectName;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -92,9 +97,8 @@ public void testNameNode() throws Exception {
         fileSize, fileSize, blockSize, (short) 2, seed);
 
     JMXGet jmx = new JMXGet();
-    //jmx.setService("*"); // list all hadoop services
-    //jmx.init();
-    //jmx = new JMXGet();
+    String serviceName = "NameNode";
+    jmx.setService(serviceName);
     jmx.init(); // default lists namenode mbeans only
     assertTrue("error printAllValues", checkPrintAllValues(jmx));
 
@@ -107,6 +111,10 @@ public void testNameNode() throws Exception {
         jmx.getValue("NumOpenConnections")));
 
     cluster.shutdown();
+    MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
+    ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
+    Set<ObjectName> names = mbsc.queryNames(query, null);
+    assertTrue("No beans should be registered for " + serviceName, names.isEmpty());
   }
 
   private static boolean checkPrintAllValues(JMXGet jmx) throws Exception {
@@ -140,13 +148,15 @@ public void testDataNode() throws Exception {
         fileSize, fileSize, blockSize, (short) 2, seed);
 
     JMXGet jmx = new JMXGet();
-    //jmx.setService("*"); // list all hadoop services
-    //jmx.init();
-    //jmx = new JMXGet();
-    jmx.setService("DataNode");
+    String serviceName = "DataNode";
+    jmx.setService(serviceName);
     jmx.init();
     assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
 
     cluster.shutdown();
+    MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
+    ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
+    Set<ObjectName> names = mbsc.queryNames(query, null);
+    assertTrue("No beans should be registered for " + serviceName, names.isEmpty());
   }
 }