From 02b19e0738d9df1e4d38280c5575e1d3ba49f8cb Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Thu, 15 Aug 2013 18:22:52 +0000
Subject: [PATCH] HDFS-5076. Add MXBean methods to query NN's transaction
 information and JournalNode's journal status. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514422 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   3 +
 .../hdfs/qjournal/server/JournalNode.java     |  52 ++++++++-
 .../qjournal/server/JournalNodeMXBean.java    |  36 ++++++
 .../hdfs/server/namenode/FSNamesystem.java    |  10 ++
 .../hdfs/server/namenode/NameNodeMXBean.java  |   6 +
 .../server/TestJournalNodeMXBean.java         | 107 ++++++++++++++++++
 .../server/namenode/TestNameNodeMXBean.java   |   5 +
 7 files changed, 218 insertions(+), 1 deletion(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e720915987..4430867402 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -288,6 +288,9 @@ Release 2.1.1-beta - UNRELEASED
 
     HDFS-4763 Add script changes/utility for starting NFS gateway (brandonli)
 
+    HDFS-5076 Add MXBean methods to query NN's transaction information and
+    JournalNode's journal status. (jing9)
+
   IMPROVEMENTS
 
     HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 8291b5932e..4ed4244ac1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.qjournal.server;
 
 import java.io.File;
+import java.io.FileFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -34,11 +36,13 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.mortbay.util.ajax.JSON;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
@@ -51,7 +55,7 @@
  * in the quorum protocol.
  */
 @InterfaceAudience.Private
-public class JournalNode implements Tool, Configurable {
+public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   public static final Log LOG = LogFactory.getLog(JournalNode.class);
   private Configuration conf;
   private JournalNodeRpcServer rpcServer;
@@ -128,6 +132,8 @@ public void start() throws IOException {
     SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName());
 
+    registerJNMXBean();
+
     httpServer = new JournalNodeHttpServer(conf, this);
     httpServer.start();
 
@@ -208,6 +214,50 @@ private File getLogDir(String jid) {
     return new File(new File(dir), jid);
   }
 
+  @Override // JournalNodeMXBean
+  public String getJournalsStatus() {
+    // jid:{Formatted:True/False}
+    Map<String, Map<String, String>> status =
+        new HashMap<String, Map<String, String>>();
+    synchronized (this) {
+      for (Map.Entry<String, Journal> entry : journalsById.entrySet()) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", Boolean.toString(entry.getValue().isFormatted()));
+        status.put(entry.getKey(), jMap);
+      }
+    }
+
+    // It is possible that some journals have been formatted before, while the
+    // corresponding journals are not in journalsById yet (because of restarting
+    // JN, e.g.). For simplicity, let's just assume a journal is formatted if
+    // there is a directory for it. We can also call analyzeStorage method for
+    // these directories if necessary.
+    // Also note that we do not need to check localDir here since
+    // validateAndCreateJournalDir has been called before we register the
+    // MXBean.
+    File[] journalDirs = localDir.listFiles(new FileFilter() {
+      @Override
+      public boolean accept(File file) {
+        return file.isDirectory();
+      }
+    });
+    for (File journalDir : journalDirs) {
+      String jid = journalDir.getName();
+      if (!status.containsKey(jid)) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", "true");
+        status.put(jid, jMap);
+      }
+    }
+    return JSON.toString(status);
+  }
+
+  /**
+   * Register JournalNodeMXBean
+   */
+  private void registerJNMXBean() {
+    MBeans.register("JournalNode", "JournalNodeInfo", this);
+  }
 
   private class ErrorReporter implements StorageErrorReporter {
     @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
new file mode 100644
index 0000000000..4e8d9da50f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is the JMX management interface for JournalNode information
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface JournalNodeMXBean {
+
+  /**
+   * Get status information (e.g., whether formatted) of JournalNode's journals.
+   *
+   * @return A string presenting status for each journal
+   */
+  public String getJournalsStatus();
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 989f688a0f..2f230d7350 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6364,6 +6364,16 @@ public String getNameJournalStatus() {
     return JSON.toString(jasList);
   }
 
+  @Override // NameNodeMXBean
+  public String getJournalTransactionInfo() {
+    Map<String, String> txnIdMap = new HashMap<String, String>();
+    txnIdMap.put("LastAppliedOrWrittenTxId",
+        Long.toString(this.getFSImage().getLastAppliedOrWrittenTxId()));
+    txnIdMap.put("MostRecentCheckpointTxId",
+        Long.toString(this.getFSImage().getMostRecentCheckpointTxId()));
+    return JSON.toString(txnIdMap);
+  }
+
   @Override // NameNodeMXBean
   public String getNNStarted() {
     return getStartTime().toString();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index 50315a4ae6..173d5aea4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
@@ -188,6 +188,12 @@ public interface NameNodeMXBean {
    * @return the name journal status information, as a JSON string.
    */
   public String getNameJournalStatus();
+
+  /**
+   * Get information about the transaction ID, including the last applied
+   * transaction ID and the most recent checkpoint's transaction ID
+   */
+  public String getJournalTransactionInfo();
 
   /**
    * Gets the NN start time
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
new file mode 100644
index 0000000000..347184870f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
+
+/**
+ * Test {@link JournalNodeMXBean}
+ */
+public class TestJournalNodeMXBean {
+
+  private static final String NAMESERVICE = "ns1";
+  private static final int NUM_JN = 1;
+
+  private MiniJournalCluster jCluster;
+  private JournalNode jn;
+
+  @Before
+  public void setup() throws IOException {
+    // start 1 journal node
+    jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+  }
+
+  @After
+  public void cleanup() throws IOException {
+    if (jCluster != null) {
+      jCluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testJournalNodeMXBean() throws Exception {
+    // we have not formatted the journals yet, and the journal status in jmx
+    // should be empty since journal objects are created lazily
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    ObjectName mxbeanName = new ObjectName(
+        "Hadoop:service=JournalNode,name=JournalNodeInfo");
+
+    // getJournalsStatus
+    String journalStatus = (String) mbs.getAttribute(mxbeanName,
+        "JournalsStatus");
+    assertEquals(jn.getJournalsStatus(), journalStatus);
+    assertFalse(journalStatus.contains(NAMESERVICE));
+
+    // format the journal ns1
+    final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(12345, "mycluster",
+        "my-bp", 0L);
+    jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO);
+
+    // check again after format
+    // getJournalsStatus
+    journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
+    assertEquals(jn.getJournalsStatus(), journalStatus);
+    Map<String, Map<String, String>> jMap = new HashMap<String, Map<String, String>>();
+    Map<String, String> infoMap = new HashMap<String, String>();
+    infoMap.put("Formatted", "true");
+    jMap.put(NAMESERVICE, infoMap);
+    assertEquals(JSON.toString(jMap), journalStatus);
+
+    // restart journal node without formatting
+    jCluster = new MiniJournalCluster.Builder(new Configuration()).format(false)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    // re-check
+    journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
+    assertEquals(jn.getJournalsStatus(), journalStatus);
+    jMap = new HashMap<String, Map<String, String>>();
+    infoMap = new HashMap<String, String>();
+    infoMap.put("Formatted", "true");
+    jMap.put(NAMESERVICE, infoMap);
+    assertEquals(JSON.toString(jMap), journalStatus);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 227d2cef40..8d188d7b65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -120,6 +120,11 @@ public void testNameNodeMXBeanInfo() throws Exception {
     String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName,
         "NameJournalStatus"));
     assertEquals("Bad value for NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus);
+    // get attribute JournalTransactionInfo
+    String journalTxnInfo = (String) mbs.getAttribute(mxbeanName,
+        "JournalTransactionInfo");
+    assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(),
+        journalTxnInfo);
     // get attribute "NNStarted"
     String nnStarted = (String) mbs.getAttribute(mxbeanName, "NNStarted");
    assertEquals("Bad value for NNStarted", fsn.getNNStarted(), nnStarted);
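
For reference, the attributes added by this patch are ordinary JMX attributes: the tests above read them in-process from the platform MBean server, but a monitoring tool can read them remotely in the same way. The sketch below is illustrative only and is not part of the patch. It assumes the JournalNode and NameNode JVMs were started with remote JMX enabled (the standard com.sun.management.jmxremote.* options) and reachable at the placeholder hosts and ports shown, and that the NameNode bean is registered as Hadoop:service=NameNode,name=NameNodeInfo, the name used by TestNameNodeMXBean.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

// Illustrative client, not part of HDFS-5076. Hosts and ports are placeholders;
// the daemons must be started with remote JMX enabled.
public class JmxTxnInfoReader {
  public static void main(String[] args) throws Exception {
    readAttribute("service:jmx:rmi:///jndi/rmi://jn-host:9999/jmxrmi",
        "Hadoop:service=JournalNode,name=JournalNodeInfo", "JournalsStatus");
    readAttribute("service:jmx:rmi:///jndi/rmi://nn-host:9998/jmxrmi",
        "Hadoop:service=NameNode,name=NameNodeInfo", "JournalTransactionInfo");
  }

  private static void readAttribute(String url, String bean, String attr)
      throws Exception {
    // Open a remote JMX connection and read a single attribute.
    JMXConnector jmxc = JMXConnectorFactory.connect(new JMXServiceURL(url));
    try {
      MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
      // Both new attributes are JSON strings, e.g.
      // {"ns1":{"Formatted":"true"}} for JournalsStatus and
      // {"LastAppliedOrWrittenTxId":"...","MostRecentCheckpointTxId":"..."}
      // for JournalTransactionInfo.
      String value = (String) mbsc.getAttribute(new ObjectName(bean), attr);
      System.out.println(attr + " = " + value);
    } finally {
      jmxc.close();
    }
  }
}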
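
The same beans also appear in the JSON served by each daemon's /jmx HTTP endpoint, which is often the easiest hook for monitoring scripts. A minimal sketch, again not part of the patch: it assumes the default HTTP ports of this release line (8480 for the JournalNode, 50070 for the NameNode), no HTTP authentication, and that the /jmx servlet accepts the qry filter parameter to narrow the output to one bean.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

// Illustrative only; host names and ports are placeholders.
public class JmxHttpReader {
  public static void main(String[] args) throws Exception {
    String[] urls = {
        // JournalNode bean registered by registerJNMXBean()
        "http://jn-host:8480/jmx?qry=Hadoop:service=JournalNode,name=JournalNodeInfo",
        // NameNode bean carrying the new JournalTransactionInfo attribute
        "http://nn-host:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"
    };
    for (String u : urls) {
      BufferedReader in = new BufferedReader(
          new InputStreamReader(new URL(u).openStream(), "UTF-8"));
      try {
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line); // JSON document listing the bean's attributes
        }
      } finally {
        in.close();
      }
    }
  }
}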