diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 82d4638053..4079cd63be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -19,8 +19,10 @@
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
+import org.apache.hadoop.util.VersionInfo;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -57,7 +59,9 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
 
 /**
  * The JournalNode is a daemon which allows namenodes using
@@ -392,7 +396,25 @@ public boolean accept(File file) {
     return JSON.toString(status);
   }
-
+
+  @Override // JournalNodeMXBean
+  public String getHostAndPort() {
+    return NetUtils.getHostPortString(rpcServer.getAddress());
+  }
+
+  @Override // JournalNodeMXBean
+  public List<String> getClusterIds() {
+    return journalsById.values().stream()
+        .map(j -> j.getStorage().getClusterID())
+        .filter(cid -> !Strings.isNullOrEmpty(cid))
+        .distinct().collect(Collectors.toList());
+  }
+
+  @Override // JournalNodeMXBean
+  public String getVersion() {
+    return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
+  }
+
   /**
    * Register JournalNodeMXBean
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
index 4e8d9da50f..f265c31a34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
@@ -20,6 +20,8 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import java.util.List;
+
 /**
  * This is the JMX management interface for JournalNode information
  */
@@ -32,5 +34,27 @@ public interface JournalNodeMXBean {
    *
    * @return A string presenting status for each journal
    */
-  public String getJournalsStatus();
+  String getJournalsStatus();
+
+  /**
+   * Get host and port of JournalNode.
+   *
+   * @return colon separated host and port.
+   */
+  String getHostAndPort();
+
+  /**
+   * Get list of the clusters of JournalNode's journals
+   * as one JournalNode may support multiple clusters.
+   *
+   * @return list of clusters.
+   */
+  List<String> getClusterIds();
+
+  /**
+   * Gets the version of Hadoop.
+   *
+   * @return the version of Hadoop.
+   */
+  String getVersion();
 }
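As a point of reference, the sketch below (not part of the patch) reads the three new attributes through the standard javax.management remote API. The object name and the attribute names (HostAndPort, ClusterIds, Version) come from the patch itself; the class name, host, and port in the JMX service URL are placeholders, and the JournalNode JVM would need remote JMX enabled for the connection to succeed. Note that the List<String> returned by getClusterIds() surfaces as a String[] over JMX, as the updated TestJournalNodeMXBean below also shows.

    // Illustrative JMX client (not part of the patch); host and port are placeholders.
    import java.util.Arrays;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class JournalNodeInfoClient {
      public static void main(String[] args) throws Exception {
        // Requires com.sun.management.jmxremote.* options on the JournalNode JVM.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://jn-host.example.com:9999/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
          MBeanServerConnection mbs = connector.getMBeanServerConnection();
          // Same object name that jn.js queries through the /jmx servlet.
          ObjectName name =
              new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
          String hostAndPort = (String) mbs.getAttribute(name, "HostAndPort");
          String[] clusterIds = (String[]) mbs.getAttribute(name, "ClusterIds");
          String version = (String) mbs.getAttribute(name, "Version");
          System.out.println("HostAndPort: " + hostAndPort);
          System.out.println("ClusterIds : " + Arrays.toString(clusterIds));
          System.out.println("Version    : " + version);
        }
      }
    }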
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
index 023c3aa0b7..3add5867ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
[HTML hunks not preserved in this excerpt: the old full "JournalNode Information" page (navigation markup, the footer "Hadoop, {release-year-token}.", and its script includes) is removed and replaced by a minimal page titled "Hadoop Administration", presumably a stub that forwards to the new journalnode.html in the style of the other HDFS web UIs.]
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/jn.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/jn.js
new file mode 100644
index 0000000000..7be48f1e04
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/jn.js
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function () {
+  "use strict";
+
+  var data = {};
+
+  dust.loadSource(dust.compile($('#tmpl-jn').html(), 'jn'));
+
+  var BEANS = [
+    {"name": "jn", "url": "/jmx?qry=Hadoop:service=JournalNode,name=JournalNodeInfo"},
+    {"name": "journals", "url": "/jmx?qry=Hadoop:service=JournalNode,name=Journal-*"}
+
+  ];
+
+  var HELPERS = {
+    'helper_date_tostring' : function (chunk, ctx, bodies, params) {
+      var value = dust.helpers.tap(params.value, chunk, ctx);
+      return chunk.write('' + moment(Number(value)).format('ddd MMM DD HH:mm:ss ZZ YYYY'));
+    }
+  };
+
+  load_json(
+    BEANS,
+    guard_with_startup_progress(function(d) {
+      for (var k in d) {
+        data[k] = k === 'journals' ? workaround(d[k].beans) : d[k].beans[0];
+      }
+      render();
+    }),
+    function (url, jqxhr, text, err) {
+      show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
+    });
+
+  function guard_with_startup_progress(fn) {
+    return function() {
+      try {
+        fn.apply(this, arguments);
+      } catch (err) {
+        if (err instanceof TypeError) {
+          show_err_msg('JournalNode error: ' + err);
+        }
+      }
+    };
+  }
+
+  function workaround(journals) {
+    for (var i in journals){
+      journals[i]['NameService']= journals[i]['modelerType'].split("-")[1];
+    }
+
+    return journals;
+  }
+
+  function render() {
+    var base = dust.makeBase(HELPERS);
+    dust.render('jn', base.push(data), function(err, out) {
+      $('#tab-overview').html(out);
+      $('#tab-overview').addClass('active');
+    });
+  }
+
+  function show_err_msg() {
+    $('#alert-panel-body').html("Failed to load journalnode information");
+    $('#alert-panel').show();
+  }
+})();
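The page logic above pulls everything from the JMX JSON servlet rather than a dedicated REST endpoint. For anyone who wants to inspect the same data outside a browser, here is a minimal sketch (not part of the patch) that fetches the JournalNodeInfo bean over HTTP and prints the raw JSON; the host name and class name are placeholders, and 8480 is assumed to be the JournalNode HTTP port (the default for dfs.journalnode.http-address).

    // Illustrative only: fetches the same bean jn.js queries via the /jmx servlet.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class JournalNodeJmxFetch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://jn-host.example.com:8480/jmx"
            + "?qry=Hadoop:service=JournalNode,name=JournalNodeInfo");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            // The response is a JSON document whose "beans" array holds the attributes.
            System.out.println(line);
          }
        } finally {
          conn.disconnect();
        }
      }
    }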
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/journalnode.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/journalnode.html
new file mode 100644
index 0000000000..2e81fd4546
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/journalnode.html
@@ -0,0 +1,108 @@
[HTML body not preserved in this excerpt: a new 108-line "JournalNode Information" page in the style of the other HDFS web UIs. The stripped markup supplies the elements jn.js expects (the "tmpl-jn" dust template, the "tab-overview" pane, and the "alert-panel" containers), plus the footer "Hadoop, {release-year-token}." and the script includes that load jn.js and its dependencies. The file ends without a trailing newline.]
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
index 7550c4e9e7..e571fbce3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
@@ -96,7 +96,17 @@ public void testJournalNodeMXBean() throws Exception {
     infoMap1.put("Formatted", "false");
     jMap.put(MiniJournalCluster.CLUSTER_WAITACTIVE_URI, infoMap1);
     assertEquals(JSON.toString(jMap), journalStatus);
-
+
+    // check attributes
+    String hostAndPort = (String) mbs.getAttribute(mxbeanName, "HostAndPort");
+    assertEquals(jn.getHostAndPort(), hostAndPort);
+    assertTrue(hostAndPort.matches("localhost:\\d+"));
+    String[] clusterId = (String[]) mbs.getAttribute(mxbeanName, "ClusterIds");
+    assertEquals(jn.getClusterIds().size(), clusterId.length);
+    assertEquals("mycluster", clusterId[0]);
+    String version = (String) mbs.getAttribute(mxbeanName, "Version");
+    assertEquals(jn.getVersion(), version);
+
     // restart journal node without formatting
     jCluster = new MiniJournalCluster.Builder(new Configuration()).format(false)
         .numJournalNodes(NUM_JN).build();
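If it helps while reviewing, the updated test can be run on its own with the usual Surefire filter (assuming a standard Maven build of the source tree):

    mvn test -Dtest=TestJournalNodeMXBean -pl hadoop-hdfs-project/hadoop-hdfs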