HDFS-5334. Implement dfshealth.jsp in HTML pages. Contributed by Haohui Mai.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1532949 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent 76c49b6e95
commit 909ab360c0
@@ -255,7 +255,9 @@ Release 2.3.0 - UNRELEASED

    HDFS-5342. Provide more information in the FSNamesystem JMX interfaces.
    (Haohui Mai via jing9)

    HDFS-5334. Implement dfshealth.jsp in HTML pages. (Haohui Mai via jing9)

  IMPROVEMENTS

    HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@@ -542,6 +542,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <exclude>src/main/docs/releasenotes.html</exclude>
            <exclude>src/contrib/**</exclude>
            <exclude>src/site/resources/images/*</exclude>
            <exclude>src/main/webapps/static/dust-full-2.0.0.min.js</exclude>
            <exclude>src/main/webapps/static/dust-helpers-1.1.1.min.js</exclude>
            <exclude>src/main/webapps/hdfs/dfshealth.dust.html</exclude>
          </excludes>
        </configuration>
      </plugin>
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js (new file)
@@ -0,0 +1,116 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
(function ($, dust, exports) {
  "use strict";

  var filters = {
    'fmt_bytes': function (v) {
      var UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'ZB'];
      var prev = 0, i = 0;
      while (Math.floor(v) > 0 && i < UNITS.length) {
        prev = v;
        v /= 1024;
        i += 1;
      }

      if (i > 0 && i < UNITS.length) {
        v = prev;
        i -= 1;
      }
      return Math.round(v * 100) / 100 + ' ' + UNITS[i];
    },

    'fmt_percentage': function (v) {
      return Math.round(v * 100) / 100 + '%';
    },

    'fmt_time': function (v) {
      var s = Math.floor(v / 1000), h = Math.floor(s / 3600);
      s -= h * 3600;
      var m = Math.floor(s / 60);
      s -= m * 60;

      var res = s + " sec";
      if (m !== 0) {
        res = m + " mins, " + res;
      }

      if (h !== 0) {
        res = h + " hrs, " + res;
      }

      return res;
    }
  };
  $.extend(dust.filters, filters);

  /**
   * Load templates from external sources in sequential order, and
   * compile them. The loading order is important to resolve dependencies.
   *
   * The code compiles the templates on the client side, which should be
   * precompiled once we introduce the infrastructure in the build
   * system.
   *
   * templates is an array of tuples in the format of {url, name}.
   */
  function load_templates(dust, templates, success_cb, error_cb) {
    if (templates.length === 0) {
      success_cb();
      return;
    }

    var t = templates.shift();
    $.get(t.url, function (tmpl) {
      var c = dust.compile(tmpl, t.name);
      dust.loadSource(c);
      load_templates(dust, templates, success_cb, error_cb);
    }).error(function (jqxhr, text, err) {
      error_cb(t.url, jqxhr, text, err);
    });
  }

  /**
   * Load a sequence of JSON beans.
   *
   * beans is an array of tuples in the format of {url, name}.
   */
  function load_json(beans, success_cb, error_cb) {
    var data = {}, error = false, to_be_completed = beans.length;

    $.each(beans, function(idx, b) {
      if (error) {
        return false;
      }
      $.get(b.url, function (resp) {
        data[b.name] = resp;
        to_be_completed -= 1;
        if (to_be_completed === 0) {
          success_cb(data);
        }
      }).error(function (jqxhr, text, err) {
        error = true;
        error_cb(b.url, jqxhr, text, err);
      });
    });
  }

  exports.load_templates = load_templates;
  exports.load_json = load_json;

}($, dust, window));
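For context, a minimal sketch of how the helpers exported above are meant to be wired together on a page; the template name, bean URL, and element id here are illustrative and not part of this patch (the real wiring is in dfshealth.js further down):

// Hypothetical page script; assumes jQuery, dust, and dfs-dust.js are already loaded.
(function () {
  "use strict";

  // Templates are fetched and compiled strictly in array order.
  var TEMPLATES = [{'name': 'example', 'url': 'example.dust.html'}];

  // Each bean becomes a top-level key of the object handed to the success callback.
  var BEANS = [{'name': 'mem', 'url': '/jmx?qry=java.lang:type=Memory'}];

  load_templates(dust, TEMPLATES, function () {
    load_json(BEANS, function (data) {
      // Filters registered by dfs-dust.js, e.g. fmt_bytes, can now be used as
      // {mem.HeapMemoryUsage.used|fmt_bytes} inside the compiled template.
      dust.render('example', data, function (err, out) {
        $('#panel').html(out);
      });
    }, function (url, jqxhr, text, err) {
      console.log('Failed to load ' + url + ': ' + err);
    });
  }, function (url, jqxhr, text, err) {
    console.log('Failed to load template ' + url + ': ' + err);
  });
}());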
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html (new file)
@@ -0,0 +1,266 @@
<div class="page-header">
{#nnstat}
<h1>NameNode '{HostAndPort}' ({State})</h1>
{/nnstat}
</div>

{#nn}
{@if cond="{DistinctVersionCount} > 1"}
<div class="alert alert-dismissable alert-success">
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">×</button>

There are {DistinctVersionCount} versions of datanodes currently live:
{#DistinctVersions}
{key} ({value}) {@sep},{/sep}
{/DistinctVersions}
</div>
{/if}

{@if cond="{NumberOfMissingBlocks} > 0"}
<div class="alert alert-dismissable alert-warning">
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">×</button>

<p>There are {NumberOfMissingBlocks} missing blocks. The following files may be corrupted:</p>
<br/>
<div class="well">
{#CorruptFiles}
{.}<br/>
{/CorruptFiles}
</div>
<p>Please check the logs or run fsck in order to identify the missing blocks. See the Hadoop FAQ for common causes and potential solutions.</p>
</div>
{/if}
{/nn}

<div class="panel panel-primary">
<div class="panel-heading">Overview</div>
<div class="panel-body">
{#nn}
<table class="table table-bordered">
<tr><th>Started:</th><td>{NNStarted}</td></tr>
<tr><th>Version:</th><td>{Version}</td></tr>
<tr><th>Compiled:</th><td>{CompileInfo}</td></tr>
<tr><th>Cluster ID:</th><td>{ClusterId}</td></tr>
<tr><th>Block Pool ID:</th><td>{BlockPoolId}</td></tr>
</table>
{/nn}
</div>
</div>

<a id="browse-dir-first" style="cursor:pointer">Browse the filesystem</a> <a href="/logs/">NameNode Logs</a>

<hr/>

<div class="panel panel-primary">
<div class="panel-heading">Cluster Summary</div>
<div class="panel-body">

<p>
Security is {#nnstat}{#SecurityModeEnabled}on{:else}off{/SecurityModeEnabled}{/nnstat}.</p>
<p>{#nn}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/nn}</p>

<p>
{#fs}
{TotalLoad} files and directories, {BlocksTotal} blocks = {FilesTotal} total filesystem object(s).
{#helper_fs_max_objects/}
{/fs}
</p>
{#mem.HeapMemoryUsage}
<p>Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Heap Memory. Max Heap Memory is {max|fmt_bytes}. </p>
{/mem.HeapMemoryUsage}

{#mem.NonHeapMemoryUsage}
<p>Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Committed Non Heap Memory. Max Non Heap Memory is {max|fmt_bytes}. </p>
{/mem.NonHeapMemoryUsage}

{#nn}
<table class="table table-bordered table-striped">
<tr><th> Configured Capacity:</th><td>{Total|fmt_bytes}</td></tr>
<tr><th> DFS Used:</th><td>{Used|fmt_bytes}</td></tr>
<tr><th> Non DFS Used:</th><td>{NonDfsUsedSpace|fmt_bytes}</td></tr>
<tr><th> DFS Remaining:</th><td>{Free|fmt_bytes}</td></tr>
<tr><th> DFS Used%:</th><td>{PercentUsed|fmt_percentage}</td></tr>
<tr><th> DFS Remaining%:</th><td>{PercentRemaining|fmt_percentage}</td></tr>
<tr><th> Block Pool Used:</th><td>{BlockPoolUsedSpace|fmt_bytes}</td></tr>
<tr><th> Block Pool Used%:</th><td>{PercentBlockPoolUsed|fmt_percentage}</td></tr>
<tr><th> DataNodes usages% (Min/Median/Max/stdDev): </th>
<td>{#NodeUsage.nodeUsage}{min} / {median} / {max} / {stdDev}{/NodeUsage.nodeUsage}</td></tr>
{/nn}

{#fs}
<tr><th><a href="dfsnodelist.jsp?whatNodes=LIVE">Live Nodes</a></th><td>{NumLiveDataNodes} (Decommissioned: {NumDecomLiveDataNodes})</td></tr>
<tr><th><a href="dfsnodelist.jsp?whatNodes=DEAD">Dead Nodes</a></th><td>{NumDeadDataNodes} (Decommissioned: {NumDecomDeadDataNodes})</td></tr>
<tr><th><a href="dfsnodelist.jsp?whatNodes=DECOMMISSIONING">Decommissioning Nodes</a></th><td>{NumDecommissioningDataNodes}</td></tr>
<tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{UnderReplicatedBlocks}</td></tr>
{/fs}
</table>
</div>
</div>

<hr/>
<div class="panel panel-primary">
<div class="panel-heading">NameNode Journal Status</div>
<div class="panel-body">
<p><b>Current transaction ID:</b> {nn.JournalTransactionInfo.LastAppliedOrWrittenTxId}</p>
<table class="table" title="NameNode Journals">
<thead>
<tr><th>Journal Manager</th><th>State</th></tr>
</thead>
<tbody>
{#nn.NameJournalStatus}
<tr><td>{manager}</td><td>{stream}</td></tr>
{/nn.NameJournalStatus}
</tbody>
</table>
</div>
</div>

<hr/>
<div class="panel panel-primary">
<div class="panel-heading">NameNode Storage</div>
<div class="panel-body">
<table class="table" title="NameNode Storage">
<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>
{#nn.NameDirStatuses}
{#active}{#helper_dir_status type="Active"/}{/active}
{#failed}{#helper_dir_status type="Failed"/}{/failed}
{/nn.NameDirStatuses}
</table>
</div>
</div>
<hr/>

<div class="panel panel-primary">
<div class="panel-heading">Snapshot Summary</div>
<div class="panel-body">
{#fs.SnapshotStats}
<table class="table" title="Snapshot Summary">
<thead><tr><td><b>Snapshottable directories</b></td>
<td><b>Snapshotted directories</b></td></tr>
</thead>
<tbody>
<tr>
<td>{SnapshottableDirectories}</td>
<td>{Snapshots}</td>
</tr>
</tbody>
</table>
{/fs.SnapshotStats}
</div>
</div>
<hr/>

{#startup}
<div class="panel panel-primary">
<div class="panel-heading">Startup Progress</div>
<div class="panel-body">
<p>Elapsed Time: {elapsedTime|fmt_time}, Percent Complete: {percentComplete|fmt_percentage}</p>
<table class="table">
<thead>
<tr>
<th>Phase</th>
<th>Completion</th>
<th>Elapsed Time</th>
</tr>
</thead>
<tbody>
{#phases}
<tr class="phase">
<td class="startupdesc">{desc} {file} {size|fmt_bytes}</td>
<td>{percentComplete|fmt_percentage}</td>
<td>{elapsedTime|fmt_time}</td>
</tr>
{#steps root_file=file}
<tr class="step">
<td class="startupdesc">{stepDesc} {stepFile} {stepSize|fmt_bytes} ({count}/{total})</td>
<td>{percentComplete|fmt_percentage}</td>
<td></td>
</tr>
{/steps}
{/phases}
</table>
</div>
</div>
{/startup}

<hr/>
<div class="panel panel-primary">
<div class="panel-heading">Datanode Information</div>
<div class="panel-body">
<div class="panel panel-default" id="nodelist-operation">
<div class="panel-heading">Nodes in operation</div>
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>Node</th>
<th>Last contact</th>
<th>Admin State</th>
<th>Capacity</th>
<th>Used</th>
<th>Non DFS Used</th>
<th>Remaining</th>
<th>Blocks</th>
<th>Block pool used</th>
<th>Failed Volumes</th>
</tr>
</thead>
{#nn.LiveNodes}
<tr>
<td><a class="browse-dir-links" info-http-addr="{infoAddr}" info-https-addr="{infoSecureAddr}">{name}</a> ({xferaddr})</td>
<td>{lastContact}</td>
<td>{adminState}</td>
<td>{capacity|fmt_bytes}</td>
<td>{used|fmt_bytes}</td>
<td>{nonDfsUsedSpace|fmt_bytes}</td>
<td>{remaining|fmt_bytes}</td>
<td>{numBlocks}</td>
<td>{blockPoolUsed|fmt_bytes} ({blockPoolUsedPercent|fmt_percentage})</td>
<td>{volfails}</td>
</tr>
{/nn.LiveNodes}
{#nn.DeadNodes}
<tr class="danger">
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
<td>Dead{?decomissioned}, Decommissioned{/decomissioned}</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>-</td>
</tr>
{/nn.DeadNodes}
</table>
</div>
</div>
<div class="panel panel-default" id="nodelist-decom">
<div class="panel-heading">Nodes being decommissioned</div>
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>Node</th>
<th>Last contact</th>
<th>Under replicated blocks</th>
<th>Blocks with no live replicas</th>
<th>Under Replicated Blocks <br/>In files under construction</th>
</tr>
</thead>
{#nn.DecomNodes}
<tr>
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
<td>{underReplicatedBlocks}</td>
<td>{decommissionOnlyReplicas}</td>
<td>{underReplicateInOpenFiles}</td>
</tr>
{/nn.DecomNodes}
</table>
</div>
</div>
</div>
</div>
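As a rough illustration of how this template binds to the NameNode JMX beans, the fragment below renders one row of the Overview table with dust; the context values are invented, and dust.renderSource (available in the bundled dust-full build) is used only for brevity:

// Illustrative only: a trimmed-down context object and an inline render of one
// fragment from the template above.
var sample = {
  nn: {
    NNStarted: 'Wed Oct 16 00:00:00 UTC 2013',   // made-up values
    Version: '2.3.0-SNAPSHOT',
    ClusterId: 'CID-example',
    BlockPoolId: 'BP-example'
  }
};

var fragment = '{#nn}<tr><th>Version:</th><td>{Version}</td></tr>{/nn}';

dust.renderSource(fragment, sample, function (err, out) {
  // out === '<tr><th>Version:</th><td>2.3.0-SNAPSHOT</td></tr>'
  console.log(out);
});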
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (new file)
@@ -0,0 +1,43 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<link rel="stylesheet" type="text/css" href="//netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css" />
<link rel="stylesheet" type="text/css" href="/static/hadoop.css" />
<title>Namenode information</title>
</head>
<body>
<div class="container">
<div class="alert alert-danger" id="alert-panel" style="display:none">
<button type="button" class="close" onclick="$('#alert-panel').hide();">×</button>
<div class="alert-body" id="alert-panel-body"></div>
</div>
<div id="panel"></div>
</div>
<script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jquery/2.0.3/jquery.min.js">
</script><script type="text/javascript" src="//netdna.bootstrapcdn.com/bootstrap/3.0.0/js/bootstrap.min.js">
</script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">
</script><script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js">
</script><script type="text/javascript" src="dfs-dust.js">
</script><script type="text/javascript" src="dfshealth.js">
</script>
<hr />
<p><a href="http://hadoop.apache.org/core">Hadoop</a>, 2013.</p>
</body>
</html>
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js (new file)
@@ -0,0 +1,156 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
(function () {
  "use strict";

  var data = {};
  function generate_browse_dn_link(info_http_addr, info_https_addr) {
    var is_https = window.location.protocol === 'https:';
    var authority = is_https ? info_https_addr : info_http_addr;

    var nn_info_port = window.location.port;
    if (nn_info_port === "") {
      nn_info_port = is_https ? 443 : 80;
    }

    var l = '//' + authority + '/browseDirectory.jsp?dir=%2F&namenodeInfoPort=' +
      nn_info_port + '&nnaddr=' + data.nnstat.HostAndPort;
    return l;
  }

  function render() {
    var helpers = {
      'helper_fs_max_objects': function (chunk, ctx, bodies, params) {
        var o = ctx.current();
        if (o.MaxObjects > 0) {
          chunk.write('(' + Math.round((o.FilesTotal + o.BlockTotal) / o.MaxObjects * 100) * 100 + ')%');
        }
      },

      'helper_dir_status': function (chunk, ctx, bodies, params) {
        var j = ctx.current();
        for (var i in j) {
          chunk.write('<tr><td>' + i + '</td><td>' + j[i] + '</td><td>' + params.type + '</td></tr>');
        }
      }
    };

    var base = dust.makeBase(helpers);

    var TEMPLATES = [ { 'name': 'dfshealth', 'url': 'dfshealth.dust.html' } ];

    load_templates(dust, TEMPLATES, function() {
      dust.render('dfshealth', base.push(data), function(err, out) {

        $('#panel').append(out);

        $('#browse-dir-first').click(function () {
          var len = data.nn.LiveNodes.length;
          if (len < 1) {
            show_err_msg('Cannot browse the DFS since there are no live nodes available.');
            return false;
          }

          var dn = data.nn.LiveNodes[Math.floor(Math.random() * len)];
          window.location.href = generate_browse_dn_link(dn.infoAddr, dn.infoSecureAddr);
        });

        $('.browse-dir-links').click(function () {
          var http_addr = $(this).attr('info-http-addr'), https_addr = $(this).attr('info-https-addr');
          window.location.href = generate_browse_dn_link(http_addr, https_addr);
        });
      });
    }, function () {
      show_err_msg('Failed to load the page.');
    });
  }

  var BEANS = [
    {"name": "nn", "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"},
    {"name": "nnstat", "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
    {"name": "fs", "url": "/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState"},
    {"name": "mem", "url": "/jmx?qry=java.lang:type=Memory"},
    {"name": "startup", "url": "/startupProgress"}
  ];

  // Workarounds for the fact that JMXJsonServlet returns non-standard JSON strings
  function data_workaround(d) {
    function node_map_to_array(nodes) {
      var res = [];
      for (var n in nodes) {
        var p = nodes[n];
        p.name = n;
        res.push(p);
      }
      return res;
    }

    function startup_progress_workaround(r) {
      function rename_property(o, s, d) {
        if (o[s] !== undefined) {
          o[d] = o[s];
          delete o[s];
        }
      }
      r.percentComplete *= 100;
      $.each(r.phases, function (idx, p) {
        p.percentComplete *= 100;
        $.each(p.steps, function (idx2, s) {
          s.percentComplete *= 100;
          // dust.js is confused by these optional keys in nested
          // structure, rename them
          rename_property(s, "desc", "stepDesc");
          rename_property(s, "file", "stepFile");
          rename_property(s, "size", "stepSize");
        });
      });
      return r;
    }

    d.nn.JournalTransactionInfo = JSON.parse(d.nn.JournalTransactionInfo);
    d.nn.NameJournalStatus = JSON.parse(d.nn.NameJournalStatus);
    d.nn.NameDirStatuses = JSON.parse(d.nn.NameDirStatuses);
    d.nn.NodeUsage = JSON.parse(d.nn.NodeUsage);
    d.nn.LiveNodes = node_map_to_array(JSON.parse(d.nn.LiveNodes));
    d.nn.DeadNodes = node_map_to_array(JSON.parse(d.nn.DeadNodes));
    d.nn.DecomNodes = node_map_to_array(JSON.parse(d.nn.DecomNodes));
    d.nn.CorruptFiles = JSON.parse(d.nn.CorruptFiles);

    d.fs.SnapshotStats = JSON.parse(d.fs.SnapshotStats);
    d.startup = startup_progress_workaround(d.startup);
    return d;
  }

  function show_err_msg(msg) {
    $('#alert-panel-body').html(msg);
    $('#alert-panel').show();
  }

  load_json(
    BEANS,
    function(d) {
      for (var k in d) {
        data[k] = k === "startup" ? d[k] : d[k].beans[0];
      }
      data = data_workaround(data);
      render();
    },
    function (url, jqxhr, text, err) {
      show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
    });
})();
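For context on the data_workaround step above: several NameNodeInfo attributes come back from /jmx with JSON embedded inside strings, roughly as in the sketch below (the hostname and numbers are invented), which is why the script re-parses them and flattens the node maps into arrays before rendering:

// Shape of the /jmx payload this page consumes (values invented for illustration).
var jmxResponse = {
  beans: [{
    name: 'Hadoop:service=NameNode,name=NameNodeInfo',
    Total: 123456789,
    // Note: LiveNodes is a *string* containing JSON, not a nested object.
    LiveNodes: '{"dn1.example.com":{"infoAddr":"dn1.example.com:50075","usedSpace":1024}}'
  }]
};

var info = jmxResponse.beans[0];
var liveNodes = JSON.parse(info.LiveNodes);          // string -> object map
// node_map_to_array() then turns the map into
// [{name: "dn1.example.com", infoAddr: "dn1.example.com:50075", usedSpace: 1024}]
// so the {#nn.LiveNodes} section in dfshealth.dust.html can iterate over it.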
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-full-2.0.0.min.js (vendored, new file, 1 line)
File diff suppressed because one or more lines are too long

hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-helpers-1.1.1.min.js (vendored, new file, 8 lines)
@@ -0,0 +1,8 @@
(function(k){function n(b){b=b.f();return"object"===typeof b&&!0===b.h}function p(b,c){return"function"===typeof c?c.toString():c}function l(b,c,d,a,e){a=a||{};var m=d.a,g,f,l=a.d||"";if("undefined"!==typeof a.key)g=k.b.c(a.key,b,c);else if(n(c))g=c.f().i,c.f().g&&(e=function(){return!1});else return h.log("No key specified for filter in:"+l+" helper "),b;f=k.b.c(a.value,b,c);if(e(q(f,a.type,c),q(g,a.type,c))){n(c)&&(c.f().g=!0);if(m)return b.e(m,c);h.log("Missing body block in the "+l+" helper ")}else if(d["else"])return b.e(d["else"],
c);return b}function q(b,c,d){if(b)switch(c||typeof b){case "number":return+b;case "string":return String(b);case "boolean":return Boolean("false"===b?!1:b);case "date":return new Date(b);case "context":return d.get(b)}return b}var h="undefined"!==typeof console?console:{log:function(){}};k.b={tap:function(b,c,d){var a=b;"function"===typeof b&&(!0===b.l?a=b():(a="",c.c(function(b){a+=b;return""}).e(b,d).p(),""===a&&(a=!1)));return a},sep:function(b,c,d){return c.stack.index===c.stack.m-1?b:d.a?d.a(b,
c):b},idx:function(b,c,d){return d.a?d.a(b,c.push(c.stack.index)):b},contextDump:function(b,c,d,a){a=a||{};d=a.o||"output";a=a.key||"current";d=k.b.c(d,b,c);a=k.b.c(a,b,c);c="full"===a?JSON.stringify(c.stack,p,2):JSON.stringify(c.stack.head,p,2);return"console"===d?(h.log(c),b):b.write(c)},"if":function(b,c,d,a){var e=d.a,m=d["else"];if(a&&a.j){a=a.j;a=k.b.c(a,b,c);if(eval(a)){if(e)return b.e(d.a,c);h.log("Missing body block in the if helper!");return b}if(m)return b.e(d["else"],c)}else h.log("No condition given in the if helper!");
return b},math:function(b,c,d,a){if(a&&"undefined"!==typeof a.key&&a.method){var e=a.key,m=a.method,g=a.n;a=a.round;var f=null,e=k.b.c(e,b,c),g=k.b.c(g,b,c);switch(m){case "mod":0!==g&&-0!==g||h.log("operand for divide operation is 0/-0: expect Nan!");f=parseFloat(e)%parseFloat(g);break;case "add":f=parseFloat(e)+parseFloat(g);break;case "subtract":f=parseFloat(e)-parseFloat(g);break;case "multiply":f=parseFloat(e)*parseFloat(g);break;case "divide":0!==g&&-0!==g||h.log("operand for divide operation is 0/-0: expect Nan/Infinity!");
f=parseFloat(e)/parseFloat(g);break;case "ceil":f=Math.ceil(parseFloat(e));break;case "floor":f=Math.floor(parseFloat(e));break;case "round":f=Math.round(parseFloat(e));break;case "abs":f=Math.abs(parseFloat(e));break;default:h.log("method passed is not supported")}if(null!==f)return a&&(f=Math.round(f)),d&&d.a?b.e(d.a,c.push({h:!0,g:!1,i:f})):b.write(f)}else h.log("Key is a required parameter for math helper along with method/operand!");return b},select:function(b,c,d,a){var e=d.a;if(a&&"undefined"!==
typeof a.key){a=k.b.c(a.key,b,c);if(e)return b.e(d.a,c.push({h:!0,g:!1,i:a}));h.log("Missing body block in the select helper ")}else h.log("No key given in the select helper!");return b},eq:function(b,c,d,a){a&&(a.d="eq");return l(b,c,d,a,function(a,b){return b===a})},ne:function(b,c,d,a){return a?(a.d="ne",l(b,c,d,a,function(a,b){return b!==a})):b},lt:function(b,c,d,a){if(a)return a.d="lt",l(b,c,d,a,function(a,b){return b<a})},lte:function(b,c,d,a){return a?(a.d="lte",l(b,c,d,a,function(a,b){return b<=
a})):b},gt:function(b,c,d,a){return a?(a.d="gt",l(b,c,d,a,function(a,b){return b>a})):b},gte:function(b,c,d,a){return a?(a.d="gte",l(b,c,d,a,function(a,b){return b>=a})):b},"default":function(b,c,d,a){a&&(a.d="default");return l(b,c,d,a,function(){return!0})},size:function(b,c,d,a){c=0;var e;a=a||{};if((a=a.key)&&!0!==a)if(k.isArray(a))c=a.length;else if(!isNaN(parseFloat(a))&&isFinite(a))c=a;else if("object"===typeof a)for(e in c=0,a)Object.hasOwnProperty.call(a,e)&&c++;else c=(a+"").length;else c=
0;return b.write(c)}}})("undefined"!==typeof exports?module.k=require("dustjs-linkedin"):dust);