commit 2ac640ec75
parent 17e4365756
Author: Haohui Mai
Date:   2014-08-08 04:27:47 +00:00

    HDFS-6722. Display readable last contact time for dead nodes on NN webUI. Contributed by Ming Ma.

    git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1616669 13f79535-47bb-0310-9956-ffa450edef68

4 changed files with 56 additions and 5 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -376,6 +376,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6740. Make FSDataset support adding data volumes dynamically. (Lei
     Xu via atm)
 
+    HDFS-6722. Display readable last contact time for dead nodes on NN webUI.
+    (Ming Ma via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html

@@ -281,7 +281,7 @@
 {#DeadNodes}
 <tr class="danger">
   <td>{name} ({xferaddr})</td>
-  <td>{lastContact}</td>
+  <td>{#helper_lastcontact_tostring value="{lastContact}"/}</td>
   <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
   <td>-</td>
   <td>-</td>
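
The replaced cell previously printed the raw lastContact value, which the NameNode reports as whole seconds since the DataNode's last heartbeat; the new helper call turns that counter into an absolute timestamp. The underlying arithmetic, as a minimal standalone sketch (the function name and sample value here are illustrative, not part of the patch):

// lastContact counts seconds since the last heartbeat, so the wall-clock
// time of that heartbeat is the current time minus lastContact seconds.
function lastContactToString(lastContactSeconds) {
  return '' + new Date(Date.now() - 1000 * Number(lastContactSeconds));
}

// A node last heard from two minutes ago:
console.log(lastContactToString(120));
// prints something like "Fri Aug 08 2014 04:28:00 GMT+0000 (UTC)"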

hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js

@@ -139,6 +139,14 @@
   }
 
   function load_datanode_info() {
+    var HELPERS = {
+      'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
+        var value = dust.helpers.tap(params.value, chunk, ctx);
+        return chunk.write('' + new Date(Date.now()-1000*Number(value)));
+      }
+    };
+
     function workaround(r) {
       function node_map_to_array(nodes) {
         var res = [];
@@ -160,7 +168,8 @@
       '/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
       guard_with_startup_progress(function (resp) {
         var data = workaround(resp.beans[0]);
-        dust.render('datanode-info', data, function(err, out) {
+        var base = dust.makeBase(HELPERS);
+        dust.render('datanode-info', base.push(data), function(err, out) {
           $('#tab-datanode').html(out);
           $('#ui-tabs a[href="#tab-datanode"]').tab('show');
         });
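
Why makeBase/push rather than a global helper: dust.makeBase builds a base context whose properties stay visible to the template no matter what data context is pushed on top, so the helper travels with this one render call only. A self-contained sketch of the same wiring, assuming the dustjs-helpers package (which bundles the dust core and dust.helpers.tap); the template name and markup below are made up for illustration:

var dust = require('dustjs-helpers');

// Compile and register a one-cell template that invokes a context helper
// the same way the Dead Nodes row does.
dust.loadSource(dust.compile(
    '<td>{#helper_lastcontact_tostring value="{lastContact}"/}</td>',
    'dead-row'));

var HELPERS = {
  'helper_lastcontact_tostring': function (chunk, ctx, bodies, params) {
    // tap() resolves the "{lastContact}" reference against the current context.
    var value = dust.helpers.tap(params.value, chunk, ctx);
    return chunk.write('' + new Date(Date.now() - 1000 * Number(value)));
  }
};

// The helpers ride in the base context; the data object is pushed on top.
dust.render('dead-row', dust.makeBase(HELPERS).push({ lastContact: 120 }),
    function (err, out) { console.log(out); });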

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -30,9 +30,13 @@
 import javax.management.ObjectName;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.util.VersionInfo;
@@ -58,11 +62,14 @@ public class TestNameNodeMXBean {
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
         NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
     MiniDFSCluster cluster = null;
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       cluster.waitActive();
 
       FSNamesystem fsn = cluster.getNameNode().namesystem;
@@ -70,6 +77,29 @@ public void testNameNodeMXBeanInfo() throws Exception {
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanName = new ObjectName(
           "Hadoop:service=NameNode,name=NameNodeInfo");
+
+      // Define include file to generate deadNodes metrics
+      FileSystem localFileSys = FileSystem.getLocal(conf);
+      Path workingDir = localFileSys.getWorkingDirectory();
+      Path dir = new Path(workingDir,
+          "build/test/data/temp/TestNameNodeMXBean");
+      Path includeFile = new Path(dir, "include");
+      assertTrue(localFileSys.mkdirs(dir));
+      StringBuilder includeHosts = new StringBuilder();
+      for(DataNode dn : cluster.getDataNodes()) {
+        includeHosts.append(dn.getDisplayName()).append("\n");
+      }
+      DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
+      conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+      fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+      cluster.stopDataNode(0);
+      while (fsn.getNumDatanodesInService() != 2) {
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {}
+      }
+
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       assertEquals(fsn.getClusterId(), clusterId);
@@ -121,6 +151,15 @@ public void testNameNodeMXBeanInfo() throws Exception {
       String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
           "DeadNodes"));
       assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+      Map<String, Map<String, Object>> deadNodes =
+          (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
+      assertTrue(deadNodes.size() > 0);
+      for (Map<String, Object> deadNode : deadNodes.values()) {
+        assertTrue(deadNode.containsKey("lastContact"));
+        assertTrue(deadNode.containsKey("decommissioned"));
+        assertTrue(deadNode.containsKey("xferaddr"));
+      }
+
       // get attribute NodeUsage
       String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
           "NodeUsage"));
@@ -181,7 +220,7 @@ public void testNameNodeMXBeanInfo() throws Exception {
       assertEquals(1, statusMap.get("active").size());
       assertEquals(1, statusMap.get("failed").size());
       assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
-      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
-          cluster.getDataNodes().size(),
+      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+          cluster.getDataNodes().size(),
           mbs.getAttribute(mxbeanName, "CacheCapacity"));
     } finally {
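
The same data the test asserts on is what the web UI consumes over HTTP: the /jmx servlet serves the bean as JSON, and DeadNodes is itself a JSON-encoded string mapping each node name to its last-contact details. A hedged browser-side sketch with jQuery, mirroring the test's assertions; it assumes it runs on a page served by the NameNode web UI (as dfshealth.html is), so the relative /jmx URL resolves and the response is parsed as JSON:

// Fetch the NameNodeInfo bean and list dead nodes. DeadNodes arrives as a
// JSON string inside the JMX JSON, so it is parsed a second time before use.
$.get('/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo', function (resp) {
  var deadNodes = JSON.parse(resp.beans[0].DeadNodes);
  Object.keys(deadNodes).forEach(function (name) {
    var n = deadNodes[name];
    console.log(name, n.xferaddr,
        n.decommissioned ? 'Decommissioned' : 'Dead',
        'last contact: ' + new Date(Date.now() - 1000 * n.lastContact));
  });
});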