diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
index f90939acab..aeed5e678a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
@@ -25,6 +25,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
@@ -476,6 +477,7 @@ private String getNodesImpl(final DatanodeReportType type) {
       innerinfo.put("infoSecureAddr", node.getInfoSecureAddr());
       innerinfo.put("xferaddr", node.getXferAddr());
       innerinfo.put("location", node.getNetworkLocation());
+      innerinfo.put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""));
       innerinfo.put("lastContact", getLastContact(node));
       innerinfo.put("usedSpace", node.getDfsUsed());
       innerinfo.put("adminState", node.getAdminState().toString());
@@ -492,6 +494,7 @@ private String getNodesImpl(final DatanodeReportType type) {
       innerinfo.put("volfails", -1); // node.getVolumeFailures()
       innerinfo.put("blockPoolUsedPercentStdDev",
           Util.getBlockPoolUsedPercentStdDev(storageReports));
+      innerinfo.put("lastBlockReport", getLastBlockReport(node));
       info.put(node.getXferAddrWithHostname(),
           Collections.unmodifiableMap(innerinfo));
     }
@@ -795,6 +798,10 @@ private long getLastContact(DatanodeInfo node) {
     return (now() - node.getLastUpdate()) / 1000;
   }
 
+  private long getLastBlockReport(DatanodeInfo node) {
+    return (now() - node.getLastBlockReportTime()) / 60000;
+  }
+
   /////////////////////////////////////////////////////////
   // NameNodeStatusMXBean
   /////////////////////////////////////////////////////////
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index c84dd2ceb2..7edb549233 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -135,6 +135,8 @@
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
+
+import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.After;
@@ -1880,6 +1882,22 @@ public void testNamenodeMetrics() throws Exception {
     JSONObject jsonObject = new JSONObject(jsonString0);
     assertEquals(NUM_SUBCLUSTERS * NUM_DNS, jsonObject.names().length());
 
+    JSONObject jsonObjectNn =
+        new JSONObject(cluster.getRandomNamenode().getNamenode().getNamesystem().getLiveNodes());
+    // DN report by NN and router should be the same
+    String randomDn = (String) jsonObjectNn.names().get(0);
+    JSONObject randomReportNn = jsonObjectNn.getJSONObject(randomDn);
+    JSONObject randomReportRouter = jsonObject.getJSONObject(randomDn);
+    JSONArray keys = randomReportNn.names();
+    for (int i = 0; i < keys.length(); i++) {
+      String key = keys.getString(i);
+      // Skip the 2 keys that always return -1
+      if (key.equals("blockScheduled") || key.equals("volfails")) {
+        continue;
+      }
+      assertEquals(randomReportRouter.get(key), randomReportNn.get(key));
+    }
+
     // We should be caching this information
     String jsonString1 = metrics.getLiveNodes();
     assertEquals(jsonString0, jsonString1);