HDFS-17559. Fix the uuid as null in NameNodeMXBean (#6906). Contributed by Haiyang Hu.

Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
Author: huhaiyang
Date:   2024-07-06 15:46:25 +08:00 (committed by GitHub)
Parent: 25e28b41cc
Commit: 5a8f70a72e
2 changed files with 69 additions and 4 deletions
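
Why the guard is needed: the per-node info maps in FSNamesystem are assembled with an ImmutableMap builder, which rejects null values, so a DataNode whose uuid is null (for example, a dead node known only from the include file) made serialization of the whole MXBean attribute fail. A minimal sketch of the failure and of the Optional guard this patch applies, assuming plain Guava's ImmutableMap (Hadoop itself uses the shaded copy under org.apache.hadoop.thirdparty):

// Sketch only, not part of the patch: ImmutableMap.Builder#put throws
// NullPointerException on a null value, which is what broke the MXBean JSON.
import java.util.Optional;
import com.google.common.collect.ImmutableMap;

public class UuidGuardSketch {
  public static void main(String[] args) {
    String uuid = null;  // e.g. a node listed only in the include file

    // Pre-patch behaviour: this line would throw NullPointerException.
    // ImmutableMap.<String, Object>builder().put("uuid", uuid).build();

    // Patched behaviour: null is mapped to an empty string first.
    ImmutableMap<String, Object> info = ImmutableMap.<String, Object>builder()
        .put("uuid", Optional.ofNullable(uuid).orElse(""))
        .build();
    System.out.println(info);  // prints {uuid=}
  }
}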

--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -97,6 +97,7 @@
 import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry;
 import java.nio.charset.StandardCharsets;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.text.CaseUtils;
@@ -6674,7 +6675,7 @@ public String getLiveNodes() {
           .put("infoSecureAddr", node.getInfoSecureAddr())
           .put("xferaddr", node.getXferAddr())
           .put("location", node.getNetworkLocation())
-          .put("uuid", node.getDatanodeUuid())
+          .put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""))
           .put("lastContact", getLastContact(node))
           .put("usedSpace", getDfsUsed(node))
           .put("adminState", node.getAdminState().toString())
@@ -6728,7 +6729,7 @@ public String getDeadNodes() {
           .put("adminState", node.getAdminState().toString())
           .put("xferaddr", node.getXferAddr())
           .put("location", node.getNetworkLocation())
-          .put("uuid", node.getDatanodeUuid())
+          .put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""))
           .build();
       info.put(node.getXferAddrWithHostname(), innerinfo);
     }
@@ -6751,7 +6752,7 @@ public String getDecomNodes() {
           .<String, Object> builder()
           .put("xferaddr", node.getXferAddr())
           .put("location", node.getNetworkLocation())
-          .put("uuid", node.getDatanodeUuid())
+          .put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""))
           .put("underReplicatedBlocks",
               node.getLeavingServiceStatus().getUnderReplicatedBlocks())
           .put("decommissionOnlyReplicas",
@@ -6782,7 +6783,7 @@ public String getEnteringMaintenanceNodes() {
           .<String, Object> builder()
           .put("xferaddr", node.getXferAddr())
           .put("location", node.getNetworkLocation())
-          .put("uuid", node.getDatanodeUuid())
+          .put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""))
           .put("underReplicatedBlocks",
               node.getLeavingServiceStatus().getUnderReplicatedBlocks())
           .put("maintenanceOnlyReplicas",

--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdfs.server.namenode;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import java.util.Optional;
 import java.util.function.Supplier;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -1132,6 +1136,66 @@ public void testTotalBlocksMetrics() throws Exception {
     }
   }
 
+  @SuppressWarnings({ "unchecked" })
+  @Test
+  public void testDeadNodesInNameNodeMXBean() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+    MiniDFSCluster cluster = null;
+    HostsFileWriter hostsFileWriter = new HostsFileWriter();
+    hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
+          .numDataNodes(3).build();
+      cluster.waitActive();
+      FSNamesystem fsn = cluster.getNameNode().namesystem;
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=NameNode,name=NameNodeInfo");
+      List<String> hosts = new ArrayList<>();
+      for (DataNode dn : cluster.getDataNodes()) {
+        hosts.add(dn.getDisplayName());
+      }
+      DatanodeDescriptor mockNode = new DatanodeDescriptor(
+          new DatanodeID("127.0.0.2", "127.0.0.2", "",
+              5000, 5001, 5002, 5003));
+      assertEquals("",
+          Optional.ofNullable(mockNode.getDatanodeUuid()).orElse(""));
+      hosts.add(mockNode.getXferAddrWithHostname());
+      hostsFileWriter.initIncludeHosts(hosts.toArray(
+          new String[hosts.size()]));
+      fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+      DatanodeManager dm = cluster.getNameNode().getNamesystem().
+          getBlockManager().getDatanodeManager();
+      LOG.info("Get all include nodes: {}",
+          dm.getHostConfigManager().getIncludes());
+      // get attribute DeadNodes
+      String deadNodeInfo = (String) (mbs.getAttribute(mxbeanName,
+          "DeadNodes"));
+      assertEquals(fsn.getDeadNodes(), deadNodeInfo);
+      LOG.info("Get deadNode info: {}", deadNodeInfo);
+      Map<String, Map<String, Object>> deadNodes =
+          (Map<String, Map<String, Object>>) JSON.parse(deadNodeInfo);
+      assertEquals(1, deadNodes.size());
+      for (Map<String, Object> deadNode : deadNodes.values()) {
+        assertTrue(deadNode.containsKey("lastContact"));
+        assertTrue(deadNode.containsKey("adminState"));
+        assertTrue(deadNode.containsKey("xferaddr"));
+        assertEquals("", deadNode.get("uuid"));
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      hostsFileWriter.cleanup();
+    }
+  }
+
   void verifyTotalBlocksMetrics(long expectedTotalReplicatedBlocks,
       long expectedTotalECBlockGroups, long actualTotalBlocks)
       throws Exception {
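
One step the test leans on without stating it: passing an empty uuid string to DatanodeID yields a node whose getDatanodeUuid() returns null (the constructor appears to normalize an empty or null uuid to null; the assertEquals on the mock node checks this indirectly). A compact sketch under that assumption:

// Sketch: why the mock node has no uuid. The "" passed to DatanodeID is
// assumed to be normalized to null by the constructor, so without the
// FSNamesystem guard this null reached the ImmutableMap builder.
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

public class MockNodeUuidSketch {
  public static void main(String[] args) {
    DatanodeDescriptor node = new DatanodeDescriptor(
        new DatanodeID("127.0.0.2", "127.0.0.2", "",
            5000, 5001, 5002, 5003));
    System.out.println(node.getDatanodeUuid());  // expected: null
  }
}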