From 3ca66e24c879e38b8664fb144b427d6eb02753cc Mon Sep 17 00:00:00 2001
From: zeekling
Date: Tue, 7 Nov 2023 15:48:59 +0000
Subject: [PATCH] =?UTF-8?q?namenode=E5=90=AF=E5=8A=A8=E6=BA=90=E7=A0=81?=
 =?UTF-8?q?=E5=88=86=E6=9E=90=20(#12)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Reviewed-on: https://git.zeekling.cn/big-data/hadoop_book/pulls/12
---
 hdfs/nameNode启动过程.md | 72 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 67 insertions(+), 5 deletions(-)

diff --git a/hdfs/nameNode启动过程.md b/hdfs/nameNode启动过程.md
index cae11ca..023bbd2 100644
--- a/hdfs/nameNode启动过程.md
+++ b/hdfs/nameNode启动过程.md
@@ -160,11 +160,6 @@ private void startCommonServices(Configuration conf) throws IOException {
       LOG.warn("ServicePlugin " + p + " could not be started", t);
     }
   }
-  LOG.info(getRole() + " RPC up at: " + getNameNodeAddress());
-  if (rpcServer.getServiceRpcAddress() != null) {
-    LOG.info(getRole() + " service RPC up at: "
-        + rpcServer.getServiceRpcAddress());
-  }
 }
 ```

@@ -233,6 +228,73 @@ public void activate(Configuration conf, long blockTotal) {
 ```

### datanodeManager.activate

Starts the DatanodeManager: it first activates the DatanodeAdminManager and then starts the HeartbeatManager.

```java
void activate(final Configuration conf) {
  datanodeAdminManager.activate(conf);
  // Start the heartbeat manager.
  heartbeatManager.activate();
}
```

### datanodeAdminManager.activate

Reads the decommission-related configuration, instantiates the configured decommission monitor via reflection, and schedules it on the executor with a fixed delay.

```java
void activate(Configuration conf) {
  // Interval between two runs of the decommission monitor.
  final int intervalSecs = (int) conf.getTimeDuration(
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT,
      TimeUnit.SECONDS);
  checkArgument(intervalSecs >= 0, "Cannot set a negative "
      + "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);

  // Upper bound on the number of blocks scanned per interval.
  int blocksPerInterval = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);

  // The old nodes-per-interval key is deprecated and ignored.
  final String deprecatedKey = "dfs.namenode.decommission.nodes.per.interval";
  final String strNodes = conf.get(deprecatedKey);
  if (strNodes != null) {
    LOG.warn("Deprecated configuration key {} will be ignored.", deprecatedKey);
    LOG.warn("Please update your configuration to use {} instead.",
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
  }

  checkArgument(blocksPerInterval > 0,
      "Must set a positive value for "
      + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);

  // Maximum number of decommissioning/maintenance nodes tracked at once.
  final int maxConcurrentTrackedNodes = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
      DFSConfigKeys
          .DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
  checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative "
      + "value for "
      + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);

  // Instantiate the configured monitor implementation and schedule it to
  // run with a fixed delay.
  Class cls = null;
  try {
    cls = conf.getClass(
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MONITOR_CLASS,
        DatanodeAdminDefaultMonitor.class);
    monitor =
        (DatanodeAdminMonitorInterface) ReflectionUtils.newInstance(cls, conf);
    monitor.setBlockManager(blockManager);
    monitor.setNameSystem(namesystem);
    monitor.setDatanodeAdminManager(this);
  } catch (Exception e) {
    throw new RuntimeException("Unable to create the Decommission monitor "
        + "from " + cls, e);
  }
  executor.scheduleWithFixedDelay(monitor, intervalSecs, intervalSecs,
      TimeUnit.SECONDS);
}
```

For details, see:

![pic](https://pan.zeekling.cn/zeekling/hadoop/nn_0010.png)
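
The core of `DatanodeAdminManager.activate` above is a simple scheduling pattern: the decommission monitor is just a `Runnable` submitted to a `ScheduledExecutorService` with `scheduleWithFixedDelay`. The following is a minimal sketch of that pattern outside of Hadoop; the `PeriodicMonitorSketch`/`Monitor` names and the interval values are illustrative placeholders, not HDFS code.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Minimal sketch of the scheduling pattern used by
 * DatanodeAdminManager.activate(): a Runnable "monitor" submitted to a
 * ScheduledExecutorService with a fixed delay between runs.
 * The Monitor class and the intervals below are illustrative only.
 */
public class PeriodicMonitorSketch {

  /** Hypothetical stand-in for the decommission monitor implementation. */
  static class Monitor implements Runnable {
    @Override
    public void run() {
      // A real monitor would scan tracked datanodes here and advance their
      // decommissioning / maintenance state.
      System.out.println("monitor tick at " + System.currentTimeMillis());
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // In HDFS the interval comes from dfs.namenode.decommission.interval;
    // here it is shortened to one second so the demo produces output quickly.
    final int intervalSecs = 1;

    ScheduledExecutorService executor =
        Executors.newSingleThreadScheduledExecutor();

    // Fixed *delay*: each run starts intervalSecs after the previous run
    // finishes, so slow scans never overlap.
    executor.scheduleWithFixedDelay(new Monitor(), intervalSecs, intervalSecs,
        TimeUnit.SECONDS);

    // Let a few ticks happen, then stop the executor so the JVM can exit.
    Thread.sleep(5000);
    executor.shutdown();
  }
}
```

Using `scheduleWithFixedDelay` rather than `scheduleAtFixedRate` means the next scan only starts a full interval after the previous one has finished, so a slow scan over many tracked datanodes cannot pile up overlapping runs.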