From affa2ee36c81c5146fbfea0f750c411c5350fe6a Mon Sep 17 00:00:00 2001
From: zeekling
Date: Fri, 28 Jun 2024 21:27:15 +0800
Subject: [PATCH] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=B3=A8=E9=87=8A?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 1 +
 .../hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 5 ++++-
 .../apache/hadoop/hdfs/server/namenode/FSImageFormat.java   | 1 +
 .../hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java  | 2 ++
 4 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e3f4bfcde8..8063d74518 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1121,6 +1121,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT =
       HdfsClientConfigKeys
           .DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT;
+  // Keytab file that the NameNode starts with.
   public static final String  DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
   public static final String  DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       HdfsClientConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 07381fc696..4ff2987786 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1772,7 +1772,10 @@ private void addCacheCommands(String blockPoolId, DatanodeDescriptor nodeinfo,
     }
   }
 
-  /** Handle heartbeat from datanodes. */
+  /**
+   * Handle heartbeat from datanodes.
+   * NOTE: the heartbeat handler is responsible for far more than liveness tracking.
+   */
   public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
       StorageReport[] reports, final String blockPoolId,
       long cacheCapacity, long cacheUsed, int xceiverCount,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 478cec55d0..5441fc3706 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -225,6 +225,7 @@ public void load(File file, boolean requireSameLayoutVersion)
       FSImageFormatProtobuf.Loader loader = new FSImageFormatProtobuf.Loader(
           conf, fsn, requireSameLayoutVersion);
       impl = loader;
+      // Start loading the fsimage.
       loader.load(file);
     } else {
       Loader loader = new Loader(conf, fsn);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 183449f574..d084b10bd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -360,6 +360,7 @@ private void loadInternal(RandomAccessFile raFile, FileInputStream fin)
     if (!FSImageUtil.checkFileFormat(raFile)) {
       throw new IOException("Unrecognized file format");
     }
+    // Load the FileSummary of the fsimage.
     FileSummary summary = FSImageUtil.loadSummary(raFile);
     if (requireSameLayoutVersion && summary.getLayoutVersion() !=
         HdfsServerConstants.NAMENODE_LAYOUT_VERSION) {
@@ -399,6 +400,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
      * a particular step to be started for once.
      */
     Step currentStep = null;
+    // Whether parallel loading is enabled.
     boolean loadInParallel = enableParallelSaveAndLoad(conf);
 
     ExecutorService executorService = null;