From 9659d0720dc1a6ec510ca7362bfc9f8bea7811e1 Mon Sep 17 00:00:00 2001 From: Zhaohui Wang <32935220+wzhallright@users.noreply.github.com> Date: Mon, 3 Jul 2023 23:41:37 +0800 Subject: [PATCH] HDFS-17065. Fix typos in hadoop-hdfs-project (#5796) --- .../hadoop/hdfs/server/federation/metrics/RBFMetrics.java | 2 +- .../org/apache/hadoop/hdfs/server/datanode/DataXceiver.java | 4 ++-- .../apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java | 2 +- .../hadoop/hdfs/TestClientProtocolForPipelineRecovery.java | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java index 41b58c4a16..0a28688c91 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/RBFMetrics.java @@ -874,7 +874,7 @@ private BigInteger getNameserviceAggregatedBigInt( /** * Fetches the most active namenode memberships for all known nameservices. - * The fetched membership may not or may not be active. Excludes expired + * The fetched membership may or may not be active. Excludes expired * memberships. * @throws IOException if the query could not be performed. * @return List of the most active NNs from each known nameservice. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index ab706fb173..d8c55a54d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -186,8 +186,8 @@ public void sendOOB() throws IOException, InterruptedException { if (br == null) { return; } - // This doesn't need to be in a critical section. Althogh the client - // can resue the connection to issue a different request, trying sending + // This doesn't need to be in a critical section. Although the client + // can reuse the connection to issue a different request, trying to send // an OOB through the recently closed block receiver is harmless. LOG.info("Sending OOB to peer: {}", peer); br.sendOOB(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java index 7b7f4a0f9c..0d3cb89ac0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java @@ -624,7 +624,7 @@ private static FileState analyzeFileState( // timeout, or because of an HA failover. In that case, we know // by the fact that the client is re-issuing the RPC that it // never began to write to the old block. Hence it is safe to - // to return the existing block. + // return the existing block. 
// 3) This is an entirely bogus request/bug -- we should error out // rather than potentially appending a new block with an empty // one in the middle, etc diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index 70aa9d7d63..9962e8c9de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -327,7 +327,7 @@ public void testPipelineRecoveryOnOOB() throws Exception { // Wait long enough to receive an OOB ack before closing the file. GenericTestUtils.waitForThreadTermination( "Async datanode shutdown thread", 100, 10000); - // Retart the datanode + // Restart the datanode cluster.restartDataNode(0, true); // The following forces a data packet and end of block packets to be sent. out.close();