diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 028252460b..77555f800d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -814,7 +814,7 @@ READ3Response read(XDR xdr, SecurityHandler securityHandler,
       attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
           iug);
       if (readCount < count) {
-        LOG.info("Partical read. Asked offset: " + offset + " count: " + count
+        LOG.info("Partial read. Asked offset: " + offset + " count: " + count
             + " and read back: " + readCount + " file size: "
             + attrs.getSize());
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 80d373644e..f869008b3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -201,7 +201,7 @@ public void run() {
               + " " + bpid + " on volume " + v + ": " + timeTaken + "ms");
         } catch (ClosedChannelException e) {
           FsDatasetImpl.LOG.info("The volume " + v + " is closed while " +
-              "addng replicas, ignored.");
+              "adding replicas, ignored.");
         } catch (IOException ioe) {
           FsDatasetImpl.LOG.info("Caught exception while adding replicas " +
               "from " + v + ". Will throw later.", ioe);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 340d491b36..c913c23706 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -326,7 +326,7 @@ public int getWriteHoldCount() {
     int threshold = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
         DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
-    NameNode.LOG.info("Caching file names occuring more than " + threshold
+    NameNode.LOG.info("Caching file names occurring more than " + threshold
         + " times");
     nameCache = new NameCache(threshold);
     namesystem = ns;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index ac7d1468c5..5135838d8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -245,7 +245,7 @@ void attemptRestoreRemovedStorage() {
     /* We don't want more than one thread trying to restore at a time */
    synchronized (this.restorationLock) {
      LOG.info("NNStorage.attemptRestoreRemovedStorage: check removed(failed) "+
-         "storarge. removedStorages size = " + removedStorageDirs.size());
+         "storage. removedStorages size = " + removedStorageDirs.size());
      for (StorageDirectory sd : this.removedStorageDirs) {
        File root = sd.getRoot();
        LOG.info("currently disabled dir " + root.getAbsolutePath() +