diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 5783f9074a..aabd6fd231 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1052,7 +1052,7 @@ void actualGetFromOneDataNode(final DNAddrPair datanode, LocatedBlock block,
           reader.getNetworkDistance(), nread);
       if (nread != len) {
         throw new IOException("truncated return from reader.read(): " +
-            "excpected " + len + ", got " + nread);
+            "expected " + len + ", got " + nread);
       }
       DFSClientFaultInjector.get().readFromDatanodeDelay();
       return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08fc95..db064e4019 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -57,11 +57,11 @@ public long retrieve(long record) {
   public long combine(long value, long record) {
     if (value < MIN) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
+          "Illegal value: " + NAME + " = " + value + " < MIN = " + MIN);
     }
     if (value > MAX) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
+          "Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
     }
     return (record & ~MASK) | (value << OFFSET);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
index e3759ce787..fdca64e5db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
@@ -38,7 +38,7 @@ public static enum ERROR implements XException.ERROR {
     S04("Service [{0}] does not implement declared interface [{1}]"),
     S05("[{0}] is not a file"),
     S06("Could not load file [{0}], {1}"),
-    S07("Could not instanciate service class [{0}], {1}"),
+    S07("Could not instantiate service class [{0}], {1}"),
     S08("Could not load service classes, {0}"),
     S09("Could not set service [{0}] programmatically -server shutting down-, {1}"),
     S10("Service [{0}] requires service [{1}]"),
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
index e26fac5ab8..2d8f676f2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
@@ -130,7 +130,7 @@ boolean put(FileHandle h, OpenFileCtx context) {
         }
         toEvict = openFileMap.remove(pairs.getKey());
         Preconditions.checkState(toEvict == pairs.getValue(),
-            "The deleted entry is not the same as odlest found.");
+            "The deleted entry is not the same as oldest found.");
       }
     }
     openFileMap.put(h, context);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 23166e26f1..46646997d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1364,7 +1364,7 @@ public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
         DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
     HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
     if (policy == null) {
-      throw new HadoopIllegalArgumentException("Unregonized value '"
+      throw new HadoopIllegalArgumentException("Unrecognized value '"
           + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e60703be4a..05d538aa86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2772,7 +2772,7 @@ private void processQueuedMessages(Iterable<ReportedBlockInfo> rbis)
       throws IOException {
     for (ReportedBlockInfo rbi : rbis) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing previouly queued message " + rbi);
+        LOG.debug("Processing previously queued message " + rbi);
       }
       if (rbi.getReportedState() == null) {
         // This is a DELETE_BLOCK request
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 8323140e4d..69dc9f9d86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -836,7 +836,7 @@ private boolean readReplicasFromCache(ReplicaMap volumeMap,
     } catch (Exception e) {
       // Any exception we need to revert back to read from disk
       // Log the error and return false
-      LOG.info("Exception occured while reading the replicas cache file: "
+      LOG.info("Exception occurred while reading the replicas cache file: "
           + replicaFile.getPath(), e );
       return false;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 08564de689..d41f9c379d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -1292,7 +1292,7 @@ private LinkedList<ScanInfo> compileReport(File bpFinalizedDir,
     try {
       fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
     } catch (IOException ioe) {
-      LOG.warn("Exception occured while compiling report: ", ioe);
+      LOG.warn("Exception occurred while compiling report: ", ioe);
       // Initiate a check on disk failure.
       dataset.datanode.checkDiskErrorAsync();
       // Ignore this directory and proceed.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index a8adcbd562..9e60e48bdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -97,7 +97,7 @@ public void printHelp() {
     String header = "Query Plan queries a given data node about the " +
         "current state of disk balancer execution.\n\n";
 
-    String footer = "\nQuery command retrievs the plan ID and the current " +
+    String footer = "\nQuery command retrieves the plan ID and the current " +
         "running state. ";
 
     HelpFormatter helpFormatter = new HelpFormatter();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index f2a1ee5254..41ec8e9513 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -134,7 +134,7 @@ static TruncateResult truncate(final FSNamesystem fsn, final String srcArg,
       if (!onBlockBoundary) {
         // Open file for write, but don't log into edits
         long lastBlockDelta = file.computeFileSize() - newLength;
-        assert lastBlockDelta > 0 : "delta is 0 only if on block bounday";
+        assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
         truncateBlock = prepareFileForTruncate(fsn, iip, clientName,
             clientMachine, lastBlockDelta, null);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 2990344958..11cdbc63e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1082,7 +1082,7 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
       boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
           fsDir, path, iip, file, oldBlock);
       if (!removed && !(op instanceof UpdateBlocksOp)) {
-        throw new IOException("Trying to delete non-existant block " + oldBlock);
+        throw new IOException("Trying to delete non-existent block " + oldBlock);
       }
     } else if (newBlocks.length > oldBlocks.length) {
       final boolean isStriped = ecPolicy != null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index d6dd8eead7..f79130db7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -1025,7 +1025,7 @@ private void setBlockPoolID(File storage, String bpid)
       throws InconsistentFSStateException {
     if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
       throw new InconsistentFSStateException(storage,
-          "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
+          "Unexpected blockpoolID " + bpid + " . Expected " + blockpoolID);
     }
     setBlockPoolID(bpid);
   }
@@ -1154,4 +1154,4 @@ public void writeAll() throws IOException {
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 107d4ed081..2c31cd9e99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -219,7 +219,7 @@ static DatanodeInfo chooseDatanode(final NameNode namenode,
       final String remoteAddr) throws IOException {
     FSNamesystem fsn = namenode.getNamesystem();
     if (fsn == null) {
-      throw new IOException("Namesystem has not been intialized yet.");
+      throw new IOException("Namesystem has not been initialized yet.");
     }
     final BlockManager bm = fsn.getBlockManager();