diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 800a1b5fa8..2828a58469 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -385,6 +385,8 @@ Release 2.0.4-beta - UNRELEASED
HADOOP-8569. CMakeLists.txt: define _GNU_SOURCE and _LARGEFILE_SOURCE.
(Colin Patrick McCabe via atm)
+ HADOOP-9323. Fix typos in API documentation. (suresh)
+
Release 2.0.3-alpha - 2013-02-06
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 42ee870268..2a8db698d4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -21,8 +21,6 @@
import java.io.*;
import java.util.Arrays;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -32,7 +30,7 @@
/****************************************************************
* Abstract Checksumed FileSystem.
- * It provide a basice implementation of a Checksumed FileSystem,
+ * It provides a basic implementation of a Checksumed FileSystem,
* which creates a checksum file for each raw file.
* It generates & verifies checksums at the client side.
*
@@ -118,9 +116,6 @@ private int getSumBufferSize(int bytesPerSum, int bufferSize) {
* It verifies that data matches checksums.
*******************************************************/
private static class ChecksumFSInputChecker extends FSInputChecker {
- public static final Log LOG
- = LogFactory.getLog(FSInputChecker.class);
-
private ChecksumFileSystem fs;
private FSDataInputStream datas;
private FSDataInputStream sums;
@@ -374,19 +369,6 @@ private static class ChecksumFSOutputSummer extends FSOutputSummer {
private FSDataOutputStream sums;
private static final float CHKSUM_AS_FRACTION = 0.01f;
- public ChecksumFSOutputSummer(ChecksumFileSystem fs,
- Path file,
- boolean overwrite,
- short replication,
- long blockSize,
- Configuration conf)
- throws IOException {
- this(fs, file, overwrite,
- conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY,
- LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT),
- replication, blockSize, null);
- }
-
public ChecksumFSOutputSummer(ChecksumFileSystem fs,
Path file,
boolean overwrite,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index d4ff03785c..26f50503fe 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -1326,7 +1326,7 @@ public FsStatus next(final AbstractFileSystem fs, final Path p)
*
* 2. Partially qualified URIs (eg scheme but no host)
*
- * fs:///A/B/file Resolved according to the target file sytem. Eg resolving
+ * fs:///A/B/file Resolved according to the target file system. Eg resolving
* a symlink to hdfs:///A results in an exception because
* HDFS URIs must be fully qualified, while a symlink to
* file:///A will not since Hadoop's local file systems
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 00a54f70cd..a26d357058 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1864,7 +1864,7 @@ public Path getHomeDirectory() {
*
* Some file systems like LocalFileSystem have an initial workingDir
* that we use as the starting workingDir. For other file systems
- * like HDFS there is no built in notion of an inital workingDir.
+ * like HDFS there is no built in notion of an initial workingDir.
*
* @return if there is built in notion of workingDir then it
* is returned; else a null is returned.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java
index a79157b65d..a2384cd8b0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java
@@ -43,7 +43,7 @@ public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException;
/**
- * Read number of bytes equalt to the length of the buffer, from a given
+ * Read number of bytes equal to the length of the buffer, from a given
* position within a file. This does not
* change the current offset of a file, and is thread-safe.
*/
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
index a168f7012e..eab83b3ca3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
@@ -79,19 +79,17 @@ public abstract class TrashPolicy extends Configured {
/**
* Get an instance of the configured TrashPolicy based on the value
- * of the configuration paramater fs.trash.classname.
+ * of the configuration parameter fs.trash.classname.
*
* @param conf the configuration to be used
* @param fs the file system to be used
* @param home the home directory
* @return an instance of TrashPolicy
*/
- public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home)
- throws IOException {
- Class<? extends TrashPolicy> trashClass = conf.getClass("fs.trash.classname",
- TrashPolicyDefault.class,
- TrashPolicy.class);
- TrashPolicy trash = (TrashPolicy) ReflectionUtils.newInstance(trashClass, conf);
+ public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home) {
+ Class<? extends TrashPolicy> trashClass = conf.getClass(
+ "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
+ TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
trash.initialize(conf, fs, home); // initialize TrashPolicy
return trash;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
index 7e42a36cb7..155df3a34c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java
@@ -27,7 +27,7 @@
/**
* A byte sequence that is usable as a key or value.
- * It is resizable and distinguishes between the size of the seqeunce and
+ * It is resizable and distinguishes between the size of the sequence and
* the current capacity. The hash function is the front of the md5 of the
* buffer. The sort order is the same as memcmp.
*/
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
index 95fb174a9d..a5c8b1ecd5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
@@ -128,7 +128,7 @@ public int getLength() {
/**
* Returns the Unicode Scalar Value (32-bit integer value)
* for the character at <code>position</code>. Note that this
- * method avoids using the converter or doing String instatiation
+ * method avoids using the converter or doing String instantiation
* @return the Unicode scalar value at position or -1
* if the position is invalid or points to a
* trailing byte
@@ -527,7 +527,7 @@ public static void validateUTF8(byte[] utf8, int start, int len)
int length = 0;
int state = LEAD_BYTE;
while (count < start+len) {
- int aByte = ((int) utf8[count] & 0xFF);
+ int aByte = utf8[count] & 0xFF;
switch (state) {
case LEAD_BYTE:
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java
index eb569271d2..50cc1a1912 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java
@@ -192,7 +192,7 @@ public int hashCode() {
int hash = 1;
byte[] b = this.get();
for (int i = 0; i < count; i++)
- hash = (31 * hash) + (int)b[i];
+ hash = (31 * hash) + b[i];
return hash;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java
index b2f9f349dd..503ea35f79 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.classification.InterfaceStability;
/**
- * Interface that alll the serializers have to implement.
+ * Interface that all the serializers have to implement.
*
* @deprecated Replaced by Avro.
*/