HADOOP-9323. Fix typos in API documentation. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1449977 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2013-02-26 00:10:35 +00:00
parent 63522b3224
commit 4840775e3d
10 changed files with 16 additions and 34 deletions

View File

@@ -385,6 +385,8 @@ Release 2.0.4-beta - UNRELEASED
     HADOOP-8569. CMakeLists.txt: define _GNU_SOURCE and _LARGEFILE_SOURCE.
     (Colin Patrick McCabe via atm)
 
+    HADOOP-9323. Fix typos in API documentation. (suresh)
+
 Release 2.0.3-alpha - 2013-02-06
 
   INCOMPATIBLE CHANGES

View File

@@ -21,8 +21,6 @@
 import java.io.*;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -32,7 +30,7 @@
 
 /****************************************************************
  * Abstract Checksumed FileSystem.
- * It provide a basice implementation of a Checksumed FileSystem,
+ * It provide a basic implementation of a Checksumed FileSystem,
  * which creates a checksum file for each raw file.
  * It generates & verifies checksums at the client side.
  *
@@ -118,9 +116,6 @@ private int getSumBufferSize(int bytesPerSum, int bufferSize) {
    * It verifies that data matches checksums.
    *******************************************************/
   private static class ChecksumFSInputChecker extends FSInputChecker {
-    public static final Log LOG
-      = LogFactory.getLog(FSInputChecker.class);
-
     private ChecksumFileSystem fs;
     private FSDataInputStream datas;
     private FSDataInputStream sums;
@@ -374,19 +369,6 @@ private static class ChecksumFSOutputSummer extends FSOutputSummer {
     private FSDataOutputStream sums;
     private static final float CHKSUM_AS_FRACTION = 0.01f;
-
-    public ChecksumFSOutputSummer(ChecksumFileSystem fs,
-                                  Path file,
-                                  boolean overwrite,
-                                  short replication,
-                                  long blockSize,
-                                  Configuration conf)
-      throws IOException {
-      this(fs, file, overwrite,
-           conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY,
-                       LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT),
-           replication, blockSize, null);
-    }
 
     public ChecksumFSOutputSummer(ChecksumFileSystem fs,
                                   Path file,
                                   boolean overwrite,
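The class comment in the hunk above sums up the design: a ChecksumFileSystem shadows each raw file with a checksum file and verifies data on the client side. A minimal sketch of how that surfaces in practice, via LocalFileSystem (a ChecksumFileSystem subclass); the path is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumDemo {
  public static void main(String[] args) throws Exception {
    // LocalFileSystem extends ChecksumFileSystem: each file written through
    // it gets a hidden ".<name>.crc" sibling holding client-side checksums.
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    Path file = new Path("/tmp/checksum-demo.txt");  // hypothetical path
    FSDataOutputStream out = fs.create(file, true);
    out.writeBytes("hello checksummed world\n");
    out.close();
    // Reading back verifies the data against the .crc file; a mismatch
    // surfaces as a ChecksumException.
    FSDataInputStream in = fs.open(file);
    byte[] buf = new byte[64];
    int n = in.read(buf);
    in.close();
    System.out.println(new String(buf, 0, n, "UTF-8"));
  }
}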

View File

@@ -1326,7 +1326,7 @@ public FsStatus next(final AbstractFileSystem fs, final Path p)
    *
    * 2. Partially qualified URIs (eg scheme but no host)
    *
-   * fs:///A/B/file  Resolved according to the target file sytem. Eg resolving
+   * fs:///A/B/file  Resolved according to the target file system. Eg resolving
    *                 a symlink to hdfs:///A results in an exception because
    *                 HDFS URIs must be fully qualified, while a symlink to
    *                 file:///A will not since Hadoop's local file systems
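To make the qualification rules in this javadoc concrete, here is a hedged sketch using FileContext.createSymlink; it assumes a release and file system with symlink support enabled, and the namenode authority and link paths are made up for illustration:

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class SymlinkQualification {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    // Fully qualified target: scheme and authority are both present.
    fc.createSymlink(new Path("hdfs://nn.example.com:8020/A"),
                     new Path("/links/full"), true);
    // Partially qualified target (scheme but no authority): resolving this
    // link later fails, because HDFS URIs must be fully qualified ...
    fc.createSymlink(new Path("hdfs:///A"), new Path("/links/partial"), true);
    // ... while a file:///A target resolves, since the local file system
    // has no notion of an authority.
    fc.createSymlink(new Path("file:///A"), new Path("/links/local"), true);
  }
}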

View File

@@ -1864,7 +1864,7 @@ public Path getHomeDirectory() {
    *
    * Some file systems like LocalFileSystem have an initial workingDir
    * that we use as the starting workingDir. For other file systems
-   * like HDFS there is no built in notion of an inital workingDir.
+   * like HDFS there is no built in notion of an initial workingDir.
    *
    * @return if there is built in notion of workingDir then it
    * is returned; else a null is returned.
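A short sketch of the working-directory behavior this comment describes; the directory name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // For the local file system this starts as the process's cwd;
    // HDFS has no built-in equivalent and falls back to a default.
    System.out.println(fs.getWorkingDirectory());
    fs.setWorkingDirectory(new Path("/user/alice/projects"));  // hypothetical
    // Relative paths now resolve against the new working directory.
    System.out.println(fs.makeQualified(new Path("data.txt")));
  }
}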

View File

@@ -43,7 +43,7 @@ public void readFully(long position, byte[] buffer, int offset, int length)
     throws IOException;
 
   /**
-   * Read number of bytes equalt to the length of the buffer, from a given
+   * Read number of bytes equal to the length of the buffer, from a given
    * position within a file. This does not
    * change the current offset of a file, and is thread-safe.
    */
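The contract above (positional, offset-preserving, thread-safe reads) is easiest to see in use; a sketch with a hypothetical file:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PositionalReadDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataInputStream in = fs.open(new Path("/data/sample.bin"));  // hypothetical
    byte[] header = new byte[16];
    byte[] trailer = new byte[16];
    // Positional reads fill the whole buffer (or throw) and never move
    // the stream's current offset, so threads can share one open stream.
    in.readFully(0L, header);
    in.readFully(1024L, trailer, 0, trailer.length);
    System.out.println("current offset is still " + in.getPos());  // 0
    in.close();
  }
}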

View File

@@ -79,19 +79,17 @@ public abstract class TrashPolicy extends Configured {
 
   /**
    * Get an instance of the configured TrashPolicy based on the value
-   * of the configuration paramater fs.trash.classname.
+   * of the configuration parameter fs.trash.classname.
    *
    * @param conf the configuration to be used
    * @param fs the file system to be used
    * @param home the home directory
    * @return an instance of TrashPolicy
    */
-  public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home)
-      throws IOException {
-    Class<? extends TrashPolicy> trashClass = conf.getClass("fs.trash.classname",
-                                                      TrashPolicyDefault.class,
-                                                      TrashPolicy.class);
-    TrashPolicy trash = (TrashPolicy) ReflectionUtils.newInstance(trashClass, conf);
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home) {
+    Class<? extends TrashPolicy> trashClass = conf.getClass(
+        "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
+    TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
     trash.initialize(conf, fs, home); // initialize TrashPolicy
     return trash;
   }
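Since getInstance resolves the policy class reflectively, a custom policy can be plugged in purely through configuration. A sketch, with a hypothetical policy class name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.TrashPolicy;

public class TrashPolicyDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical class: it must extend TrashPolicy and be on the
    // classpath so ReflectionUtils.newInstance can construct it.
    conf.set("fs.trash.classname", "com.example.AuditingTrashPolicy");
    FileSystem fs = FileSystem.get(conf);
    TrashPolicy policy =
        TrashPolicy.getInstance(conf, fs, fs.getHomeDirectory());
    System.out.println("trash enabled: " + policy.isEnabled());
  }
}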

View File

@@ -27,7 +27,7 @@
 
 /**
  * A byte sequence that is usable as a key or value.
- * It is resizable and distinguishes between the size of the seqeunce and
+ * It is resizable and distinguishes between the size of the sequence and
  * the current capacity. The hash function is the front of the md5 of the
  * buffer. The sort order is the same as memcmp.
  */
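The size/capacity distinction this comment draws can be seen directly; a sketch assuming the class is org.apache.hadoop.io.BytesWritable, which carries this javadoc:

import org.apache.hadoop.io.BytesWritable;

public class SizeVsCapacity {
  public static void main(String[] args) {
    BytesWritable w = new BytesWritable(new byte[] {1, 2, 3});
    System.out.println(w.getLength());    // size of the sequence: 3
    w.setSize(100);                       // grows the backing buffer
    // Capacity may exceed size after a resize, so later appends can
    // avoid a reallocation.
    System.out.println(w.getLength() + " / " + w.getCapacity());
  }
}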

View File

@@ -128,7 +128,7 @@ public int getLength() {
   /**
    * Returns the Unicode Scalar Value (32-bit integer value)
    * for the character at <code>position</code>. Note that this
-   * method avoids using the converter or doing String instatiation
+   * method avoids using the converter or doing String instantiation
    * @return the Unicode scalar value at position or -1
    * if the position is invalid or points to a
    * trailing byte
@@ -527,7 +527,7 @@ public static void validateUTF8(byte[] utf8, int start, int len)
     int length = 0;
     int state = LEAD_BYTE;
     while (count < start+len) {
-      int aByte = ((int) utf8[count] & 0xFF);
+      int aByte = utf8[count] & 0xFF;
       switch (state) {
       case LEAD_BYTE:
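The cleanup in the second hunk keeps the & 0xFF mask while dropping the redundant (int) cast. The mask is what matters: Java bytes are signed, and plain widening sign-extends. A standalone illustration:

public class ByteMasking {
  public static void main(String[] args) {
    byte lead = (byte) 0xC3;    // a UTF-8 lead byte, negative as a Java byte
    int widened = lead;         // implicit widening sign-extends: -61
    int masked = lead & 0xFF;   // masking recovers the unsigned value: 195
    System.out.println(widened + " vs " + masked);
    // The (int) cast was redundant: & already promotes its byte
    // operand to int before masking.
  }
}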

View File

@@ -192,7 +192,7 @@ public int hashCode() {
 
     int hash = 1;
     byte[] b = this.get();
     for (int i = 0; i < count; i++)
-      hash = (31 * hash) + (int)b[i];
+      hash = (31 * hash) + b[i];
     return hash;
   }
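Dropping the (int) cast here is behavior-preserving, since b[i] widens to int with the same sign extension the cast performed. A quick check:

public class HashEquivalence {
  public static void main(String[] args) {
    byte[] b = {1, -2, 3};
    int h1 = 1, h2 = 1;
    for (int i = 0; i < b.length; i++) {
      h1 = (31 * h1) + (int) b[i];  // old form, explicit cast
      h2 = (31 * h2) + b[i];        // new form, implicit widening
    }
    System.out.println(h1 == h2);   // true: identical results
  }
}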

View File

@@ -26,7 +26,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
- * Interface that alll the serializers have to implement.
+ * Interface that all the serializers have to implement.
  *
  * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
  */