HADOOP-9323. Fix typos in API documentation. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1449977 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
63522b3224
commit
4840775e3d
@ -385,6 +385,8 @@ Release 2.0.4-beta - UNRELEASED
|
||||
HADOOP-8569. CMakeLists.txt: define _GNU_SOURCE and _LARGEFILE_SOURCE.
|
||||
(Colin Patrick McCabe via atm)
|
||||
|
||||
HADOOP-9323. Fix typos in API documentation. (suresh)
|
||||
|
||||
Release 2.0.3-alpha - 2013-02-06
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -21,8 +21,6 @@
|
||||
import java.io.*;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
@ -32,7 +30,7 @@
|
||||
|
||||
/****************************************************************
|
||||
* Abstract Checksumed FileSystem.
|
||||
* It provide a basice implementation of a Checksumed FileSystem,
|
||||
* It provides a basic implementation of a Checksumed FileSystem,
|
||||
* which creates a checksum file for each raw file.
|
||||
* It generates & verifies checksums at the client side.
|
||||
*
|
||||
@ -118,9 +116,6 @@ private int getSumBufferSize(int bytesPerSum, int bufferSize) {
|
||||
* It verifies that data matches checksums.
|
||||
*******************************************************/
|
||||
private static class ChecksumFSInputChecker extends FSInputChecker {
|
||||
public static final Log LOG
|
||||
= LogFactory.getLog(FSInputChecker.class);
|
||||
|
||||
private ChecksumFileSystem fs;
|
||||
private FSDataInputStream datas;
|
||||
private FSDataInputStream sums;
|
||||
@ -374,19 +369,6 @@ private static class ChecksumFSOutputSummer extends FSOutputSummer {
|
||||
private FSDataOutputStream sums;
|
||||
private static final float CHKSUM_AS_FRACTION = 0.01f;
|
||||
|
||||
public ChecksumFSOutputSummer(ChecksumFileSystem fs,
|
||||
Path file,
|
||||
boolean overwrite,
|
||||
short replication,
|
||||
long blockSize,
|
||||
Configuration conf)
|
||||
throws IOException {
|
||||
this(fs, file, overwrite,
|
||||
conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY,
|
||||
LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT),
|
||||
replication, blockSize, null);
|
||||
}
|
||||
|
||||
public ChecksumFSOutputSummer(ChecksumFileSystem fs,
|
||||
Path file,
|
||||
boolean overwrite,
|
||||
|
@ -1326,7 +1326,7 @@ public FsStatus next(final AbstractFileSystem fs, final Path p)
|
||||
*
|
||||
* 2. Partially qualified URIs (eg scheme but no host)
|
||||
*
|
||||
* fs:///A/B/file Resolved according to the target file sytem. Eg resolving
|
||||
* fs:///A/B/file Resolved according to the target file system. Eg resolving
|
||||
* a symlink to hdfs:///A results in an exception because
|
||||
* HDFS URIs must be fully qualified, while a symlink to
|
||||
* file:///A will not since Hadoop's local file systems
|
||||
|
@ -1864,7 +1864,7 @@ public Path getHomeDirectory() {
|
||||
*
|
||||
* Some file systems like LocalFileSystem have an initial workingDir
|
||||
* that we use as the starting workingDir. For other file systems
|
||||
* like HDFS there is no built in notion of an inital workingDir.
|
||||
* like HDFS there is no built in notion of an initial workingDir.
|
||||
*
|
||||
* @return if there is built in notion of workingDir then it
|
||||
* is returned; else a null is returned.
|
||||
|
@ -43,7 +43,7 @@ public void readFully(long position, byte[] buffer, int offset, int length)
|
||||
throws IOException;
|
||||
|
||||
/**
|
||||
* Read number of bytes equalt to the length of the buffer, from a given
|
||||
* Read the number of bytes equal to the length of the buffer, from a given
|
||||
* position within a file. This does not
|
||||
* change the current offset of a file, and is thread-safe.
|
||||
*/
|
||||
|
@ -79,19 +79,17 @@ public abstract class TrashPolicy extends Configured {
|
||||
|
||||
/**
|
||||
* Get an instance of the configured TrashPolicy based on the value
|
||||
* of the configuration paramater fs.trash.classname.
|
||||
* of the configuration parameter fs.trash.classname.
|
||||
*
|
||||
* @param conf the configuration to be used
|
||||
* @param fs the file system to be used
|
||||
* @param home the home directory
|
||||
* @return an instance of TrashPolicy
|
||||
*/
|
||||
public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home)
|
||||
throws IOException {
|
||||
Class<? extends TrashPolicy> trashClass = conf.getClass("fs.trash.classname",
|
||||
TrashPolicyDefault.class,
|
||||
TrashPolicy.class);
|
||||
TrashPolicy trash = (TrashPolicy) ReflectionUtils.newInstance(trashClass, conf);
|
||||
public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home) {
|
||||
Class<? extends TrashPolicy> trashClass = conf.getClass(
|
||||
"fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
|
||||
TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
|
||||
trash.initialize(conf, fs, home); // initialize TrashPolicy
|
||||
return trash;
|
||||
}
|
||||
|
@ -27,7 +27,7 @@
|
||||
|
||||
/**
|
||||
* A byte sequence that is usable as a key or value.
|
||||
* It is resizable and distinguishes between the size of the seqeunce and
|
||||
* It is resizable and distinguishes between the size of the sequence and
|
||||
* the current capacity. The hash function is the front of the md5 of the
|
||||
* buffer. The sort order is the same as memcmp.
|
||||
*/
|
||||
|
@ -128,7 +128,7 @@ public int getLength() {
|
||||
/**
|
||||
* Returns the Unicode Scalar Value (32-bit integer value)
|
||||
* for the character at <code>position</code>. Note that this
|
||||
* method avoids using the converter or doing String instatiation
|
||||
* method avoids using the converter or doing String instantiation
|
||||
* @return the Unicode scalar value at position or -1
|
||||
* if the position is invalid or points to a
|
||||
* trailing byte
|
||||
@ -527,7 +527,7 @@ public static void validateUTF8(byte[] utf8, int start, int len)
|
||||
int length = 0;
|
||||
int state = LEAD_BYTE;
|
||||
while (count < start+len) {
|
||||
int aByte = ((int) utf8[count] & 0xFF);
|
||||
int aByte = utf8[count] & 0xFF;
|
||||
|
||||
switch (state) {
|
||||
case LEAD_BYTE:
|
||||
|
@ -192,7 +192,7 @@ public int hashCode() {
|
||||
int hash = 1;
|
||||
byte[] b = this.get();
|
||||
for (int i = 0; i < count; i++)
|
||||
hash = (31 * hash) + (int)b[i];
|
||||
hash = (31 * hash) + b[i];
|
||||
return hash;
|
||||
}
|
||||
|
||||
|
@ -26,7 +26,7 @@
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
/**
|
||||
* Interface that alll the serializers have to implement.
|
||||
* Interface that all the serializers have to implement.
|
||||
*
|
||||
* @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
|
||||
*/
|
||||
|
Loading…
Reference in New Issue
Block a user