diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 94285a4dfb..f7bf2b8703 100755
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -119,7 +119,7 @@
 /**
  * Provides access to configuration parameters.
  *
- * Configurations are specified by resources. A resource contains a set of
+ * <p>Configurations are specified by resources. A resource contains a set of
  * name/value pairs as XML data. Each resource is named by either a
@@ -130,16 +130,16 @@
  *
  * Unless explicitly turned off, Hadoop by default specifies two
  * resources, loaded in-order from the classpath:
  *
- * core-default.xml: Read-only defaults for hadoop.
- * core-site.xml: Site-specific configuration for a given hadoop
+ * <code>core-default.xml</code>: Read-only defaults for hadoop.
+ * <code>core-site.xml</code>: Site-specific configuration for a given hadoop
  * installation.
  *
  * Configuration parameters may be declared <i>final</i>.
  * Once a resource declares a value final, no subsequently-loaded
@@ -153,9 +153,9 @@
  * &lt;/property&gt;
  *
  * Administrators typically define parameters as final in
- * core-site.xml for values that user applications may not alter.
+ * <code>core-site.xml</code> for values that user applications may not alter.
  *
- * Value strings are first processed for variable expansion. The
+ * <p>Value strings are first processed for <i>variable expansion</i>. The
  * available properties are:
  *
- * When conf.get("tempdir") is called, then ${basedir}
+ * When <code>conf.get("tempdir")</code> is called, then <code>${basedir}</code>
  * will be resolved to another property in this Configuration, while
- * ${user.name} would then ordinarily be resolved to the value
+ * <code>${user.name}</code> would then ordinarily be resolved to the value
  * of the System property with that name.
- * When conf.get("otherdir") is called, then ${env.BASE_DIR}
- * will be resolved to the value of the ${BASE_DIR} environment variable.
- * It supports ${env.NAME:-default} and ${env.NAME-default} notations.
- * The former is resolved to "default" if ${NAME} environment variable is undefined
+ * When <code>conf.get("otherdir")</code> is called, then <code>${env.BASE_DIR}</code>
+ * will be resolved to the value of the <code>${BASE_DIR}</code> environment variable.
+ * It supports <code>${env.NAME:-default}</code> and <code>${env.NAME-default}</code> notations.
+ * The former is resolved to "default" if <code>${NAME}</code> environment variable is undefined
  * or its value is empty.
- * The latter behaves the same way only if ${NAME} is undefined.
+ * The latter behaves the same way only if <code>${NAME}</code> is undefined.
  *
  * By default, warnings will be given to any deprecated configuration
  * parameters and these are suppressible by configuring
- * log4j.logger.org.apache.hadoop.conf.Configuration.deprecation in
+ * <code>log4j.logger.org.apache.hadoop.conf.Configuration.deprecation</code> in
  * log4j.properties file.
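A quick, hand-rolled illustration of the expansion rules this javadoc describes (not part of the patch; the property names and values are invented):

    import org.apache.hadoop.conf.Configuration;

    public class ExpansionDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false); // skip the default resources

        conf.set("basedir", "/user/${user.name}");   // ${user.name} falls through to the System property
        conf.set("tempdir", "${basedir}/tmp");       // ${basedir} resolves within this Configuration
        conf.set("otherdir", "${env.BASE_DIR:-/srv}/other"); // ":-" form: default when BASE_DIR is unset or empty

        System.out.println(conf.get("tempdir"));   // e.g. /user/alice/tmp
        System.out.println(conf.get("otherdir"));  // /srv/other if BASE_DIR is not set
      }
    }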
 *
- * Optionally we can tag related properties together by using tag
+ * <p>Optionally we can tag related properties together by using tag
 * attributes. System tags are defined by hadoop.tags.system property. Users
@@ -220,9 +220,9 @@
 * &lt;tag&gt;HDFS,SECURITY&lt;/tag&gt;
 * &lt;/property&gt;
 *
- * Properties marked with tags can be retrieved with conf
- * .getAllPropertiesByTag("HDFS") or conf.getAllPropertiesByTags
- * (Arrays.asList("YARN","SECURITY")).
+ * Properties marked with tags can be retrieved with <code>conf
+ * .getAllPropertiesByTag("HDFS")</code> or <code>conf.getAllPropertiesByTags
+ * (Arrays.asList("YARN","SECURITY"))</code>.
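For reference, the two retrieval calls named above look like this in use (a sketch; it assumes the properties were tagged via the XML tag attribute as shown):

    import java.util.Arrays;
    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;

    public class TagDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        Properties hdfsProps = conf.getAllPropertiesByTag("HDFS");
        Properties mixed = conf.getAllPropertiesByTags(Arrays.asList("YARN", "SECURITY"));
        System.out.println(hdfsProps.size() + " " + mixed.size());
      }
    }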
 * would lead to <code>UnsupportedOperationException</code>
 *
 * If a key is deprecated in favor of multiple keys, they are all treated as
 * aliases of each other, and setting any one of them resets all the others
@@ -604,7 +604,7 @@ public static void addDeprecation(String key, String[] newKeys,
 * It does not override any existing entries in the deprecation map.
 * This is to be used only by the developers in order to add deprecation of
 * keys, and attempts to call this method after loading resources once,
- * would lead to UnsupportedOperationException
+ * would lead to <code>UnsupportedOperationException</code>
 *
 * If you have multiple deprecation entries to add, it is more efficient to
 * use #addDeprecations(DeprecationDelta[] deltas) instead.
@@ -624,7 +624,7 @@ public static void addDeprecation(String key, String newKey,
 * It does not override any existing entries in the deprecation map.
 * This is to be used only by the developers in order to add deprecation of
 * keys, and attempts to call this method after loading resources once,
- * would lead to UnsupportedOperationException
+ * would lead to <code>UnsupportedOperationException</code>
 *
 * If a key is deprecated in favor of multiple keys, they are all treated as
 * aliases of each other, and setting any one of them resets all the others
@@ -648,7 +648,7 @@ public static void addDeprecation(String key, String[] newKeys) {
 * It does not override any existing entries in the deprecation map.
 * This is to be used only by the developers in order to add deprecation of
 * keys, and attempts to call this method after loading resources once,
- * would lead to UnsupportedOperationException
+ * would lead to <code>UnsupportedOperationException</code>
 *
 * If you have multiple deprecation entries to add, it is more efficient to
 * use #addDeprecations(DeprecationDelta[] deltas) instead.
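A sketch of the deprecation workflow these javadocs describe, registering the aliases in bulk before any resource is loaded (the key names are invented):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configuration.DeprecationDelta;

    public class DeprecationDemo {
      static {
        // Must run before resources are loaded, per the caveat above.
        Configuration.addDeprecations(new DeprecationDelta[] {
            new DeprecationDelta("old.key", "new.key")
        });
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("old.key", "value");             // the deprecated alias...
        System.out.println(conf.get("new.key"));  // ...is readable through the new key
      }
    }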
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 63b5bc7d94..7988ebb790 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -247,7 +247,7 @@ protected static synchronized Map
  * The <code>uri</code> determines a configuration property name,
- * fs.AbstractFileSystem.scheme.impl whose value names the
+ * <code>fs.AbstractFileSystem.scheme.impl</code> whose value names the
* AbstractFileSystem class.
*
* The entire URI and conf is passed to the AbstractFileSystem factory method.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index 716c6c5004..586350d843 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -866,7 +866,7 @@ boolean apply(Path p) throws IOException {
/**
* Set replication for an existing file.
- * Implement the abstract setReplication of FileSystem
+ * Implement the abstract <code>setReplication</code> of FileSystem
* @param src file name
* @param replication new replication
* @throws IOException if an I/O error occurs.
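As a usage sketch of the contract documented here (the path and replication factor are invented; on a checksum file system the hidden .crc file is adjusted along with the data file):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReplicationDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // true on success; see the javadoc above for error behavior.
        boolean ok = fs.setReplication(new Path("/data/part-00000"), (short) 2);
        System.out.println(ok);
      }
    }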
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 4820c5c304..5f3e5d9b8e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -453,7 +453,7 @@ private boolean isDirectory(Path f)
}
/**
* Set replication for an existing file.
- * Implement the abstract setReplication of FileSystem
+ * Implement the abstract <code>setReplication</code> of FileSystem
* @param src file name
* @param replication new replication
* @throws IOException if an I/O error occurs.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index eb5983f098..a903e337de 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -1977,9 +1977,9 @@ public RemoteIterator
  * Returns <code>true</code> if the iterator has more files.
  *
- * @return true if the iterator has more files.
+ * @return <code>true</code> if the iterator has more files.
* @throws AccessControlException if not allowed to access next
* file's status or locations
* @throws FileNotFoundException if next file does not exist any more
@@ -2071,34 +2071,34 @@ public LocatedFileStatus next() throws IOException {
  *   <dt> <code>?</code>
  *   <dd> Matches any single character.
  *   <dt> <code>*</code>
  *   <dd> Matches zero or more characters.
  *   <dt> <code>[abc]</code>
  *   <dd> Matches a single character from character set <code>{a,b,c}</code>.
- *   <dt> [a-b]
- *   <dd> Matches a single character from the character range {a...b}. Note: character a must be
- *        lexicographically less than or equal to character b.
+ *   <dt> <code>[a-b]</code>
+ *   <dd> Matches a single character from the character range <code>{a...b}</code>. Note: character <code>a</code> must be
+ *        lexicographically less than or equal to character <code>b</code>.
- *   <dt> [^a]
- *   <dd> Matches a single character that is not from character set or range {a}. Note that the ^ character must occur
- *        immediately to the right of the opening bracket.
+ *   <dt> <code>[^a]</code>
+ *   <dd> Matches a single character that is not from character set or range <code>{a}</code>. Note that the <code>^</code> character must occur
+ *        immediately to the right of the opening bracket.
- *   <dt> \c
- *   <dd> Removes (escapes) any special meaning of character c.
+ *   <dt> <code>\c</code>
+ *   <dd> Removes (escapes) any special meaning of character <code>c</code>.
- *   <dt> {ab,cd}
- *   <dd> Matches a string from the string set {ab, cd}.
+ *   <dt> <code>{ab,cd}</code>
+ *   <dd> Matches a string from the string set <code>{ab, cd}</code>.
- *   <dt> {ab,c{de,fh}}
- *   <dd> Matches a string from the string set {ab, cde, cfh}.
+ *   <dt> <code>{ab,c{de,fh}}</code>
+ *   <dd> Matches a string from the string set <code>{ab, cde, cfh}</code>.
  *
  *   <dt> <code>?</code>
  *   <dd> Matches any single character.
  *   <dt> <code>*</code>
  *   <dd> Matches zero or more characters.
  *   <dt> <code>[abc]</code>
  *   <dd> Matches a single character from character set <code>{a,b,c}</code>.
- *   <dt> [a-b]
- *   <dd> Matches a single character from the character range {a...b}. Note that character a must be
- *        lexicographically less than or equal to character b.
+ *   <dt> <code>[a-b]</code>
+ *   <dd> Matches a single character from the character range <code>{a...b}</code>. Note that character <code>a</code> must be
+ *        lexicographically less than or equal to character <code>b</code>.
- *   <dt> [^a]
- *   <dd> Matches a single character that is not from character set or range {a}. Note that the ^ character must occur
- *        immediately to the right of the opening bracket.
+ *   <dt> <code>[^a]</code>
+ *   <dd> Matches a single character that is not from character set or range <code>{a}</code>. Note that the <code>^</code> character must occur
+ *        immediately to the right of the opening bracket.
- *   <dt> \c
- *   <dd> Removes (escapes) any special meaning of character c.
+ *   <dt> <code>\c</code>
+ *   <dd> Removes (escapes) any special meaning of character <code>c</code>.
- *   <dt> {ab,cd}
- *   <dd> Matches a string from the string set {ab, cd}.
+ *   <dt> <code>{ab,cd}</code>
+ *   <dd> Matches a string from the string set <code>{ab, cd}</code>.
- *   <dt> {ab,c{de,fh}}
- *   <dd> Matches a string from the string set {ab, cde, cfh}.
+ *   <dt> <code>{ab,c{de,fh}}</code>
+ *   <dd> Matches a string from the string set <code>{ab, cde, cfh}</code>.
  *
 * Returns <code>true</code> if the iteration has more elements.
 *
- * @return true if the iterator has more elements.
+ * @return <code>true</code> if the iterator has more elements.
* @throws IOException if any IO error occurs
*/
boolean hasNext() throws IOException;
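A sketch that exercises both pieces documented in this hunk, the glob grammar and the RemoteIterator contract (the /logs paths are invented):

    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class GlobDemo {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext();

        // {ab,c{de,fh}} matches a string from the set {ab, cde, cfh}.
        FileStatus[] matches = fc.util().globStatus(new Path("/logs/{app,job{2023,2024}}-*"));
        if (matches != null) {
          for (FileStatus st : matches) {
            System.out.println(st.getPath());
          }
        }

        // hasNext() keeps returning true while more files remain.
        RemoteIterator<FileStatus> it = fc.listStatus(new Path("/logs"));
        while (it.hasNext()) {
          System.out.println(it.next().getPath());
        }
      }
    }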
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
index 4b1dc7513d..f2c8b76e2a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java
@@ -59,10 +59,10 @@ public boolean add(E e) {
}
/**
- * Construct a new EnumSetWritable. If the value argument is null or
- * its size is zero, the elementType argument must not be null. If
- * the argument value's size is bigger than zero, the argument
- * elementType is not be used.
+ * Construct a new EnumSetWritable. If the <code>value</code> argument is null or
+ * its size is zero, the <code>elementType</code> argument must not be null. If
+ * the argument <code>value</code>'s size is bigger than zero, the argument
+ * <code>elementType</code> is not be used.
  *
  * @param value enumSet value.
  * @param elementType elementType.
@@ -72,7 +72,7 @@ public EnumSetWritable(EnumSet<E> value, Class<E> elementType) {
- * Construct a new EnumSetWritable. Argument value should not be null
+ * Construct a new EnumSetWritable. Argument <code>value</code> should not be null
  * or empty.
  *
  * @param value enumSet value.
@@ -83,10 +83,10 @@ public EnumSetWritable(EnumSet<E> value) {
- * reset the EnumSetWritable with specified value and elementType. If the value argument
- * is null or its size is zero, the elementType argument must not be
- * null. If the argument value's size is bigger than zero, the
- * argument elementType is not be used.
+ * reset the EnumSetWritable with specified <code>value</code> and <code>elementType</code>. If the <code>value</code> argument
+ * is null or its size is zero, the <code>elementType</code> argument must not be
+ * null. If the argument <code>value</code>'s size is bigger than zero, the
+ * argument <code>elementType</code> is not be used.
  *
  * @param value enumSet Value.
  * @param elementType elementType.
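A sketch of the constructor rules spelled out above: a non-empty set needs no elementType, an empty one does (the enum is invented):

    import java.util.EnumSet;
    import org.apache.hadoop.io.EnumSetWritable;

    public class EnumSetDemo {
      enum Flag { A, B, C }

      public static void main(String[] args) {
        // Non-empty: elementType may be omitted (single-argument constructor).
        EnumSetWritable<Flag> nonEmpty = new EnumSetWritable<>(EnumSet.of(Flag.A, Flag.B));
        // Empty: elementType must be supplied.
        EnumSetWritable<Flag> empty =
            new EnumSetWritable<>(EnumSet.noneOf(Flag.class), Flag.class);
        System.out.println(nonEmpty.get() + " " + empty.get());
      }
    }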
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
index 29c06a01ad..831931bdac 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
@@ -401,8 +401,8 @@ static Method getStaticProtobufMethod(Class<?> declaredClass, String method,
 }

 /**
- * Find and load the class with given name className by first finding
- * it in the specified conf. If the specified conf is null,
+ * Find and load the class with given name <code>className</code> by first finding
+ * it in the specified <code>conf</code>. If the specified <code>conf</code> is null,
  * try load it directly.
*
* @param conf configuration.
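In use, the documented lookup order reads like this (a sketch; the class name is just a stand-in):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.ObjectWritable;

    public class LoadClassDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Resolved through conf's class loader first; a null conf loads directly.
        Class<?> clazz = ObjectWritable.loadClass(conf, "org.apache.hadoop.io.Text");
        System.out.println(clazz.getName());
      }
    }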
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 9d6727c159..325820d11c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -91,19 +91,19 @@
  * The actual compression algorithm used to compress key and/or values can be
  * specified by using the appropriate {@link CompressionCodec}.
  *
- * The recommended way is to use the static createWriter methods
+ * The recommended way is to use the static <code>createWriter</code> methods
  * provided by the <code>SequenceFile</code> to chose the preferred format.
  *
  * The {@link SequenceFile.Reader} acts as the bridge and can read any of the
  * above <code>SequenceFile</code> formats.
  *
  * Essentially there are 3 different formats for <code>SequenceFile</code>s
  * depending on the <code>CompressionType</code> specified. All of them share a
  * common header described below.
  *
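A sketch of the recommended createWriter entry point (the option-based overload; path, key/value types, and compression choice are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class WriterDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(new Path("/tmp/demo.seq")),
            SequenceFile.Writer.keyClass(IntWritable.class),
            SequenceFile.Writer.valueClass(Text.class),
            SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK))) {
          writer.append(new IntWritable(1), new Text("one"));
        }
      }
    }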
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
 * The decompression requires large amounts of memory. Thus you should call the
 * {@link #close() close()} method as soon as possible, to force
- * CBZip2InputStream to release the allocated memory. See
+ * <code>CBZip2InputStream</code> to release the allocated memory. See
 * {@link CBZip2OutputStream CBZip2OutputStream} for information about memory
 * usage.
 *
- * CBZip2InputStream reads bytes from the compressed source stream via
+ * <code>CBZip2InputStream</code> reads bytes from the compressed source stream via
 * the single byte {@link java.io.InputStream#read() read()} method exclusively.
 * Thus you should consider to use a buffered source stream.
 *
- * Although BZip2 headers are marked with the magic "Bz" this
+ * Although BZip2 headers are marked with the magic <code>"Bz"</code> this
 * constructor expects the next byte in the stream to be the first one after
 * the magic. Thus callers have to skip the first two bytes. Otherwise this
 * constructor will throw an exception.
@@ -289,7 +289,7 @@ private void makeMaps() {
* @throws IOException
* if the stream content is malformed or an I/O error occurs.
* @throws NullPointerException
- * if in == null
+ *             if <code>in == null</code>
*/
public CBZip2InputStream(final InputStream in, READ_MODE readMode)
throws IOException {
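Since driving CBZip2InputStream directly means skipping the "BZ" magic yourself, here is a sketch of the safer route through BZip2Codec; closing promptly, as the javadoc urges, releases the block buffers (the file name is invented):

    import java.io.InputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.BZip2Codec;

    public class Bzip2ReadDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        BZip2Codec codec = new BZip2Codec();
        codec.setConf(conf);
        FileSystem fs = FileSystem.getLocal(conf);
        // try-with-resources closes early and frees the decompressor's memory.
        try (InputStream in = codec.createInputStream(fs.open(new Path("data.bz2")))) {
          byte[] buf = new byte[8192];
          while (in.read(buf) != -1) {
            // consume the decompressed bytes
          }
        }
      }
    }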
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
index 50bdddb813..f94d1387eb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java
@@ -37,7 +37,7 @@
*
* The compression requires large amounts of memory. Thus you should call the
* {@link #close() close()} method as soon as possible, to force
- * CBZip2OutputStream to release the allocated memory.
+ * <code>CBZip2OutputStream</code> to release the allocated memory.
*
@@ -64,64 +64,64 @@
 * <code>65k + (5 * blocksize)</code>.
 *
 * <table>
 *   <tr><th>Blocksize</th><th>Compression memory usage</th>
 *       <th>Decompression memory usage</th></tr>
 *   <tr><td>100k</td><td>1300k</td><td>565k</td></tr>
 *   <tr><td>200k</td><td>2200k</td><td>1065k</td></tr>
 *   <tr><td>300k</td><td>3100k</td><td>1565k</td></tr>
 *   <tr><td>400k</td><td>4000k</td><td>2065k</td></tr>
 *   <tr><td>500k</td><td>4900k</td><td>2565k</td></tr>
 *   <tr><td>600k</td><td>5800k</td><td>3065k</td></tr>
 *   <tr><td>700k</td><td>6700k</td><td>3565k</td></tr>
 *   <tr><td>800k</td><td>7600k</td><td>4065k</td></tr>
 *   <tr><td>900k</td><td>8500k</td><td>4565k</td></tr>
 * </table>
 *
- * For decompression CBZip2InputStream allocates less memory if the
+ * For decompression <code>CBZip2InputStream</code> allocates less memory if the
 * bzipped input is smaller than one block.
 *
 * The minimum supported blocksize <code>== 1</code>.
 */
public static final int MIN_BLOCKSIZE = 1;
/**
- * The maximum supported blocksize == 9.
+ * The maximum supported blocksize <code>== 9</code>.
*/
public static final int MAX_BLOCKSIZE = 9;
@@ -566,12 +566,12 @@ private static void hbMakeCodeLengths(final byte[] len, final int[] freq,
*
* @return The blocksize, between {@link #MIN_BLOCKSIZE} and
* {@link #MAX_BLOCKSIZE} both inclusive. For a negative
- * inputLength this method returns MAX_BLOCKSIZE
+ *         <code>inputLength</code> this method returns <code>MAX_BLOCKSIZE</code>
* always.
*
* @param inputLength
* The length of the data which will be compressed by
- * CBZip2OutputStream.
+ *            <code>CBZip2OutputStream</code>.
*/
public static int chooseBlockSize(long inputLength) {
return (inputLength > 0) ? (int) Math
@@ -579,11 +579,11 @@ public static int chooseBlockSize(long inputLength) {
}
/**
- * Constructs a new CBZip2OutputStream with a blocksize of 900k.
+ * Constructs a new <code>CBZip2OutputStream</code> with a blocksize of 900k.
*
*
* Attention: The caller is resonsible to write the two BZip2 magic
- * bytes "BZ" to the specified stream prior to calling this
+ * bytes "BZ"
to the specified stream prior to calling this
* constructor.
*
- * Constructs a new CBZip2OutputStream with specified blocksize.
+ * Constructs a new <code>CBZip2OutputStream</code> with specified blocksize.
*
*
* Attention: The caller is resonsible to write the two BZip2 magic
- * bytes "BZ" to the specified stream prior to calling this
+ * bytes "BZ"
to the specified stream prior to calling this
* constructor.
*
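Tying chooseBlockSize back to the memory table above (the 400k + 9 x blocksize and 65k + 5 x blocksize figures are read off that table, e.g. a 300k block costs 3100k/1565k; the expected result 3 assumes the (length/100k)+1 policy the truncated return statement suggests):

    import org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream;

    public class BlockSizeDemo {
      public static void main(String[] args) {
        long inputLength = 250_000L;
        int blockSize = CBZip2OutputStream.chooseBlockSize(inputLength); // 3, i.e. 300k
        long compressKb = 400 + 9L * blockSize * 100;   // 3100k, matching the table
        long decompressKb = 65 + 5L * blockSize * 100;  // 1565k, matching the table
        System.out.println(blockSize + " " + compressKb + "k " + decompressKb + "k");
      }
    }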
- * ZlibCompressor, BuiltInZlibDeflater only support three
+ * <code>ZlibCompressor</code>, <code>BuiltInZlibDeflater</code> only support three
 * kind of compression strategy: FILTERED, HUFFMAN_ONLY and DEFAULT_STRATEGY.
* kind of compression strategy: FILTERED, HUFFMAN_ONLY and DEFAULT_STRATEGY.
* It will use DEFAULT_STRATEGY as default if the configured compression
* strategy is not supported.
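If one of the supported strategies needs to be selected explicitly, the usual entry point is ZlibFactory (a sketch; I am assuming the setCompressionStrategy helper here, so treat it as illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy;
    import org.apache.hadoop.io.compress.zlib.ZlibFactory;

    public class StrategyDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // FILTERED and HUFFMAN_ONLY are honored; anything else falls back to DEFAULT_STRATEGY.
        ZlibFactory.setCompressionStrategy(conf, CompressionStrategy.FILTERED);
      }
    }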
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Chunk.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Chunk.java
index 05e3d48a46..ec508c0204 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Chunk.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Chunk.java
@@ -219,8 +219,8 @@ static public class ChunkEncoder extends OutputStream {
/**
* The number of valid bytes in the buffer. This value is always in the
- * range 0 through buf.length; elements buf[0]
- * through buf[count-1] contain valid byte data.
+ * range <code>0</code> through <code>buf.length</code>; elements <code>buf[0]</code>
+ * through <code>buf[count-1]</code> contain valid byte data.
*/
private int count;
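The invariant reads naturally as code (a generic sketch, not the Chunk implementation): every append keeps 0 <= count <= buf.length with the valid bytes packed at the front:

    class BoundedBuffer {
      private final byte[] buf = new byte[1024];
      private int count; // buf[0..count-1] holds the valid bytes

      void writeByte(byte b) {
        if (count == buf.length) {
          count = 0; // stand-in for flushing the full buffer downstream
        }
        buf[count++] = b; // invariant preserved: count <= buf.length
      }
    }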
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientException.java
index 7f8d9707f9..107899a9c0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientException.java
@@ -38,7 +38,7 @@ public class RpcClientException extends RpcException {
* @param message message.
* @param cause that cause this exception
* @param cause the cause (can be retried by the {@link #getCause()} method).
- * (A null value is permitted, and indicates that the cause
+ *              (A <code>null</code> value is permitted, and indicates that the cause
* is nonexistent or unknown.)
*/
RpcClientException(final String message, final Throwable cause) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java
index 8141333d71..ac687050d7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java
@@ -40,7 +40,7 @@ public class RpcException extends IOException {
* @param message message.
* @param cause that cause this exception
* @param cause the cause (can be retried by the {@link #getCause()} method).
- * (A null value is permitted, and indicates that the cause
+ *              (A <code>null</code> value is permitted, and indicates that the cause
* is nonexistent or unknown.)
*/
RpcException(final String message, final Throwable cause) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java
index ce4aac54b6..31f62d4f06 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java
@@ -39,7 +39,7 @@ public RpcServerException(final String message) {
*
* @param message message.
* @param cause the cause (can be retried by the {@link #getCause()} method).
- * (A null value is permitted, and indicates that the cause
+ *              (A <code>null</code> value is permitted, and indicates that the cause
* is nonexistent or unknown.)
*/
public RpcServerException(final String message, final Throwable cause) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UnexpectedServerException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UnexpectedServerException.java
index f00948d5d5..c683010a88 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UnexpectedServerException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UnexpectedServerException.java
@@ -39,7 +39,7 @@ public class UnexpectedServerException extends RpcException {
* @param message message.
* @param cause that cause this exception
* @param cause the cause (can be retried by the {@link #getCause()} method).
- * (A null value is permitted, and indicates that the cause
+ *              (A <code>null</code> value is permitted, and indicates that the cause
* is nonexistent or unknown.)
*/
UnexpectedServerException(final String message, final Throwable cause) {
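All four constructors share the same nullable-cause contract; in use it looks like this (a sketch with an invented message):

    import java.net.ConnectException;
    import org.apache.hadoop.ipc.RpcServerException;

    public class CauseDemo {
      public static void main(String[] args) {
        ConnectException cause = new ConnectException("connection refused");
        RpcServerException e = new RpcServerException("call failed", cause);
        System.out.println(e.getCause() == cause); // true; null would mean unknown cause
      }
    }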
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java
index 196469be9d..3830a58865 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java
@@ -17,7 +17,7 @@
*/
/**
 This package provides a framework for metrics instrumentation and publication.
@@ -46,7 +46,7 @@ metrics from sources to sinks based on (per source/sink) configuration
 design document for architecture and implementation notes.
- org.apache.hadoop.metrics2.annotation
+ <code>org.apache.hadoop.metrics2.annotation</code>