diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 94285a4dfb..f7bf2b8703 100755 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -119,7 +119,7 @@ /** * Provides access to configuration parameters. * - *

Resources

+ *

Resources

* *

Configurations are specified by resources. A resource contains a set of * name/value pairs as XML data. Each resource is named by either a @@ -130,16 +130,16 @@ * *

Unless explicitly turned off, Hadoop by default specifies two * resources, loaded in-order from the classpath:

 - * core-default.xml: Read-only defaults for hadoop.
 - * core-site.xml: Site-specific configuration for a given hadoop
 + * core-default.xml: Read-only defaults for hadoop.
 + * core-site.xml: Site-specific configuration for a given hadoop
 * installation.
 *
* Applications may add additional resources, which are loaded * subsequent to these resources in the order they are added. * - *

Final Parameters

+ *

Final Parameters

* *

Configuration parameters may be declared final. * Once a resource declares a value final, no subsequently-loaded @@ -153,9 +153,9 @@ * </property> * * Administrators typically define parameters as final in - * core-site.xml for values that user applications may not alter. + * core-site.xml for values that user applications may not alter. * - *

Variable Expansion

+ *

Variable Expansion

* *

Value strings are first processed for variable expansion. The * available properties are:

    @@ -185,22 +185,22 @@ * </property> * * - *

    When conf.get("tempdir") is called, then ${basedir} + *

    When conf.get("tempdir") is called, then ${basedir} * will be resolved to another property in this Configuration, while - * ${user.name} would then ordinarily be resolved to the value + * ${user.name} would then ordinarily be resolved to the value * of the System property with that name. - *

    When conf.get("otherdir") is called, then ${env.BASE_DIR} - * will be resolved to the value of the ${BASE_DIR} environment variable. - * It supports ${env.NAME:-default} and ${env.NAME-default} notations. - * The former is resolved to "default" if ${NAME} environment variable is undefined + *

    When conf.get("otherdir") is called, then ${env.BASE_DIR} + * will be resolved to the value of the ${BASE_DIR} environment variable. + * It supports ${env.NAME:-default} and ${env.NAME-default} notations. + * The former is resolved to "default" if ${NAME} environment variable is undefined * or its value is empty. - * The latter behaves the same way only if ${NAME} is undefined. + * The latter behaves the same way only if ${NAME} is undefined. *

    By default, warnings will be given to any deprecated configuration * parameters and these are suppressible by configuring - * log4j.logger.org.apache.hadoop.conf.Configuration.deprecation in + * log4j.logger.org.apache.hadoop.conf.Configuration.deprecation in * log4j.properties file. * - *

    Tags

    + *

    Tags

    * *

    Optionally we can tag related properties together by using tag * attributes. System tags are defined by hadoop.tags.system property. Users @@ -220,9 +220,9 @@ * <tag>HDFS,SECURITY</tag> * </property> * - *

    Properties marked with tags can be retrieved with conf - * .getAllPropertiesByTag("HDFS") or conf.getAllPropertiesByTags - * (Arrays.asList("YARN","SECURITY")).

    + *

    Properties marked with tags can be retrieved with conf + * .getAllPropertiesByTag("HDFS") or conf.getAllPropertiesByTags + * (Arrays.asList("YARN","SECURITY")).
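A small, self-contained sketch of the variable-expansion rules documented in the Variable Expansion section above (the property names and values are made up purely for illustration):

  import org.apache.hadoop.conf.Configuration;

  public class ExpansionDemo {
    public static void main(String[] args) {
      Configuration conf = new Configuration(false);        // skip default resources
      conf.set("basedir", "/user/${user.name}");            // ${user.name} -> System property
      conf.set("tempdir", "${basedir}/tmp");                // ${basedir} -> another property
      conf.set("otherdir", "${env.BASE_DIR:-/tmp}/other");  // /tmp if BASE_DIR is unset or empty
      System.out.println(conf.get("tempdir"));              // e.g. /user/alice/tmp
      System.out.println(conf.get("otherdir"));
    }
  }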

    */ @InterfaceAudience.Public @InterfaceStability.Stable @@ -576,7 +576,7 @@ public static void addDeprecations(DeprecationDelta[] deltas) { * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, - * would lead to UnsupportedOperationException + * would lead to UnsupportedOperationException * * If a key is deprecated in favor of multiple keys, they are all treated as * aliases of each other, and setting any one of them resets all the others @@ -604,7 +604,7 @@ public static void addDeprecation(String key, String[] newKeys, * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, - * would lead to UnsupportedOperationException + * would lead to UnsupportedOperationException * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. @@ -624,7 +624,7 @@ public static void addDeprecation(String key, String newKey, * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, - * would lead to UnsupportedOperationException + * would lead to UnsupportedOperationException * * If a key is deprecated in favor of multiple keys, they are all treated as * aliases of each other, and setting any one of them resets all the others @@ -648,7 +648,7 @@ public static void addDeprecation(String key, String[] newKeys) { * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, - * would lead to UnsupportedOperationException + * would lead to UnsupportedOperationException * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 63b5bc7d94..7988ebb790 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -247,7 +247,7 @@ protected static synchronized Map getAllStatistics() { * The main factory method for creating a file system. Get a file system for * the URI's scheme and authority. The scheme of the uri * determines a configuration property name, - * fs.AbstractFileSystem.scheme.impl whose value names the + * fs.AbstractFileSystem.scheme.impl whose value names the * AbstractFileSystem class. * * The entire URI and conf is passed to the AbstractFileSystem factory method. 
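Returning to the addDeprecation/addDeprecations methods documented in the hunks above, a minimal sketch of how a deprecated key is aliased to its replacement (the key names are hypothetical; per the javadoc, deprecations must be registered before resources are loaded):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.conf.Configuration.DeprecationDelta;

  public class DeprecationDemo {
    public static void main(String[] args) {
      // Register before any Configuration loads its resources.
      Configuration.addDeprecations(new DeprecationDelta[] {
          new DeprecationDelta("myapp.old.key", "myapp.new.key")
      });
      Configuration conf = new Configuration();
      conf.set("myapp.old.key", "value");              // logs a deprecation warning
      System.out.println(conf.get("myapp.new.key"));   // "value", via the alias
    }
  }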
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java index 716c6c5004..586350d843 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java @@ -866,7 +866,7 @@ boolean apply(Path p) throws IOException { /** * Set replication for an existing file. - * Implement the abstract setReplication of FileSystem + * Implement the abstract setReplication of FileSystem * @param src file name * @param replication new replication * @throws IOException if an I/O error occurs. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java index 4820c5c304..5f3e5d9b8e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java @@ -453,7 +453,7 @@ private boolean isDirectory(Path f) } /** * Set replication for an existing file. - * Implement the abstract setReplication of FileSystem + * Implement the abstract setReplication of FileSystem * @param src file name * @param replication new replication * @throws IOException if an I/O error occurs. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index eb5983f098..a903e337de 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -1977,9 +1977,9 @@ public RemoteIterator listFiles( LocatedFileStatus curFile; /** - * Returns true if the iterator has more files. + * Returns true if the iterator has more files. * - * @return true if the iterator has more files. + * @return true if the iterator has more files. * @throws AccessControlException if not allowed to access next * file's status or locations * @throws FileNotFoundException if next file does not exist any more @@ -2071,34 +2071,34 @@ public LocatedFileStatus next() throws IOException { *
    *
    *
    - *
    ? + *
    ? *
    Matches any single character. * - *
    * + *
    * *
    Matches zero or more characters. * - *
    [abc] + *
    [abc] *
    Matches a single character from character set - * {a,b,c}. + * {a,b,c}. * - *
    [a-b] + *
    [a-b] *
    Matches a single character from the character range - * {a...b}. Note: character a must be - * lexicographically less than or equal to character b. + * {a...b}. Note: character a must be + * lexicographically less than or equal to character b. * - *
    [^a] + *
    [^a] *
    Matches a single char that is not from character set or range - * {a}. Note that the ^ character must occur + * {a}. Note that the ^ character must occur * immediately to the right of the opening bracket. * - *
    \c + *
    \c *
    Removes (escapes) any special meaning of character c. * - *
    {ab,cd} - *
    Matches a string from the string set {ab, cd} + *
    {ab,cd} + *
    Matches a string from the string set {ab, cd} * - *
    {ab,c{de,fh}} - *
    Matches a string from string set {ab, cde, cfh} + *
    {ab,c{de,fh}} + *
    Matches a string from string set {ab, cde, cfh} * *
    *
    diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 38ec611451..930abf0b5d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -2178,34 +2178,34 @@ public FileStatus[] listStatus(Path[] files, PathFilter filter) *
    *
    *
    - *
    ? + *
    ? *
    Matches any single character. * - *
    * + *
    * *
    Matches zero or more characters. * - *
    [abc] + *
    [abc] *
    Matches a single character from character set - * {a,b,c}. + * {a,b,c}. * - *
    [a-b] + *
    [a-b] *
    Matches a single character from the character range - * {a...b}. Note that character a must be - * lexicographically less than or equal to character b. + * {a...b}. Note that character a must be + * lexicographically less than or equal to character b. * - *
    [^a] + *
    [^a] *
    Matches a single character that is not from character set or range - * {a}. Note that the ^ character must occur + * {a}. Note that the ^ character must occur * immediately to the right of the opening bracket. * - *
    \c + *
    \c *
    Removes (escapes) any special meaning of character c. * - *
    {ab,cd} - *
    Matches a string from the string set {ab, cd} + *
    {ab,cd} + *
    Matches a string from the string set {ab, cd} * - *
    {ab,c{de,fh}} - *
    Matches a string from the string set {ab, cde, cfh} + *
    {ab,c{de,fh}} + *
    Matches a string from the string set {ab, cde, cfh} * *
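A rough sketch of the glob syntax summarized above, applied through FileSystem.globStatus (the directory layout is hypothetical):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class GlobDemo {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.getLocal(new Configuration());
      // Combines the set, wildcard and character-range syntax from the table above.
      FileStatus[] matches =
          fs.globStatus(new Path("/data/logs/{2022,2023}/*/part-[0-9]*"));
      if (matches != null) {
        for (FileStatus st : matches) {
          System.out.println(st.getPath());
        }
      }
    }
  }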
    *
    diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RemoteIterator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RemoteIterator.java index 9238c3f6fb..06b7728ae3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RemoteIterator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RemoteIterator.java @@ -24,9 +24,9 @@ */ public interface RemoteIterator { /** - * Returns true if the iteration has more elements. + * Returns true if the iteration has more elements. * - * @return true if the iterator has more elements. + * @return true if the iterator has more elements. * @throws IOException if any IO error occurs */ boolean hasNext() throws IOException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java index 4b1dc7513d..f2c8b76e2a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/EnumSetWritable.java @@ -59,10 +59,10 @@ public boolean add(E e) { } /** - * Construct a new EnumSetWritable. If the value argument is null or - * its size is zero, the elementType argument must not be null. If - * the argument value's size is bigger than zero, the argument - * elementType is not be used. + * Construct a new EnumSetWritable. If the value argument is null or + * its size is zero, the elementType argument must not be null. If + * the argument value's size is bigger than zero, the argument + * elementType is not be used. * * @param value enumSet value. * @param elementType elementType. @@ -72,7 +72,7 @@ public EnumSetWritable(EnumSet value, Class elementType) { } /** - * Construct a new EnumSetWritable. Argument value should not be null + * Construct a new EnumSetWritable. Argument value should not be null * or empty. * * @param value enumSet value. @@ -83,10 +83,10 @@ public EnumSetWritable(EnumSet value) { /** * reset the EnumSetWritable with specified - * value and elementType. If the value argument - * is null or its size is zero, the elementType argument must not be - * null. If the argument value's size is bigger than zero, the - * argument elementType is not be used. + * value and elementType. If the value argument + * is null or its size is zero, the elementType argument must not be + * null. If the argument value's size is bigger than zero, the + * argument elementType is not be used. * * @param value enumSet Value. * @param elementType elementType. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java index 29c06a01ad..831931bdac 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java @@ -401,8 +401,8 @@ static Method getStaticProtobufMethod(Class declaredClass, String method, } /** - * Find and load the class with given name className by first finding - * it in the specified conf. If the specified conf is null, + * Find and load the class with given name className by first finding + * it in the specified conf. If the specified conf is null, * try load it directly. * * @param conf configuration. 
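As a short illustration of the RemoteIterator contract described above (hasNext/next may throw IOException), iterating a recursive file listing; the path is arbitrary:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.LocatedFileStatus;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.RemoteIterator;

  public class RemoteIteratorDemo {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.getLocal(new Configuration());
      // Unlike java.util.Iterator, hasNext() and next() can throw IOException,
      // so callers have to be prepared for I/O failures inside the loop.
      RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path("/tmp"), true);
      while (it.hasNext()) {
        System.out.println(it.next().getPath());
      }
    }
  }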
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java index 9d6727c159..325820d11c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java @@ -91,19 +91,19 @@ *

    The actual compression algorithm used to compress key and/or values can be * specified by using the appropriate {@link CompressionCodec}.

    * - *

    The recommended way is to use the static createWriter methods + *

    The recommended way is to use the static createWriter methods * provided by the SequenceFile to chose the preferred format.

    * *

    The {@link SequenceFile.Reader} acts as the bridge and can read any of the * above SequenceFile formats.
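A hedged sketch of the createWriter/Reader round trip referred to above (the file path and records are arbitrary):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.SequenceFile;
  import org.apache.hadoop.io.Text;

  public class SequenceFileDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Path file = new Path("/tmp/demo.seq");
      try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
          SequenceFile.Writer.file(file),
          SequenceFile.Writer.keyClass(IntWritable.class),
          SequenceFile.Writer.valueClass(Text.class),
          SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK))) {
        writer.append(new IntWritable(1), new Text("one"));
      }
      try (SequenceFile.Reader reader =
          new SequenceFile.Reader(conf, SequenceFile.Reader.file(file))) {
        IntWritable key = new IntWritable();
        Text value = new Text();
        while (reader.next(key, value)) {
          System.out.println(key + "\t" + value);
        }
      }
    }
  }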

    * - *

    SequenceFile Formats

    + *

    SequenceFile Formats

    * *

    Essentially there are 3 different formats for SequenceFiles * depending on the CompressionType specified. All of them share a * common header described below. * - *

    + * *
      *
    • * version - 3 bytes of magic header SEQ, followed by 1 byte of actual @@ -136,7 +136,7 @@ *
    • *
    * - *
    Uncompressed SequenceFile Format
    + *

    Uncompressed SequenceFile Format

    *
      *
    • * Header @@ -155,7 +155,7 @@ *
    • *
    * - *
    Record-Compressed SequenceFile Format
    + *

    Record-Compressed SequenceFile Format

    *
      *
    • * Header diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java index 61e88d80d8..116a74963a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java @@ -38,13 +38,13 @@ *

      * The decompression requires large amounts of memory. Thus you should call the * {@link #close() close()} method as soon as possible, to force - * CBZip2InputStream to release the allocated memory. See + * CBZip2InputStream to release the allocated memory. See * {@link CBZip2OutputStream CBZip2OutputStream} for information about memory * usage. *

      * *

      - * CBZip2InputStream reads bytes from the compressed source stream via + * CBZip2InputStream reads bytes from the compressed source stream via * the single byte {@link java.io.InputStream#read() read()} method exclusively. * Thus you should consider to use a buffered source stream. *

      @@ -279,7 +279,7 @@ private void makeMaps() { * specified stream. * *

      - * Although BZip2 headers are marked with the magic "Bz" this + * Although BZip2 headers are marked with the magic "Bz" this * constructor expects the next byte in the stream to be the first one after * the magic. Thus callers have to skip the first two bytes. Otherwise this * constructor will throw an exception. @@ -289,7 +289,7 @@ private void makeMaps() { * @throws IOException * if the stream content is malformed or an I/O error occurs. * @throws NullPointerException - * if in == null + * if in == null */ public CBZip2InputStream(final InputStream in, READ_MODE readMode) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java index 50bdddb813..f94d1387eb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java @@ -37,7 +37,7 @@ *

      * The compression requires large amounts of memory. Thus you should call the * {@link #close() close()} method as soon as possible, to force - * CBZip2OutputStream to release the allocated memory. + * CBZip2OutputStream to release the allocated memory. *

      * *

      @@ -64,64 +64,64 @@ * <code>65k + (5 * blocksize)</code>. * * - * + *
      * - * + * * * - * * * - * - * - * + * + * + * * * - * - * - * + * + * + * * * - * - * - * + * + * + * * * - * - * - * + * + * + * * * - * - * - * + * + * + * * * - * - * - * + * + * + * * * - * - * - * + * + * + * * * - * - * - * + * + * + * * * - * - * - * + * + * + * * *
 * Memory usage by blocksize:
 *
 * Blocksize   Compression memory usage   Decompression memory usage
 * 100k        1300k                      565k
 * 200k        2200k                      1065k
 * 300k        3100k                      1565k
 * 400k        4000k                      2065k
 * 500k        4900k                      2565k
 * 600k        5800k                      3065k
 * 700k        6700k                      3565k
 * 800k        7600k                      4065k
 * 900k        8500k                      4565k
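A rough sketch of how the blocksize trade-off above plays out in code (the payload size and output path are made up; in practice these raw streams are usually reached through BZip2Codec rather than directly):

  import java.io.FileOutputStream;
  import java.io.OutputStream;
  import org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream;

  public class BZip2BlockSizeDemo {
    public static void main(String[] args) throws Exception {
      long inputLength = 250_000;                                        // hypothetical size
      int blockSize = CBZip2OutputStream.chooseBlockSize(inputLength);   // 1..9 (x100k)
      try (OutputStream raw = new FileOutputStream("/tmp/demo.bz2")) {
        // Per the constructor javadoc below, the caller writes the "BZ" magic first.
        raw.write('B');
        raw.write('Z');
        CBZip2OutputStream bzOut = new CBZip2OutputStream(raw, blockSize);
        bzOut.write("hello bzip2".getBytes("UTF-8"));
        bzOut.close();   // finishes the final block and flushes
      }
    }
  }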
      * *

      - * For decompression CBZip2InputStream allocates less memory if the + * For decompression CBZip2InputStream allocates less memory if the * bzipped input is smaller than one block. *

      * @@ -137,12 +137,12 @@ public class CBZip2OutputStream extends OutputStream implements BZip2Constants { /** - * The minimum supported blocksize == 1. + * The minimum supported blocksize == 1. */ public static final int MIN_BLOCKSIZE = 1; /** - * The maximum supported blocksize == 9. + * The maximum supported blocksize == 9. */ public static final int MAX_BLOCKSIZE = 9; @@ -566,12 +566,12 @@ private static void hbMakeCodeLengths(final byte[] len, final int[] freq, * * @return The blocksize, between {@link #MIN_BLOCKSIZE} and * {@link #MAX_BLOCKSIZE} both inclusive. For a negative - * inputLength this method returns MAX_BLOCKSIZE + * inputLength this method returns MAX_BLOCKSIZE * always. * * @param inputLength * The length of the data which will be compressed by - * CBZip2OutputStream. + * CBZip2OutputStream. */ public static int chooseBlockSize(long inputLength) { return (inputLength > 0) ? (int) Math @@ -579,11 +579,11 @@ public static int chooseBlockSize(long inputLength) { } /** - * Constructs a new CBZip2OutputStream with a blocksize of 900k. + * Constructs a new CBZip2OutputStream with a blocksize of 900k. * *

      * Attention: The caller is resonsible to write the two BZip2 magic - * bytes "BZ" to the specified stream prior to calling this + * bytes "BZ" to the specified stream prior to calling this * constructor. *

      * @@ -600,11 +600,11 @@ public CBZip2OutputStream(final OutputStream out) throws IOException { } /** - * Constructs a new CBZip2OutputStream with specified blocksize. + * Constructs a new CBZip2OutputStream with specified blocksize. * *

      * Attention: The caller is resonsible to write the two BZip2 magic - * bytes "BZ" to the specified stream prior to calling this + * bytes "BZ" to the specified stream prior to calling this * constructor. *

      * diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java index 739788fa5f..e98980f0f2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java @@ -57,7 +57,7 @@ public synchronized int compress(byte[] b, int off, int len) /** * reinit the compressor with the given configuration. It will reset the * compressor's compression level and compression strategy. Different from - * ZlibCompressor, BuiltInZlibDeflater only support three + * ZlibCompressor, BuiltInZlibDeflater only support three * kind of compression strategy: FILTERED, HUFFMAN_ONLY and DEFAULT_STRATEGY. * It will use DEFAULT_STRATEGY as default if the configured compression * strategy is not supported. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Chunk.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Chunk.java index 05e3d48a46..ec508c0204 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Chunk.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Chunk.java @@ -219,8 +219,8 @@ static public class ChunkEncoder extends OutputStream { /** * The number of valid bytes in the buffer. This value is always in the - * range 0 through buf.length; elements buf[0] - * through buf[count-1] contain valid byte data. + * range 0 through buf.length; elements buf[0] + * through buf[count-1] contain valid byte data. */ private int count; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientException.java index 7f8d9707f9..107899a9c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientException.java @@ -38,7 +38,7 @@ public class RpcClientException extends RpcException { * @param message message. * @param cause that cause this exception * @param cause the cause (can be retried by the {@link #getCause()} method). - * (A null value is permitted, and indicates that the cause + * (A null value is permitted, and indicates that the cause * is nonexistent or unknown.) */ RpcClientException(final String message, final Throwable cause) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java index 8141333d71..ac687050d7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java @@ -40,7 +40,7 @@ public class RpcException extends IOException { * @param message message. * @param cause that cause this exception * @param cause the cause (can be retried by the {@link #getCause()} method). - * (A null value is permitted, and indicates that the cause + * (A null value is permitted, and indicates that the cause * is nonexistent or unknown.) 
*/ RpcException(final String message, final Throwable cause) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java index ce4aac54b6..31f62d4f06 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java @@ -39,7 +39,7 @@ public RpcServerException(final String message) { * * @param message message. * @param cause the cause (can be retried by the {@link #getCause()} method). - * (A null value is permitted, and indicates that the cause + * (A null value is permitted, and indicates that the cause * is nonexistent or unknown.) */ public RpcServerException(final String message, final Throwable cause) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UnexpectedServerException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UnexpectedServerException.java index f00948d5d5..c683010a88 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UnexpectedServerException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UnexpectedServerException.java @@ -39,7 +39,7 @@ public class UnexpectedServerException extends RpcException { * @param message message. * @param cause that cause this exception * @param cause the cause (can be retried by the {@link #getCause()} method). - * (A null value is permitted, and indicates that the cause + * (A null value is permitted, and indicates that the cause * is nonexistent or unknown.) */ UnexpectedServerException(final String message, final Throwable cause) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java index 196469be9d..3830a58865 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java @@ -17,7 +17,7 @@ */ /** -

      Metrics 2.0

      +

      Metrics 2.0

      -

      Overview

      +

      Overview

      This package provides a framework for metrics instrumentation and publication.

      @@ -46,7 +46,7 @@ metrics from sources to sinks based on (per source/sink) configuration design document for architecture and implementation notes.

      -

      Sub-packages

      +

      Sub-packages

      org.apache.hadoop.metrics2.annotation
      Public annotation interfaces for simpler metrics instrumentation. @@ -84,9 +84,9 @@ usually does not need to reference any class here.
      -

      Getting started

      -

      Implementing metrics sources

      - +

      Getting started

      +

      Implementing metrics sources

      +
      @@ -153,7 +153,7 @@ record named "CacheStat" for reporting a number of statistics relating to allowing generated metrics names and multiple records. In fact, the annotation interface is implemented with the MetricsSource interface internally.

      -

      Implementing metrics sinks

      +

      Implementing metrics sinks

         public class MySink implements MetricsSink {
           public void putMetrics(MetricsRecord record) {
      @@ -187,7 +187,7 @@ they need to be hooked up to a metrics system. In this case (and most
         
         DefaultMetricsSystem.initialize("test"); // called once per application
         DefaultMetricsSystem.register(new MyStat());
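A sketch of an annotated source wired up the way the snippet above describes; the source name, context and counter are invented, and registration goes through the MetricsSystem instance API:

  import org.apache.hadoop.metrics2.annotation.Metric;
  import org.apache.hadoop.metrics2.annotation.Metrics;
  import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
  import org.apache.hadoop.metrics2.lib.MutableCounterLong;

  @Metrics(name = "DemoStat", about = "Demo metrics", context = "test")
  public class DemoStat {
    @Metric("Number of demo operations") MutableCounterLong numOps;

    public static void main(String[] args) {
      DefaultMetricsSystem.initialize("test");   // once per application
      DemoStat stat = DefaultMetricsSystem.instance()
          .register("DemoStat", "Demo metrics", new DemoStat());
      stat.numOps.incr();                        // published with the next snapshot
    }
  }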
      -

      Metrics system configuration

      +

      Metrics system configuration

      Sinks are usually specified in a configuration file, say, "hadoop-metrics2-test.properties", as:

      @@ -209,7 +209,7 @@ identify a particular sink instance. The asterisk (*) can be for more examples.

      -

      Metrics Filtering

      +

      Metrics Filtering

      One of the features of the default metrics system is metrics filtering configuration by source, context, record/tags and metrics. The least expensive way to filter out metrics would be at the source level, e.g., @@ -241,7 +241,7 @@ identify a particular sink instance. The asterisk (*) can be level, respectively. Filters can be combined to optimize the filtering efficiency.

      -

      Metrics instrumentation strategy

      +

      Metrics instrumentation strategy

      In previous examples, we showed a minimal example to use the metrics framework. In a larger system (like Hadoop) that allows @@ -279,7 +279,7 @@ instrumentation interface (incrCounter0 etc.) that allows different -

      Migration from previous system

      +

      Migration from previous system

Users of the previous metrics system would notice the lack of context prefix in the configuration examples. The new metrics system decouples the concept for context (for grouping) with the @@ -289,7 +289,7 @@ metrics system decouples the concept for context (for grouping) with the configure an implementation instance per context, even if you have a backend that can handle multiple contexts (file, ganglia etc.):

      -
      Implementing metrics sources
      +
      @@ -311,7 +311,7 @@ backend that can handle multiple contexts (file, gangalia etc.):

      In the new metrics system, you can simulate the previous behavior by using the context option in the sink options like the following:

      -
      Migration from previous system
      +
      diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index a647bb0410..3a4f4fd37d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -83,9 +83,9 @@ public class NetUtils { /** * Get the socket factory for the given class according to its * configuration parameter - * hadoop.rpc.socket.factory.class.<ClassName>. When no + * hadoop.rpc.socket.factory.class.<ClassName>. When no * such parameter exists then fall back on the default socket factory as - * configured by hadoop.rpc.socket.factory.class.default. If + * configured by hadoop.rpc.socket.factory.class.default. If * this default socket factory is not configured, then fall back on the JVM * default socket factory. * @@ -111,7 +111,7 @@ public static SocketFactory getSocketFactory(Configuration conf, /** * Get the default socket factory as specified by the configuration - * parameter hadoop.rpc.socket.factory.default + * parameter hadoop.rpc.socket.factory.default * * @param conf the configuration * @return the default socket factory as specified in the configuration or diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AccessControlException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AccessControlException.java index d0a3620d6d..1ed121f961 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AccessControlException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AccessControlException.java @@ -48,10 +48,10 @@ public AccessControlException() { /** * Constructs a new exception with the specified cause and a detail - * message of (cause==null ? null : cause.toString()) (which - * typically contains the class and detail message of cause). + * message of (cause==null ? null : cause.toString()) (which + * typically contains the class and detail message of cause). * @param cause the cause (which is saved for later retrieval by the - * {@link #getCause()} method). (A null value is + * {@link #getCause()} method). (A null value is * permitted, and indicates that the cause is nonexistent or * unknown.) */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AuthorizationException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AuthorizationException.java index 79c7d1814d..e9c3323bb5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AuthorizationException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AuthorizationException.java @@ -44,10 +44,10 @@ public AuthorizationException(String message) { /** * Constructs a new exception with the specified cause and a detail - * message of (cause==null ? null : cause.toString()) (which - * typically contains the class and detail message of cause). + * message of (cause==null ? null : cause.toString()) (which + * typically contains the class and detail message of cause). * @param cause the cause (which is saved for later retrieval by the - * {@link #getCause()} method). (A null value is + * {@link #getCause()} method). 
(A null value is * permitted, and indicates that the cause is nonexistent or * unknown.) */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java index d95878b567..105a8cdcef 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java @@ -54,7 +54,7 @@ * line arguments, enabling applications to easily specify a namenode, a * ResourceManager, additional configuration resources etc. * - *

      Generic Options

      + *

      Generic Options

      * *

      The supported generic options are:

      *
      diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java index c99290bc3d..e001d6775c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java @@ -26,7 +26,7 @@ import org.slf4j.Logger; /** - * This is a wrap class of a ReadLock. + * This is a wrap class of a ReadLock. * It extends the class {@link InstrumentedLock}, and can be used to track * whether a specific read lock is being held for too long and log * warnings if so. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java index 758f1ff87c..caceb31cfb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java @@ -28,7 +28,7 @@ /** * This is a wrap class of a {@link ReentrantReadWriteLock}. * It implements the interface {@link ReadWriteLock}, and can be used to - * create instrumented ReadLock and WriteLock. + * create instrumented ReadLock and WriteLock. */ @InterfaceAudience.Private @InterfaceStability.Unstable diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java index 4637b5efe5..f1cb5feb52 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java @@ -26,7 +26,7 @@ import org.slf4j.Logger; /** - * This is a wrap class of a WriteLock. + * This is a wrap class of a WriteLock. * It extends the class {@link InstrumentedLock}, and can be used to track * whether a specific write lock is being held for too long and log * warnings if so. 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java index dc13697f15..f026585be2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownThreadsHelper.java @@ -37,8 +37,8 @@ public class ShutdownThreadsHelper { /** * @param thread {@link Thread to be shutdown} - * @return true if the thread is successfully interrupted, - * false otherwise + * @return true if the thread is successfully interrupted, + * false otherwise */ public static boolean shutdownThread(Thread thread) { return shutdownThread(thread, SHUTDOWN_WAIT_MS); @@ -48,8 +48,8 @@ public static boolean shutdownThread(Thread thread) { * @param thread {@link Thread to be shutdown} * @param timeoutInMilliSeconds time to wait for thread to join after being * interrupted - * @return true if the thread is successfully interrupted, - * false otherwise + * @return true if the thread is successfully interrupted, + * false otherwise */ public static boolean shutdownThread(Thread thread, long timeoutInMilliSeconds) { @@ -71,8 +71,8 @@ public static boolean shutdownThread(Thread thread, * shutdownExecutorService. * * @param service {@link ExecutorService to be shutdown} - * @return true if the service is terminated, - * false otherwise + * @return true if the service is terminated, + * false otherwise * @throws InterruptedException if the thread is interrupted. */ public static boolean shutdownExecutorService(ExecutorService service) @@ -87,8 +87,8 @@ public static boolean shutdownExecutorService(ExecutorService service) * @param timeoutInMs time to wait for {@link * ExecutorService#awaitTermination(long, java.util.concurrent.TimeUnit)} * calls in milli seconds. - * @return true if the service is terminated, - * false otherwise + * @return true if the service is terminated, + * false otherwise * @throws InterruptedException if the thread is interrupted. */ public static boolean shutdownExecutorService(ExecutorService service, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index 14a7458157..95e683e837 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -245,7 +245,7 @@ public static String uriToString(URI[] uris){ /** * @param str * The string array to be parsed into an URI array. - * @return null if str is null, else the URI array + * @return null if str is null, else the URI array * equivalent to str. * @throws IllegalArgumentException * If any string in str violates RFC 2396. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java index 70ae639091..4234f24006 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java @@ -53,18 +53,18 @@ * The benchmark supports three authentication methods: *
        *
      1. simple - no authentication. In order to enter this mode - * the configuration file core-site.xml should specify - * hadoop.security.authentication = simple. + * the configuration file core-site.xml should specify + * hadoop.security.authentication = simple. * This is the default mode.
      2. *
      3. kerberos - kerberos authentication. In order to enter this mode - * the configuration file core-site.xml should specify - * hadoop.security.authentication = kerberos and + * the configuration file core-site.xml should specify + * hadoop.security.authentication = kerberos and * the argument string should provide qualifying - * keytabFile and userName parameters. + * keytabFile and userName parameters. *
      4. delegation token - authentication using delegation token. * In order to enter this mode the benchmark should provide all the * mentioned parameters for kerberos authentication plus the - * useToken argument option. + * useToken argument option. *
      * Input arguments: *
        diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java index 1d002b6e4c..a69ae329c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java @@ -503,7 +503,7 @@ public synchronized int read(ByteBuffer buf) throws IOException { * byte buffer to write bytes to. If checksums are not required, buf * can have any number of bytes remaining, otherwise there must be a * multiple of the checksum chunk size remaining. - * @return max(min(totalBytesRead, len) - offsetFromChunkBoundary, 0) + * @return max(min(totalBytesRead, len) - offsetFromChunkBoundary, 0) * that is, the the number of useful bytes (up to the amount * requested) readable from the buffer by the client. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index a11fa1bac2..1ec63e0ca8 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -107,7 +107,7 @@ synchronized List poll(int numBlocks) { } /** - * Returns true if the queue contains the specified element. + * Returns true if the queue contains the specified element. */ synchronized boolean contains(E e) { return blockq.contains(e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index 7bf5879971..2118b1d03f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -369,7 +369,7 @@ String getFullPathName(Long nodeId) { } /** - * Get the key name for an encryption zone. Returns null if iip is + * Get the key name for an encryption zone. Returns null if iip is * not within an encryption zone. *

        * Called while holding the FSDirectory lock. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index a48cfdbe59..bcf56a8644 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -1120,7 +1120,7 @@ private void stopHttpServer() { *

      • {@link StartupOption#IMPORT IMPORT} - import checkpoint
      • *
      * The option is passed via configuration field: - * dfs.namenode.startup + * dfs.namenode.startup * * The conf will be modified to reflect the actual ports on which * the NameNode is up and running if the user passes the port as diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffList.java index 80ef538000..7ad3981d9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffList.java @@ -100,7 +100,7 @@ public List getMinListForRange(int startIndex, int endIndex, * @param index index of the element to return * @return the element at the specified position in this list * @throws IndexOutOfBoundsException if the index is out of range - * (index < 0 || index >= size()) + * (index < 0 || index >= size()) */ T get(int index); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java index fbeea0f673..6586d42f92 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java @@ -37,20 +37,20 @@ /** * This is the tool for analyzing file sizes in the namespace image. In order to - * run the tool one should define a range of integers [0, maxSize] by - * specifying maxSize and a step. The range of integers is - * divided into segments of size step: - * [0, s1, ..., sn-1, maxSize], and the visitor + * run the tool one should define a range of integers [0, maxSize] by + * specifying maxSize and a step. The range of integers is + * divided into segments of size step: + * [0, s1, ..., sn-1, maxSize], and the visitor * calculates how many files in the system fall into each segment - * [si-1, si). Note that files larger than - * maxSize always fall into the very last segment. + * [si-1, si). Note that files larger than + * maxSize always fall into the very last segment. * *

      Input.

      *
        - *
      • filename specifies the location of the image file;
      • - *
      • maxSize determines the range [0, maxSize] of files + *
      • filename specifies the location of the image file;
      • + *
      • maxSize determines the range [0, maxSize] of files * sizes considered by the visitor;
      • - *
      • step the range is divided into segments of size step.
      • + *
      • step the range is divided into segments of size step.
      • *
      * *

      Output.

      The output file is formatted as a tab separated two column diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java index 7dcc29998f..a7e93fe586 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java @@ -28,20 +28,20 @@ *

      Description.

      * This is the tool for analyzing file sizes in the namespace image. * In order to run the tool one should define a range of integers - * [0, maxSize] by specifying maxSize and a step. - * The range of integers is divided into segments of size step: - * [0, s1, ..., sn-1, maxSize], + * [0, maxSize] by specifying maxSize and a step. + * The range of integers is divided into segments of size step: + * [0, s1, ..., sn-1, maxSize], * and the visitor calculates how many files in the system fall into - * each segment [si-1, si). - * Note that files larger than maxSize always fall into + * each segment [si-1, si). + * Note that files larger than maxSize always fall into * the very last segment. * *

      Input.

      *
        - *
      • filename specifies the location of the image file;
      • - *
      • maxSize determines the range [0, maxSize] of files + *
      • filename specifies the location of the image file;
      • + *
      • maxSize determines the range [0, maxSize] of files * sizes considered by the visitor;
      • - *
      • step the range is divided into segments of size step.
      • + *
      • step the range is divided into segments of size step.
      • *
      * *

      Output.

      diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java index 905e3bf44f..7264e182bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java @@ -110,7 +110,7 @@ public void tearDown() throws IOException { * Name-node should stay in automatic safe-mode. *
    • Enter safe mode manually.
    • *
    • Start the data-node.
    • - *
    • Wait longer than dfs.namenode.safemode.extension and + *
    • Wait longer than dfs.namenode.safemode.extension and * verify that the name-node is still in safe mode.
    • * * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java index 106c515d49..788e91b025 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReadOnlySharedStorage.java @@ -205,7 +205,7 @@ private void validateNumberReplicas(int expectedReplicas) throws IOException { } /** - * Verify that READ_ONLY_SHARED replicas are not counted towards the overall + * Verify that READ_ONLY_SHARED replicas are not counted towards the overall * replication count, but are included as replica locations returned to clients for reads. */ @Test @@ -221,7 +221,7 @@ public void testReplicaCounting() throws Exception { } /** - * Verify that the NameNode is able to still use READ_ONLY_SHARED replicas even + * Verify that the NameNode is able to still use READ_ONLY_SHARED replicas even * when the single NORMAL replica is offline (and the effective replication count is 0). */ @Test @@ -253,7 +253,7 @@ public void testNormalReplicaOffline() throws Exception { } /** - * Verify that corrupt READ_ONLY_SHARED replicas aren't counted + * Verify that corrupt READ_ONLY_SHARED replicas aren't counted * towards the corrupt replicas total. */ @Test diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/AMPreemptionPolicy.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/AMPreemptionPolicy.java index 85211f958d..a49700d8e5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/AMPreemptionPolicy.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/AMPreemptionPolicy.java @@ -109,7 +109,7 @@ public abstract class Context { * TaskId}. Assigning a null is akin to remove all previous checkpoints for * this task. * @param taskId TaskID - * @param cid Checkpoint to assign or null to remove it. + * @param cid Checkpoint to assign or null to remove it. */ public void setCheckpointID(TaskId taskId, TaskCheckpointID cid); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java index 3932e5849e..a89f1f1cee 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputFormat.java @@ -185,7 +185,7 @@ public static Path getOutputPath(JobConf conf) { * is {@link FileOutputCommitter}. If OutputCommitter is not * a FileOutputCommitter, the task's temporary output * directory is same as {@link #getOutputPath(JobConf)} i.e. - * ${mapreduce.output.fileoutputformat.outputdir}$

      + * ${mapreduce.output.fileoutputformat.outputdir}$

      * *

      Some applications need to create/write-to side-files, which differ from * the actual job-outputs. @@ -194,27 +194,27 @@ public static Path getOutputPath(JobConf conf) { * (running simultaneously e.g. speculative tasks) trying to open/write-to the * same file (path) on HDFS. Hence the application-writer will have to pick * unique names per task-attempt (e.g. using the attemptid, say - * attempt_200709221812_0001_m_000000_0), not just per TIP.

      + * attempt_200709221812_0001_m_000000_0), not just per TIP.

      * *

      To get around this the Map-Reduce framework helps the application-writer * out by maintaining a special - * ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} + * ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} * sub-directory for each task-attempt on HDFS where the output of the * task-attempt goes. On successful completion of the task-attempt the files - * in the ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} (only) - * are promoted to ${mapreduce.output.fileoutputformat.outputdir}. Of course, the + * in the ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} (only) + * are promoted to ${mapreduce.output.fileoutputformat.outputdir}. Of course, the * framework discards the sub-directory of unsuccessful task-attempts. This * is completely transparent to the application.

      * *

      The application-writer can take advantage of this by creating any - * side-files required in ${mapreduce.task.output.dir} during execution + * side-files required in ${mapreduce.task.output.dir} during execution * of his reduce-task i.e. via {@link #getWorkOutputPath(JobConf)}, and the * framework will move them out similarly - thus she doesn't have to pick * unique paths per task-attempt.

      * - *

      Note: the value of ${mapreduce.task.output.dir} during + *

      Note: the value of ${mapreduce.task.output.dir} during * execution of a particular task-attempt is actually - * ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_{$taskid}, and this value is + * ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_{$taskid}, and this value is * set by the map-reduce framework. So, just create any side-files in the * path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce * task to take advantage of this feature.
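As a quick illustration of the side-file pattern described above (old org.apache.hadoop.mapred API), here is a hedged sketch; getWorkOutputPath(JobConf) is the helper named in this javadoc, while the file name "extra-output" and the byte payload are purely illustrative:

  // Sketch: write a side-file under the task's work output directory so that, on successful
  // completion, the framework promotes it to ${mapreduce.output.fileoutputformat.outputdir}.
  import java.io.IOException;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.mapred.FileOutputFormat;
  import org.apache.hadoop.mapred.JobConf;

  public class SideFileWriter {
    public static Path writeSideFile(JobConf conf, byte[] data) throws IOException {
      Path workDir = FileOutputFormat.getWorkOutputPath(conf); // task-attempt scoped directory
      Path sideFile = new Path(workDir, "extra-output");       // already unique per attempt
      FileSystem fs = sideFile.getFileSystem(conf);
      try (FSDataOutputStream out = fs.create(sideFile, false)) {
        out.write(data);
      }
      return sideFile;
    }
  }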

      diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java index db398e8dbd..d6d3c9ebfa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java @@ -1873,8 +1873,8 @@ public String getJobEndNotificationURI() { * Set the uri to be invoked in-order to send a notification after the job * has completed (success/failure). * - *

      The uri can contain 2 special parameters: $jobId and - * $jobStatus. Those, if present, are replaced by the job's + *

      The uri can contain 2 special parameters: $jobId and + * $jobStatus. Those, if present, are replaced by the job's * identifier and completion-status respectively.
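A minimal sketch of wiring this up; setJobEndNotificationURI is the setter this hunk documents, while the host, path and query parameter names are illustrative only:

  import org.apache.hadoop.mapred.JobConf;

  public class NotificationSetup {
    public static void addJobEndNotification(JobConf conf) {
      // $jobId and $jobStatus are substituted by the framework when the notification fires.
      conf.setJobEndNotificationURI(
          "http://workflow.example.com/notify?jobid=$jobId&status=$jobStatus");
    }
  }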

      * *

      This is typically used by application-writers to implement chaining of diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunnable.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunnable.java index 7aa4f336ae..e5f585e0fb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunnable.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapRunnable.java @@ -37,7 +37,7 @@ public interface MapRunnable extends JobConfigurable { /** - * Start mapping input <key, value> pairs. + * Start mapping input <key, value> pairs. * *

      Mapping of input records to output records is complete when this method * returns.
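A minimal MapRunnable sketch consistent with that contract (an identity mapping; the LongWritable/Text types are an assumption for illustration):

  import java.io.IOException;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapred.MapRunnable;
  import org.apache.hadoop.mapred.OutputCollector;
  import org.apache.hadoop.mapred.RecordReader;
  import org.apache.hadoop.mapred.Reporter;

  public class IdentityMapRunner
      implements MapRunnable<LongWritable, Text, LongWritable, Text> {
    @Override
    public void configure(JobConf job) {
      // nothing to configure in this sketch
    }

    @Override
    public void run(RecordReader<LongWritable, Text> input,
                    OutputCollector<LongWritable, Text> output,
                    Reporter reporter) throws IOException {
      LongWritable key = input.createKey();
      Text value = input.createValue();
      while (input.next(key, value)) {   // pull <key, value> pairs until the split is exhausted
        output.collect(key, value);
        reporter.progress();
      }
      // returning from run() signals that mapping of this split is complete
    }
  }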

      diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/Job.java index fd078372fd..0b1a9786ca 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/Job.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/jobcontrol/Job.java @@ -143,7 +143,7 @@ protected synchronized void setState(int state) { * is waiting to run, not during or afterwards. * * @param dependingJob Job that this Job depends on. - * @return true if the Job was added. + * @return true if the Job was added. */ public synchronized boolean addDependingJob(Job dependingJob) { return super.addDependingJob(dependingJob); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputFormat.java index 40690e7541..226363ac8c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeInputFormat.java @@ -38,10 +38,10 @@ * and partitioned the same way. * * A user may define new join types by setting the property - * mapred.join.define.<ident> to a classname. In the expression - * mapred.join.expr, the identifier will be assumed to be a + * mapred.join.define.<ident> to a classname. In the expression + * mapred.join.expr, the identifier will be assumed to be a * ComposableRecordReader. - * mapred.join.keycomparator can be a classname used to compare keys + * mapred.join.keycomparator can be a classname used to compare keys * in the join. * @see #setFormat * @see JoinRecordReader @@ -66,9 +66,9 @@ public CompositeInputFormat() { } * class ::= @see java.lang.Class#forName(java.lang.String) * path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String) * } - * Reads expression from the mapred.join.expr property and - * user-supplied join types from mapred.join.define.<ident> - * types. Paths supplied to tbl are given as input paths to the + * Reads expression from the mapred.join.expr property and + * user-supplied join types from mapred.join.define.<ident> + * types. Paths supplied to tbl are given as input paths to the * InputFormat class listed. * @see #compose(java.lang.String, java.lang.Class, java.lang.String...) 
*/ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java index 0684268d2d..1bb0745d91 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java @@ -61,8 +61,8 @@ public abstract class CompositeRecordReader< protected abstract boolean combine(Object[] srcs, TupleWritable value); /** - * Create a RecordReader with capacity children to position - * id in the parent reader. + * Create a RecordReader with capacity children to position + * id in the parent reader. * The id of a root CompositeRecordReader is -1 by convention, but relying * on this is not recommended. */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OverrideRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OverrideRecordReader.java index 1671e6e895..d36b776a94 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OverrideRecordReader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/OverrideRecordReader.java @@ -31,7 +31,7 @@ /** * Prefer the "rightmost" data source for this key. - * For example, override(S1,S2,S3) will prefer values + * For example, override(S1,S2,S3) will prefer values * from S3 over S2, and values from S2 over S1 for all keys * emitted from all sources. */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/Parser.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/Parser.java index 3c7a991fd0..96792c1e66 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/Parser.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/Parser.java @@ -275,7 +275,7 @@ public WNode(String ident) { /** * Let the first actual define the InputFormat and the second define - * the mapred.input.dir property. + * the mapred.input.dir property. 
*/ public void parse(List ll, JobConf job) throws IOException { StringBuilder sb = new StringBuilder(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java index b06961e5cf..98ca9318df 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TotalOrderPartitioner.java @@ -43,7 +43,7 @@ public void configure(JobConf job) { /** * Set the path to the SequenceFile storing the sorted partition keyset. - * It must be the case that for R reduces, there are R-1 + * It must be the case that for R reduces, there are R-1 * keys in the SequenceFile. * @deprecated Use * {@link #setPartitionFile(Configuration, Path)} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java index 48cde0e5f0..249079b8e4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java @@ -205,7 +205,7 @@ public List getDependentJobs() { * is waiting to run, not during or afterwards. * * @param dependingJob Job that this Job depends on. - * @return true if the Job was added. + * @return true if the Job was added. */ public synchronized boolean addDependingJob(ControlledJob dependingJob) { if (this.state == State.WAITING) { //only allowed to add jobs when waiting diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java index 6189a271bc..b0b459afe2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java @@ -41,10 +41,10 @@ * and partitioned the same way. * * A user may define new join types by setting the property - * mapreduce.join.define.<ident> to a classname. - * In the expression mapreduce.join.expr, the identifier will be + * mapreduce.join.define.<ident> to a classname. + * In the expression mapreduce.join.expr, the identifier will be * assumed to be a ComposableRecordReader. - * mapreduce.join.keycomparator can be a classname used to compare + * mapreduce.join.keycomparator can be a classname used to compare * keys in the join. 
* @see #setFormat * @see JoinRecordReader @@ -73,9 +73,9 @@ public CompositeInputFormat() { } * class ::= @see java.lang.Class#forName(java.lang.String) * path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String) * } - * Reads expression from the mapreduce.join.expr property and - * user-supplied join types from mapreduce.join.define.<ident> - * types. Paths supplied to tbl are given as input paths to the + * Reads expression from the mapreduce.join.expr property and + * user-supplied join types from mapreduce.join.define.<ident> + * types. Paths supplied to tbl are given as input paths to the * InputFormat class listed. * @see #compose(java.lang.String, java.lang.Class, java.lang.String...) */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java index 40f3570cb5..45e3224a3f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java @@ -67,8 +67,8 @@ public abstract class CompositeRecordReader< protected X value; /** - * Create a RecordReader with capacity children to position - * id in the parent reader. + * Create a RecordReader with capacity children to position + * id in the parent reader. * The id of a root CompositeRecordReader is -1 by convention, but relying * on this is not recommended. */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java index 5678445f11..2396e9daa4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/OverrideRecordReader.java @@ -33,7 +33,7 @@ /** * Prefer the "rightmost" data source for this key. - * For example, override(S1,S2,S3) will prefer values + * For example, override(S1,S2,S3) will prefer values * from S3 over S2, and values from S2 over S1 for all keys * emitted from all sources. */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/Parser.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/Parser.java index c557e14136..68cf310259 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/Parser.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/Parser.java @@ -290,7 +290,7 @@ public WNode(String ident) { /** * Let the first actual define the InputFormat and the second define - * the mapred.input.dir property. + * the mapred.input.dir property. 
*/ @Override public void parse(List ll, Configuration conf) throws IOException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java index aa541f3640..c48f925b98 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/TupleWritable.java @@ -144,7 +144,7 @@ public void remove() { /** * Convert Tuple to String as in the following. - * [<child1>,<child2>,...,<childn>] + * [<child1>,<child2>,...,<childn>] */ public String toString() { StringBuilder buf = new StringBuilder("["); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java index 2b1f7e37eb..5dd572835c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java @@ -208,15 +208,15 @@ public static Path getOutputPath(JobContext job) { * (running simultaneously e.g. speculative tasks) trying to open/write-to the * same file (path) on HDFS. Hence the application-writer will have to pick * unique names per task-attempt (e.g. using the attemptid, say - * attempt_200709221812_0001_m_000000_0), not just per TIP.

      + * attempt_200709221812_0001_m_000000_0), not just per TIP.

      * *

      To get around this the Map-Reduce framework helps the application-writer * out by maintaining a special - * ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} + * ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} * sub-directory for each task-attempt on HDFS where the output of the * task-attempt goes. On successful completion of the task-attempt the files - * in the ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} (only) - * are promoted to ${mapreduce.output.fileoutputformat.outputdir}. Of course, the + * in the ${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid} (only) + * are promoted to ${mapreduce.output.fileoutputformat.outputdir}. Of course, the * framework discards the sub-directory of unsuccessful task-attempts. This * is completely transparent to the application.
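Stepping back to the CompositeInputFormat hunks earlier in this patch, a hedged sketch of composing an inner join with the new API; the input paths and the choice of SequenceFileInputFormat are assumptions for illustration, and the expression is stored under the mapreduce.join.expr property quoted above:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
  import org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat;

  public class JoinJobSetup {
    public static Job newInnerJoinJob(Configuration conf) throws Exception {
      Job job = Job.getInstance(conf, "composite-inner-join");
      job.setInputFormatClass(CompositeInputFormat.class);
      // Builds an expression such as
      //   inner(tbl(SequenceFileInputFormat,"/data/a"),tbl(SequenceFileInputFormat,"/data/b"))
      // over two inputs that are sorted and partitioned the same way.
      job.getConfiguration().set("mapreduce.join.expr",
          CompositeInputFormat.compose("inner", SequenceFileInputFormat.class,
              new Path("/data/a"), new Path("/data/b")));
      return job;
    }
  }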

      * diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java index c19724e842..25967f92fa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java @@ -65,8 +65,8 @@ public TotalOrderPartitioner() { } /** * Read in the partition file and build indexing data structures. * If the keytype is {@link org.apache.hadoop.io.BinaryComparable} and - * total.order.partitioner.natural.order is not false, a trie - * of the first total.order.partitioner.max.trie.depth(2) + 1 bytes + * total.order.partitioner.natural.order is not false, a trie + * of the first total.order.partitioner.max.trie.depth(2) + 1 bytes * will be built. Otherwise, keys will be located using a binary search of * the partition keyset using the {@link org.apache.hadoop.io.RawComparator} * defined for this job. The input file must be sorted with the same @@ -128,7 +128,7 @@ public int getPartition(K key, V value, int numPartitions) { /** * Set the path to the SequenceFile storing the sorted partition keyset. - * It must be the case that for R reduces, there are R-1 + * It must be the case that for R reduces, there are R-1 * keys in the SequenceFile. */ public static void setPartitionFile(Configuration conf, Path p) { @@ -156,7 +156,7 @@ interface Node { /** * Base class for trie nodes. If the keytype is memcomp-able, this builds - * tries of the first total.order.partitioner.max.trie.depth + * tries of the first total.order.partitioner.max.trie.depth * bytes. */ static abstract class TrieNode implements Node { @@ -171,7 +171,7 @@ int getLevel() { /** * For types that are not {@link org.apache.hadoop.io.BinaryComparable} or - * where disabled by total.order.partitioner.natural.order, + * where disabled by total.order.partitioner.natural.order, * search the partition keyset with a binary search. */ class BinarySearchNode implements Node { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/AccumulatingReducer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/AccumulatingReducer.java index b6313494e4..eb0972b30f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/AccumulatingReducer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/AccumulatingReducer.java @@ -31,13 +31,13 @@ * The type is specified in the key part of the key-value pair * as a prefix to the key in the following way *

      - * type:key + * type:key *

      * The values are accumulated according to the types: *

        - *
• s: - string, concatenate
• f: - float, sum
• l: - long, sum
      * */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java index ddd2d2f126..7ded7a1e63 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/IOMapperBase.java @@ -109,8 +109,8 @@ abstract void collectStats(OutputCollector output, * Map file name and offset into statistical data. *

      * The map task is to get the - * key, which contains the file name, and the - * value, which is the offset within the file. + * key, which contains the file name, and the + * value, which is the offset within the file. * * The parameters are passed to the abstract method * {@link #doIO(Reporter,String,long)}, which performs the io operation, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java index 8937bdafe3..125dad5cbe 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java @@ -76,7 +76,7 @@ * specific attempt A during hour h. * The tool then sums all slots for all attempts for every hour. * The result is the slot hour utilization of the cluster: - * slotTime(h) = SUMA slotTime(A,h). + * slotTime(h) = SUMA slotTime(A,h). *

      * Log analyzer calculates slot hours for MAP and REDUCE * attempts separately. @@ -88,8 +88,8 @@ *

      * Map-reduce clusters are usually configured to have a fixed number of MAP * and REDUCE slots per node. Thus the maximal possible number of slots on - * the cluster is total_slots = total_nodes * slots_per_node. - * Effective slot hour cannot exceed total_slots for successful + * the cluster is total_slots = total_nodes * slots_per_node. + * Effective slot hour cannot exceed total_slots for successful * attempts. *
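For example (illustrative numbers only): a cluster of 100 nodes with 10 map slots per node gives total_slots = 100 * 10 = 1,000, so successful map attempts can account for at most 1,000 map slot-hours in any one hour.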

      * Pending time characterizes the wait time of attempts. @@ -106,39 +106,39 @@ * The following input parameters can be specified in the argument string * to the job log analyzer: *

        - *
• -historyDir inputDir: specifies the location of the directory where the analyzer will look for job history log files.
• -resFile resultFile: the name of the result file.
• -usersIncluded | -usersExcluded userList: slot utilization and pending time can be calculated for all or for all but the specified users. userList is a comma or semicolon separated list of users.
• -gzip: used if history log files are compressed. Only {@link GzipCodec} is currently supported.
• -jobDelimiter pattern: original log files can be concatenated into larger file(s) with the specified delimiter to recognize the end of the log for one job from the next one. pattern is a java regular expression {@link java.util.regex.Pattern}, which should match only the log delimiters. E.g. pattern ".!!FILE=.*!!" matches delimiters which contain the original history log file names in the following form: "$!!FILE=my.job.tracker.com_myJobId_user_wordcount.log!!"
• -clean: cleans up default directories used by the analyzer.
• -test: test one file locally and exit; does not require map-reduce.
• -help: print usage.
      * *

      Output.

      * The output file is formatted as a tab separated table consisting of four - * columns: SERIES, PERIOD, TYPE, SLOT_HOUR. + * columns: SERIES, PERIOD, TYPE, SLOT_HOUR. *
        - *
• SERIES: one of the four statistical series;
• PERIOD: the start of the time interval, in the format "yyyy-mm-dd hh:mm:ss";
• TYPE: the slot type, e.g. MAP or REDUCE;
• SLOT_HOUR: the value of the slot usage during this time interval.
      */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/package.html b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/package.html index 91484aa49c..9fa0c41e0a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/package.html +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/package.html @@ -23,7 +23,7 @@ for large n, say n > 100,000,000. For computing the lower bits of π, consider using bbp. -

      The distbbp Program

      +

      The distbbp Program

      The main class is DistBbp and the actually computation is done by DistSum jobs. The steps for launching the jobs are: @@ -39,8 +39,10 @@

      The distbbp Program

    • Combine the job outputs and print the π bits.
    • -
      Metrics2
      -

      The Bits of π

      + + +
      "The Bits of Pi"
      +

      The Bits of π

      The table on the right are the results computed by distbbp.

      @@ -56,7 +58,7 @@

      The Bits of π

    • The computations in Row 13 and Row 14 were completed on May 20, 2009. It seems that the corresponding bits were never computed before.
    • -
    • The first part of Row 15 (6216B06) +
    • The first part of Row 15 (6216B06)
      • The first 30% of the computation was done in idle cycles of some clusters spread over 20 days.
      • @@ -69,7 +71,7 @@

        The Bits of π

        this YDN blog.
    • -
    • The second part of Row 15 (D3611) +
    • The second part of Row 15 (D3611)
      • The starting position is 1,000,000,000,000,053, totally 20 bits.
      • Two computations, at positions n and n+4, were performed.
      • A single computation was divided into 14,000 jobs @@ -85,42 +87,42 @@

        The Bits of π

        computed ever in the history.
    • -
      - - +
      "Pi in hex"
Row  Position n             π bits (in hex) starting at n
  0  1                      243F6A8885A3*
  1  11                     FDAA22168C23
  2  101                    3707344A409
  3  1,001                  574E69A458F
  4  10,001                 44EC5716F2B
  5  100,001                944F7A204
  6  1,000,001              6FFFA4103
  7  10,000,001             6CFDD54E3
  8  100,000,001            A306CFA7
  9  1,000,000,001          3E08FF2B
 10  10,000,000,001         0A8BD8C0
 11  100,000,000,001        B2238C1
 12  1,000,000,000,001      0FEE563
 13  10,000,000,000,001     896DC3
 14  100,000,000,000,001    C216EC
 15  1,000,000,000,000,001  6216B06 ... D3611
      * By representing π in decimal, hexadecimal and binary, we have - - +
"Pi in various formats"
π = 3.1415926535 8979323846 2643383279 ...  (decimal)
  = 3.243F6A8885 A308D31319 8A2E037073 ...  (hexadecimal)
  = 11.0010010000 1111110110 1010100010 ... (binary)
The first ten bits of π are 0010010000.
      @@ -130,7 +132,8 @@

      Command Line Usages

      $ hadoop org.apache.hadoop.examples.pi.DistBbp \ <b> <nThreads> <nJobs> <type> <nPart> <remoteDir> <localDir> And the parameters are: - +
      + diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 33533dbbae..32a140a9de 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -158,7 +158,6 @@ 1.8 - false - -html4 - - - - - - diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml index 595413edc4..997f63b0dc 100644 --- a/hadoop-tools/hadoop-aws/pom.xml +++ b/hadoop-tools/hadoop-aws/pom.xml @@ -35,7 +35,6 @@ UTF-8 true ${project.build.directory}/test - true unset diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FilePool.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FilePool.java index 9a0cca380b..9fbad6b7a9 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FilePool.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/FilePool.java @@ -128,7 +128,7 @@ static abstract class Node { /** * Return a set of files whose cumulative size is at least - * targetSize. + * targetSize. * TODO Clearly size is not the only criterion, e.g. refresh from * generated data without including running task output, tolerance * for permission issues, etc. diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/IdentifierResolver.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/IdentifierResolver.java index b0cd5b4fdb..9db4087c0c 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/IdentifierResolver.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/IdentifierResolver.java @@ -26,7 +26,7 @@ /** * This class is used to resolve a string identifier into the required IO * classes. By extending this class and pointing the property - * stream.io.identifier.resolver.class to this extension, additional + * stream.io.identifier.resolver.class to this extension, additional * IO classes can be added by external code. */ public class IdentifierResolver { diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/package.html b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/package.html index be64426757..d7924e8d4e 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/package.html +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/package.html @@ -19,7 +19,7 @@ -Hadoop Streaming is a utility which allows users to create and run +Hadoop Streaming is a utility which allows users to create and run Map-Reduce jobs with any executables (e.g. Unix shell utilities) as the mapper and/or the reducer. diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/package.html b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/package.html index 3494fbd858..fb72cc3a8e 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/package.html +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/typedbytes/package.html @@ -22,11 +22,12 @@ Typed bytes are sequences of bytes in which the first byte is a type code. They are especially useful as a (simple and very straightforward) binary format for transferring data to and from Hadoop Streaming programs. -

      Type Codes

      +

      Type Codes

      Each typed bytes sequence starts with an unsigned byte that contains the type code. Possible values are: -
      "command line option"
      <b> The number of bits to skip, i.e. compute the (b+1)th position.
      +
      + @@ -48,7 +49,8 @@

      Subsequent Bytes

      These are the subsequent bytes for the different type codes (everything is big-endian and unpadded): -
      "Type Codes"
Code  Type
   0  A sequence of bytes.
   1  A byte.
      +
      + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SignalContainerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SignalContainerRequest.java index 28cc8ea5b4..d002071e6c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SignalContainerRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/SignalContainerRequest.java @@ -29,7 +29,7 @@ *

      The request sent by the client to the ResourceManager * or by the ApplicationMaster to the NodeManager * to signal a container. - * @see SignalContainerCommand

      + * @see SignalContainerCommand */ @Public @Evolving diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java index ff4556f7cd..c4417851a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java @@ -31,7 +31,7 @@ * adding the following to by This would actually be set as: * [prefix].sink.[some instance name].class * =org.apache.hadoop.yarn.service.timelineservice.ServiceMetricsSink - * , where prefix is "atsv2": and some instance name is + * , where prefix is "atsv2": and some instance name is * just any unique name, so properties can be differentiated if there are * multiple sinks of the same type created */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AdminACLsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AdminACLsManager.java index 949c6a2e27..3ff53cce8b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AdminACLsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/AdminACLsManager.java @@ -93,7 +93,7 @@ public UserGroupInformation getOwner() { * * @see YarnConfiguration#YARN_ACL_ENABLE * @see YarnConfiguration#DEFAULT_YARN_ACL_ENABLE - * @return true if ACLs are enabled + * @return true if ACLs are enabled */ public boolean areACLsEnabled() { return aclsEnabled; @@ -103,7 +103,7 @@ public boolean areACLsEnabled() { * Returns whether the specified user/group is an administrator * * @param callerUGI user/group to to check - * @return true if the UserGroupInformation specified + * @return true if the UserGroupInformation specified * is a member of the access control list for administrators */ public boolean isAdmin(UserGroupInformation callerUGI) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java index 6d582ca1ec..423f029b9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java @@ -56,7 +56,7 @@ * } * *

      - * Note that null values are {@link #append(CharSequence) append}ed + * Note that null values are {@link #append(CharSequence) append}ed * just like in {@link StringBuilder#append(CharSequence) original * implementation}. *

      diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/LeveldbIterator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/LeveldbIterator.java index 463bee7eba..00b97aadc8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/LeveldbIterator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/LeveldbIterator.java @@ -112,7 +112,7 @@ public void seekToLast() throws DBException { } /** - * Returns true if the iteration has more elements. + * Returns true if the iteration has more elements. */ public boolean hasNext() throws DBException { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java index 433b352016..cac14eaa82 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java @@ -21,7 +21,7 @@ /** * The base type of tables. - * @param T table type + * @param table type */ public abstract class BaseTable { }

      "Subsequent Bytes"
Code  Subsequent Bytes
   0  <32-bit signed integer> <as many bytes as indicated by the integer>
   1  <signed byte>
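To make the two rows above concrete, a hedged sketch that writes one value of each kind by hand with plain java.io; the class and method names are illustrative, and real clients would normally use the typed-bytes writers in this package instead:

  import java.io.ByteArrayOutputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;

  public class TypedBytesSketch {
    public static byte[] encode(byte[] blob, byte single) throws IOException {
      ByteArrayOutputStream buf = new ByteArrayOutputStream();
      DataOutputStream out = new DataOutputStream(buf);
      out.writeByte(0);            // type code 0: a sequence of bytes
      out.writeInt(blob.length);   // 32-bit signed length, big-endian (DataOutput default)
      out.write(blob);
      out.writeByte(1);            // type code 1: a byte
      out.writeByte(single);
      out.flush();
      return buf.toByteArray();
    }
  }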