From 6a9ceedfb3ee7c2f66a44083fb8e68cca508e207 Mon Sep 17 00:00:00 2001
From: Akira Ajisaka <aajisaka@apache.org>
Date: Fri, 23 Oct 2020 03:15:45 +0900
Subject: [PATCH] HADOOP-17175. [JDK 11] Fix javadoc errors in hadoop-common module. (#2397)

---
 hadoop-common-project/hadoop-common/pom.xml   |  1 -
 .../java/org/apache/hadoop/fs/FileUtil.java   |  2 +-
 .../org/apache/hadoop/fs/PartialListing.java  |  2 +-
 .../hadoop/fs/impl/FutureIOSupport.java       |  8 +-
 .../apache/hadoop/fs/viewfs/Constants.java    |  4 +-
 .../fs/viewfs/HCFSMountTableConfigLoader.java |  2 +-
 .../viewfs/ViewFileSystemOverloadScheme.java  | 95 +++++++++++--------
 .../org/apache/hadoop/ipc/ProxyCombiner.java  |  4 +-
 .../hadoop/ipc/WeightedTimeCostProvider.java  |  4 +-
 .../hadoop/net/DomainNameResolverFactory.java |  4 +-
 .../org/apache/hadoop/security/Groups.java    | 12 ++-
 .../ssl/DelegatingSSLSocketFactory.java       | 23 ++---
 12 files changed, 87 insertions(+), 74 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index fc0927eca3..cc786e8750 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -35,7 +35,6 @@
     <is.hadoop.component>true</is.hadoop.component>
     <wsce.config.dir>../etc/hadoop</wsce.config.dir>
     <wsce.config.file>wsce-site.xml</wsce.config.file>
-    <javadoc.skip.jdk11>true</javadoc.skip.jdk11>
   </properties>
 
   <dependencies>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 73ca6e6521..e078a2c519 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -1812,7 +1812,7 @@ public static FileSystem write(final FileSystem fs, final Path path,
    * specified charset. This utility method opens the file for writing, creating
    * the file if it does not exist, or overwrites an existing file.
    *
-   * @param FileContext the file context with which to create the file
+   * @param fs the file context with which to create the file
    * @param path the path to the file
    * @param charseq the char sequence to write to the file
    * @param cs the charset to use for encoding
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
index 80d173e905..cec5d68341 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java
@@ -30,7 +30,7 @@
  * A partial listing of the children of a parent directory. Since it is a
  * partial listing, multiple PartialListing may need to be combined to obtain
  * the full listing of a parent directory.
- * <p/>
+ * <p>
  * ListingBatch behaves similar to a Future, in that getting the result via
  * {@link #get()} will throw an Exception if there was a failure.
  */
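The FileUtil.java hunk above fixes the @param tag of the FileContext-based write overload. A minimal usage sketch of that overload, with path and contents as placeholders (the parameter order is taken from the @param list in the hunk):

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class FileUtilWriteSketch {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    // Opens /tmp/example.txt for writing, creating it if absent or
    // overwriting it, and encodes the sequence with the given charset,
    // as the javadoc above describes.
    FileUtil.write(fc, new Path("/tmp/example.txt"),
        "hello, hadoop", StandardCharsets.UTF_8);
  }
}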
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
index f13d701803..84ca94e642 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FutureIOSupport.java
@@ -166,11 +166,11 @@ private static IOException unwrapInnerException(final Throwable e) {
    * Propagate options to any builder, converting everything with the
    * prefix to an option where, if there were 2+ dot-separated elements,
    * it is converted to a schema.
-   * <pre>
+   * <pre>{@code
    *   fs.example.s3a.option => s3a:option
    *   fs.example.fs.io.policy => s3a.io.policy
    *   fs.example.something => something
-   * </pre>
+   * }</pre>
    * @param builder builder to modify
    * @param conf configuration to read
    * @param optionalPrefix prefix for optional settings
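Both hunks in FutureIOSupport.java apply the same fix: wrapping the pre-formatted sample in {@code ...} so that the bare => arrows no longer trip the JDK 11 doclint (typically reported as a bad use of '>'). The idiom, shown on a hypothetical method:

public class JavadocCodeIdiom {
  /**
   * Maps prefixed configuration keys onto builder options:
   * <pre>{@code
   *   fs.example.s3a.option => s3a:option
   * }</pre>
   * Without the {@code ...} wrapper, the JDK 11 javadoc tool rejects
   * the raw arrow inside the <pre> block.
   */
  public void example() {
  }
}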
@@ -196,11 +196,11 @@ FSBuilder<T, U> propagateOptions(
    * Propagate options to any builder, converting everything with the
    * prefix to an option where, if there were 2+ dot-separated elements,
    * it is converted to a schema.
-   * <pre>
+   * <pre>{@code
    *   fs.example.s3a.option => s3a:option
    *   fs.example.fs.io.policy => s3a.io.policy
    *   fs.example.something => something
-   * </pre>
+   * }</pre>
    * @param builder builder to modify
    * @param conf configuration to read
    * @param prefix prefix to scan/strip
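The two methods documented above rewrite configuration keys under a prefix into builder options. A standalone sketch of the key-rewriting rule only — stripPrefix is a hypothetical helper, not the Hadoop API, and it covers just the first mapping shown in the javadoc:

import java.util.HashMap;
import java.util.Map;

public class PropagateOptionsSketch {
  // "fs.example.s3a.option" with prefix "fs.example." becomes
  // "s3a:option"; a key with no further dot is kept as-is.
  static Map<String, String> stripPrefix(Map<String, String> conf,
      String prefix) {
    Map<String, String> out = new HashMap<>();
    for (Map.Entry<String, String> e : conf.entrySet()) {
      String key = e.getKey();
      if (!key.startsWith(prefix)) {
        continue;
      }
      String rest = key.substring(prefix.length());
      int dot = rest.indexOf('.');
      // 2+ dot-separated elements: the first element becomes a schema.
      String option = dot > 0
          ? rest.substring(0, dot) + ':' + rest.substring(dot + 1)
          : rest;
      out.put(option, e.getValue());
    }
    return out;
  }
}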
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index bf9f7db722..5c27692eb5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -89,8 +89,8 @@ public interface Constants {
   /**
    * Config variable for specifying a regex link which uses regular expressions
    * as source and target could use group captured in src.
-   * E.g. (^/(?<firstDir>\\w+), /prefix-${firstDir}) =>
-   * (/path1/file1 => /prefix-path1/file1)
+   * E.g. {@literal (^/(?<firstDir>\\w+), /prefix-${firstDir}) =>
+   * (/path1/file1 => /prefix-path1/file1)}
    */
   String CONFIG_VIEWFS_LINK_REGEX = "linkRegex";
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java
index 3968e3650c..8dbb0f3007 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/HCFSMountTableConfigLoader.java
@@ -42,7 +42,7 @@ public class HCFSMountTableConfigLoader implements MountTableConfigLoader {
    * Loads the mount-table configuration from hadoop compatible file system and
    * add the configuration items to given configuration. Mount-table
    * configuration format should be suffixed with version number.
-   * Format: mount-table.<versionNumber>.xml
+   * Format: {@literal mount-table.<versionNumber>.xml}
    * Example: mount-table.1.xml
    * When user wants to update mount-table, the expectation is to upload new
    * mount-table configuration file with monotonically increasing integer as
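The Constants.java hunk above documents regex mount links: a source pattern with a named capture group and a target template referencing that group. A plain java.util.regex sketch of the documented rewrite (illustration only, not the ViewFs resolution code):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LinkRegexSketch {
  public static void main(String[] args) {
    // Source regex and target template from the javadoc example:
    // (^/(?<firstDir>\w+), /prefix-${firstDir})
    String path = "/path1/file1";
    Matcher m = Pattern.compile("^/(?<firstDir>\\w+)").matcher(path);
    if (m.find()) {
      // Substitute the captured group into the target template.
      String rewritten = "/prefix-" + m.group("firstDir")
          + path.substring(m.end());
      System.out.println(rewritten); // /prefix-path1/file1
    }
  }
}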

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
index 60d14d3851..12877ccf4b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
@@ -33,73 +33,85 @@
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
 
-/******************************************************************************
- * This class is extended from the ViewFileSystem for the overloaded scheme
- * file system. Mount link configurations and in-memory mount table
- * building behaviors are inherited from ViewFileSystem. Unlike ViewFileSystem
- * scheme (viewfs://), the users would be able to use any scheme.
+/**
+ * <p> This class is extended from the ViewFileSystem for the overloaded
+ * scheme file system. Mount link configurations and in-memory mount table
+ * building behaviors are inherited from ViewFileSystem. Unlike
+ * ViewFileSystem scheme (viewfs://), the users would be able to use
+ * any scheme. </p>
  *
- * To use this class, the following configurations need to be added in
- * core-site.xml file.
- * 1) fs.<scheme>.impl
- * = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme
- * 2) fs.viewfs.overload.scheme.target.<scheme>.impl
- * = "<hadoop compatible file system implementation class name for the <scheme>>"
+ * <p> To use this class, the following configurations need to be added in
+ * core-site.xml file. <br>
+ * 1) fs.{@literal <scheme>}.impl
+ * = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme <br>
+ * 2) fs.viewfs.overload.scheme.target.{@literal <scheme>}.impl
+ * = {@literal <hadoop compatible file system implementation class name for the <scheme>>} </p>
  *
- * Here <scheme> can be any scheme, but with that scheme there should be a
- * hadoop compatible file system available. Second configuration value should
- * be the respective scheme's file system implementation class.
+ * <p> Here {@literal <scheme>} can be any scheme, but with that scheme there
+ * should be a hadoop compatible file system available. Second configuration
+ * value should be the respective scheme's file system implementation class.
  * Example: if scheme is configured with "hdfs", then the 2nd configuration
  * class name will be org.apache.hadoop.hdfs.DistributedFileSystem.
  * if scheme is configured with "s3a", then the 2nd configuration class name
- * will be org.apache.hadoop.fs.s3a.S3AFileSystem.
+ * will be org.apache.hadoop.fs.s3a.S3AFileSystem. </p>
  *
- * Use Case 1:
- * ===========
+ * <p> Use Case 1: <br>
+ * =========== <br>
  * If users want some of their existing cluster (hdfs://Cluster)
  * data to mount with other hdfs and object store clusters(hdfs://NN1,
- * o3fs://bucket1.volume1/, s3a://bucket1/)
+ * o3fs://bucket1.volume1/, s3a://bucket1/) </p>
  *
- * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user
- * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data
+ * <p>
+ * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user <br>
+ * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data <br>
  * fs.viewfs.mounttable.Cluster.link./backup = s3a://bucket1/backup/
+ * </p>
  *
+ * <p>
  * Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA
+ * <br>
  * Op2: Create file hdfs://Cluster/data/datafile will go to
- * o3fs://bucket1.volume1/data/datafile
+ * o3fs://bucket1.volume1/data/datafile <br>
  * Op3: Create file hdfs://Cluster/backup/data.zip will go to
  * s3a://bucket1/backup/data.zip
+ * </p>
  *
- * Use Case 2:
- * ===========
+ * <p> Use Case 2: <br>
+ * =========== <br>
  * If users want some of their existing cluster (s3a://bucketA/)
  * data to mount with other hdfs and object store clusters
- * (hdfs://NN1, o3fs://bucket1.volume1/)
+ * (hdfs://NN1, o3fs://bucket1.volume1/) </p>
  *
- * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user
- * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data
+ * <p>
+ * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user <br>
+ * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data <br>
  * fs.viewfs.mounttable.bucketA.link./salesDB = s3a://bucketA/salesDB/
+ * </p>
  *
+ * <p>
  * Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA
+ * <br>
  * Op2: Create file s3a://bucketA/data/datafile will go to
- * o3fs://bucket1.volume1/data/datafile
+ * o3fs://bucket1.volume1/data/datafile <br>
  * Op3: Create file s3a://bucketA/salesDB/dbfile will go to
  * s3a://bucketA/salesDB/dbfile
+ * </p>
  *
- * Note:
+ * <p> Note: <br>
  * (1) In ViewFileSystemOverloadScheme, by default the mount links will be
  * represented as non-symlinks. If you want to change this behavior, please see
- * {@link ViewFileSystem#listStatus(Path)}
+ * {@link ViewFileSystem#listStatus(Path)} <br>
  * (2) In ViewFileSystemOverloadScheme, only the initialized uri's hostname will
  * be considered as the mount table name. When the passed uri has hostname:port,
  * it will simply ignore the port number and only hostname will be considered as
- * the mount table name.
+ * the mount table name. <br>
  * (3) If there are no mount links configured with the initializing uri's
  * hostname as the mount table name, then it will automatically consider the
- * current uri as fallback( ex: fs.viewfs.mounttable.<mycluster>.linkFallback)
- * target fs uri.
- *****************************************************************************/
+ * current uri as fallback( ex:
+ * {@literal fs.viewfs.mounttable.<mycluster>.linkFallback}) target fs uri.
+ * </p>
+ */
 @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" })
 @InterfaceStability.Evolving
 public class ViewFileSystemOverloadScheme extends ViewFileSystem {
@@ -164,12 +176,13 @@ public void initialize(URI theUri, Configuration conf) throws IOException {
   /**
    * This method is overridden because in ViewFileSystemOverloadScheme if
    * overloaded scheme matches with mounted target fs scheme, file system
-   * should be created without going into fs.<scheme>.impl based resolution.
-   * Otherwise it will end up in an infinite loop as the target will be
-   * resolved again to ViewFileSystemOverloadScheme as fs.<scheme>.impl points
-   * to ViewFileSystemOverloadScheme. So, below method will initialize the
-   * fs.viewfs.overload.scheme.target.<scheme>.impl. Other schemes can
-   * follow fs.newInstance
+   * should be created without going into {@literal fs.<scheme>.impl} based
+   * resolution. Otherwise it will end up in an infinite loop as the target
+   * will be resolved again to ViewFileSystemOverloadScheme as
+   * {@literal fs.<scheme>.impl} points to ViewFileSystemOverloadScheme.
+   * So, below method will initialize the
+   * {@literal fs.viewfs.overload.scheme.target.<scheme>.impl}.
+   * Other schemes can follow fs.newInstance
    */
   @Override
   protected FsGetter fsGetter() {
@@ -179,7 +192,7 @@ protected FsGetter fsGetter() {
   /**
    * This class checks whether the rooScheme is same as URI scheme. If both are
    * same, then it will initialize file systems by using the configured
-   * fs.viewfs.overload.scheme.target.<scheme>.impl class.
+   * {@literal fs.viewfs.overload.scheme.target.<scheme>.impl} class.
    */
   static class ChildFsGetter extends FsGetter {
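A sketch of the wiring the class javadoc above describes, using the javadoc's own keys and Use Case 1 mount links. It is not runnable without the referenced clusters; it only shows where each configuration key goes:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OverloadSchemeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // 1) Route the "hdfs" scheme through ViewFileSystemOverloadScheme.
    conf.set("fs.hdfs.impl",
        "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
    // 2) Target file system implementation for the same scheme.
    conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
        "org.apache.hadoop.hdfs.DistributedFileSystem");
    // Mount links for mount table "Cluster", per Use Case 1 above.
    conf.set("fs.viewfs.mounttable.Cluster.link./user", "hdfs://NN1/user");
    conf.set("fs.viewfs.mounttable.Cluster.link./backup",
        "s3a://bucket1/backup/");

    FileSystem fs = FileSystem.get(URI.create("hdfs://Cluster"), conf);
    // Op1 above: resolves through the mount table to hdfs://NN1/user/fileA.
    fs.create(new Path("/user/fileA")).close();
  }
}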

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java
index 835d8065bd..b7188b7de5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProxyCombiner.java
@@ -50,8 +50,8 @@ private ProxyCombiner() { }
    * all of the methods of the combined proxy interface, delegating calls
    * to which proxy implements that method. If multiple proxies implement the
    * same method, the first in the list will be used for delegation.
-   *
-   * <p>This will check that every method on the combined interface is
+   * <p></p>
+   * This will check that every method on the combined interface is
    * implemented by at least one of the supplied proxy objects.
    *
    * @param combinedProxyInterface The interface of the combined proxy.
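The ProxyCombiner javadoc above describes delegating each method of a combined interface to the first supplied proxy that implements it. A self-contained java.lang.reflect.Proxy sketch of that idea (a re-implementation for illustration, not Hadoop's ProxyCombiner):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

public class CombineSketch {
  /** Returns a proxy forwarding each call to the first delegate that
   *  implements the invoked method's declaring interface. */
  @SuppressWarnings("unchecked")
  static <T> T combine(Class<T> combinedIface, Object... delegates) {
    InvocationHandler handler = (proxy, method, args) -> {
      for (Object d : delegates) {
        if (method.getDeclaringClass().isInstance(d)) {
          return method.invoke(d, args);
        }
      }
      throw new IllegalStateException("No delegate implements " + method);
    };
    return (T) Proxy.newProxyInstance(combinedIface.getClassLoader(),
        new Class<?>[] {combinedIface}, handler);
  }

  interface Reader { String read(); }
  interface Writer { void write(String s); }
  interface ReaderWriter extends Reader, Writer { }

  public static void main(String[] args) {
    Reader r = () -> "data";
    Writer w = System.out::println;
    ReaderWriter rw = combine(ReaderWriter.class, r, w);
    rw.write(rw.read()); // prints "data"
  }
}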

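The next diff touches WeightedTimeCostProvider, which derives a call's cost from per-stage processing times and configurable weights. A sketch of that weighted sum — the stage names follow ProcessingDetails.Timing, and the weights here are hypothetical:

import java.util.Map;

public class WeightedCostSketch {
  /** cost = sum over stages of weight(stage) * elapsed(stage). */
  static long cost(Map<String, Long> elapsedMillis,
      Map<String, Long> weights) {
    long total = 0;
    for (Map.Entry<String, Long> e : elapsedMillis.entrySet()) {
      total += weights.getOrDefault(e.getKey(), 1L) * e.getValue();
    }
    return total;
  }

  public static void main(String[] args) {
    // Hypothetical weights: lock time weighted heavier than lock-free
    // processing, mirroring the idea described in the javadoc.
    long c = cost(Map.of("LOCKFREE", 10L, "QUEUE", 5L, "LOCKEXCLUSIVE", 2L),
        Map.of("LOCKFREE", 1L, "QUEUE", 2L, "LOCKEXCLUSIVE", 10L));
    System.out.println(c); // 10*1 + 5*2 + 2*10 = 40
  }
}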
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java
index 4304b24299..1ecd19b74c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedTimeCostProvider.java
@@ -29,8 +29,8 @@
  * {@link ProcessingDetails}). This can be used by specifying the
  * {@link org.apache.hadoop.fs.CommonConfigurationKeys#IPC_COST_PROVIDER_KEY}
  * configuration key.
- *
- * <p>This allows for configuration of how heavily each of the operations
+ * <p></p>
+ * This allows for configuration of how heavily each of the operations
  * within {@link ProcessingDetails} is weighted. By default,
  * {@link ProcessingDetails.Timing#LOCKFREE},
  * {@link ProcessingDetails.Timing#RESPONSE}, and
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java
index a0b0380c18..fdb45dd85d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DomainNameResolverFactory.java
@@ -22,7 +22,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 
-import java.io.IOException;
 import java.net.URI;
 
 /**
@@ -49,7 +48,7 @@ private DomainNameResolverFactory() {
    * @return Domain name resolver.
    */
   public static DomainNameResolver newInstance(
-      Configuration conf, URI uri, String configKey) throws IOException {
+      Configuration conf, URI uri, String configKey) {
     String host = uri.getHost();
     String confKeyWithHost = configKey + "." + host;
     return newInstance(conf, confKeyWithHost);
@@ -61,7 +60,6 @@ public static DomainNameResolver newInstance(
    * @param conf Configuration
    * @param configKey config key name.
    * @return Domain name resolver.
-   * @throws IOException when the class cannot be found or initiated.
    */
   public static DomainNameResolver newInstance(
       Configuration conf, String configKey) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index 406d0d0e15..47dca6cfe9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -201,10 +201,10 @@ private IOException noGroupsForUser(String user) {
   /**
    * Get the group memberships of a given user.
    * If the user's group is not cached, this method may block.
-   * Note this method can be expensive as it involves Set->List conversion.
-   * For user with large group membership (i.e., > 1000 groups), we recommend
-   * using getGroupSet to avoid the conversion and fast membership look up via
-   * contains().
+   * Note this method can be expensive as it involves Set {@literal ->} List
+   * conversion. For user with large group membership
+   * (i.e., {@literal >} 1000 groups), we recommend using getGroupSet
+   * to avoid the conversion and fast membership look up via contains().
    * @param user User's name
    * @return the group memberships of the user as list
    * @throws IOException if user does not exist
@@ -220,7 +220,9 @@ public List<String> getGroups(final String user) throws IOException {
    * Get the group memberships of a given user.
    * If the user's group is not cached, this method may block.
    * This provide better performance when user has large group membership via
-   * 1) avoid set->list->set conversion for the caller UGI/PermissionCheck
+   * <br>
+   * 1) avoid {@literal set->list->set} conversion for the caller
+   * UGI/PermissionCheck <br>
    * 2) fast lookup using contains() via Set instead of List
    * @param user User's name
    * @return the group memberships of the user as set
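The Groups.java hunks above steer callers with very large group memberships toward the Set-returning lookup. A short usage sketch — the user and group names are placeholders, and the variant is assumed to be getGroupsSet, the method the javadoc refers to:

import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Groups;

public class GroupsSketch {
  public static void main(String[] args) throws Exception {
    Groups groups =
        Groups.getUserToGroupsMappingService(new Configuration());
    // The Set-based variant skips the Set->List conversion and gives
    // constant-time membership checks via contains().
    Set<String> memberships = groups.getGroupsSet("alice");
    System.out.println(memberships.contains("admins"));
  }
}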

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java
index 9d7afa933b..5644234a57 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/DelegatingSSLSocketFactory.java
@@ -43,19 +43,20 @@
  *
  * <p>
  * The factory has several different modes of operation:
- * <ul>
- * <li>OpenSSL: Uses the wildly-openssl library to delegate to the
- * system installed OpenSSL. If the wildfly-openssl integration is not
- * properly setup, an exception is thrown.</li>
- * <li>Default: Attempts to use the OpenSSL mode, if it cannot load the
- * necessary libraries, it falls back to the Default_JSEE mode.</li>
- * <li>Default_JSSE: Delegates to the JSSE implementation of SSL, but
- * it disables the GCM cipher when running on Java 8.</li>
- * <li>Default_JSSE_with_GCM: Delegates to the JSSE implementation of
- * SSL with no modification to the list of enabled ciphers.</li>
- * </ul>
- * </p>
+ * </p>
+ *
+ * <ul>
+ * <li>OpenSSL: Uses the wildly-openssl library to delegate to the
+ * system installed OpenSSL. If the wildfly-openssl integration is not
+ * properly setup, an exception is thrown.</li>
+ * <li>Default: Attempts to use the OpenSSL mode, if it cannot load the
+ * necessary libraries, it falls back to the Default_JSEE mode.</li>
+ * <li>Default_JSSE: Delegates to the JSSE implementation of SSL, but
+ * it disables the GCM cipher when running on Java 8.</li>
+ * <li>Default_JSSE_with_GCM: Delegates to the JSSE implementation of
+ * SSL with no modification to the list of enabled ciphers.</li>
+ * </ul>
  *
  * In order to load OpenSSL, applications must ensure the wildfly-openssl
  * artifact is on the classpath. Currently, only ABFS declares
  * wildfly-openssl as an explicit dependency.
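A sketch of selecting one of the modes listed above — the initializeDefaultFactory/getDefaultFactory entry points and the SSLChannelMode names are taken from the mode list in the javadoc, but treat the exact API as an assumption:

import javax.net.ssl.SSLSocketFactory;

import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory;

public class SslModeSketch {
  public static void main(String[] args) throws Exception {
    // "Default" tries the wildfly-openssl path first and falls back to
    // JSSE if the native libraries cannot be loaded, per the list above.
    DelegatingSSLSocketFactory.initializeDefaultFactory(
        DelegatingSSLSocketFactory.SSLChannelMode.Default);
    SSLSocketFactory factory =
        DelegatingSSLSocketFactory.getDefaultFactory();
    System.out.println(factory.getDefaultCipherSuites().length);
  }
}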