HADOOP-17175. [JDK 11] Fix javadoc errors in hadoop-common module. (#2397)
Parent: 7435604a91
Commit: 6a9ceedfb3
@@ -35,7 +35,6 @@
 <is.hadoop.common.component>true</is.hadoop.common.component>
 <wsce.config.dir>../etc/hadoop</wsce.config.dir>
 <wsce.config.file>wsce-site.xml</wsce.config.file>
-<javadoc.skip.jdk11>true</javadoc.skip.jdk11>
 </properties>
 
 
@@ -1812,7 +1812,7 @@ public static FileSystem write(final FileSystem fs, final Path path,
  * specified charset. This utility method opens the file for writing, creating
  * the file if it does not exist, or overwrites an existing file.
  *
- * @param FileContext the file context with which to create the file
+ * @param fs the file context with which to create the file
  * @param path the path to the file
  * @param charseq the char sequence to write to the file
  * @param cs the charset to use for encoding
@@ -30,7 +30,7 @@
  * A partial listing of the children of a parent directory. Since it is a
  * partial listing, multiple PartialListing may need to be combined to obtain
  * the full listing of a parent directory.
- * <p/>
+ * <p>
  * ListingBatch behaves similar to a Future, in that getting the result via
  * {@link #get()} will throw an Exception if there was a failure.
  */
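Note: the <p/> to <p> change above is the archetypal JDK 11 fix in this commit. The JDK 11 javadoc doclint rejects self-closing HTML elements, typically with an error along the lines of "self-closing element not allowed". A minimal before/after sketch of the pattern:

  // Fails under the JDK 11 javadoc doclint:
  /**
   * A partial listing of the children of a parent directory.
   * <p/>
   */

  // Passes: HTML paragraphs in javadoc take a plain <p>
  /**
   * A partial listing of the children of a parent directory.
   * <p>
   */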
@@ -166,11 +166,11 @@ private static IOException unwrapInnerException(final Throwable e) {
  * Propagate options to any builder, converting everything with the
  * prefix to an option where, if there were 2+ dot-separated elements,
  * it is converted to a schema.
- * <pre>
+ * <pre>{@code
  * fs.example.s3a.option => s3a:option
  * fs.example.fs.io.policy => s3a.io.policy
  * fs.example.something => something
- * </pre>
+ * }</pre>
  * @param builder builder to modify
  * @param conf configuration to read
  * @param optionalPrefix prefix for optional settings
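Note: wrapping the <pre> body in {@code ...} makes javadoc treat the arrow examples as literal text; a bare > (as in =>) inside a <pre> block trips doclint, which reports stray angle brackets as bad HTML. The identical change is applied to the second propagateOptions overload in the next hunk. A minimal sketch of the pattern:

  /**
   * Converts prefixed options, for example:
   * <pre>{@code
   * fs.example.s3a.option => s3a:option
   * }</pre>
   */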
@@ -196,11 +196,11 @@ FSBuilder<T, U> propagateOptions(
  * Propagate options to any builder, converting everything with the
  * prefix to an option where, if there were 2+ dot-separated elements,
  * it is converted to a schema.
- * <pre>
+ * <pre>{@code
  * fs.example.s3a.option => s3a:option
  * fs.example.fs.io.policy => s3a.io.policy
  * fs.example.something => something
- * </pre>
+ * }</pre>
  * @param builder builder to modify
  * @param conf configuration to read
  * @param prefix prefix to scan/strip
@@ -89,8 +89,8 @@ public interface Constants {
 /**
  * Config variable for specifying a regex link which uses regular expressions
  * as source and target could use group captured in src.
- * E.g. (^/(?<firstDir>\\w+), /prefix-${firstDir}) =>
- * (/path1/file1 => /prefix-path1/file1)
+ * E.g. {@literal (^/(?<firstDir>\\w+), /prefix-${firstDir}) =>
+ * (/path1/file1 => /prefix-path1/file1)}
  */
 String CONFIG_VIEWFS_LINK_REGEX = "linkRegex";
 
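Note: {@literal ...} renders its argument verbatim, so <firstDir> in the regex above is no longer parsed as an unknown HTML tag. The same escape is reused throughout the rest of this commit for mount-table.<versionNumber>.xml, fs.<scheme>.impl, and the ->/> cases in the Groups javadoc. A minimal before/after sketch:

  // Fails: doclint parses <firstDir> as malformed HTML
  /** E.g. (^/(?<firstDir>\\w+), /prefix-${firstDir}) */

  // Passes: {@literal} escapes the angle brackets verbatim
  /** E.g. {@literal (^/(?<firstDir>\\w+), /prefix-${firstDir})} */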
@@ -42,7 +42,7 @@ public class HCFSMountTableConfigLoader implements MountTableConfigLoader {
  * Loads the mount-table configuration from hadoop compatible file system and
  * add the configuration items to given configuration. Mount-table
  * configuration format should be suffixed with version number.
- * Format: mount-table.<versionNumber>.xml
+ * Format: {@literal mount-table.<versionNumber>.xml}
  * Example: mount-table.1.xml
  * When user wants to update mount-table, the expectation is to upload new
  * mount-table configuration file with monotonically increasing integer as
@@ -33,73 +33,85 @@
 
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
 
-/******************************************************************************
- * This class is extended from the ViewFileSystem for the overloaded scheme
- * file system. Mount link configurations and in-memory mount table
- * building behaviors are inherited from ViewFileSystem. Unlike ViewFileSystem
- * scheme (viewfs://), the users would be able to use any scheme.
+/**
+ * <p> This class is extended from the ViewFileSystem for the overloaded
+ * scheme file system. Mount link configurations and in-memory mount table
+ * building behaviors are inherited from ViewFileSystem. Unlike
+ * ViewFileSystem scheme (viewfs://), the users would be able to use
+ * any scheme. </p>
  *
- * To use this class, the following configurations need to be added in
- * core-site.xml file.
- * 1) fs.<scheme>.impl
- * = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme
- * 2) fs.viewfs.overload.scheme.target.<scheme>.impl
- * = <hadoop compatible file system implementation class name for the
- * <scheme>"
+ * <p> To use this class, the following configurations need to be added in
+ * core-site.xml file. <br>
+ * 1) fs.{@literal <scheme>}.impl
+ * = org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme <br>
+ * 2) fs.viewfs.overload.scheme.target.{@literal <scheme>}.impl
+ * = {@literal <hadoop compatible file system implementation class name
+ * for the <scheme>>} </p>
  *
- * Here <scheme> can be any scheme, but with that scheme there should be a
- * hadoop compatible file system available. Second configuration value should
- * be the respective scheme's file system implementation class.
+ * <p> Here {@literal <scheme>} can be any scheme, but with that scheme there
+ * should be a hadoop compatible file system available. Second configuration
+ * value should be the respective scheme's file system implementation class.
  * Example: if scheme is configured with "hdfs", then the 2nd configuration
  * class name will be org.apache.hadoop.hdfs.DistributedFileSystem.
  * if scheme is configured with "s3a", then the 2nd configuration class name
- * will be org.apache.hadoop.fs.s3a.S3AFileSystem.
+ * will be org.apache.hadoop.fs.s3a.S3AFileSystem. </p>
  *
- * Use Case 1:
- * ===========
+ * <p> Use Case 1: <br>
+ * =========== <br>
  * If users want some of their existing cluster (hdfs://Cluster)
  * data to mount with other hdfs and object store clusters(hdfs://NN1,
- * o3fs://bucket1.volume1/, s3a://bucket1/)
+ * o3fs://bucket1.volume1/, s3a://bucket1/) </p>
  *
- * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user
- * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data
+ * <p>
+ * fs.viewfs.mounttable.Cluster.link./user = hdfs://NN1/user <br>
+ * fs.viewfs.mounttable.Cluster.link./data = o3fs://bucket1.volume1/data <br>
  * fs.viewfs.mounttable.Cluster.link./backup = s3a://bucket1/backup/
+ * </p>
  *
+ * <p>
  * Op1: Create file hdfs://Cluster/user/fileA will go to hdfs://NN1/user/fileA
+ * <br>
  * Op2: Create file hdfs://Cluster/data/datafile will go to
- * o3fs://bucket1.volume1/data/datafile
+ * o3fs://bucket1.volume1/data/datafile<br>
  * Op3: Create file hdfs://Cluster/backup/data.zip will go to
  * s3a://bucket1/backup/data.zip
+ * </p>
  *
- * Use Case 2:
- * ===========
+ * <p> Use Case 2:<br>
+ * ===========<br>
  * If users want some of their existing cluster (s3a://bucketA/)
  * data to mount with other hdfs and object store clusters
- * (hdfs://NN1, o3fs://bucket1.volume1/)
+ * (hdfs://NN1, o3fs://bucket1.volume1/) </p>
  *
- * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user
- * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data
+ * <p>
+ * fs.viewfs.mounttable.bucketA.link./user = hdfs://NN1/user<br>
+ * fs.viewfs.mounttable.bucketA.link./data = o3fs://bucket1.volume1/data<br>
  * fs.viewfs.mounttable.bucketA.link./salesDB = s3a://bucketA/salesDB/
+ * </p>
  *
+ * <p>
  * Op1: Create file s3a://bucketA/user/fileA will go to hdfs://NN1/user/fileA
+ * <br>
  * Op2: Create file s3a://bucketA/data/datafile will go to
- * o3fs://bucket1.volume1/data/datafile
+ * o3fs://bucket1.volume1/data/datafile<br>
  * Op3: Create file s3a://bucketA/salesDB/dbfile will go to
  * s3a://bucketA/salesDB/dbfile
+ * </p>
  *
- * Note:
+ * <p> Note:<br>
  * (1) In ViewFileSystemOverloadScheme, by default the mount links will be
  * represented as non-symlinks. If you want to change this behavior, please see
- * {@link ViewFileSystem#listStatus(Path)}
+ * {@link ViewFileSystem#listStatus(Path)}<br>
  * (2) In ViewFileSystemOverloadScheme, only the initialized uri's hostname will
  * be considered as the mount table name. When the passed uri has hostname:port,
  * it will simply ignore the port number and only hostname will be considered as
- * the mount table name.
+ * the mount table name.<br>
  * (3) If there are no mount links configured with the initializing uri's
  * hostname as the mount table name, then it will automatically consider the
- * current uri as fallback( ex: fs.viewfs.mounttable.<mycluster>.linkFallback)
- * target fs uri.
- *****************************************************************************/
+ * current uri as fallback( ex:
+ * {@literal fs.viewfs.mounttable.<mycluster>.linkFallback}) target fs uri.
+ * </p>
+ */
 @InterfaceAudience.LimitedPrivate({ "MapReduce", "HBase", "Hive" })
 @InterfaceStability.Evolving
 public class ViewFileSystemOverloadScheme extends ViewFileSystem {
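Note: besides the {@literal} escapes, this hunk rewrites the /****... banner as a regular /** javadoc comment and gives the free-form text explicit <p>...</p> paragraphs and <br> breaks, which is the block structure javadoc expects. The setup the javadoc describes, condensed into a hedged sketch (the class and method names here are hypothetical; keys and values are quoted from the javadoc's Use Case 1):

  import org.apache.hadoop.conf.Configuration;

  public class OverloadSchemeSetup {
    static Configuration useCaseOne() {
      Configuration conf = new Configuration();
      // 1) Route the "hdfs" scheme through the overload-scheme file system.
      conf.set("fs.hdfs.impl",
          "org.apache.hadoop.fs.viewfs.ViewFileSystemOverloadScheme");
      // 2) Target implementation for mounts that resolve back to real HDFS.
      conf.set("fs.viewfs.overload.scheme.target.hdfs.impl",
          "org.apache.hadoop.hdfs.DistributedFileSystem");
      // Mount links from Use Case 1 above.
      conf.set("fs.viewfs.mounttable.Cluster.link./user", "hdfs://NN1/user");
      conf.set("fs.viewfs.mounttable.Cluster.link./data",
          "o3fs://bucket1.volume1/data");
      conf.set("fs.viewfs.mounttable.Cluster.link./backup",
          "s3a://bucket1/backup/");
      return conf;
    }
  }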
@@ -164,12 +176,13 @@ public void initialize(URI theUri, Configuration conf) throws IOException {
 /**
  * This method is overridden because in ViewFileSystemOverloadScheme if
  * overloaded scheme matches with mounted target fs scheme, file system
- * should be created without going into fs.<scheme>.impl based resolution.
- * Otherwise it will end up in an infinite loop as the target will be
- * resolved again to ViewFileSystemOverloadScheme as fs.<scheme>.impl points
- * to ViewFileSystemOverloadScheme. So, below method will initialize the
- * fs.viewfs.overload.scheme.target.<scheme>.impl. Other schemes can
- * follow fs.newInstance
+ * should be created without going into {@literal fs.<scheme>.impl} based
+ * resolution. Otherwise it will end up in an infinite loop as the target
+ * will be resolved again to ViewFileSystemOverloadScheme as
+ * {@literal fs.<scheme>.impl} points to ViewFileSystemOverloadScheme.
+ * So, below method will initialize the
+ * {@literal fs.viewfs.overload.scheme.target.<scheme>.impl}.
+ * Other schemes can follow fs.newInstance
  */
 @Override
 protected FsGetter fsGetter() {
@@ -179,7 +192,7 @@ protected FsGetter fsGetter() {
 /**
  * This class checks whether the rooScheme is same as URI scheme. If both are
  * same, then it will initialize file systems by using the configured
- * fs.viewfs.overload.scheme.target.<scheme>.impl class.
+ * {@literal fs.viewfs.overload.scheme.target.<scheme>.impl} class.
  */
 static class ChildFsGetter extends FsGetter {
 
@@ -50,8 +50,8 @@ private ProxyCombiner() { }
  * all of the methods of the combined proxy interface, delegating calls
  * to which proxy implements that method. If multiple proxies implement the
  * same method, the first in the list will be used for delegation.
- *
- * <p/>This will check that every method on the combined interface is
+ * <p>
+ * This will check that every method on the combined interface is
  * implemented by at least one of the supplied proxy objects.
  *
  * @param combinedProxyInterface The interface of the combined proxy.
@@ -29,8 +29,8 @@
  * {@link ProcessingDetails}). This can be used by specifying the
  * {@link org.apache.hadoop.fs.CommonConfigurationKeys#IPC_COST_PROVIDER_KEY}
  * configuration key.
- *
- * <p/>This allows for configuration of how heavily each of the operations
+ * <p>
+ * This allows for configuration of how heavily each of the operations
  * within {@link ProcessingDetails} is weighted. By default,
  * {@link ProcessingDetails.Timing#LOCKFREE},
  * {@link ProcessingDetails.Timing#RESPONSE}, and
@@ -22,7 +22,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 
-import java.io.IOException;
 import java.net.URI;
 
 /**
@@ -49,7 +48,7 @@ private DomainNameResolverFactory() {
  * @return Domain name resolver.
  */
 public static DomainNameResolver newInstance(
-    Configuration conf, URI uri, String configKey) throws IOException {
+    Configuration conf, URI uri, String configKey) {
   String host = uri.getHost();
   String confKeyWithHost = configKey + "." + host;
   return newInstance(conf, confKeyWithHost);
|
|||||||
* @param conf Configuration
|
* @param conf Configuration
|
||||||
* @param configKey config key name.
|
* @param configKey config key name.
|
||||||
* @return Domain name resolver.
|
* @return Domain name resolver.
|
||||||
* @throws IOException when the class cannot be found or initiated.
|
|
||||||
*/
|
*/
|
||||||
public static DomainNameResolver newInstance(
|
public static DomainNameResolver newInstance(
|
||||||
Configuration conf, String configKey) {
|
Configuration conf, String configKey) {
|
||||||
|
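Note: the two DomainNameResolverFactory hunks drop a throws IOException that the three-argument overload never actually raised (it only builds a host-qualified key and delegates to the two-argument overload), along with the now-unused import and @throws tag. Callers no longer need a try/catch; a hedged usage sketch (the config key and URI here are hypothetical, and the org.apache.hadoop.net package is assumed):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.net.DomainNameResolver;         // package assumed
  import org.apache.hadoop.net.DomainNameResolverFactory;  // package assumed

  public class ResolverExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      URI uri = URI.create("hdfs://nn.example.com:8020");  // hypothetical URI
      // No checked exception to handle after this change.
      DomainNameResolver resolver = DomainNameResolverFactory.newInstance(
          conf, uri, "my.resolver.key");                   // hypothetical key
    }
  }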
@@ -201,10 +201,10 @@ private IOException noGroupsForUser(String user) {
 /**
  * Get the group memberships of a given user.
  * If the user's group is not cached, this method may block.
- * Note this method can be expensive as it involves Set->List conversion.
- * For user with large group membership (i.e., > 1000 groups), we recommend
- * using getGroupSet to avoid the conversion and fast membership look up via
- * contains().
+ * Note this method can be expensive as it involves Set {@literal ->} List
+ * conversion. For user with large group membership
+ * (i.e., {@literal >} 1000 groups), we recommend using getGroupSet
+ * to avoid the conversion and fast membership look up via contains().
  * @param user User's name
  * @return the group memberships of the user as list
  * @throws IOException if user does not exist
|
|||||||
* Get the group memberships of a given user.
|
* Get the group memberships of a given user.
|
||||||
* If the user's group is not cached, this method may block.
|
* If the user's group is not cached, this method may block.
|
||||||
* This provide better performance when user has large group membership via
|
* This provide better performance when user has large group membership via
|
||||||
* 1) avoid set->list->set conversion for the caller UGI/PermissionCheck
|
* <br>
|
||||||
|
* 1) avoid {@literal set->list->set} conversion for the caller
|
||||||
|
* UGI/PermissionCheck <br>
|
||||||
* 2) fast lookup using contains() via Set instead of List
|
* 2) fast lookup using contains() via Set instead of List
|
||||||
* @param user User's name
|
* @param user User's name
|
||||||
* @return the group memberships of the user as set
|
* @return the group memberships of the user as set
|
||||||
|
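Note: the reworded Groups javadoc recommends the Set-returning variant for users with large memberships, since getGroups pays for a Set-to-List conversion on every call. A hedged sketch of the recommended lookup (getGroupsSet is taken to be the Set-returning method these javadocs describe; treat the exact name as an assumption):

  import java.io.IOException;
  import java.util.Set;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.Groups;

  public class GroupCheck {
    static boolean isMember(String user, String group) throws IOException {
      Groups groups =
          Groups.getUserToGroupsMappingService(new Configuration());
      // Set-based lookup: avoids the Set->List conversion and gives
      // constant-time contains(), per the javadoc guidance above.
      Set<String> memberships = groups.getGroupsSet(user);
      return memberships.contains(group);
    }
  }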
@@ -43,6 +43,8 @@
  *
  * <p>
  * The factory has several different modes of operation:
+ * </p>
+ *
  * <ul>
  * <li>OpenSSL: Uses the wildly-openssl library to delegate to the
  * system installed OpenSSL. If the wildfly-openssl integration is not
@@ -54,7 +56,6 @@
  * <li>Default_JSSE_with_GCM: Delegates to the JSSE implementation of
  * SSL with no modification to the list of enabled ciphers.</li>
  * </ul>
- * </p>
  *
  * In order to load OpenSSL, applications must ensure the wildfly-openssl
  * artifact is on the classpath. Currently, only ABFS declares