HADOOP-11720. [JDK8] Fix javadoc errors caused by incorrect or illegal tags in hadoop-tools. Contributed by Akira AJISAKA.
parent bb243cea93
commit ef9946cd52
@@ -1108,6 +1108,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support FreeBSD
     and Solaris in addition to Linux. (Kiran Kumar M R via cnauroth)
 
+    HADOOP-11720. [JDK8] Fix javadoc errors caused by incorrect or illegal
+    tags in hadoop-tools. (Akira AJISAKA via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -41,8 +41,8 @@
 public class DfsTask extends Task {
 
   /**
-   * Default sink for {@link java.lang.System.out System.out}
-   * and {@link java.lang.System.err System.err}.
+   * Default sink for {@link java.lang.System#out}
+   * and {@link java.lang.System#err}.
    */
   private static final OutputStream nullOut = new OutputStream() {
       public void write(int b) { /* ignore */ }
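The hunk above is typical of the rest of the patch: JDK8 runs javadoc with doclint enabled, and the same few substitutions recur in every file below. A schematic summary in plain Java, written for this note and not taken from any of the patched files:

// Schematic only: the left-hand forms fail javadoc under JDK8 doclint,
// the right-hand forms are the kind of replacement this patch applies.
public final class DoclintFixPatterns {
  //   {@link java.lang.System.out}      ->  {@link java.lang.System#out}          (member links use '#')
  //   <p/>                              ->  <p>                                    (no self-closing HTML)
  //   newCapacity >= getCount()         ->  newCapacity {@literal >=} getCount()   (escape bare < and >)
  //   <sourceRelativePath, ...>         ->  {@literal <sourceRelativePath, ...>}
  //   @param/@throws for names that do  ->  tag removed, or renamed to the real
  //   not exist on the method               parameter or exception

  /** Accepted forms in one place: {@literal -112 <= i <= 127} and {@link java.lang.System#out}. */
  private DoclintFixPatterns() { }
}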
@@ -171,7 +171,7 @@ protected int postCmd(int exit_code) {
   }
 
   /**
-   * Invoke {@link org.apache.hadoop.fs.FsShell#doMain FsShell.doMain} after a
+   * Invoke {@link org.apache.hadoop.fs.FsShell#main} after a
    * few cursory checks of the configuration.
    */
   public void execute() throws BuildException {
@@ -44,10 +44,9 @@
 import org.apache.hadoop.util.Progressable;
 
 /**
- * <p>
  * A block-based {@link FileSystem} backed by
  * <a href="http://aws.amazon.com/s3">Amazon S3</a>.
- * </p>
+ *
  * @see NativeS3FileSystem
  */
 @InterfaceAudience.Public
@@ -70,7 +69,6 @@ public S3FileSystem(FileSystemStore store) {
 
   /**
    * Return the protocol scheme for the FileSystem.
-   * <p/>
    *
    * @return <code>s3</code>
    */
@@ -57,11 +57,11 @@
 /**
  * Upload files/parts asap directly from a memory buffer (instead of buffering
  * to a file).
- * <p/>
+ * <p>
  * Uploads are managed low-level rather than through the AWS TransferManager.
  * This allows for uploading each part of a multi-part upload as soon as
  * the bytes are in memory, rather than waiting until the file is closed.
- * <p/>
+ * <p>
  * Unstable: statistics and error handling might evolve
  */
 @InterfaceStability.Unstable
@@ -62,24 +62,29 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * <p>
  * A {@link FileSystem} for reading and writing files stored on
  * <a href="http://aws.amazon.com/s3">Amazon S3</a>.
  * Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
  * stores files on S3 in their
  * native form so they can be read by other S3 tools.
- *
+ * <p>
  * A note about directories. S3 of course has no "native" support for them.
  * The idiom we choose then is: for any directory created by this class,
  * we use an empty object "#{dirpath}_$folder$" as a marker.
  * Further, to interoperate with other S3 tools, we also accept the following:
- *  - an object "#{dirpath}/' denoting a directory marker
- *  - if there exists any objects with the prefix "#{dirpath}/", then the
- *    directory is said to exist
- *  - if both a file with the name of a directory and a marker for that
- *    directory exists, then the *file masks the directory*, and the directory
- *    is never returned.
- * </p>
+ * <ul>
+ * <li>an object "#{dirpath}/' denoting a directory marker</li>
+ * <li>
+ * if there exists any objects with the prefix "#{dirpath}/", then the
+ * directory is said to exist
+ * </li>
+ * <li>
+ * if both a file with the name of a directory and a marker for that
+ * directory exists, then the *file masks the directory*, and the directory
+ * is never returned.
+ * </li>
+ * </ul>
+ *
  * @see org.apache.hadoop.fs.s3.S3FileSystem
  */
 @InterfaceAudience.Public
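As an aside on the directory idiom spelled out in the javadoc above, here is a self-contained sketch of the two existence rules (marker object, or any object under the prefix) and the file-masks-directory rule. The ObjectStore interface below is hypothetical and only stands in for the real hadoop-aws store; it is not the actual API and is not part of this patch.

import java.io.IOException;
import java.util.List;

// Illustrative sketch of the directory convention described above.
final class S3DirectoryConventionSketch {

  /** Hypothetical minimal store interface, not the hadoop-aws NativeFileSystemStore API. */
  interface ObjectStore {
    boolean exists(String key) throws IOException;                   // exact-key lookup
    List<String> listKeys(String prefix, int max) throws IOException; // prefix listing
  }

  private static final String FOLDER_SUFFIX = "_$folder$";

  /** A directory exists if its marker object exists or anything lives under "dirpath/". */
  static boolean directoryExists(ObjectStore store, String dirpath) throws IOException {
    if (store.exists(dirpath + FOLDER_SUFFIX)) {
      return true;
    }
    return !store.listKeys(dirpath + "/", 1).isEmpty();
  }

  /** File-masks-directory rule: a plain object with the directory's own name wins. */
  static boolean isFile(ObjectStore store, String path) throws IOException {
    return store.exists(path);
  }

  private S3DirectoryConventionSketch() { }
}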
@@ -308,7 +313,6 @@ public NativeS3FileSystem(NativeFileSystemStore store) {
 
   /**
    * Return the protocol scheme for the FileSystem.
-   * <p/>
    *
    * @return <code>s3n</code>
    */
@@ -600,8 +600,6 @@ private String getHTTPScheme() {
    * Azure.
    *
    * @throws AzureException
-   * @throws ConfigurationException
-   *
    */
   private void configureAzureStorageSession() throws AzureException {
 
@@ -705,7 +703,7 @@ private void configureAzureStorageSession() throws AzureException {
    *           raised on errors communicating with Azure storage.
    * @throws IOException
    *           raised on errors performing I/O or setting up the session.
-   * @throws URISyntaxExceptions
+   * @throws URISyntaxException
    *           raised on creating mal-formed URI's.
    */
   private void connectUsingAnonymousCredentials(final URI uri)
@@ -1036,7 +1034,6 @@ private Set<String> getDirectorySet(final String configVar)
   /**
    * Checks if the given key in Azure Storage should be stored as a page
    * blob instead of block blob.
-   * @throws URISyntaxException
    */
   public boolean isPageBlobKey(String key) {
     return isKeyForDirectorySet(key, pageBlobDirs);
@@ -1755,7 +1752,7 @@ private String normalizeKey(CloudBlobWrapper blob) {
    * the path and returns a path relative to the root directory of the
    * container.
    *
-   * @param blob
+   * @param directory
    *          - adjust the key to this directory to a path relative to the root
    *          directory
    *
@@ -2142,14 +2139,10 @@ private PartialListing list(String prefix, String delimiter,
    * uses a in-order first traversal of blob directory structures to maintain
    * the sorted order of the blob names.
    *
-   * @param dir
-   *          -- Azure blob directory
-   *
-   * @param list
-   *          -- a list of file metadata objects for each non-directory blob.
-   *
-   * @param maxListingLength
-   *          -- maximum length of the built up list.
+   * @param aCloudBlobDirectory Azure blob directory
+   * @param aFileMetadataList a list of file metadata objects for each
+   *          non-directory blob.
+   * @param maxListingCount maximum length of the built up list.
    */
   private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory,
       ArrayList<FileMetadata> aFileMetadataList, final int maxListingCount,
@@ -2320,8 +2313,7 @@ private long getDataLength(CloudBlobWrapper blob, BlobProperties properties)
    * swallow the error since what most probably happened is that
    * the first operation succeeded on the server.
    * @param blob The blob to delete.
-   * @param leaseID A string identifying the lease, or null if no
-   *        lease is to be used.
+   * @param lease Azure blob lease, or null if no lease is to be used.
    * @throws StorageException
    */
   private void safeDelete(CloudBlobWrapper blob, SelfRenewingLease lease) throws StorageException {
@@ -81,12 +81,10 @@
 import com.microsoft.azure.storage.core.*;
 
 /**
- * <p>
  * A {@link FileSystem} for reading and writing files stored on <a
  * href="http://store.azure.com/">Windows Azure</a>. This implementation is
  * blob-based and stores files on Azure in their native form so they can be read
  * by other Azure tools.
- * </p>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
@@ -218,9 +216,11 @@ public SelfRenewingLease getFolderLease() {
   }
 
   /**
-   * Write to disk the information needed to redo folder rename, in JSON format.
-   * The file name will be wasb://<sourceFolderPrefix>/folderName-RenamePending.json
+   * Write to disk the information needed to redo folder rename,
+   * in JSON format. The file name will be
+   * {@code wasb://<sourceFolderPrefix>/folderName-RenamePending.json}
    * The file format will be:
+   * <pre>{@code
    * {
    *   FormatVersion: "1.0",
    *   OperationTime: "<YYYY-MM-DD HH:MM:SS.MMM>",
@@ -239,7 +239,7 @@ public SelfRenewingLease getFolderLease() {
    *     "innerFile",
    *     "innerFile2"
    *   ]
-   * }
+   * } }</pre>
    * @throws IOException
    */
   public void writeFile(FileSystem fs) throws IOException {
@@ -913,9 +913,6 @@ public void setEncodedKey(String anEncodedKey) {
    * The create also includes the name of the original key value which is
    * stored in the m_key member variable. This method should only be called
    * when the stream is closed.
-   *
-   * @param anEncodedKey
-   *          Encoding of the original key stored in m_key member.
    */
   private void restoreKey() throws IOException {
     store.rename(getEncodedKey(), getKey());
@@ -1796,7 +1793,7 @@ private static enum UMaskApplyMode {
    *
    * @param permission
    *          The permission to mask.
-   * @param applyDefaultUmask
+   * @param applyMode
    *          Whether to also apply the default umask.
    * @return The masked persmission.
    */
@@ -2409,7 +2406,6 @@ protected void finalize() throws Throwable {
    * recover the original key.
    *
    * @param aKey
-   * @param numBuckets
    * @return Encoded version of the original key.
    */
   private static String encodeKey(String aKey) {
@@ -79,7 +79,7 @@ public CopyListingFileStatus(FileStatus fileStatus) throws IOException {
   /**
    * Returns the full logical ACL.
    *
-   * @return List<AclEntry> containing full logical ACL
+   * @return List containing full logical ACL
    */
   public List<AclEntry> getAclEntries() {
     return AclUtil.getAclFromPermAndEntries(getPermission(),
@@ -89,7 +89,7 @@ public List<AclEntry> getAclEntries() {
   /**
    * Sets optional ACL entries.
    *
-   * @param aclEntries List<AclEntry> containing all ACL entries
+   * @param aclEntries List containing all ACL entries
    */
   public void setAclEntries(List<AclEntry> aclEntries) {
     this.aclEntries = aclEntries;
@@ -98,7 +98,7 @@ public void setAclEntries(List<AclEntry> aclEntries) {
   /**
    * Returns all xAttrs.
    *
-   * @return Map<String, byte[]> containing all xAttrs
+   * @return Map containing all xAttrs
    */
   public Map<String, byte[]> getXAttrs() {
     return xAttrs != null ? xAttrs : Collections.<String, byte[]>emptyMap();
@@ -107,7 +107,7 @@ public Map<String, byte[]> getXAttrs() {
   /**
    * Sets optional xAttrs.
    *
-   * @param xAttrs Map<String, byte[]> containing all xAttrs
+   * @param xAttrs Map containing all xAttrs
    */
   public void setXAttrs(Map<String, byte[]> xAttrs) {
     this.xAttrs = xAttrs;
@@ -141,7 +141,7 @@ public void doBuildListing(Path pathToListingFile, DistCpOptions options) throws
   }
   /**
    * Collect the list of
-   *   <sourceRelativePath, sourceFileStatus>
+   *   {@literal <sourceRelativePath, sourceFileStatus>}
    * to be copied and write to the sequence file. In essence, any file or
    * directory that need to be copied or sync-ed is written as an entry to the
    * sequence file, with the possible exception of the source root:
@@ -270,7 +270,7 @@ public static void preserve(FileSystem targetFS, Path path,
    *
    * @param fileSystem FileSystem containing the file
    * @param fileStatus FileStatus of file
-   * @return List<AclEntry> containing full logical ACL
+   * @return List containing full logical ACL
    * @throws IOException if there is an I/O error
    */
   public static List<AclEntry> getAcl(FileSystem fileSystem,
@@ -285,7 +285,7 @@ public static List<AclEntry> getAcl(FileSystem fileSystem,
    *
    * @param fileSystem FileSystem containing the file
    * @param path file path
-   * @return Map<String, byte[]> containing all xAttrs
+   * @return Map containing all xAttrs
    * @throws IOException if there is an I/O error
    */
   public static Map<String, byte[]> getXAttrs(FileSystem fileSystem,
@@ -25,10 +25,10 @@
 
 /**
  * A byte sequence that is used as a Java native type for buffer.
- * It is resizable and distinguishes between the count of the seqeunce and
+ * It is resizable and distinguishes between the count of the sequence and
  * the current capacity.
  *
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
+ * @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>.
  */
 @Deprecated
 @InterfaceAudience.Public
@@ -124,7 +124,7 @@ public int getCapacity() {
 
   /**
    * Change the capacity of the backing storage.
-   * The data is preserved if newCapacity >= getCount().
+   * The data is preserved if newCapacity {@literal >=} getCount().
    * @param newCapacity The new capacity in bytes.
    */
   public void setCapacity(int newCapacity) {
@@ -162,7 +162,7 @@ public void reset() {
   public void truncate() {
     setCapacity(count);
   }
-  
+
   /**
    * Append specified bytes to the buffer.
    *
@@ -28,9 +28,9 @@
 import org.apache.hadoop.io.WritableUtils;
 
 /**
- * Various utility functions for Hadooop record I/O runtime.
+ * Various utility functions for Hadoop record I/O runtime.
  *
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
+ * @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>.
  */
 @Deprecated
 @InterfaceAudience.Public
@@ -462,8 +462,8 @@ public static int getVIntSize(long i) {
 
   /**
    * Serializes a long to a binary stream with zero-compressed encoding.
-   * For -112 <= i <= 127, only one byte is used with the actual value.
-   * For other values of i, the first byte value indicates whether the
+   * For {@literal -112 <= i <= 127}, only one byte is used with the actual
+   * value. For other values of i, the first byte value indicates whether the
    * long is positive or negative, and the number of bytes that follow.
    * If the first byte value v is between -113 and -120, the following long
    * is positive, with number of bytes that follow are -(v+112).
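For readers of this last hunk, a compact sketch of the zero-compressed encoding that the javadoc describes: one byte for values in -112..127, otherwise a sign-and-length byte followed by the magnitude in big-endian order. This is written from the description above (the real code lives in the deprecated record I/O Utils class), so treat it as illustrative rather than the project's implementation.

import java.io.DataOutput;
import java.io.IOException;

// Illustrative encoder for the zero-compressed long format described above.
final class ZeroCompressedLongSketch {

  static void writeVLong(DataOutput out, long i) throws IOException {
    if (i >= -112 && i <= 127) {
      out.writeByte((byte) i);            // small values: the value itself is the single byte
      return;
    }
    int len = -112;                       // positives end up with a first byte in -113..-120
    if (i < 0) {
      i ^= -1L;                           // one's complement, so the magnitude is non-negative
      len = -120;                         // negatives end up with a first byte in -121..-128
    }
    for (long tmp = i; tmp != 0; tmp >>= 8) {
      len--;                              // count how many payload bytes are needed
    }
    out.writeByte((byte) len);            // first byte encodes sign and byte count
    int bytes = (len < -120) ? -(len + 120) : -(len + 112);
    for (int idx = bytes; idx != 0; idx--) {
      int shift = (idx - 1) * 8;
      out.writeByte((byte) ((i >> shift) & 0xFF));   // big-endian payload
    }
  }

  private ZeroCompressedLongSketch() { }
}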