diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 2d8953e98b..ff2c5f37c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -568,7 +568,7 @@ public FSDataOutputStream next(final FileSystem fs, final Path p)
 
   /**
    * Same as
-   * {@link #create(Path, FsPermission, EnumSet, int, short, long,
+   * {@link #create(Path, FsPermission, EnumSet, int, short, long,
    * Progressable, ChecksumOpt)} with a few additions. First, addition of
    * favoredNodes that is a hint to where the namenode should place the file
    * blocks. The favored nodes hint is not persisted in HDFS. Hence it may be
@@ -637,12 +637,12 @@ protected HdfsDataOutputStream primitiveCreate(Path f,
 
   /**
    * Similar to {@link #create(Path, FsPermission, EnumSet, int, short, long,
-   * Progressable, ChecksumOpt, InetSocketAddress[], String)}, it provides a
+   * Progressable, ChecksumOpt, InetSocketAddress[], String, String)}, it provides a
    * HDFS-specific version of {@link #createNonRecursive(Path, FsPermission,
    * EnumSet, int, short, long, Progressable)} with a few additions.
    *
    * @see #create(Path, FsPermission, EnumSet, int, short, long, Progressable,
-   *      ChecksumOpt, InetSocketAddress[], String) for the descriptions of
+   *      ChecksumOpt, InetSocketAddress[], String, String) for the descriptions of
    *      additional parameters, i.e., favoredNodes, ecPolicyName and
    *      storagePolicyName.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
index ad4cea6468..fe87158c1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
@@ -19,6 +19,9 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+
+import java.util.EnumSet;
 
 /**
  * CreateEncryptionZoneFlag is used in
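
For reviewers who want to see the favoredNodes hint from the DistributedFileSystem javadoc above in action, here is a minimal usage sketch against the public favored-nodes create() overload. The hostnames, port, path, and sizes are made up for illustration, and the cast assumes fs.defaultFS points at an HDFS cluster:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class FavoredNodesExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);
        // A placement *hint*, not a guarantee; as the javadoc above notes,
        // the hint is not persisted, so later re-replication may ignore it.
        InetSocketAddress[] favoredNodes = {
            new InetSocketAddress("dn1.example.com", 9866),
            new InetSocketAddress("dn2.example.com", 9866)
        };
        try (FSDataOutputStream out = dfs.create(new Path("/tmp/favored.bin"),
            FsPermission.getFileDefault(), true /* overwrite */,
            4096 /* bufferSize */, (short) 3 /* replication */,
            128L * 1024 * 1024 /* blockSize */, null /* progress */,
            favoredNodes)) {
          out.writeBytes("sample payload");
        }
      }
    }
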
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
index e5df4893a9..ecaa97b933 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
@@ -131,7 +131,7 @@ public Class<?> getProtocol() {
   /**
    * Get the represented java method.
    *
-   * @return Method
+   * @return {@link Method}
    * @throws IOException If the method cannot be found.
    */
   public Method getMethod() throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
index 624e574024..a65120e361 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
@@ -87,7 +87,7 @@ boolean isEpochEstablished() {
 
   /**
    * @return the epoch number for this writer. This may only be called after
-   * a successful call to {@link #createNewUniqueEpoch(NamespaceInfo)}.
+   * a successful call to {@link QuorumJournalManager#createNewUniqueEpoch()}.
    */
   long getEpoch() {
     Preconditions.checkState(myEpoch != INVALID_EPOCH,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 782f2f36cc..5cf13f698e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -1403,7 +1403,7 @@ public boolean accept(File dir, String name) {
   }
 
   /**
-   * Get the BlockPoolSliceStorage from {@link bpStorageMap}.
+   * Get the BlockPoolSliceStorage from {@link #bpStorageMap}.
    * If the object is not found, create a new object and put it to the map.
    */
   synchronized BlockPoolSliceStorage getBlockPoolSliceStorage(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index f969c7ade2..af62835c4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -44,14 +44,14 @@
 
 /**
  * An implementation of {@link AsyncChecker} that skips checking recently
- * checked objects. It will enforce at least {@link minMsBetweenChecks}
+ * checked objects. It will enforce at least {@link #minMsBetweenChecks}
  * milliseconds between two successive checks of any one object.
  *
  * It is assumed that the total number of Checkable objects in the system
  * is small, (not more than a few dozen) since the checker uses O(Checkables)
  * storage and also potentially O(Checkables) threads.
 *
- * {@link minMsBetweenChecks} should be configured reasonably
+ * {@link #minMsBetweenChecks} should be configured reasonably
 * by the caller to avoid spinning up too many threads frequently.
 */
@InterfaceAudience.Private
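
Note that the two field-reference fixes above (bpStorageMap, minMsBetweenChecks) and the overload fixes elsewhere in this patch come down to the same javadoc rule: a member of the current class needs a '#' prefix, and an overloaded method is only resolved when the full parameter list names one actual overload. A compact illustration with made-up members:

    /** Illustrates the link forms this patch corrects. */
    class LinkDemo {
      private long minMsBetweenChecks;

      /**
       * {@link minMsBetweenChecks}   - broken: javadoc resolves this as a
       *                                class named minMsBetweenChecks.
       * {@link #minMsBetweenChecks}  - correct reference to the field above.
       * {@link #check(String)}       - correct; the parameter list selects
       *                                one overload of check().
       * {@link LinkDemo#check(String, int)} - qualified form, usable from
       *                                another class's javadoc.
       */
      void check(String id) { }
      void check(String id, int attempts) { }
    }
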
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java
index 6bb2c7a841..6f80780e14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java
@@ -31,6 +31,7 @@
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.ExecutionException;
 
 /**
  * Implementation of {@code Futures#withTimeout}.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
index 1103468d3c..15bd9dec60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
@@ -184,7 +184,7 @@ public long getLockedBytesReserved() {
    * {@link org.apache.hadoop.hdfs.DFSConfigKeys#DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY}.
    *
    * @param conf the configuration to be used
-   * @param dataset the FsDataset object.
+   * @param fsDataset the FsDataset object.
    * @return an instance of RamDiskReplicaTracker
    */
   static RamDiskReplicaTracker getInstance(final Configuration conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c129d1928a..8f7db1a06e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -152,8 +152,8 @@ private static INodesInPath dstForRenameTo(
    * @param srcIIP source path
    * @param dstIIP destination path
    * @return true INodesInPath if rename succeeds; null otherwise
-   * @deprecated See {@link #renameToInt(FSDirectory, String, String,
-   *     boolean, Options.Rename...)}
+   * @deprecated See {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   *     String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   static INodesInPath unprotectedRenameTo(FSDirectory fsd,
@@ -258,8 +258,8 @@ static RenameResult renameToInt(
   }
 
   /**
-   * @see {@link #unprotectedRenameTo(FSDirectory, String, String, INodesInPath,
-   *     INodesInPath, long, BlocksMapUpdateInfo, Options.Rename...)}
+   * @see {@link #unprotectedRenameTo(FSDirectory, INodesInPath, INodesInPath,
+   *     long, BlocksMapUpdateInfo, Options.Rename...)}
    */
   static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
       String src, String dst, BlocksMapUpdateInfo collectedBlocks,
@@ -482,8 +482,8 @@ static RenameResult unprotectedRenameTo(FSDirectory fsd,
   }
   /**
-   * @deprecated Use {@link #renameToInt(FSDirectory, String, String,
-   *     boolean, Options.Rename...)}
+   * @deprecated Use {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   *     String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   private static RenameResult
       renameTo(FSDirectory fsd, FSPermissionChecker pc,
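
Context for the RamDiskReplicaTracker hunk above: getInstance() resolves the tracker implementation from DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY and instantiates it reflectively, the usual Hadoop config-driven factory shape. A generic sketch of that pattern follows; the helper and its names are hypothetical, not the actual getInstance() body:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ReflectionUtils;

    final class ConfFactoryDemo {
      /** Resolve the implementation class named under 'key' and build it. */
      static <T> T newFromConf(Configuration conf, String key,
          Class<? extends T> defaultImpl, Class<T> iface) {
        Class<? extends T> impl = conf.getClass(key, defaultImpl, iface);
        // ReflectionUtils injects the Configuration when the class
        // implements Configurable.
        return ReflectionUtils.newInstance(impl, conf);
      }
    }
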
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index f72ec7c917..6921e204ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -123,7 +123,7 @@ public class EditLogTailer {
 
   /**
    * The timeout in milliseconds of calling rollEdits RPC to Active NN.
-   * @see HDFS-4176.
+   * See HDFS-4176.
    */
   private final long rollEditsTimeoutMs;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
index 21642da9c2..527d767b09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
@@ -70,7 +70,7 @@ public static void saveSnapshots(INodeDirectory current, DataOutput out)
 
   /**
    * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
-   * @param sNode The directory that the SnapshotDiff list belongs to.
+   * @param diffs The directory that the SnapshotDiff list belongs to.
    * @param out The {@link DataOutput} to write.
    */
   private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
@@ -186,7 +186,7 @@ private static List<INode> loadCreatedList(INodeDirectory parent,
    * @param createdList The created list associated with the deleted list in
    *                    the same Diff.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance.
+   * @param loader The {@link FSImageFormat.Loader} instance.
    * @return The deleted list.
    */
   private static List<INode> loadDeletedList(INodeDirectory parent,
@@ -260,7 +260,7 @@ public static void loadDirectoryDiffList(INodeDirectory dir,
    * Load the snapshotINode field of {@link AbstractINodeDiff}.
    * @param snapshot The Snapshot associated with the {@link AbstractINodeDiff}.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return The snapshotINode.
    */
@@ -281,7 +281,7 @@ private static INodeDirectoryAttributes loadSnapshotINodeInDirectoryDiff(
    * Load {@link DirectoryDiff} from fsimage.
    * @param parent The directory that the SnapshotDiff belongs to.
    * @param in The {@link DataInput} instance to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return A {@link DirectoryDiff}.
    */
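
Several fixes above and below (sNode -> diffs here, dataset -> fsDataset earlier, dataFile -> md5File later) repair @param tags that no longer name an actual parameter. javadoc's doclint reports these, so they are cheap to catch in CI. With a made-up class:

    public class ParamDemo {
      /**
       * @param sNode the list to save
       */
      public void save(Object diffs) { }
    }

Running 'javadoc -Xdoclint:all ParamDemo.java' rejects the tag with an '@param name not found' error, the same diagnostic this patch series is cleaning up.
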
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
index 5773d7fecf..bd6c860ccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -85,7 +85,7 @@
 /**
  * This class reads the protobuf-based fsimage and generates text output
  * for each inode to {@link PBImageTextWriter#out}. The sub-class can override
- * {@link getEntry()} to generate formatted string for each inode.
+ * {@link #getEntry(String, INode)} to generate formatted string for each inode.
  *
  * Since protobuf-based fsimage does not guarantee the order of inodes and
  * directories, PBImageTextWriter runs two-phase scans:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
index 2bc63ec77e..77ec789058 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
@@ -68,7 +68,7 @@ public static void verifySavedMD5(File dataFile, MD5Hash expectedMD5)
   /**
    * Read the md5 file stored alongside the given data file
    * and match the md5 file content.
-   * @param dataFile the file containing data
+   * @param md5File the file containing md5 data
    * @return a matcher with two matched groups
    *         where group(1) is the md5 string and group(2) is the data file path.
    */
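
For reference on the MD5FileUtils hunk above: the md5 sidecar file holds a single md5sum-style line, '<32 hex chars> *<data file path>', and the returned Matcher exposes the two groups the javadoc now describes. A standalone sketch of that match; the pattern below is an approximation, not a copy of the one in MD5FileUtils:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class Md5LineDemo {
      // Approximates the stored line: 32 hex digits, a separator, the path.
      private static final Pattern LINE =
          Pattern.compile("([0-9a-f]{32}) \\*?(.+)");

      public static void main(String[] args) {
        Matcher m = LINE.matcher(
            "d41d8cd98f00b204e9800998ecf8427e *fsimage_0000000000000000042");
        if (m.matches()) {
          System.out.println("md5  = " + m.group(1)); // the md5 string
          System.out.println("file = " + m.group(2)); // the data file path
        }
      }
    }
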