HDFS-16803. Improve some annotations in hdfs module. (#5031)

jianghuazhu 2022-10-20 10:58:23 +08:00 committed by GitHub
parent 48b6f9f335
commit c5c00f3d2c
13 changed files with 26 additions and 22 deletions

View File

@@ -568,7 +568,7 @@ public FSDataOutputStream next(final FileSystem fs, final Path p)
 
   /**
    * Same as
-   * {@link #create(Path, FsPermission, EnumSet<CreateFlag>, int, short, long,
+   * {@link #create(Path, FsPermission, EnumSet, int, short, long,
    * Progressable, ChecksumOpt)} with a few additions. First, addition of
    * favoredNodes that is a hint to where the namenode should place the file
    * blocks. The favored nodes hint is not persisted in HDFS. Hence it may be
@@ -637,12 +637,12 @@ protected HdfsDataOutputStream primitiveCreate(Path f,
 
   /**
    * Similar to {@link #create(Path, FsPermission, EnumSet, int, short, long,
-   * Progressable, ChecksumOpt, InetSocketAddress[], String)}, it provides a
+   * Progressable, ChecksumOpt, InetSocketAddress[], String, String)}, it provides a
    * HDFS-specific version of {@link #createNonRecursive(Path, FsPermission,
    * EnumSet, int, short, long, Progressable)} with a few additions.
    *
    * @see #create(Path, FsPermission, EnumSet, int, short, long, Progressable,
-   * ChecksumOpt, InetSocketAddress[], String) for the descriptions of
+   * ChecksumOpt, InetSocketAddress[], String, String) for the descriptions of
    * additional parameters, i.e., favoredNodes, ecPolicyName and
    * storagePolicyName.
    */
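Both hunks above fix the same Javadoc rule: the parameter list inside a {@link} or @see reference must match the target method's raw (erased) signature. Generic type arguments such as EnumSet<CreateFlag> are not legal inside a reference, and a link written against an old overload breaks once the method gains a parameter. A minimal sketch of the rule, using a hypothetical create method:

/**
 * Resolves: raw types, current parameter list.
 * {@link #create(Path, FsPermission, EnumSet, int)}
 *
 * Does not resolve: generic type arguments in the signature.
 * {@link #create(Path, FsPermission, EnumSet<CreateFlag>, int)}
 */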

View File

@@ -19,6 +19,9 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+
+import java.util.EnumSet;
 
 /**
  * CreateEncryptionZoneFlag is used in
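Javadoc resolves unqualified names in {@link} and @see tags through the file's imports, so a doc comment that mentions Path or EnumSet needs the import even when the surrounding code never uses the type; without it, javadoc reports "reference not found". A minimal sketch (the enum name here is illustrative, not the real class):

import org.apache.hadoop.fs.Path; // needed only so javadoc can resolve {@link Path}

import java.util.EnumSet;         // likewise for {@link EnumSet}

/**
 * Flags passed as an {@link EnumSet} when creating a zone rooted at a {@link Path}.
 */
public enum ZoneFlagSketch { PROVISION_TRASH }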

View File

@@ -131,7 +131,7 @@ public Class<?> getProtocol() {
   /**
    * Get the represented java method.
    *
-   * @return Method
+   * @return {@link Method}
    * @throws IOException If the method cannot be found.
    */
   public Method getMethod() throws IOException {

View File

@@ -87,7 +87,7 @@ boolean isEpochEstablished() {
 
   /**
    * @return the epoch number for this writer. This may only be called after
-   * a successful call to {@link #createNewUniqueEpoch(NamespaceInfo)}.
+   * a successful call to {@link QuorumJournalManager#createNewUniqueEpoch()}.
    */
   long getEpoch() {
     Preconditions.checkState(myEpoch != INVALID_EPOCH,

View File

@@ -1403,7 +1403,7 @@ public boolean accept(File dir, String name) {
   }
 
   /**
-   * Get the BlockPoolSliceStorage from {@link bpStorageMap}.
+   * Get the BlockPoolSliceStorage from {@link #bpStorageMap}.
    * If the object is not found, create a new object and put it to the map.
    */
   synchronized BlockPoolSliceStorage getBlockPoolSliceStorage(
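The '#' prefix is what marks a member reference: {@link #bpStorageMap} points at the field in the current class, while {@link bpStorageMap} is parsed as a class name and fails to resolve. A minimal sketch, with the field type simplified:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class LinkSketch {
  /**
   * Resolves: {@link #bpStorageMap} names a member of this class.
   * Fails: {@link bpStorageMap} would be looked up as a class name.
   */
  private final Map<String, Object> bpStorageMap = new ConcurrentHashMap<>();
}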

View File

@@ -44,14 +44,14 @@
 
 /**
  * An implementation of {@link AsyncChecker} that skips checking recently
- * checked objects. It will enforce at least {@link minMsBetweenChecks}
+ * checked objects. It will enforce at least {@link #minMsBetweenChecks}
  * milliseconds between two successive checks of any one object.
  *
  * It is assumed that the total number of Checkable objects in the system
 * is small, (not more than a few dozen) since the checker uses O(Checkables)
  * storage and also potentially O(Checkables) threads.
  *
- * {@link minMsBetweenChecks} should be configured reasonably
+ * {@link #minMsBetweenChecks} should be configured reasonably
  * by the caller to avoid spinning up too many threads frequently.
  */
 @InterfaceAudience.Private
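A minimal sketch of the throttling behavior that comment describes, assuming a hypothetical ThrottleSketch class rather than Hadoop's ThrottledAsyncChecker: one timestamp entry per object gives the O(Checkables) storage, and any check attempted within minMsBetweenChecks of the previous one is skipped.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ThrottleSketch<K> {
  private final long minMsBetweenChecks;
  // One entry per checked object: this is the O(Checkables) storage.
  private final Map<K, Long> lastCheckMs = new ConcurrentHashMap<>();

  ThrottleSketch(long minMsBetweenChecks) {
    this.minMsBetweenChecks = minMsBetweenChecks;
  }

  /** Returns false (skip) when the object was checked too recently. */
  synchronized boolean shouldCheck(K target) {
    long now = System.currentTimeMillis();
    Long last = lastCheckMs.get(target);
    if (last != null && now - last < minMsBetweenChecks) {
      return false;
    }
    lastCheckMs.put(target, now);
    return true;
  }
}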

View File

@@ -31,6 +31,7 @@
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.ExecutionException;
 
 /**
  * Implementation of {@code Futures#withTimeout}.
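TimeoutFuture is the class behind Guava's Futures.withTimeout, which makes a future fail with a TimeoutException (delivered to get() callers wrapped in an ExecutionException) when it does not complete in time. A hedged usage sketch of the public Guava API, not of this class:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class WithTimeoutSketch {
  public static void main(String[] args) throws InterruptedException {
    ListeningExecutorService pool =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
    ListenableFuture<String> slow = pool.submit(() -> {
      Thread.sleep(5_000);
      return "done";
    });
    // Fails after 100 ms instead of waiting the full five seconds.
    ListenableFuture<String> bounded =
        Futures.withTimeout(slow, 100, TimeUnit.MILLISECONDS, timer);
    try {
      System.out.println(bounded.get());
    } catch (ExecutionException e) {
      System.out.println("timed out: " + e.getCause());
    } finally {
      pool.shutdownNow();
      timer.shutdownNow();
    }
  }
}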

View File

@@ -184,7 +184,7 @@ public long getLockedBytesReserved() {
    * {@link org.apache.hadoop.hdfs.DFSConfigKeys#DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY}.
    *
    * @param conf the configuration to be used
-   * @param dataset the FsDataset object.
+   * @param fsDataset the FsDataset object.
    * @return an instance of RamDiskReplicaTracker
    */
   static RamDiskReplicaTracker getInstance(final Configuration conf,
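getInstance follows the usual Hadoop pattern of choosing an implementation class from a configuration key and instantiating it reflectively. A sketch of that pattern under stated assumptions: the key and default class come from DFSConfigKeys, and the factory shape here is illustrative, not the real method body.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

final class TrackerFactorySketch {
  // Generic form of the conf-key -> implementation-class -> instance pattern.
  static <T> T newInstance(Configuration conf, String key,
      Class<? extends T> defaultImpl, Class<T> base) {
    Class<? extends T> clazz = conf.getClass(key, defaultImpl, base);
    return ReflectionUtils.newInstance(clazz, conf);
  }
}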

View File

@@ -152,8 +152,8 @@ private static INodesInPath dstForRenameTo(
    * @param srcIIP source path
    * @param dstIIP destination path
    * @return true INodesInPath if rename succeeds; null otherwise
-   * @deprecated See {@link #renameToInt(FSDirectory, String, String,
-   * boolean, Options.Rename...)}
+   * @deprecated See {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   * String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   static INodesInPath unprotectedRenameTo(FSDirectory fsd,
@@ -258,8 +258,8 @@ static RenameResult renameToInt(
   }
 
   /**
-   * @see {@link #unprotectedRenameTo(FSDirectory, String, String, INodesInPath,
-   * INodesInPath, long, BlocksMapUpdateInfo, Options.Rename...)}
+   * @see {@link #unprotectedRenameTo(FSDirectory, INodesInPath, INodesInPath,
+   * long, BlocksMapUpdateInfo, Options.Rename...)}
    */
   static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
       String src, String dst, BlocksMapUpdateInfo collectedBlocks,
@@ -482,8 +482,8 @@ static RenameResult unprotectedRenameTo(FSDirectory fsd,
   }
 
   /**
-   * @deprecated Use {@link #renameToInt(FSDirectory, String, String,
-   * boolean, Options.Rename...)}
+   * @deprecated Use {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   * String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   private static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,

View File

@@ -123,7 +123,7 @@ public class EditLogTailer {
 
   /**
    * The timeout in milliseconds of calling rollEdits RPC to Active NN.
-   * @see HDFS-4176.
+   * See HDFS-4176.
    */
   private final long rollEditsTimeoutMs;
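The tag becomes plain text because @see expects a resolvable reference (a package.Class#member, a quoted string, or an HTML link); a bare issue ID such as HDFS-4176 triggers a "reference not found" warning. Valid forms, for comparison:

/**
 * @see #rollEditsTimeoutMs
 * @see java.util.concurrent.TimeUnit
 * @see <a href="https://issues.apache.org/jira/browse/HDFS-4176">HDFS-4176</a>
 */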

View File

@@ -70,7 +70,7 @@ public static void saveSnapshots(INodeDirectory current, DataOutput out)
 
   /**
    * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
-   * @param sNode The directory that the SnapshotDiff list belongs to.
+   * @param diffs The directory that the SnapshotDiff list belongs to.
    * @param out The {@link DataOutput} to write.
    */
   private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
@@ -186,7 +186,7 @@ private static List<INode> loadCreatedList(INodeDirectory parent,
    * @param createdList The created list associated with the deleted list in
    *                    the same Diff.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance.
+   * @param loader The {@link FSImageFormat.Loader} instance.
    * @return The deleted list.
    */
   private static List<INode> loadDeletedList(INodeDirectory parent,
@@ -260,7 +260,7 @@ public static void loadDirectoryDiffList(INodeDirectory dir,
    * Load the snapshotINode field of {@link AbstractINodeDiff}.
    * @param snapshot The Snapshot associated with the {@link AbstractINodeDiff}.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return The snapshotINode.
    */
@@ -281,7 +281,7 @@ private static INodeDirectoryAttributes loadSnapshotINodeInDirectoryDiff(
    * Load {@link DirectoryDiff} from fsimage.
    * @param parent The directory that the SnapshotDiff belongs to.
    * @param in The {@link DataInput} instance to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return A {@link DirectoryDiff}.
    */

View File

@@ -85,7 +85,7 @@
 /**
  * This class reads the protobuf-based fsimage and generates text output
  * for each inode to {@link PBImageTextWriter#out}. The sub-class can override
- * {@link getEntry()} to generate formatted string for each inode.
+ * {@link #getEntry(String, INode)} to generate formatted string for each inode.
  *
  * Since protobuf-based fsimage does not guarantee the order of inodes and
  * directories, PBImageTextWriter runs two-phase scans:
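A self-contained sketch of the two-phase idea (simplified types, not the PBImageTextWriter code): the first pass indexes every inode by id so the second pass can resolve full paths regardless of the order records arrived in.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

class TwoPhaseSketch {
  static final class Inode {
    final long id; final long parentId; final String name;
    Inode(long id, long parentId, String name) {
      this.id = id; this.parentId = parentId; this.name = name;
    }
  }

  static void print(List<Inode> unordered) {
    // Phase 1: index inodes; a parent may appear after its children.
    Map<Long, Inode> byId = new HashMap<>();
    for (Inode i : unordered) {
      byId.put(i.id, i);
    }
    // Phase 2: walk parent pointers to build each path, one line per inode.
    for (Inode i : unordered) {
      StringBuilder path = new StringBuilder(i.name);
      for (Inode p = byId.get(i.parentId); p != null; p = byId.get(p.parentId)) {
        path.insert(0, p.name + "/");
        if (p.parentId == p.id) break; // self-parented root
      }
      System.out.println("/" + path);
    }
  }
}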

View File

@@ -68,7 +68,7 @@ public static void verifySavedMD5(File dataFile, MD5Hash expectedMD5)
   /**
    * Read the md5 file stored alongside the given data file
    * and match the md5 file content.
-   * @param dataFile the file containing data
+   * @param md5File the file containing md5 data
    * @return a matcher with two matched groups
    *         where group(1) is the md5 string and group(2) is the data file path.
    */
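The rename matters because the method reads the sidecar .md5 file, not the data file itself. Hadoop writes that file in md5sum's format, one line of the form "<hex digest> *<file name>"; an illustrative matcher with the two groups the @return describes (the exact pattern in MD5FileUtils may differ):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class Md5LineSketch {
  // group(1) = md5 hex string, group(2) = data file path.
  private static final Pattern LINE = Pattern.compile("([0-9a-f]{32}) \\*(.+)");

  public static void main(String[] args) {
    Matcher m = LINE.matcher(
        "3b5d5c3712955042212316173ccf37be *fsimage_0000000000000000042");
    if (m.matches()) {
      System.out.println("md5  = " + m.group(1));
      System.out.println("file = " + m.group(2));
    }
  }
}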