HDFS-16803. Improve some annotations in hdfs module. (#5031)
commit c5c00f3d2c
parent 48b6f9f335
@@ -568,7 +568,7 @@ public FSDataOutputStream next(final FileSystem fs, final Path p)
 
   /**
    * Same as
-   * {@link #create(Path, FsPermission, EnumSet<CreateFlag>, int, short, long,
+   * {@link #create(Path, FsPermission, EnumSet, int, short, long,
    * Progressable, ChecksumOpt)} with a few additions. First, addition of
    * favoredNodes that is a hint to where the namenode should place the file
    * blocks. The favored nodes hint is not persisted in HDFS. Hence it may be
@@ -637,12 +637,12 @@ protected HdfsDataOutputStream primitiveCreate(Path f,
 
   /**
    * Similar to {@link #create(Path, FsPermission, EnumSet, int, short, long,
-   * Progressable, ChecksumOpt, InetSocketAddress[], String)}, it provides a
+   * Progressable, ChecksumOpt, InetSocketAddress[], String, String)}, it provides a
    * HDFS-specific version of {@link #createNonRecursive(Path, FsPermission,
    * EnumSet, int, short, long, Progressable)} with a few additions.
    *
    * @see #create(Path, FsPermission, EnumSet, int, short, long, Progressable,
-   * ChecksumOpt, InetSocketAddress[], String) for the descriptions of
+   * ChecksumOpt, InetSocketAddress[], String, String) for the descriptions of
    * additional parameters, i.e., favoredNodes, ecPolicyName and
    * storagePolicyName.
    */
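For context on the favoredNodes hint documented in the two hunks above, the sketch below shows how it is typically supplied through the DistributedFileSystem create overload that accepts an InetSocketAddress[] of favored datanodes. Host names, ports, and the other argument values are illustrative assumptions, not taken from this change.

    // Sketch: supplying the favoredNodes hint when creating a file. As the
    // javadoc above notes, the hint is best-effort and not persisted in HDFS.
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class FavoredNodesCreateExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Hypothetical datanodes we would like the namenode to favor.
          InetSocketAddress[] favoredNodes = {
              new InetSocketAddress("dn1.example.com", 9866),
              new InetSocketAddress("dn2.example.com", 9866)
          };
          try (FSDataOutputStream out = dfs.create(new Path("/tmp/favored.txt"),
              FsPermission.getFileDefault(), true /* overwrite */, 4096,
              (short) 2, 128L * 1024 * 1024, null /* progress */, favoredNodes)) {
            out.writeBytes("hello\n");
          }
        }
      }
    }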
@@ -19,6 +19,9 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+
+import java.util.EnumSet;
 
 /**
  * CreateEncryptionZoneFlag is used in
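CreateEncryptionZoneFlag, touched above, is consumed through HdfsAdmin. A minimal usage sketch follows; it assumes a KMS is configured and that an encryption key named "key1" already exists, and the URI and paths are placeholders.

    // Sketch: creating an encryption zone with CreateEncryptionZoneFlag.
    // Assumes a KMS is configured and the key "key1" already exists.
    import java.net.URI;
    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class CreateEzExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://nameservice1"), conf);
        // PROVISION_TRASH asks for a .Trash directory inside the new zone.
        admin.createEncryptionZone(new Path("/secure/zone"), "key1",
            EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
      }
    }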
@@ -131,7 +131,7 @@ public Class<?> getProtocol() {
   /**
    * Get the represented java method.
    *
-   * @return Method
+   * @return {@link Method}
    * @throws IOException If the method cannot be found.
    */
   public Method getMethod() throws IOException {
@@ -87,7 +87,7 @@ boolean isEpochEstablished() {
 
   /**
    * @return the epoch number for this writer. This may only be called after
-   * a successful call to {@link #createNewUniqueEpoch(NamespaceInfo)}.
+   * a successful call to {@link QuorumJournalManager#createNewUniqueEpoch()}.
    */
   long getEpoch() {
     Preconditions.checkState(myEpoch != INVALID_EPOCH,
@@ -1403,7 +1403,7 @@ public boolean accept(File dir, String name) {
   }
 
   /**
-   * Get the BlockPoolSliceStorage from {@link bpStorageMap}.
+   * Get the BlockPoolSliceStorage from {@link #bpStorageMap}.
    * If the object is not found, create a new object and put it to the map.
    */
   synchronized BlockPoolSliceStorage getBlockPoolSliceStorage(
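The "if not found, create a new object and put it to the map" behaviour described above is the usual get-or-create map pattern; a generic illustration follows (the key and value types are placeholders, not the DataNode storage classes).

    // Generic illustration of the get-or-create pattern the javadoc describes.
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class GetOrCreateExample {
      private final Map<String, StringBuilder> perPoolState = new ConcurrentHashMap<>();

      StringBuilder getOrCreate(String blockPoolId) {
        // Returns the existing value, or atomically creates and registers one.
        return perPoolState.computeIfAbsent(blockPoolId, id -> new StringBuilder());
      }

      public static void main(String[] args) {
        GetOrCreateExample e = new GetOrCreateExample();
        System.out.println(e.getOrCreate("BP-1") == e.getOrCreate("BP-1"));  // true
      }
    }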
@@ -44,14 +44,14 @@
 
 /**
  * An implementation of {@link AsyncChecker} that skips checking recently
- * checked objects. It will enforce at least {@link minMsBetweenChecks}
+ * checked objects. It will enforce at least {@link #minMsBetweenChecks}
  * milliseconds between two successive checks of any one object.
 *
 * It is assumed that the total number of Checkable objects in the system
 * is small, (not more than a few dozen) since the checker uses O(Checkables)
 * storage and also potentially O(Checkables) threads.
 *
- * {@link minMsBetweenChecks} should be configured reasonably
+ * {@link #minMsBetweenChecks} should be configured reasonably
 * by the caller to avoid spinning up too many threads frequently.
 */
@InterfaceAudience.Private
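A hedged sketch of the throttling idea in the javadoc above: remember when each object was last checked and, for checks that arrive within the minimum interval, return the previous result instead of re-checking. This is a synchronous simplification, not the Hadoop implementation.

    // Sketch of "at least N ms between two successive checks of any one object".
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;
    import java.util.function.Predicate;

    public class ThrottledCheckSketch<T> {
      private final long minMsBetweenChecks;
      private final Predicate<T> check;
      private final Map<T, Long> lastCheckMs = new HashMap<>();
      private final Map<T, Boolean> lastResult = new HashMap<>();

      ThrottledCheckSketch(long minMsBetweenChecks, Predicate<T> check) {
        this.minMsBetweenChecks = minMsBetweenChecks;
        this.check = check;
      }

      synchronized Optional<Boolean> schedule(T target) {
        long now = System.currentTimeMillis();
        Long last = lastCheckMs.get(target);
        if (last != null && now - last < minMsBetweenChecks) {
          // Too soon: reuse the most recent result instead of re-checking.
          return Optional.ofNullable(lastResult.get(target));
        }
        boolean healthy = check.test(target);
        lastCheckMs.put(target, now);
        lastResult.put(target, healthy);
        return Optional.of(healthy);
      }

      public static void main(String[] args) {
        ThrottledCheckSketch<String> c =
            new ThrottledCheckSketch<>(10_000, dir -> true);
        System.out.println(c.schedule("/data/1"));  // performs the check
        System.out.println(c.schedule("/data/1"));  // throttled: cached result
      }
    }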
@@ -31,6 +31,7 @@
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.ExecutionException;
 
 /**
  * Implementation of {@code Futures#withTimeout}.
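The {@code Futures#withTimeout} idea referenced above can be sketched with plain JDK types; this is only an illustration of the technique, not the class being patched here.

    // JDK-only sketch of a "future with timeout": the returned future fails with
    // TimeoutException unless the delegate completes within the given delay.
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class WithTimeoutSketch {
      static <T> CompletableFuture<T> withTimeout(CompletableFuture<T> delegate,
          long millis, ScheduledExecutorService scheduler) {
        CompletableFuture<T> result = delegate.copy();
        // If the delegate has not finished in time, fail the returned copy.
        scheduler.schedule(
            () -> result.completeExceptionally(new TimeoutException()),
            millis, TimeUnit.MILLISECONDS);
        return result;
      }

      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor();
        CompletableFuture<String> slow = new CompletableFuture<>();  // never completes
        try {
          withTimeout(slow, 100, scheduler).get();
        } catch (ExecutionException e) {
          System.out.println("timed out: " + e.getCause());
        } finally {
          scheduler.shutdownNow();
        }
      }
    }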
@@ -184,7 +184,7 @@ public long getLockedBytesReserved() {
    * {@link org.apache.hadoop.hdfs.DFSConfigKeys#DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY}.
    *
    * @param conf the configuration to be used
-   * @param dataset the FsDataset object.
+   * @param fsDataset the FsDataset object.
    * @return an instance of RamDiskReplicaTracker
    */
   static RamDiskReplicaTracker getInstance(final Configuration conf,
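The getInstance method documented above follows Hadoop's usual pattern of resolving an implementation class from the configuration and instantiating it reflectively. A generic sketch of that pattern is below; the config key, Tracker and DefaultTracker are placeholders, not the real replica-tracker classes.

    // Generic sketch of config-driven instantiation via Configuration.getClass
    // and ReflectionUtils.newInstance.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ReflectionUtils;

    public class PluggableFactoryExample {
      public interface Tracker { }
      public static class DefaultTracker implements Tracker { }

      static Tracker getInstance(Configuration conf) {
        Class<? extends Tracker> clazz = conf.getClass(
            "example.tracker.class",      // config key selecting the impl
            DefaultTracker.class,         // fallback when the key is unset
            Tracker.class);               // required interface
        return ReflectionUtils.newInstance(clazz, conf);
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        System.out.println(getInstance(conf).getClass().getName());
      }
    }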
@@ -152,8 +152,8 @@ private static INodesInPath dstForRenameTo(
    * @param srcIIP source path
    * @param dstIIP destination path
    * @return true INodesInPath if rename succeeds; null otherwise
-   * @deprecated See {@link #renameToInt(FSDirectory, String, String,
-   * boolean, Options.Rename...)}
+   * @deprecated See {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   * String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   static INodesInPath unprotectedRenameTo(FSDirectory fsd,
@@ -258,8 +258,8 @@ static RenameResult renameToInt(
   }
 
   /**
-   * @see {@link #unprotectedRenameTo(FSDirectory, String, String, INodesInPath,
-   * INodesInPath, long, BlocksMapUpdateInfo, Options.Rename...)}
+   * @see {@link #unprotectedRenameTo(FSDirectory, INodesInPath, INodesInPath,
+   * long, BlocksMapUpdateInfo, Options.Rename...)}
    */
   static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
       String src, String dst, BlocksMapUpdateInfo collectedBlocks,
@@ -482,8 +482,8 @@ static RenameResult unprotectedRenameTo(FSDirectory fsd,
   }
 
   /**
-   * @deprecated Use {@link #renameToInt(FSDirectory, String, String,
-   * boolean, Options.Rename...)}
+   * @deprecated Use {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   * String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   private static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
@@ -123,7 +123,7 @@ public class EditLogTailer {
 
   /**
    * The timeout in milliseconds of calling rollEdits RPC to Active NN.
-   * @see HDFS-4176.
+   * See HDFS-4176.
    */
   private final long rollEditsTimeoutMs;
 
@@ -70,7 +70,7 @@ public static void saveSnapshots(INodeDirectory current, DataOutput out)
 
   /**
    * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
-   * @param sNode The directory that the SnapshotDiff list belongs to.
+   * @param diffs The directory that the SnapshotDiff list belongs to.
    * @param out The {@link DataOutput} to write.
    */
   private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
@@ -186,7 +186,7 @@ private static List<INode> loadCreatedList(INodeDirectory parent,
    * @param createdList The created list associated with the deleted list in
    *                    the same Diff.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance.
+   * @param loader The {@link FSImageFormat.Loader} instance.
    * @return The deleted list.
    */
   private static List<INode> loadDeletedList(INodeDirectory parent,
@@ -260,7 +260,7 @@ public static void loadDirectoryDiffList(INodeDirectory dir,
    * Load the snapshotINode field of {@link AbstractINodeDiff}.
    * @param snapshot The Snapshot associated with the {@link AbstractINodeDiff}.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return The snapshotINode.
    */
@@ -281,7 +281,7 @@ private static INodeDirectoryAttributes loadSnapshotINodeInDirectoryDiff(
    * Load {@link DirectoryDiff} from fsimage.
    * @param parent The directory that the SnapshotDiff belongs to.
    * @param in The {@link DataInput} instance to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return A {@link DirectoryDiff}.
    */
@@ -85,7 +85,7 @@
 /**
  * This class reads the protobuf-based fsimage and generates text output
  * for each inode to {@link PBImageTextWriter#out}. The sub-class can override
- * {@link getEntry()} to generate formatted string for each inode.
+ * {@link #getEntry(String, INode)} to generate formatted string for each inode.
  *
  * Since protobuf-based fsimage does not guarantee the order of inodes and
  * directories, PBImageTextWriter runs two-phase scans:
@@ -68,7 +68,7 @@ public static void verifySavedMD5(File dataFile, MD5Hash expectedMD5)
   /**
    * Read the md5 file stored alongside the given data file
    * and match the md5 file content.
-   * @param dataFile the file containing data
+   * @param md5File the file containing md5 data
    * @return a matcher with two matched groups
    *         where group(1) is the md5 string and group(2) is the data file path.
    */
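The "matcher with two matched groups" mentioned above can be illustrated with a small regex over a saved-md5 line; the exact line layout used below ("<md5> *<path>") is an assumption for illustration, not necessarily the on-disk format.

    // Illustration: group(1) is the md5 string, group(2) is the data file path.
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class Md5LineParser {
      private static final Pattern LINE =
          Pattern.compile("([0-9a-f]{32}) \\*(.+)");

      public static void main(String[] args) {
        String line = "d41d8cd98f00b204e9800998ecf8427e *fsimage_0000000000000000000";
        Matcher m = LINE.matcher(line);
        if (m.matches()) {
          System.out.println("md5  = " + m.group(1));   // group(1): md5 string
          System.out.println("file = " + m.group(2));   // group(2): data file path
        }
      }
    }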