HDFS-13943. [JDK10] Fix javadoc errors in hadoop-hdfs-client module. Contributed by Akira Ajisaka.

Takanobu Asanuma 2018-10-02 09:49:48 +09:00
parent 7d082193d2
commit f6c5ef9903
18 changed files with 92 additions and 83 deletions

View File

@@ -49,7 +49,7 @@
* not visible to the user except when getXAttr/getXAttrs is called on a file
* or directory in the /.reserved/raw HDFS directory hierarchy. These
* attributes can only be accessed by the superuser.
* <p/>
* <p>
* @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
* http://en.wikipedia.org/wiki/Extended_file_attributes</a>
*
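For context, a hedged sketch of the access pattern this javadoc describes; the namenode URI and file path are illustrative, and per the text above the call only succeeds for the superuser:

import java.net.URI;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RawXAttrExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://nn.example.com:8020"),
        new Configuration());
    // raw.* xattrs are only visible via the /.reserved/raw hierarchy.
    Map<String, byte[]> xattrs =
        fs.getXAttrs(new Path("/.reserved/raw/user/alice/file"));
    xattrs.keySet().forEach(System.out::println);
  }
}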

View File

@@ -20,11 +20,17 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import java.util.EnumSet;
/**
* AddBlockFlag provides hints for new block allocation and placement.
* Users can use this flag to control <em>per DFSOutputStream</em>
* {@see ClientProtocol#addBlock()} behavior.
* @see ClientProtocol#addBlock(String, String, ExtendedBlock, DatanodeInfo[],
* long, String[], EnumSet)
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
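As a hedged illustration of the hint path this javadoc describes: a caller passes CreateFlag.NO_LOCAL_WRITE at create time, and DFSOutputStream translates it into the corresponding AddBlockFlag on each addBlock() call. The FileSystem handle fs, the path, and the sizing parameters are assumptions of this sketch:

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

// Hint the NameNode away from the local DataNode for every block of this file.
FSDataOutputStream out = fs.create(new Path("/tmp/demo"),
    FsPermission.getFileDefault(),
    EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.NO_LOCAL_WRITE),
    4096, (short) 3, 128L << 20, null);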

View File

@@ -1054,8 +1054,8 @@ public ClientProtocol getNamenode() {
/**
* Call {@link #create(String, boolean, short, long, Progressable)} with
* default <code>replication</code> and <code>blockSize<code> and null <code>
* progress</code>.
* default <code>replication</code> and <code>blockSize</code> and null
* <code>progress</code>.
*/
public OutputStream create(String src, boolean overwrite)
throws IOException {
@@ -1065,7 +1065,7 @@ public OutputStream create(String src, boolean overwrite)
/**
* Call {@link #create(String, boolean, short, long, Progressable)} with
* default <code>replication</code> and <code>blockSize<code>.
* default <code>replication</code> and <code>blockSize</code>.
*/
public OutputStream create(String src,
boolean overwrite, Progressable progress) throws IOException {
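A short, hedged use of the convenience overload documented above; constructing the client this way is illustrative:

import java.io.OutputStream;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;

DFSClient client = new DFSClient(URI.create("hdfs://nn.example.com:8020"),
    new Configuration());
// Default replication and blockSize, null progress, overwrite = true.
try (OutputStream out = client.create("/tmp/demo.txt", true)) {
  out.write("hello".getBytes(StandardCharsets.UTF_8));
}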

View File

@@ -139,7 +139,7 @@ public EventBatch poll() throws IOException, MissingEventsException {
* are falling behind (i.e. transactions are being generated faster than the
* client is reading them). If a client falls too far behind, events may be
* deleted before the client can read them.
* <p/>
* <p>
* A return value of -1 indicates that an estimate could not be produced, and
* should be ignored. The value returned by this method is really only useful
* when compared to previous or subsequent returned values.
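The method being documented here is getTxidsBehindEstimate(); a hedged polling loop showing how the estimate is meant to be consumed (obtaining the stream from an HdfsAdmin handle is an assumption of the sketch):

import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.inotify.EventBatch;

DFSInotifyEventInputStream stream = hdfsAdmin.getInotifyEventStream();
EventBatch batch;
while ((batch = stream.poll()) != null) {
  // handle batch.getEvents() ...
}
long lag = stream.getTxidsBehindEstimate();
if (lag >= 0) {            // -1 means no estimate could be produced
  System.out.println("approximately " + lag + " transactions behind");
}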

View File

@@ -294,12 +294,12 @@ public String toString() {
}
/**
* Add a trace parent span for this packet.<p/>
*
* Add a trace parent span for this packet.
* <p>
* Trace parent spans for a packet are the trace spans responsible for
* adding data to that packet. We store them as an array of longs for
* efficiency.<p/>
*
* efficiency.
* <p>
* Protected by the DFSOutputStream dataQueue lock.
*/
public void addTraceParent(Span span) {
@@ -323,10 +323,10 @@ public void addTraceParent(SpanId id) {
}
/**
* Get the trace parent spans for this packet.<p/>
*
* Will always be non-null.<p/>
*
* Get the trace parent spans for this packet.
* <p>
* Will always be non-null.
* <p>
* Protected by the DFSOutputStream dataQueue lock.
*/
public SpanId[] getTraceParents() {
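Schematically, and only as a hedged sketch (dataQueue is private to DFSOutputStream, so this is written as if from inside that class):

// Both calls must happen under the dataQueue lock, per the javadoc above.
synchronized (dataQueue) {
  packet.addTraceParent(span);                  // span that added data to this packet
  SpanId[] parents = packet.getTraceParents();  // non-null by contract
}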

View File

@@ -266,8 +266,8 @@ public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
}
/**
* Convert a List<LocatedBlock> to BlockLocation[]
* @param blocks A List<LocatedBlock> to be converted
* Convert a List to BlockLocation[]
* @param blocks A List to be converted
* @return converted array of BlockLocation
*/
public static BlockLocation[] locatedBlocks2Locations(
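An illustrative caller of these helpers; the DFSClient handle and the path are assumptions:

import java.util.Arrays;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

LocatedBlocks blocks = dfsClient.getLocatedBlocks("/tmp/f", 0, Long.MAX_VALUE);
BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
for (BlockLocation loc : locations) {
  System.out.println(loc.getOffset() + " -> " + Arrays.toString(loc.getHosts()));
}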

View File

@@ -149,7 +149,6 @@ public DistributedFileSystem() {
/**
* Return the protocol scheme for the FileSystem.
* <p/>
*
* @return <code>hdfs</code>
*/
@@ -1860,7 +1859,7 @@ public boolean isInSafeMode() throws IOException {
return setSafeMode(SafeModeAction.SAFEMODE_GET, true);
}
/** @see HdfsAdmin#allowSnapshot(Path) */
/** @see org.apache.hadoop.hdfs.client.HdfsAdmin#allowSnapshot(Path) */
public void allowSnapshot(final Path path) throws IOException {
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.ALLOW_SNAPSHOT);
@@ -1888,7 +1887,7 @@ public Void next(final FileSystem fs, final Path p)
}.resolve(this, absF);
}
/** @see HdfsAdmin#disallowSnapshot(Path) */
/** @see org.apache.hadoop.hdfs.client.HdfsAdmin#disallowSnapshot(Path) */
public void disallowSnapshot(final Path path) throws IOException {
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.DISALLOW_SNAPSHOT);
@@ -2207,7 +2206,7 @@ public Boolean next(final FileSystem fs, final Path p)
}
/**
* @see {@link #addCacheDirective(CacheDirectiveInfo, EnumSet)}
* @see #addCacheDirective(CacheDirectiveInfo, EnumSet)
*/
public long addCacheDirective(CacheDirectiveInfo info) throws IOException {
return addCacheDirective(info, EnumSet.noneOf(CacheFlag.class));
@@ -2234,7 +2233,7 @@ public long addCacheDirective(
}
/**
* @see {@link #modifyCacheDirective(CacheDirectiveInfo, EnumSet)}
* @see #modifyCacheDirective(CacheDirectiveInfo, EnumSet)
*/
public void modifyCacheDirective(CacheDirectiveInfo info) throws IOException {
modifyCacheDirective(info, EnumSet.noneOf(CacheFlag.class));
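A hedged example of the no-flags overloads above; the pool name is hypothetical and must already exist:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
    .setPath(new Path("/hot/data"))
    .setPool("hot-pool")                       // hypothetical cache pool
    .build();
long id = dfs.addCacheDirective(info);         // same as passing no CacheFlags
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info)
    .setId(id).setReplication((short) 2).build());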
@@ -3305,10 +3304,10 @@ public HdfsDataOutputStreamBuilder createFile(Path path) {
* Returns a RemoteIterator which can be used to list all open files
* currently managed by the NameNode. For large numbers of open files,
* the iterator will fetch the list in batches of configured size.
* <p/>
* <p>
* Since the list is fetched in batches, it does not represent a
* consistent snapshot of all open files.
* <p/>
* <p>
* This method can only be called by HDFS superusers.
*/
@Deprecated

View File

@@ -67,7 +67,7 @@
* non-HA-enabled client proxy as appropriate.
*
* For creating proxy objects with other protocols, please see
* {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
* NameNodeProxies#createProxy(Configuration, URI, Class).
*/
@InterfaceAudience.Private
public class NameNodeProxiesClient {
@@ -118,7 +118,6 @@ public InetSocketAddress getAddress() {
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException if there is an error creating the proxy
* @see {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
*/
public static ProxyAndInfo<ClientProtocol> createProxyWithClientProtocol(
Configuration conf, URI nameNodeUri, AtomicBoolean fallbackToSimpleAuth)
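A hedged sketch of calling this factory (the URI is illustrative; passing null for fallbackToSimpleAuth keeps the default behavior):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

Configuration conf = new Configuration();
ProxyAndInfo<ClientProtocol> proxyAndInfo =
    NameNodeProxiesClient.createProxyWithClientProtocol(
        conf, URI.create("hdfs://nn.example.com:8020"), null);
ClientProtocol namenode = proxyAndInfo.getProxy();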

View File

@@ -360,10 +360,10 @@ public EncryptionZone getEncryptionZoneForPath(Path path)
* Returns a RemoteIterator which can be used to list the encryption zones
* in HDFS. For large numbers of encryption zones, the iterator will fetch
* the list of zones in a number of small batches.
* <p/>
* <p>
* Since the list is fetched in batches, it does not represent a
* consistent snapshot of the entire list of encryption zones.
* <p/>
* <p>
* This method can only be called by HDFS superusers.
*/
public RemoteIterator<EncryptionZone> listEncryptionZones()
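A hedged iteration over this listing (superuser only, as stated above); each hasNext() may trigger a batch fetch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

Configuration conf = new Configuration();
HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
RemoteIterator<EncryptionZone> zones = admin.listEncryptionZones();
while (zones.hasNext()) {
  EncryptionZone zone = zones.next();
  System.out.println(zone.getPath() + " -> " + zone.getKeyName());
}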
@@ -418,7 +418,7 @@ public FileEncryptionInfo getFileEncryptionInfo(final Path path)
* for information on stream usage.
* See {@link org.apache.hadoop.hdfs.inotify.Event}
* for information on the available events.
* <p/>
* <p>
* Inotify users may want to tune the following HDFS parameters to
* ensure that enough extra HDFS edits are saved to support inotify clients
* that fall behind the current state of the namespace while reading events.
@@ -438,7 +438,7 @@ public FileEncryptionInfo getFileEncryptionInfo(final Path path)
* dfs.namenode.checkpoint.txns
* dfs.namenode.num.checkpoints.retained
* dfs.ha.log-roll.period
* <p/>
* <p>
* It is recommended that local journaling be configured
* (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
* so that edit transfers from the shared journal can be avoided.
@@ -615,10 +615,10 @@ public void disableErasureCodingPolicy(String ecPolicyName)
* Returns a RemoteIterator which can be used to list all open files
* currently managed by the NameNode. For large numbers of open files,
* the iterator will fetch the list in batches of configured size.
* <p/>
* <p>
* Since the list is fetched in batches, it does not represent a
* consistent snapshot of all open files.
* <p/>
* <p>
* This method can only be called by HDFS superusers.
*/
@Deprecated

View File

@@ -70,7 +70,7 @@
* to renew the leases.
* </li>
* </ul>
* </p>
* <p>
*/
@InterfaceAudience.Private
public class LeaseRenewer {

View File

@@ -143,7 +143,7 @@ BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
/**
* Get the status of the previously issued reconfig task.
* @see {@link org.apache.hadoop.conf.ReconfigurationTaskStatus}.
* @see org.apache.hadoop.conf.ReconfigurationTaskStatus
*/
ReconfigurationTaskStatus getReconfigurationStatus() throws IOException;

View File

@@ -839,7 +839,7 @@ long getPreferredBlockSize(String filename)
* percentage called threshold of blocks, which satisfy the minimal
* replication condition.
* The minimal replication condition is that each block must have at least
* <tt>dfs.namenode.replication.min</tt> replicas.
* {@code dfs.namenode.replication.min} replicas.
* When the threshold is reached the name node extends safe mode
* for a configurable amount of time
* to let the remaining data nodes check in before it
@@ -855,12 +855,13 @@ long getPreferredBlockSize(String filename)
* Current state of the name node can be verified using
* {@link #setSafeMode(HdfsConstants.SafeModeAction,boolean)
* setSafeMode(SafeModeAction.SAFEMODE_GET,false)}
* <h4>Configuration parameters:</h4>
* <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
* <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
* <tt>dfs.namenode.replication.min</tt> is the minimal replication parameter.
*
* <h4>Special cases:</h4>
* <p><b>Configuration parameters:</b></p>
* {@code dfs.safemode.threshold.pct} is the threshold parameter.<br>
* {@code dfs.safemode.extension} is the safe mode extension parameter.<br>
* {@code dfs.namenode.replication.min} is the minimal replication parameter.
*
* <p><b>Special cases:</b></p>
* The name node does not enter safe mode at startup if the threshold is
* set to 0 or if the name space is empty.<br>
* If the threshold is set to 1 then all blocks need to have at least
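For reference, the verification call mentioned above, as a minimal sketch against a ClientProtocol handle (handle acquisition omitted):

import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// SAFEMODE_GET only queries; it never changes the NameNode's state.
boolean inSafeMode = namenode.setSafeMode(SafeModeAction.SAFEMODE_GET, false);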
@@ -1211,7 +1212,6 @@ void updatePipeline(String clientName, ExtendedBlock oldBlock,
* Get a valid Delegation Token.
*
* @param renewer the designated renewer for the token
* @return Token<DelegationTokenIdentifier>
* @throws IOException
*/
@Idempotent
@@ -1490,7 +1490,7 @@ EncryptionZone getEZForPath(String src)
throws IOException;
/**
* Used to implement cursor-based batched listing of {@EncryptionZone}s.
* Used to implement cursor-based batched listing of {@link EncryptionZone}s.
*
* @param prevId ID of the last item in the previous batch. If there is no
* previous batch, a negative value can be used.
@@ -1513,7 +1513,7 @@ void reencryptEncryptionZone(String zone, ReencryptAction action)
/**
* Used to implement cursor-based batched listing of
* {@ZoneReencryptionStatus}s.
* {@link ZoneReencryptionStatus}s.
*
* @param prevId ID of the last item in the previous batch. If there is no
* previous batch, a negative value can be used.
@@ -1528,7 +1528,7 @@ BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(long prevId)
* Set xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
@@ -1545,12 +1545,12 @@ void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
* If xAttrs is null or empty, this is the same as getting all xattrs of the
* file or directory. Only those xattrs for which the logged-in user has
* permissions to view are returned.
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @param xAttrs xAttrs to get
* @return List<XAttr> <code>XAttr</code> list
* @return <code>XAttr</code> list
* @throws IOException
*/
@Idempotent
@@ -1561,11 +1561,11 @@ List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
* List the xattrs names for a file or directory.
* Only the xattr names for which the logged-in user has permission to
* access will be returned.
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
* @return List<XAttr> <code>XAttr</code> list
* @return <code>XAttr</code> list
* @throws IOException
*/
@Idempotent
@@ -1576,7 +1576,7 @@ List<XAttr> listXAttrs(String src)
* Remove xattr of a file or directory. Value in xAttr parameter is ignored.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* <p>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param src file or directory
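Tying the xattr contract above together, a hedged FileSystem-level round trip (path and value are illustrative; the fs handle is assumed):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.Path;

Path f = new Path("/tmp/demo");
fs.setXAttr(f, "user.attr", "value".getBytes(StandardCharsets.UTF_8));
byte[] v = fs.getXAttr(f, "user.attr");  // namespace prefix is mandatory
fs.removeXAttr(f, "user.attr");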

View File

@@ -45,7 +45,7 @@ public interface ReconfigurationProtocol {
/**
* Get the status of the previously issued reconfig task.
* @see {@link org.apache.hadoop.conf.ReconfigurationTaskStatus}.
* @see org.apache.hadoop.conf.ReconfigurationTaskStatus
*/
@Idempotent
ReconfigurationTaskStatus getReconfigurationStatus() throws IOException;
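A hedged polling sketch for the asynchronous task whose status this returns (proxy acquisition omitted; the status accessors shown are recalled from memory of ReconfigurationTaskStatus and should be checked):

import org.apache.hadoop.conf.ReconfigurationTaskStatus;

ReconfigurationTaskStatus status = proxy.getReconfigurationStatus();
if (status.hasTask() && status.stopped()) {
  System.out.println("reconfiguration finished at " + status.getEndTime());
}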

View File

@@ -182,13 +182,13 @@ public int hashCode() {
* because we know the first field is the Expiry date.
*
* In the case of the legacy buffer, the expiry date is a VInt, so the size
* (which should always be >1) is encoded in the first byte - which is
* (which should always be &gt;1) is encoded in the first byte - which is
* always negative due to this encoding. However, null BlockTokenIdentifiers
* are sometimes written, so we also need to handle the case where
* the first byte is also 0.
*
* In the case of protobuf, the first byte is a type tag for the expiry date
* which is written as <code>(field_number << 3 | wire_type</code>.
* which is written as <code>field_number &lt;&lt; 3 | wire_type</code>.
* So as long as the field_number is less than 16, but also positive, then
* we know we have a Protobuf.
*
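Distilling the rule just described into a hedged predicate (an illustration of the logic, not the class's actual parsing code):

// Legacy Writable: a multi-byte vint length marker is negative, and a null
// identifier starts with 0. Protobuf: the first byte is the small, positive
// tag (field_number << 3 | wire_type) with field_number below 16.
static boolean looksLikeProtobuf(byte firstByte) {
  return firstByte > 0 && (firstByte >>> 3) < 16;
}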

View File

@@ -57,9 +57,8 @@
* slots in the set of existing segments, falling back to getting a new segment
* from the DataNode via {@link DataTransferProtocol#requestShortCircuitFds}.
*
* The counterpart to this class on the DataNode is
* {@link ShortCircuitRegistry}. See {@link ShortCircuitRegistry} for more
* information on the communication protocol.
* The counterpart to this class on the DataNode is ShortCircuitRegistry.
* See ShortCircuitRegistry for more information on the communication protocol.
*/
@InterfaceAudience.Private
public class DfsClientShmManager implements Closeable {

View File

@@ -54,10 +54,10 @@ private static void logDebugMessage() {
/**
* @return the least power of two greater than or equal to n, i.e. return
* the least integer x with x >= n and x a power of two.
* the least integer x with x &gt;= n and x a power of two.
*
* @throws HadoopIllegalArgumentException
* if n <= 0.
* if n &lt;= 0.
*/
public static int leastPowerOfTwo(final int n) {
if (n <= 0) {
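The hunk ends before the body; a minimal implementation consistent with the contract above (not necessarily the file's exact code) would be:

public static int leastPowerOfTwo(final int n) {
  if (n <= 0) {
    throw new HadoopIllegalArgumentException("n = " + n + " <= 0");
  }
  final int highest = Integer.highestOneBit(n);
  return highest == n ? n : highest << 1;  // caller beware: overflows past 2^30
}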

View File

@@ -53,7 +53,8 @@
* illustrated in the following diagram. Unless otherwise specified, all
* range-related calculations are inclusive (the end offset of the previous
* range should be 1 byte lower than the start offset of the next one).
*
*/
/*
* | <---- Block Group ----> | <- Block Group: logical unit composing
* | | striped HDFS files.
* blk_0 blk_1 blk_2 <- Internal Blocks: each internal block
@@ -492,9 +493,12 @@ private static AlignedStripe[] mergeRangesForInternalBlocks(
return stripes.toArray(new AlignedStripe[stripes.size()]);
}
/**
* Cell indexing convention defined in {@link StripingCell}.
*/
private static void calcualteChunkPositionsInBuf(int cellSize,
AlignedStripe[] stripes, StripingCell[] cells, ByteBuffer buf) {
/**
/*
* | <--------------- AlignedStripe --------------->|
*
* |<- length_0 ->|<-- length_1 -->|<- length_2 ->|
@@ -508,8 +512,6 @@ private static void calcualteChunkPositionsInBuf(int cellSize,
* | cell_0_0_0 | cell_1_0_1 and cell_2_0_2 |cell_3_1_0 ...| <- buf
* | (partial) | (from blk_1 and blk_2) | |
* +----------------------------------------------------------+
*
* Cell indexing convention defined in {@link StripingCell}
*/
int done = 0;
for (StripingCell cell : cells) {
@@ -562,7 +564,11 @@ private static void prepareAllZeroChunks(LocatedStripedBlock blockGroup,
* its start and end offsets -- e.g., the end logical offset of cell_0_0_0
* should be 1 byte lower than the start logical offset of cell_1_0_1.
*
* | <------- Striped Block Group -------> |
* A StripingCell is a special instance of {@link StripingChunk} whose offset
* and size align with the cell used when writing data.
* TODO: consider parity cells
*/
/* | <------- Striped Block Group -------> |
* blk_0 blk_1 blk_2
* | | |
* v v v
@ -572,9 +578,6 @@ private static void prepareAllZeroChunks(LocatedStripedBlock blockGroup,
* |cell_3_1_0| |cell_4_1_1| |cell_5_1_2| <- {@link #idxInBlkGroup} = 5
* +----------+ +----------+ +----------+ {@link #idxInInternalBlk} = 1
* {@link #idxInStripe} = 2
* A StripingCell is a special instance of {@link StripingChunk} whose offset
* and size align with the cell used when writing data.
* TODO: consider parity cells
*/
@VisibleForTesting
public static class StripingCell {
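The three indices in the diagram above are related by simple arithmetic over the number of data blocks; a hedged restatement (dataBlkNum is 3 in the figure):

// cell_5 in a 3-data-block group: 5 / 3 = 1 (idxInInternalBlk),
// 5 % 3 = 2 (idxInStripe) -- matching cell_5_1_2 in the diagram.
int idxInInternalBlk = idxInBlkGroup / dataBlkNum;
int idxInStripe = idxInBlkGroup % dataBlkNum;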
@@ -622,6 +625,18 @@ public String toString() {
* the diagram, any given byte range on a block group leads to 1~5
* AlignedStripe's.
*
* An AlignedStripe is the basic unit of reading from a striped block group,
* because within the AlignedStripe, all internal blocks can be processed in
* a uniform manner.
*
* The coverage of an AlignedStripe on an internal block is represented as a
* {@link StripingChunk}.
*
* To simplify the logic of reading a logical byte range from a block group,
* a StripingChunk is either completely in the requested byte range or
* completely outside the requested byte range.
*/
/*
* |<-------- Striped Block Group -------->|
* blk_0 blk_1 blk_2 blk_3 blk_4
* +----+ | +----+ +----+
@@ -638,18 +653,7 @@ public String toString() {
* | | | | | | | <- AlignedStripe4:
* +----+ | +----+ +----+ last cell is partial
* |
* <---- data blocks ----> | <--- parity --->
*
* An AlignedStripe is the basic unit of reading from a striped block group,
* because within the AlignedStripe, all internal blocks can be processed in
* a uniform manner.
*
* The coverage of an AlignedStripe on an internal block is represented as a
* {@link StripingChunk}.
*
* To simplify the logic of reading a logical byte range from a block group,
* a StripingChunk is either completely in the requested byte range or
* completely outside the requested byte range.
* <---- data blocks ----> | <--- parity -->
*/
public static class AlignedStripe {
public VerticalRange range;
@@ -691,7 +695,8 @@ public String toString() {
* starting at {@link #offsetInBlock} and lasting for {@link #spanInBlock}
* bytes in an internal block. Note that VerticalRange doesn't necessarily
* align with {@link StripingCell}.
*
*/
/*
* |<- Striped Block Group ->|
* blk_0
* |
@@ -735,8 +740,8 @@ public String toString() {
/**
* Indicates the coverage of an {@link AlignedStripe} on an internal block,
* and the state of the chunk in the context of the read request.
*
* |<---------------- Striped Block Group --------------->|
*/
/* |<---------------- Striped Block Group --------------->|
* blk_0 blk_1 blk_2 blk_3 blk_4
* +---------+ | +----+ +----+
* null null |REQUESTED| | |null| |null| <- AlignedStripe0
@ -745,7 +750,7 @@ public String toString() {
* +---------+ +---------+ +---------+ | +----+ +----+
* |REQUESTED| |REQUESTED| ALLZERO | |null| |null| <- AlignedStripe2
* +---------+ +---------+ | +----+ +----+
* <----------- data blocks ------------> | <--- parity --->
* <----------- data blocks ------------> | <--- parity -->
*/
public static class StripingChunk {
/** Chunk has been successfully fetched */
@@ -767,10 +772,12 @@ public static class StripingChunk {
/**
* If a chunk is completely in requested range, the state transition is:
* REQUESTED (when AlignedStripe created) -> PENDING -> {FETCHED | MISSING}
* REQUESTED (when AlignedStripe created) -&gt; PENDING -&gt;
* {FETCHED | MISSING}
* If a chunk is completely outside requested range (including parity
* chunks), state transition is:
* null (AlignedStripe created) -> REQUESTED (upon failure) -> PENDING ...
* null (AlignedStripe created) -&gt;REQUESTED (upon failure) -&gt;
* PENDING ...
*/
public int state = REQUESTED;
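The transitions above, restated schematically with the constants this class defines (the trigger comments paraphrase the javadoc; they are not code from the class):

// In the requested range:  REQUESTED -> PENDING -> FETCHED or MISSING
// Outside the range:       null -> REQUESTED (upon failure) -> PENDING -> ...
chunk.state = StripingChunk.PENDING;                              // read issued
chunk.state = ok ? StripingChunk.FETCHED : StripingChunk.MISSING; // read completed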

View File

@@ -176,7 +176,6 @@ public class WebHdfsFileSystem extends FileSystem
/**
* Return the protocol scheme for the FileSystem.
* <p/>
*
* @return <code>webhdfs</code>
*/