HDFS-13942. [JDK10] Fix javadoc errors in hadoop-hdfs module. Contributed by Dinesh Chitlangia.

Akira Ajisaka 2018-10-31 14:43:58 +09:00
parent e4f22b08e0
commit fac9f91b29
65 changed files with 310 additions and 246 deletions


@@ -160,7 +160,8 @@ public int compare(DatanodeInfo a, DatanodeInfo b) {
  /**
  * Comparator for sorting DataNodeInfo[] based on
  * stale, decommissioned and entering_maintenance states.
- * Order: live -> stale -> entering_maintenance -> decommissioned
+ * Order: live {@literal ->} stale {@literal ->} entering_maintenance
+ * {@literal ->} decommissioned
  */
  @InterfaceAudience.Private
  public static class ServiceAndStaleComparator extends ServiceComparator {
@@ -390,7 +391,8 @@ public static String addKeySuffixes(String key, String... suffixes) {
  * @param conf Configuration
  * @param nsId the nameservice whose NNs addresses we want.
  * @param defaultValue default address to return in case key is not found.
- * @return A map from nnId -> RPC address of each NN in the nameservice.
+ * @return A map from nnId {@literal ->} RPC address of each NN in the
+ *     nameservice.
  */
  public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
      Configuration conf, String nsId, String defaultValue) {
@@ -1289,7 +1291,8 @@ static URI trimUri(URI uri) {
  * @param conf configuration
  * @param protocol Protocol interface
  * @param service service that implements the protocol
- * @param server RPC server to which the protocol & implementation is added to
+ * @param server RPC server to which the protocol &amp; implementation is
+ *     added to
  * @throws IOException
  */
  public static void addPBProtocol(Configuration conf, Class<?> protocol,
@@ -1357,7 +1360,8 @@ public static String getNamenodeWebAddr(final Configuration conf, String nsId,
  * @param conf Configuration
  * @param nsId the nameservice whose NNs addresses we want.
  * @param defaultValue default address to return in case key is not found.
- * @return A map from nnId -> Web address of each NN in the nameservice.
+ * @return A map from nnId {@literal ->} Web address of each NN in the
+ *     nameservice.
  */
  public static Map<String, InetSocketAddress> getWebAddressesForNameserviceId(
      Configuration conf, String nsId, String defaultValue) {


@@ -201,7 +201,7 @@ public static Builder builder(int maxDataLength) {
  /**
  * Very efficient encoding of the block report into a ByteString to avoid
  * the overhead of protobuf repeating fields. Primitive repeating fields
- * require re-allocs of an ArrayList<Long> and the associated (un)boxing
+ * require re-allocs of an ArrayList&lt;Long&gt; and the associated (un)boxing
  * overhead which puts pressure on GC.
  *
  * The structure of the buffer is as follows:
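The boxing overhead mentioned in this comment is easy to see in isolation. The sketch below only illustrates the idea and is not the actual BlockListAsLongs encoding; the class name and the simple count-plus-values layout are invented for the example. Writing primitive longs straight into a byte stream allocates no Long wrappers, whereas an ArrayList<Long> boxes every element.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Illustrative only: pack primitive longs without per-element boxing.
    public final class LongPackingSketch {
      public static byte[] pack(long[] values) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(values.length);   // simple count header
        for (long v : values) {
          out.writeLong(v);            // 8 bytes per value, no Long objects
        }
        out.flush();
        return bytes.toByteArray();
      }
    }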


@@ -168,7 +168,7 @@ public JournalResponseProto journal(RpcController unused,
     return VOID_JOURNAL_RESPONSE;
   }
-  /** @see JournalProtocol#heartbeat */
+  /** @see QJournalProtocol#heartbeat */
   @Override
   public HeartbeatResponseProto heartbeat(RpcController controller,
       HeartbeatRequestProto req) throws ServiceException {


@@ -220,7 +220,7 @@ public synchronized void addKeys(ExportedBlockKeys exportedKeys)
  }
  /**
- * Update block keys if update time > update interval.
+ * Update block keys if update time {@literal >} update interval.
  * @return true if the keys are updated.
  */
  public synchronized boolean updateKeys(final long updateTime) throws IOException {


@@ -78,7 +78,7 @@
  * <p>SYNOPSIS
  * <pre>
  * To start:
- *      bin/start-balancer.sh [-threshold <threshold>]
+ *      bin/start-balancer.sh [-threshold {@literal <threshold>}]
  *      Example: bin/ start-balancer.sh
  *                     start the balancer with a default threshold of 10%
  *             bin/ start-balancer.sh -threshold 5
@@ -113,13 +113,14 @@
  * <p>A system property that limits the balancer's use of bandwidth is
  * defined in the default configuration file:
  * <pre>
- * <property>
- *   <name>dfs.datanode.balance.bandwidthPerSec</name>
- *   <value>1048576</value>
- * <description>  Specifies the maximum bandwidth that each datanode
+ * &lt;property&gt;
+ *   &lt;name&gt;dfs.datanode.balance.bandwidthPerSec&lt;/name&gt;
+ *   &lt;value&gt;1048576&lt;/value&gt;
+ * &lt;description&gt;  Specifies the maximum bandwidth that each datanode
  * can utilize for the balancing purpose in term of the number of bytes
- * per second. </description>
- * </property>
+ * per second.
+ * &lt;/description&gt;
+ * &lt;/property&gt;
  * </pre>
  *
  * <p>This property determines the maximum speed at which a block will be
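The property shown in that <pre> block is an ordinary Hadoop configuration key, so it can also be set programmatically. The value and the direct use of Configuration below are illustrative only and are not part of this change.

    import org.apache.hadoop.conf.Configuration;

    public final class BalancerBandwidthExample {
      public static void main(String[] args) {
        // Illustrative only: cap balancing traffic at 10 MB/s per datanode.
        Configuration conf = new Configuration();
        conf.setLong("dfs.datanode.balance.bandwidthPerSec", 10L * 1024 * 1024);
        System.out.println(conf.get("dfs.datanode.balance.bandwidthPerSec"));
      }
    }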


@@ -143,12 +143,13 @@
  * If any of the replica is in maintenance mode, the safety property
  * is extended as follows. These property still apply for the case of zero
  * maintenance replicas, thus we can use these safe property for all scenarios.
- * a. # of live replicas >= # of min replication for maintenance.
- * b. # of live replicas <= # of expected redundancy.
- * c. # of live replicas and maintenance replicas >= # of expected redundancy.
+ * a. # of live replicas &gt;= # of min replication for maintenance.
+ * b. # of live replicas &lt;= # of expected redundancy.
+ * c. # of live replicas and maintenance replicas &gt;= # of expected
+ * redundancy.
  *
  * For regular replication, # of min live replicas for maintenance is determined
- * by DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to <=
+ * by DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to &lt;=
  * DFS_NAMENODE_REPLICATION_MIN_KEY.
  * For erasure encoding, # of min live replicas for maintenance is
  * BlockInfoStriped#getRealDataBlockNum.
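Read together, conditions a, b and c above amount to a single predicate over the replica counts. The helper below is a hedged restatement of the javadoc for illustration only; the names are invented and this is not the BlockManager implementation.

    // Illustrative restatement of safety conditions a, b and c above.
    public final class MaintenanceSafetySketch {
      static boolean isSafe(int liveReplicas, int maintenanceReplicas,
          int minReplicationForMaintenance, int expectedRedundancy) {
        boolean a = liveReplicas >= minReplicationForMaintenance;
        boolean b = liveReplicas <= expectedRedundancy;
        boolean c = liveReplicas + maintenanceReplicas >= expectedRedundancy;
        return a && b && c;
      }
    }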
@@ -305,7 +306,7 @@ public long getTotalECBlockGroups() {
  private final double storageInfoDefragmentRatio;
  /**
- * Mapping: Block -> { BlockCollection, datanodes, self ref }
+ * Mapping: Block {@literal ->} { BlockCollection, datanodes, self ref }
  * Updated only in response to client-sent information.
  */
  final BlocksMap blocksMap;
@@ -321,7 +322,9 @@ public long getTotalECBlockGroups() {
  private final BlockReportProcessingThread blockReportThread =
      new BlockReportProcessingThread();
- /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
+ /**
+  * Store blocks {@literal ->} datanodedescriptor(s) map of corrupt replicas.
+  */
  final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
  /**
@@ -2105,7 +2108,7 @@ public DatanodeStorageInfo[] chooseTarget4AdditionalDatanode(String src,
  * Choose target datanodes for creating a new block.
  *
  * @throws IOException
- *           if the number of targets < minimum replication.
+ *           if the number of targets {@literal <} minimum replication.
  * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
  *      Set, long, List, BlockStoragePolicy, EnumSet)
  */
@@ -2487,7 +2490,8 @@ private static class BlockInfoToAdd {
  /**
  * The given storage is reporting all its blocks.
- * Update the (storage-->block list) and (block-->storage list) maps.
+ * Update the (storage{@literal -->}block list) and
+ * (block{@literal -->}storage list) maps.
  *
  * @return true if all known storages of the given DN have finished reporting.
  * @throws IOException
@@ -3777,8 +3781,8 @@ private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
  }
  /**
- * Modify (block-->datanode) map. Possibly generate replication tasks, if the
- * removed block is still valid.
+ * Modify (block{@literal -->}datanode) map. Possibly generate replication
+ * tasks, if the removed block is still valid.
  */
  public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
    blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
@@ -4341,7 +4345,7 @@ public void checkRedundancy(BlockCollection bc) {
  }
  /**
- * Get blocks to invalidate for <i>nodeId</i>
+ * Get blocks to invalidate for {@code nodeId}
  * in {@link #invalidateBlocks}.
  *
  * @return number of blocks scheduled for removal during this iteration.


@@ -152,7 +152,6 @@ protected abstract void initialize(Configuration conf, FSClusterStats stats,
  /**
  * Check if the move is allowed. Used by balancer and other tools.
- * @
  *
  * @param candidates all replicas including source and target
  * @param source source replica of the move


@@ -47,13 +47,11 @@
 /**
  * This class manages datanode configuration using a json file.
  * Please refer to {@link CombinedHostsFileReader} for the json format.
- * <p/>
- * <p/>
+ * <p>
  * Entries may or may not specify a port. If they don't, we consider
  * them to apply to every DataNode on that host. The code canonicalizes the
  * entries into IP addresses.
- * <p/>
- * <p/>
+ * <p>
  * The code ignores all entries that the DNS fails to resolve their IP
  * addresses. This is okay because by default the NN rejects the registrations
  * of DNs when it fails to do a forward and reverse lookup. Note that DNS


@@ -38,7 +38,7 @@
  * corrupt. While reporting replicas of a Block, we hide any corrupt
  * copies. These copies are removed once Block is found to have
  * expected number of good replicas.
- * Mapping: Block -> TreeSet<DatanodeDescriptor>
+ * Mapping: Block {@literal -> TreeSet<DatanodeDescriptor>}
  */
 @InterfaceAudience.Private


@@ -57,7 +57,7 @@
 * Manages decommissioning and maintenance state for DataNodes. A background
 * monitor thread periodically checks the status of DataNodes that are
 * decommissioning or entering maintenance state.
- * <p/>
+ * <p>
 * A DataNode can be decommissioned in a few situations:
 * <ul>
 * <li>If a DN is dead, it is decommissioned immediately.</li>
@@ -72,11 +72,11 @@
 * determine if they can be DECOMMISSIONED. The monitor also prunes this list
 * as blocks become replicated, so monitor scans will become more efficient
 * over time.
- * <p/>
+ * <p>
 * DECOMMISSION_INPROGRESS nodes that become dead do not progress to
 * DECOMMISSIONED until they become live again. This prevents potential
 * durability loss for singly-replicated blocks (see HDFS-6791).
- * <p/>
+ * <p>
 * DataNodes can also be put under maintenance state for any short duration
 * maintenance operations. Unlike decommissioning, blocks are not always
 * re-replicated for the DataNodes to enter maintenance state. When the
@@ -88,7 +88,7 @@
 * of maintenance expiry time. When DataNodes don't transition or join the
 * cluster back by expiry time, blocks are re-replicated just as in
 * decommissioning case as to avoid read or write performance degradation.
- * <p/>
+ * <p>
 * This class depends on the FSNamesystem lock for synchronization.
 */
 @InterfaceAudience.Private


@@ -33,17 +33,16 @@
 /**
  * This class manages the include and exclude files for HDFS.
- * <p/>
+ * <p>
  * These files control which DataNodes the NameNode expects to see in the
  * cluster. Loosely speaking, the include file, if it exists and is not
  * empty, is a list of everything we expect to see. The exclude file is
  * a list of everything we want to ignore if we do see it.
- * <p/>
+ * <p>
  * Entries may or may not specify a port. If they don't, we consider
  * them to apply to every DataNode on that host. The code canonicalizes the
  * entries into IP addresses.
- * <p/>
- * <p/>
+ * <p>
  * The code ignores all entries that the DNS fails to resolve their IP
  * addresses. This is okay because by default the NN rejects the registrations
  * of DNs when it fails to do a forward and reverse lookup. Note that DNS


@@ -35,9 +35,9 @@
 /**
  * The HostSet allows efficient queries on matching wildcard addresses.
- * <p/>
+ * <p>
  * For InetSocketAddress A and B with the same host address,
- * we define a partial order between A and B, A <= B iff A.getPort() == B
+ * we define a partial order between A and B, A &lt;= B iff A.getPort() == B
  * .getPort() || B.getPort() == 0.
  */
 public class HostSet implements Iterable<InetSocketAddress> {
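The partial order described in this comment can be written out directly. The helper below only illustrates the relation, under the stated assumption that both addresses share the same host; it is not part of HostSet's API.

    import java.net.InetSocketAddress;

    public final class HostOrderSketch {
      // A <= B iff A.getPort() == B.getPort() || B.getPort() == 0,
      // assuming A and B have the same host address.
      static boolean lessOrEqual(InetSocketAddress a, InetSocketAddress b) {
        return a.getPort() == b.getPort() || b.getPort() == 0;
      }
    }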
@@ -46,7 +46,7 @@ public class HostSet implements Iterable<InetSocketAddress> {
  /**
  * The function that checks whether there exists an entry foo in the set
- * so that foo <= addr.
+ * so that foo &lt;= addr.
  */
  boolean matchedBy(InetSocketAddress addr) {
    Collection<Integer> ports = addrs.get(addr.getAddress());
@@ -56,7 +56,7 @@ boolean matchedBy(InetSocketAddress addr) {
  /**
  * The function that checks whether there exists an entry foo in the set
- * so that addr <= foo.
+ * so that addr &lt;= foo.
  */
  boolean match(InetSocketAddress addr) {
    int port = addr.getPort();


@@ -29,6 +29,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -82,7 +83,7 @@ public class SlowPeerTracker {
  /**
  * Information about peers that have reported a node as being slow.
- * Each outer map entry is a map of (DatanodeId) -> (timestamp),
+ * Each outer map entry is a map of (DatanodeId) {@literal ->} (timestamp),
  * mapping reporting nodes to the timestamp of the last report from
  * that node.
  *
@@ -146,7 +147,7 @@ public Set<String> getReportsForNode(String slowNode) {
  /**
  * Retrieve all reports for all nodes. Stale reports are excluded.
  *
- * @return map from SlowNodeId -> (set of nodes reporting peers).
+ * @return map from SlowNodeId {@literal ->} (set of nodes reporting peers).
  */
  public Map<String, SortedSet<String>> getReportsForAllDataNodes() {
    if (allReports.isEmpty()) {


@@ -56,9 +56,9 @@
 * block pool id, on this DataNode.
 *
 * This class supports the following functionality:
- * <ol>
+ * <ul>
 * <li> Formatting a new block pool storage</li>
- * <li> Recovering a storage state to a consistent state (if possible></li>
+ * <li> Recovering a storage state to a consistent state (if possible)</li>
 * <li> Taking a snapshot of the block pool during upgrade</li>
 * <li> Rolling back a block pool to a previous snapshot</li>
 * <li> Finalizing block storage by deletion of a snapshot</li>
@@ -139,11 +139,12 @@ public void addStorageDir(StorageDirectory sd) {
  /**
  * Load one storage directory. Recover from previous transitions if required.
- *
  * @param nsInfo namespace information
- * @param dataDir the root path of the storage directory
+ * @param location the root path of the storage directory
  * @param startOpt startup option
- * @return the StorageDirectory successfully loaded.
+ * @param callables list of callable storage directory
+ * @param conf configuration
+ * @return
  * @throws IOException
  */
  private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
@@ -205,8 +206,10 @@ private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
  * data volume.
  *
  * @param nsInfo namespace information
- * @param dataDirs storage directories of block pool
+ * @param location storage directories of block pool
  * @param startOpt startup option
+ * @param callables list of callable storage directory
+ * @param conf configuration
  * @return an array of loaded block pool directories.
  * @throws IOException on error
  */
@@ -240,8 +243,10 @@ List<StorageDirectory> loadBpStorageDirectories(NamespaceInfo nsInfo,
  * data volume.
  *
  * @param nsInfo namespace information
- * @param dataDirs storage directories of block pool
+ * @param location storage directories of block pool
  * @param startOpt startup option
+ * @param callables list of callable storage directory
+ * @param conf configuration
  * @throws IOException on error
  */
  List<StorageDirectory> recoverTransitionRead(NamespaceInfo nsInfo,
@@ -348,13 +353,18 @@ protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
  * Analyze whether a transition of the BP state is required and
  * perform it if necessary.
  * <br>
- * Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime.
- * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime Regular
- * startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
+ * Rollback if:
+ *     previousLV &gt;= LAYOUT_VERSION && prevCTime &lt;= namenode.cTime.
+ * Upgrade if:
+ *     this.LV &gt; LAYOUT_VERSION || this.cTime &lt; namenode.cTime
+ * Regular startup if:
+ *     this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
  *
- * @param sd storage directory <SD>/current/<bpid>
+ * @param sd storage directory {@literal <SD>/current/<bpid>}
  * @param nsInfo namespace info
  * @param startOpt startup option
+ * @param callables list of callable storage directory
+ * @param conf configuration
  * @return true if the new properties has been written.
  */
  private boolean doTransition(StorageDirectory sd, NamespaceInfo nsInfo,
@@ -416,20 +426,20 @@ private boolean doTransition(StorageDirectory sd, NamespaceInfo nsInfo,
  }
  /**
- * Upgrade to any release after 0.22 (0.22 included) release e.g. 0.22 => 0.23
+ * Upgrade to any release after 0.22 (0.22 included) release
+ * e.g. 0.22 =&gt; 0.23
  * Upgrade procedure is as follows:
  * <ol>
- * <li>If <SD>/current/<bpid>/previous exists then delete it</li>
- * <li>Rename <SD>/current/<bpid>/current to
- * <SD>/current/bpid/current/previous.tmp</li>
- * <li>Create new <SD>current/<bpid>/current directory</li>
- * <ol>
+ * <li>If {@literal <SD>/current/<bpid>/previous} exists then delete it</li>
+ * <li>Rename {@literal <SD>/current/<bpid>/current} to
+ * {@literal <SD>/current/bpid/current/previous.tmp}</li>
+ * <li>Create new {@literal <SD>current/<bpid>/current} directory</li>
  * <li>Hard links for block files are created from previous.tmp to current</li>
  * <li>Save new version file in current directory</li>
+ * <li>Rename previous.tmp to previous</li>
  * </ol>
- * <li>Rename previous.tmp to previous</li> </ol>
  *
- * @param bpSd storage directory <SD>/current/<bpid>
+ * @param bpSd storage directory {@literal <SD>/current/<bpid>}
  * @param nsInfo Namespace Info from the namenode
  * @throws IOException on error
  */
@@ -777,12 +787,12 @@ private String getTrashDirectory(File blockFile) {
  }
  /**
- * Get a target subdirectory under current/ for a given block file that is being
- * restored from trash.
+ * Get a target subdirectory under current/ for a given block file that is
+ * being restored from trash.
  *
  * The subdirectory structure under trash/ mirrors that under current/ to keep
  * implicit memory of where the files are to be restored.
- *
+ * @param blockFile block file that is being restored from trash.
  * @return the target directory to restore a previously deleted block file.
  */
  @VisibleForTesting
@@ -847,6 +857,7 @@ public boolean trashEnabled() {
  /**
  * Create a rolling upgrade marker file for each BP storage root, if it
  * does not exist already.
+ * @param dnStorageDirs
  */
  public void setRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
      throws IOException {
@@ -872,6 +883,7 @@ public void setRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
  * Check whether the rolling upgrade marker file exists for each BP storage
  * root. If it does exist, then the marker file is cleared and more
  * importantly the layout upgrade is finalized.
+ * @param dnStorageDirs
  */
  public void clearRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
      throws IOException {


@@ -338,19 +338,24 @@ void syncBlock(List<BlockRecord> syncList) throws IOException {
  /**
  * blk_0 blk_1 blk_2 blk_3 blk_4 blk_5 blk_6 blk_7 blk_8
- * 64k 64k 64k 64k 64k 64k 64k 64k 64k <-- stripe_0
+ * 64k 64k 64k 64k 64k 64k 64k 64k 64k &lt;--
+ * stripe_0
  * 64k 64k 64k 64k 64k 64k 64k 64k 64k
- * 64k 64k 64k 64k 64k 64k 64k 61k <-- startStripeIdx
+ * 64k 64k 64k 64k 64k 64k 64k 61k &lt;--
+ * startStripeIdx
  * 64k 64k 64k 64k 64k 64k 64k
  * 64k 64k 64k 64k 64k 64k 59k
  * 64k 64k 64k 64k 64k 64k
- * 64k 64k 64k 64k 64k 64k <-- last full stripe
- * 64k 64k 13k 64k 55k 3k <-- target last stripe
+ * 64k 64k 64k 64k 64k 64k &lt;--
+ * last full stripe
+ * 64k 64k 13k 64k 55k 3k &lt;--
+ * target last stripe
  * 64k 64k 64k 1k
  * 64k 64k 58k
  * 64k 64k
  * 64k 19k
- * 64k <-- total visible stripe
+ * 64k &lt;--
+ * total visible stripe
  *
  * Due to different speed of streamers, the internal blocks in a block group
  * could have different lengths when the block group isn't ended normally.


@@ -189,7 +189,7 @@ public BlockScanner(DataNode datanode, Configuration conf) {
  }
  /**
- * Returns true if the block scanner is enabled.<p/>
+ * Returns true if the block scanner is enabled.
  *
  * If the block scanner is disabled, no volume scanners will be created, and
  * no threads will start.
@@ -234,7 +234,7 @@ public synchronized void addVolumeScanner(FsVolumeReference ref) {
  }
  /**
- * Stops and removes a volume scanner.<p/>
+ * Stops and removes a volume scanner.
  *
  * This function will block until the volume scanner has stopped.
  *
@@ -260,7 +260,7 @@ public synchronized void removeVolumeScanner(FsVolumeSpi volume) {
  }
  /**
- * Stops and removes all volume scanners.<p/>
+ * Stops and removes all volume scanners.
  *
  * This function will block until all the volume scanners have stopped.
  */


@@ -242,7 +242,7 @@
 * DataNodes.
 *
 * The DataNode maintains just one critical table:
- *   block-> stream of bytes (of BLOCK_SIZE or less)
+ *   block{@literal ->} stream of bytes (of BLOCK_SIZE or less)
 *
 * This info is stored on a local disk. The DataNode
 * reports the table's contents to the NameNode upon startup
@@ -527,7 +527,7 @@ protected Configuration getNewConf() {
  }
  /**
- * {@inheritdoc}.
+ * {@inheritDoc }.
  */
  @Override
  public String reconfigurePropertyImpl(String property, String newVal)
@@ -2713,7 +2713,8 @@ public static List<StorageLocation> getStorageLocations(Configuration conf) {
    return locations;
  }
- /** Instantiate & Start a single datanode daemon and wait for it to finish.
+ /** Instantiate &amp; Start a single datanode daemon and wait for it to
+  * finish.
  *  If this thread is specifically interrupted, it will stop waiting.
  */
  @VisibleForTesting
@@ -2722,7 +2723,8 @@ public static DataNode createDataNode(String args[],
    return createDataNode(args, conf, null);
  }
- /** Instantiate & Start a single datanode daemon and wait for it to finish.
+ /** Instantiate &amp; Start a single datanode daemon and wait for it to
+  * finish.
  *  If this thread is specifically interrupted, it will stop waiting.
  */
  @VisibleForTesting


@@ -215,7 +215,9 @@ public String getTrashDirectoryForReplica(String bpid, ReplicaInfo info) {
  /**
  * VolumeBuilder holds the metadata (e.g., the storage directories) of the
- * prepared volume returned from {@link prepareVolume()}. Calling {@link build()}
+ * prepared volume returned from
+ * {@link #prepareVolume(DataNode, StorageLocation, List)}.
+ * Calling {@link VolumeBuilder#build()}
  * to add the metadata to {@link DataStorage} so that this prepared volume can
  * be active.
  */


@@ -280,7 +280,6 @@ public String toString() {
  /**
  * Create a new directory scanner, but don't cycle it running yet.
  *
- * @param datanode the parent datanode
  * @param dataset the dataset to scan
  * @param conf the Configuration object
  */


@@ -178,7 +178,6 @@ public void dirSync(@Nullable FsVolumeSpi volume, File dir)
  * Call sync_file_range on the given file descriptor.
  *
  * @param volume target volume. null if unavailable.
- * @throws IOException
  */
  public void syncFileRange(
      @Nullable FsVolumeSpi volume, FileDescriptor outFd,
@@ -198,7 +197,6 @@ public void syncFileRange(
  * Call posix_fadvise on the given file descriptor.
  *
  * @param volume target volume. null if unavailable.
- * @throws IOException
  */
  public void posixFadvise(
      @Nullable FsVolumeSpi volume, String identifier, FileDescriptor outFd,
@@ -394,7 +392,6 @@ public FileOutputStream getFileOutputStream(
  * @param volume target volume. null if unavailable.
  * @param fd File descriptor object.
  * @return FileOutputStream to the given file object.
- * @throws FileNotFoundException
  */
  public FileOutputStream getFileOutputStream(
      @Nullable FsVolumeSpi volume, FileDescriptor fd) {


@@ -45,8 +45,8 @@
 import org.slf4j.LoggerFactory;
 /**
- * VolumeScanner scans a single volume. Each VolumeScanner has its own thread.<p/>
- * They are all managed by the DataNode's BlockScanner.
+ * VolumeScanner scans a single volume. Each VolumeScanner has its own thread.
+ * <p>They are all managed by the DataNode's BlockScanner.
 */
 public class VolumeScanner extends Thread {
  public static final Logger LOG =


@@ -24,7 +24,10 @@
 import com.google.common.annotations.GwtCompatible;
 import com.google.common.base.Preconditions;
 import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
 import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
     .newUpdater;
@@ -52,9 +55,7 @@
 * {@link ListeningExecutorService}, and deriving a {@code Future} from an
 * existing one, typically using methods like {@link Futures#transform
 * (ListenableFuture, com.google.common.base.Function) Futures.transform}
- * and {@link Futures#catching(ListenableFuture, Class,
- * com.google.common.base.Function, java.util.concurrent.Executor)
- * Futures.catching}.
+ * and its overloaded versions.
 * <p>
 * <p>This class implements all methods in {@code ListenableFuture}.
 * Subclasses should provide a way to set the result of the computation
@@ -1265,12 +1266,6 @@ private static CancellationException cancellationExceptionWithCause(
  *   r.run();
  * }
  * }}</pre>
- * <p>
- * <p>This should be preferred to {@link #newDirectExecutorService()}
- * because implementing the {@link ExecutorService} subinterface
- * necessitates significant performance overhead.
- *
- * @since 18.0
  */
  public static Executor directExecutor() {
    return DirectExecutor.INSTANCE;


@@ -192,7 +192,7 @@ public void close() throws IOException {
  FsVolumeReferences getFsVolumeReferences();
  /**
- * Add a new volume to the FsDataset.<p/>
+ * Add a new volume to the FsDataset.
  *
  * If the FSDataset supports block scanning, this function registers
  * the new volume with the block scanner.
@@ -226,7 +226,7 @@ StorageReport[] getStorageReports(String bpid)
  /** @return the volume that contains a replica of the block. */
  V getVolume(ExtendedBlock b);
- /** @return a volume information map (name => info). */
+ /** @return a volume information map (name {@literal =>} info). */
  Map<String, Object> getVolumeInfoMap();
  /**
@@ -273,7 +273,8 @@ LengthInputStream getMetaDataInputStream(ExtendedBlock b
  /**
  * Get reference to the replica meta info in the replicasMap.
- * To be called from methods that are synchronized on {@link FSDataset}
+ * To be called from methods that are synchronized on
+ * implementations of {@link FsDatasetSpi}
  * @return replica from the replicas map
  */
  @Deprecated
@@ -394,7 +395,7 @@ Replica recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen
  * Finalizes the block previously opened for writing using writeToBlock.
  * The block size is what is in the parameter b and it must match the amount
  *  of data written
- * @param block Block to be finalized
+ * @param b Block to be finalized
  * @param fsyncDir whether to sync the directory changes to durable device.
  * @throws IOException
  * @throws ReplicaNotFoundException if the replica can not be found when the
@@ -488,14 +489,13 @@ void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
  /**
  * Determine if the specified block is cached.
  * @param bpid Block pool id
- * @param blockIds - block id
+ * @param blockId - block id
  * @return true if the block is cached
  */
  boolean isCached(String bpid, long blockId);
  /**
  * Check if all the data directories are healthy
- * @return A set of unhealthy data directories.
  * @param failedVolumes
  */
  void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes);


@@ -98,17 +98,17 @@ public interface FsVolumeSpi
  /**
  * BlockIterator will return ExtendedBlock entries from a block pool in
- * this volume. The entries will be returned in sorted order.<p/>
+ * this volume. The entries will be returned in sorted order.<p>
  *
  * BlockIterator objects themselves do not always have internal
  * synchronization, so they can only safely be used by a single thread at a
- * time.<p/>
+ * time.<p>
  *
  * Closing the iterator does not save it. You must call save to save it.
  */
  interface BlockIterator extends Closeable {
    /**
-    * Get the next block.<p/>
+    * Get the next block.<p>
     *
     * Note that this block may be removed in between the time we list it,
     * and the time the caller tries to use it, or it may represent a stale
@@ -146,7 +146,7 @@ interface BlockIterator extends Closeable {
    void save() throws IOException;
    /**
-    * Set the maximum staleness of entries that we will return.<p/>
+    * Set the maximum staleness of entries that we will return.<p>
     *
     * A maximum staleness of 0 means we will never return stale entries; a
     * larger value will allow us to reduce resource consumption in exchange
@@ -211,12 +211,12 @@ interface BlockIterator extends Closeable {
  * Because millions of these structures may be created, we try to save
  * memory here. So instead of storing full paths, we store path suffixes.
  * The block file, if it exists, will have a path like this:
- * <volume_base_path>/<block_path>
+ * {@literal <volume_base_path>/<block_path>}
  * So we don't need to store the volume path, since we already know what the
  * volume is.
  *
  * The metadata file, if it exists, will have a path like this:
- * <volume_base_path>/<block_path>_<genstamp>.meta
+ * {@literal <volume_base_path>/<block_path>_<genstamp>.meta}
  * So if we have a block file, there isn't any need to store the block path
  * again.
  *
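The suffix scheme described here can be pictured with a small sketch. The field and method names below are assumptions for illustration only and do not mirror the real ScanInfo class.

    import java.io.File;

    public final class PathSuffixSketch {
      private final File volumeBasePath;  // known from the volume, not stored per block
      private final String blockSuffix;   // e.g. "subdir0/subdir1/blk_123"
      private final String metaSuffix;    // e.g. "subdir0/subdir1/blk_123_1001.meta"

      PathSuffixSketch(File volumeBasePath, String blockSuffix, String metaSuffix) {
        this.volumeBasePath = volumeBasePath;
        this.blockSuffix = blockSuffix;
        this.metaSuffix = metaSuffix;
      }

      File getBlockFile() {
        return new File(volumeBasePath, blockSuffix);
      }

      File getMetaFile() {
        return new File(volumeBasePath, metaSuffix);
      }
    }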
@@ -439,6 +439,7 @@ byte[] loadLastPartialChunkChecksum(File blockFile, File metaFile)
  * @param bpid block pool id to scan
  * @param report the list onto which blocks reports are placed
  * @param reportCompiler
+ * @throws InterruptedException
  * @throws IOException
  */
  void compileReport(String bpid,


@@ -94,7 +94,8 @@ public OutlierDetector(long minNumResources, long lowThresholdMs) {
  /**
  * Return a set of nodes/ disks whose latency is much higher than
- * their counterparts. The input is a map of (resource -> aggregate latency)
+ * their counterparts. The input is a map of (resource {@literal ->} aggregate
+ * latency)
  * entries.
  *
  * The aggregate may be an arithmetic mean or a percentile e.g.


@@ -60,7 +60,6 @@ public DiskBalancerException(String message, Result result) {
  /**
  * Constructs an {@code IOException} with the specified detail message and
  * cause.
- * <p/>
  * <p> Note that the detail message associated with {@code cause} is
  * <i>not</i>
  * automatically incorporated into this exception's detail message.


@@ -50,21 +50,20 @@
 /**
  * DiskBalancerCluster represents the nodes that we are working against.
- * <p/>
+ * <p>
  * Please Note :
- * <p/>
  * Semantics of inclusionList and exclusionLists.
- * <p/>
+ * <p>
  * If a non-empty inclusionList is specified then the diskBalancer assumes that
  * the user is only interested in processing that list of nodes. This node list
  * is checked against the exclusionList and only the nodes in inclusionList but
  * not in exclusionList is processed.
- * <p/>
+ * <p>
  * if inclusionList is empty, then we assume that all live nodes in the nodes is
  * to be processed by diskBalancer. In that case diskBalancer will avoid any
  * nodes specified in the exclusionList but will process all nodes in the
  * cluster.
- * <p/>
+ * <p>
  * In other words, an empty inclusionList is means all the nodes otherwise
  * only a given list is processed and ExclusionList is always honored.
  */
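The inclusionList/exclusionList semantics spelled out in this comment reduce to a simple set computation. The sketch below restates them with invented names; it is not DiskBalancerCluster code.

    import java.util.HashSet;
    import java.util.Set;

    public final class NodeSelectionSketch {
      // Non-empty inclusion list: process (inclusion - exclusion).
      // Empty inclusion list: process (all live nodes - exclusion).
      static Set<String> nodesToProcess(Set<String> allLiveNodes,
          Set<String> inclusionList, Set<String> exclusionList) {
        Set<String> candidates = new HashSet<>(
            inclusionList.isEmpty() ? allLiveNodes : inclusionList);
        candidates.removeAll(exclusionList);
        return candidates;
      }
    }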
@@ -291,7 +290,7 @@ public void createSnapshot(String snapShotName) throws IOException {
  /**
  * Compute plan takes a node and constructs a planner that creates a plan that
  * we would like to follow.
- * <p/>
+ * <p>
  * This function creates a thread pool and executes a planner on each node
  * that we are supposed to plan for. Each of these planners return a NodePlan
  * that we can persist or schedule for execution with a diskBalancer


@@ -186,7 +186,7 @@ public int hashCode() {
  }
  /**
- * returns NodeDataDensity Metric.
+ * Returns NodeDataDensity Metric.
  *
  * @return float
  */
@@ -195,8 +195,8 @@ public double getNodeDataDensity() {
  }
  /**
- * computes nodes data density.
- * <p/>
+ * Computes nodes data density.
+ *
  * This metric allows us to compare different nodes and how well the data is
  * spread across a set of volumes inside the node.
  */
@@ -231,8 +231,8 @@ public boolean isBalancingNeeded(double threshold) {
  /**
  * Adds a volume to the DataNode.
- * <p/>
- * it is assumed that we have one thread per node hence this call is not
+ *
+ * It is assumed that we have one thread per node hence this call is not
  * synchronised neither is the map is protected.
  *
  * @param volume - volume


@@ -34,7 +34,7 @@
 /**
  * Greedy Planner is a simple planner that computes the largest possible move at
  * any point of time given a volumeSet.
- * <p/>
+ * <p>
  * This is done by choosing the disks with largest amount of data above and
  * below the idealStorage and then a move is scheduled between them.
  */


@@ -156,7 +156,7 @@ public static boolean copyINodeDefaultAcl(INode child) {
  *
  * @param inode INode to read
  * @param snapshotId int ID of snapshot to read
- * @return List<AclEntry> containing extended inode ACL entries
+ * @return {@literal List<AclEntry>} containing extended inode ACL entries
  */
  public static List<AclEntry> readINodeAcl(INode inode, int snapshotId) {
    AclFeature f = inode.getAclFeature(snapshotId);
@@ -167,7 +167,7 @@ public static List<AclEntry> readINodeAcl(INode inode, int snapshotId) {
  * Reads the existing extended ACL entries of an INodeAttribute object.
  *
  * @param inodeAttr INode to read
- * @return List<AclEntry> containing extended inode ACL entries
+ * @return {@code List<AclEntry>} containing extended inode ACL entries
  */
  public static List<AclEntry> readINodeAcl(INodeAttributes inodeAttr) {
    AclFeature f = inodeAttr.getAclFeature();
@@ -175,7 +175,7 @@ public static List<AclEntry> readINodeAcl(INodeAttributes inodeAttr) {
  }
  /**
- * Build list of AclEntries from the AclFeature
+ * Build list of AclEntries from the {@link AclFeature}
  * @param aclFeature AclFeature
  * @return List of entries
  */
@@ -204,7 +204,7 @@ static ImmutableList<AclEntry> getEntriesFromAclFeature(AclFeature aclFeature) {
  * ACL modification APIs, which always apply a delta on top of current state.
  *
  * @param inode INode to read
- * @return List<AclEntry> containing all logical inode ACL entries
+ * @return {@code List<AclEntry>} containing all logical inode ACL entries
  */
  public static List<AclEntry> readINodeLogicalAcl(INode inode) {
    FsPermission perm = inode.getFsPermission();
@@ -262,7 +262,7 @@ public static List<AclEntry> readINodeLogicalAcl(INode inode) {
  * {@link AclFeature}.
  *
  * @param inode INode to update
- * @param newAcl List<AclEntry> containing new ACL entries
+ * @param newAcl {@code List<AclEntry>} containing new ACL entries
  * @param snapshotId int latest snapshot ID of inode
  * @throws AclException if the ACL is invalid for the given inode
  * @throws QuotaExceededException if quota limit is exceeded
@@ -312,8 +312,8 @@ private AclStorage() {
  /**
  * Creates an AclFeature from the given ACL entries.
  *
- * @param accessEntries List<AclEntry> access ACL entries
- * @param defaultEntries List<AclEntry> default ACL entries
+ * @param accessEntries {@code List<AclEntry>} access ACL entries
+ * @param defaultEntries {@code List<AclEntry>} default ACL entries
  * @return AclFeature containing the required ACL entries
  */
  private static AclFeature createAclFeature(List<AclEntry> accessEntries,
@@ -347,7 +347,7 @@ private static AclFeature createAclFeature(List<AclEntry> accessEntries,
  * POSIX ACLs model, which presents the mask as the permissions of the group
  * class.
  *
- * @param accessEntries List<AclEntry> access ACL entries
+ * @param accessEntries {@code List<AclEntry>} access ACL entries
  * @param existingPerm FsPermission existing permissions
  * @return FsPermission new permissions
  */
@@ -365,7 +365,7 @@ private static FsPermission createFsPermissionForExtendedAcl(
  * group and other permissions are in order. Also preserve sticky bit and
  * toggle ACL bit off.
  *
- * @param accessEntries List<AclEntry> access ACL entries
+ * @param accessEntries {@code List<AclEntry>} access ACL entries
  * @param existingPerm FsPermission existing permissions
  * @return FsPermission new permissions
  */


@@ -67,7 +67,7 @@
 /**
  * Manages the list of encryption zones in the filesystem.
- * <p/>
+ * <p>
  * The EncryptionZoneManager has its own lock, but relies on the FSDirectory
  * lock being held for many operations. The FSDirectory lock should not be
  * taken if the manager lock is already held.
@@ -294,7 +294,7 @@ void stopReencryptThread() {
  /**
  * Add a new encryption zone.
- * <p/>
+ * <p>
  * Called while holding the FSDirectory lock.
  *
  * @param inodeId of the encryption zone
@@ -308,7 +308,7 @@ void addEncryptionZone(Long inodeId, CipherSuite suite,
  /**
  * Add a new encryption zone.
- * <p/>
+ * <p>
  * Does not assume that the FSDirectory lock is held.
  *
  * @param inodeId of the encryption zone
@@ -326,7 +326,7 @@ void unprotectedAddEncryptionZone(Long inodeId,
  /**
  * Remove an encryption zone.
- * <p/>
+ * <p>
  * Called while holding the FSDirectory lock.
  */
  void removeEncryptionZone(Long inodeId) {
@@ -344,7 +344,7 @@ void removeEncryptionZone(Long inodeId) {
  /**
  * Returns true if an IIP is within an encryption zone.
- * <p/>
+ * <p>
  * Called while holding the FSDirectory lock.
  */
  boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException,
@@ -355,7 +355,7 @@ boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException,
  /**
  * Returns the full path from an INode id.
- * <p/>
+ * <p>
  * Called while holding the FSDirectory lock.
  */
  String getFullPathName(Long nodeId) {
@@ -370,7 +370,7 @@ String getFullPathName(Long nodeId) {
  /**
  * Get the key name for an encryption zone. Returns null if <tt>iip</tt> is
  * not within an encryption zone.
- * <p/>
+ * <p>
  * Called while holding the FSDirectory lock.
  */
  String getKeyName(final INodesInPath iip) throws IOException {
@ -385,7 +385,7 @@ String getKeyName(final INodesInPath iip) throws IOException {
/** /**
* Looks up the EncryptionZoneInt for a path within an encryption zone. * Looks up the EncryptionZoneInt for a path within an encryption zone.
* Returns null if path is not within an EZ. * Returns null if path is not within an EZ.
* <p/> * <p>
* Called while holding the FSDirectory lock. * Called while holding the FSDirectory lock.
*/ */
private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip)
@ -434,7 +434,7 @@ private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip)
* Looks up the nearest ancestor EncryptionZoneInt that contains the given * Looks up the nearest ancestor EncryptionZoneInt that contains the given
* path (excluding itself). * path (excluding itself).
* Returns null if path is not within an EZ, or the path is the root dir '/' * Returns null if path is not within an EZ, or the path is the root dir '/'
* <p/> * <p>
* Called while holding the FSDirectory lock. * Called while holding the FSDirectory lock.
*/ */
private EncryptionZoneInt getParentEncryptionZoneForPath(INodesInPath iip) private EncryptionZoneInt getParentEncryptionZoneForPath(INodesInPath iip)
@ -467,7 +467,7 @@ EncryptionZone getEZINodeForPath(INodesInPath iip)
/** /**
* Throws an exception if the provided path cannot be renamed into the * Throws an exception if the provided path cannot be renamed into the
* destination because of differing parent encryption zones. * destination because of differing parent encryption zones.
* <p/> * <p>
* Called while holding the FSDirectory lock. * Called while holding the FSDirectory lock.
* *
* @param srcIIP source IIP * @param srcIIP source IIP
@ -529,7 +529,7 @@ private void checkMoveValidityForReencryption(final String pathName,
/** /**
* Create a new encryption zone. * Create a new encryption zone.
* <p/> * <p>
* Called while holding the FSDirectory lock. * Called while holding the FSDirectory lock.
*/ */
XAttr createEncryptionZone(INodesInPath srcIIP, CipherSuite suite, XAttr createEncryptionZone(INodesInPath srcIIP, CipherSuite suite,
@ -573,7 +573,7 @@ XAttr createEncryptionZone(INodesInPath srcIIP, CipherSuite suite,
/** /**
* Cursor-based listing of encryption zones. * Cursor-based listing of encryption zones.
* <p/> * <p>
* Called while holding the FSDirectory lock. * Called while holding the FSDirectory lock.
*/ */
BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId) BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
@ -621,6 +621,8 @@ BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
* @param zoneId * @param zoneId
* @param zonePath * @param zonePath
* @return true if path resolve to the id, false if not. * @return true if path resolve to the id, false if not.
* @throws AccessControlException
* @throws ParentNotDirectoryException
* @throws UnresolvedLinkException * @throws UnresolvedLinkException
*/ */
private boolean pathResolvesToId(final long zoneId, final String zonePath) private boolean pathResolvesToId(final long zoneId, final String zonePath)
@ -645,6 +647,9 @@ private boolean pathResolvesToId(final long zoneId, final String zonePath)
/** /**
* Re-encrypts the given encryption zone path. If the given path is not the * Re-encrypts the given encryption zone path. If the given path is not the
* root of an encryption zone, an exception is thrown. * root of an encryption zone, an exception is thrown.
* @param zoneIIP
* @param keyVersionName
* @throws IOException
*/ */
List<XAttr> reencryptEncryptionZone(final INodesInPath zoneIIP, List<XAttr> reencryptEncryptionZone(final INodesInPath zoneIIP,
final String keyVersionName) throws IOException { final String keyVersionName) throws IOException {
@ -673,7 +678,9 @@ List<XAttr> reencryptEncryptionZone(final INodesInPath zoneIIP,
/** /**
* Cancels the currently-running re-encryption of the given encryption zone. * Cancels the currently-running re-encryption of the given encryption zone.
* If the given path is not the root of an encryption zone, * If the given path is not the root of an encryption zone,
* * an exception is thrown. * an exception is thrown.
* @param zoneIIP
* @throws IOException
*/ */
List<XAttr> cancelReencryptEncryptionZone(final INodesInPath zoneIIP) List<XAttr> cancelReencryptEncryptionZone(final INodesInPath zoneIIP)
throws IOException { throws IOException {
@ -693,8 +700,10 @@ List<XAttr> cancelReencryptEncryptionZone(final INodesInPath zoneIIP)
/** /**
* Cursor-based listing of zone re-encryption status. * Cursor-based listing of zone re-encryption status.
* <p/> * <p>
* Called while holding the FSDirectory lock. * Called while holding the FSDirectory lock.
* @param prevId
* @throws IOException
*/ */
BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus( BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus(
final long prevId) throws IOException { final long prevId) throws IOException {
@ -735,6 +744,10 @@ BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus(
/** /**
* Return whether an INode is an encryption zone root. * Return whether an INode is an encryption zone root.
* @param inode
* @param name
* @return true when INode is an encryption zone root else false
* @throws FileNotFoundException
*/ */
boolean isEncryptionZoneRoot(final INode inode, final String name) boolean isEncryptionZoneRoot(final INode inode, final String name)
throws FileNotFoundException { throws FileNotFoundException {
@ -756,6 +769,7 @@ boolean isEncryptionZoneRoot(final INode inode, final String name)
* Return whether an INode is an encryption zone root. * Return whether an INode is an encryption zone root.
* *
* @param inode the zone inode * @param inode the zone inode
* @param name
* @throws IOException if the inode is not a directory, * @throws IOException if the inode is not a directory,
* or is a directory but not the root of an EZ. * or is a directory but not the root of an EZ.
*/ */

View File

@ -634,12 +634,10 @@ void disableQuotaChecks() {
* no permission checks. * no permission checks.
* @param src The path to resolve. * @param src The path to resolve.
* @param dirOp The {@link DirOp} that controls additional checks. * @param dirOp The {@link DirOp} that controls additional checks.
* @param resolveLink If false, only ancestor symlinks will be checked. If
* true, the last inode will also be checked.
* @return if the path indicates an inode, return path after replacing up to * @return if the path indicates an inode, return path after replacing up to
* <inodeid> with the corresponding path of the inode, else the path * {@code <inodeid>} with the corresponding path of the inode, else
* in {@code src} as is. If the path refers to a path in the "raw" * the path in {@code src} as is. If the path refers to a path in
* directory, return the non-raw pathname. * the "raw" directory, return the non-raw pathname.
* @throws FileNotFoundException * @throws FileNotFoundException
* @throws AccessControlException * @throws AccessControlException
* @throws ParentNotDirectoryException * @throws ParentNotDirectoryException

View File

@ -341,10 +341,11 @@
* *
* This class and its contents keep: * This class and its contents keep:
* *
* 1) Valid fsname --> blocklist (kept on disk, logged) * 1) Valid fsname {@literal -->} blocklist (kept on disk, logged)
* 2) Set of all valid blocks (inverted #1) * 2) Set of all valid blocks (inverted #1)
* 3) block --> machinelist (kept in memory, rebuilt dynamically from reports) * 3) block {@literal -->} machinelist (kept in memory, rebuilt dynamically
* 4) machine --> blocklist (inverted #2) * from reports)
* 4) machine {@literal -->} blocklist (inverted #2)
* 5) LRU cache of updated-heartbeat machines * 5) LRU cache of updated-heartbeat machines
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
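To make the five items above concrete, here is a deliberately simplified sketch of those mappings as plain Java collections. The field names and types are invented for illustration; the real FSNamesystem keeps this state inside FSDirectory, BlockManager and DatanodeManager rather than as bare maps:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

class NamespaceTablesSketch {
  // 1) valid fsname -> blocklist (persisted and logged in the real system)
  Map<String, List<Long>> fileToBlocks = new HashMap<>();
  // 2) set of all valid block ids (the inversion of #1)
  Set<Long> validBlocks = new HashSet<>();
  // 3) block -> machine list (kept in memory, rebuilt from block reports)
  Map<Long, List<String>> blockToDatanodes = new HashMap<>();
  // 4) machine -> blocklist (the inversion of #3)
  Map<String, List<Long>> datanodeToBlocks = new HashMap<>();
  // 5) LRU-style queue of datanodes ordered by most recent heartbeat
  Deque<String> recentHeartbeats = new ArrayDeque<>();
}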
@ -1732,11 +1733,12 @@ public boolean isInStandbyState() {
} }
/** /**
* return a list of blocks & their locations on <code>datanode</code> whose * return a list of blocks &amp; their locations on {@code datanode} whose
* total size is <code>size</code> * total size is {@code size}
* *
* @param datanode on which blocks are located * @param datanode on which blocks are located
* @param size total size of blocks * @param size total size of blocks
* @param minimumBlockSize
*/ */
public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long
minimumBlockSize) throws IOException { minimumBlockSize) throws IOException {
@ -1753,6 +1755,7 @@ public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long
/** /**
* Dump all metadata into specified file * Dump all metadata into specified file
* @param filename
*/ */
void metaSave(String filename) throws IOException { void metaSave(String filename) throws IOException {
String operationName = "metaSave"; String operationName = "metaSave";
@ -1884,6 +1887,8 @@ public FsServerDefaults getServerDefaults() throws StandbyException {
///////////////////////////////////////////////////////// /////////////////////////////////////////////////////////
/** /**
* Set permissions for an existing file. * Set permissions for an existing file.
* @param src
* @param permission
* @throws IOException * @throws IOException
*/ */
void setPermission(String src, FsPermission permission) throws IOException { void setPermission(String src, FsPermission permission) throws IOException {
@ -1908,6 +1913,9 @@ void setPermission(String src, FsPermission permission) throws IOException {
/** /**
* Set owner for an existing file. * Set owner for an existing file.
* @param src
* @param group
* @param username
* @throws IOException * @throws IOException
*/ */
void setOwner(String src, String username, String group) void setOwner(String src, String username, String group)
@ -2188,6 +2196,7 @@ void createSymlink(String target, String link,
* @param replication new replication * @param replication new replication
* @return true if successful; * @return true if successful;
* false if file does not exist or is a directory * false if file does not exist or is a directory
* @throws IOException
*/ */
boolean setReplication(final String src, final short replication) boolean setReplication(final String src, final short replication)
throws IOException { throws IOException {
@ -2219,6 +2228,7 @@ boolean setReplication(final String src, final short replication)
* *
* @param src file/directory path * @param src file/directory path
* @param policyName storage policy name * @param policyName storage policy name
* @throws IOException
*/ */
void setStoragePolicy(String src, String policyName) throws IOException { void setStoragePolicy(String src, String policyName) throws IOException {
final String operationName = "setStoragePolicy"; final String operationName = "setStoragePolicy";
@ -2245,6 +2255,7 @@ void setStoragePolicy(String src, String policyName) throws IOException {
* Satisfy the storage policy for a file or a directory. * Satisfy the storage policy for a file or a directory.
* *
* @param src file/directory path * @param src file/directory path
* @throws IOException
*/ */
void satisfyStoragePolicy(String src, boolean logRetryCache) void satisfyStoragePolicy(String src, boolean logRetryCache)
throws IOException { throws IOException {
@ -2295,6 +2306,7 @@ private void validateStoragePolicySatisfy()
* unset storage policy set for a given file or a directory. * unset storage policy set for a given file or a directory.
* *
* @param src file/directory path * @param src file/directory path
* @throws IOException
*/ */
void unsetStoragePolicy(String src) throws IOException { void unsetStoragePolicy(String src) throws IOException {
final String operationName = "unsetStoragePolicy"; final String operationName = "unsetStoragePolicy";
@ -2321,6 +2333,7 @@ void unsetStoragePolicy(String src) throws IOException {
* @param src * @param src
* file/directory path * file/directory path
* @return storage policy object * @return storage policy object
* @throws IOException
*/ */
BlockStoragePolicy getStoragePolicy(String src) throws IOException { BlockStoragePolicy getStoragePolicy(String src) throws IOException {
checkOperation(OperationCategory.READ); checkOperation(OperationCategory.READ);
@ -2336,6 +2349,7 @@ BlockStoragePolicy getStoragePolicy(String src) throws IOException {
/** /**
* @return All the existing block storage policies * @return All the existing block storage policies
* @throws IOException
*/ */
BlockStoragePolicy[] getStoragePolicies() throws IOException { BlockStoragePolicy[] getStoragePolicies() throws IOException {
checkOperation(OperationCategory.READ); checkOperation(OperationCategory.READ);

View File

@ -523,8 +523,8 @@ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) {
* 2. For a {@link WithName} node, since the node must be in a snapshot, we * 2. For a {@link WithName} node, since the node must be in a snapshot, we
* only count the quota usage for those nodes that still existed at the * only count the quota usage for those nodes that still existed at the
* creation time of the snapshot associated with the {@link WithName} node. * creation time of the snapshot associated with the {@link WithName} node.
* We do not count in the size of the diff list. * We do not count in the size of the diff list.
* <pre> * </pre>
* *
* @param bsps Block storage policy suite to calculate intended storage type usage * @param bsps Block storage policy suite to calculate intended storage type usage
* @param blockStoragePolicyId block storage policy id of the current INode * @param blockStoragePolicyId block storage policy id of the current INode

View File

@ -669,10 +669,10 @@ public void cleanSubtree(ReclaimContext reclaimContext, int snapshot,
/** /**
* {@inheritDoc} * {@inheritDoc}
* <br/> * <br>
* To destroy a DstReference node, we first remove its link with the * To destroy a DstReference node, we first remove its link with the
* referred node. If the reference number of the referred node is <= 0, we * referred node. If the reference number of the referred node is &lt;= 0,
* destroy the subtree of the referred node. Otherwise, we clean the * we destroy the subtree of the referred node. Otherwise, we clean the
* referred node's subtree and delete everything created after the last * referred node's subtree and delete everything created after the last
* rename operation, i.e., everything outside of the scope of the prior * rename operation, i.e., everything outside of the scope of the prior
* WithName nodes. * WithName nodes.

View File

@ -329,8 +329,8 @@ public int getPathSnapshotId() {
} }
/** /**
* @return the i-th inode if i >= 0; * @return the i-th inode if i {@literal >=} 0;
* otherwise, i < 0, return the (length + i)-th inode. * otherwise, i {@literal <} 0, return the (length + i)-th inode.
*/ */
public INode getINode(int i) { public INode getINode(int i) {
return inodes[(i < 0) ? inodes.length + i : i]; return inodes[(i < 0) ? inodes.length + i : i];

View File

@ -112,7 +112,7 @@ boolean canRollBack(StorageInfo storage, StorageInfo prevStorage,
void doRollback() throws IOException; void doRollback() throws IOException;
/** /**
* Discard the segments whose first txid is >= the given txid. * Discard the segments whose first txid is {@literal >=} the given txid.
* @param startTxId The given txid should be right at the segment boundary, * @param startTxId The given txid should be right at the segment boundary,
* i.e., it should be the first txid of some segment, if segment corresponding * i.e., it should be the first txid of some segment, if segment corresponding
* to the txid exists. * to the txid exists.

View File

@ -208,7 +208,7 @@ private synchronized INode[] getINodesWithLease() {
* read or write lock. * read or write lock.
* *
* @param ancestorDir the ancestor {@link INodeDirectory} * @param ancestorDir the ancestor {@link INodeDirectory}
* @return Set<INodesInPath> * @return {@code Set<INodesInPath>}
*/ */
public Set<INodesInPath> getINodeWithLeases(final INodeDirectory public Set<INodesInPath> getINodeWithLeases(final INodeDirectory
ancestorDir) throws IOException { ancestorDir) throws IOException {

View File

@ -52,7 +52,7 @@ public MetaRecoveryContext(int force) {
* Display a prompt to the user and get his or her choice. * Display a prompt to the user and get his or her choice.
* *
* @param prompt The prompt to display * @param prompt The prompt to display
* @param default First choice (will be taken if autoChooseDefault is * @param firstChoice First choice (will be taken if autoChooseDefault is
* true) * true)
* @param choices Other choices * @param choices Other choices
* *

View File

@ -176,8 +176,8 @@
* is a second backup/failover NameNode, or when using federated NameNodes.) * is a second backup/failover NameNode, or when using federated NameNodes.)
* *
* The NameNode controls two critical tables: * The NameNode controls two critical tables:
* 1) filename->blocksequence (namespace) * 1) filename{@literal ->}blocksequence (namespace)
* 2) block->machinelist ("inodes") * 2) block{@literal ->}machinelist ("inodes")
* *
* The first table is stored on disk and is very precious. * The first table is stored on disk and is very precious.
* The second table is rebuilt every time the NameNode comes up. * The second table is rebuilt every time the NameNode comes up.
@ -1111,7 +1111,7 @@ public InetSocketAddress getHttpsAddress() {
} }
/** /**
* @return NameNodeHttpServer, used by unit tests to ensure a full shutdown, * NameNodeHttpServer, used by unit tests to ensure a full shutdown,
* so that no bind exception is thrown during restart. * so that no bind exception is thrown during restart.
*/ */
@VisibleForTesting @VisibleForTesting

View File

@ -94,14 +94,13 @@
* <p>The tool scans all files and directories, starting from an indicated * <p>The tool scans all files and directories, starting from an indicated
* root path. The following abnormal conditions are detected and handled:</p> * root path. The following abnormal conditions are detected and handled:</p>
* <ul> * <ul>
* <li>files with blocks that are completely missing from all datanodes.<br/> * <li>files with blocks that are completely missing from all datanodes.<br>
* In this case the tool can perform one of the following actions: * In this case the tool can perform one of the following actions:
* <ul> * <ul>
* <li>none ({@link #FIXING_NONE})</li>
* <li>move corrupted files to /lost+found directory on DFS * <li>move corrupted files to /lost+found directory on DFS
* ({@link #FIXING_MOVE}). Remaining data blocks are saved as a * ({@link #doMove}). Remaining data blocks are saved as a
* block chains, representing longest consecutive series of valid blocks.</li> * block chains, representing longest consecutive series of valid blocks.</li>
* <li>delete corrupted files ({@link #FIXING_DELETE})</li> * <li>delete corrupted files ({@link #doDelete})</li>
* </ul> * </ul>
* </li> * </li>
* <li>detect files with under-replicated or over-replicated blocks</li> * <li>detect files with under-replicated or over-replicated blocks</li>
@ -201,7 +200,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
*/ */
NamenodeFsck(Configuration conf, NameNode namenode, NamenodeFsck(Configuration conf, NameNode namenode,
NetworkTopology networktopology, NetworkTopology networktopology,
Map<String,String[]> pmap, PrintWriter out, Map<String, String[]> pmap, PrintWriter out,
int totalDatanodes, InetAddress remoteAddress) { int totalDatanodes, InetAddress remoteAddress) {
this.conf = conf; this.conf = conf;
this.namenode = namenode; this.namenode = namenode;

View File

@ -47,7 +47,7 @@ public static Counts newInstance() {
/** /**
* Is quota violated? * Is quota violated?
* The quota is violated if quota is set and usage > quota. * The quota is violated if quota is set and usage &gt; quota.
*/ */
public static boolean isViolated(final long quota, final long usage) { public static boolean isViolated(final long quota, final long usage) {
return quota >= 0 && usage > quota; return quota >= 0 && usage > quota;
@ -55,7 +55,8 @@ public static boolean isViolated(final long quota, final long usage) {
/** /**
* Is quota violated? * Is quota violated?
* The quota is violated if quota is set, delta > 0 and usage + delta > quota. * The quota is violated if quota is set, delta &gt; 0 and
* usage + delta &gt; quota.
*/ */
static boolean isViolated(final long quota, final long usage, static boolean isViolated(final long quota, final long usage,
final long delta) { final long delta) {
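The delta-based overload follows directly from the javadoc just above: a negative quota means no quota is set, and only a positive delta that pushes usage past the quota counts as a violation. A minimal sketch of that check, which may differ from the actual method body in minor details:

// quota < 0  -> no quota set, never violated
// delta <= 0 -> usage is not growing, never violated here
static boolean isViolatedSketch(final long quota, final long usage, final long delta) {
  return quota >= 0 && delta > 0 && usage + delta > quota;
}

// isViolatedSketch(100, 90, 20) -> true   (90 + 20 exceeds the quota of 100)
// isViolatedSketch(-1, 90, 20)  -> false  (no quota set)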

View File

@ -319,7 +319,7 @@ void addDummyTracker(final long zoneId, ZoneSubmissionTracker zst) {
/** /**
* Main loop. It takes at most 1 zone per scan, and executes until the zone * Main loop. It takes at most 1 zone per scan, and executes until the zone
* is completed. * is completed.
* {@see #reencryptEncryptionZoneInt(Long)}. * {@link #reencryptEncryptionZone(long)}.
*/ */
@Override @Override
public void run() { public void run() {

View File

@ -31,7 +31,7 @@
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
/** /**
* There are four types of extended attributes <XAttr> defined by the * There are four types of extended attributes &lt;XAttr&gt; defined by the
* following namespaces: * following namespaces:
* <br> * <br>
* USER - extended user attributes: these can be assigned to files and * USER - extended user attributes: these can be assigned to files and
@ -56,7 +56,7 @@
* is called on a file or directory in the /.reserved/raw HDFS directory * is called on a file or directory in the /.reserved/raw HDFS directory
* hierarchy. These attributes can only be accessed by the user who have * hierarchy. These attributes can only be accessed by the user who have
* read access. * read access.
* </br> * <br>
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class XAttrPermissionFilter { public class XAttrPermissionFilter {

View File

@ -33,7 +33,7 @@ public class XAttrStorage {
/** /**
* Reads the extended attribute of an inode by name with prefix. * Reads the extended attribute of an inode by name with prefix.
* <p/> * <p>
* *
* @param inode INode to read * @param inode INode to read
* @param snapshotId the snapshotId of the requested path * @param snapshotId the snapshotId of the requested path
@ -48,11 +48,11 @@ public static XAttr readINodeXAttrByPrefixedName(INode inode, int snapshotId,
/** /**
* Reads the existing extended attributes of an inode. * Reads the existing extended attributes of an inode.
* <p/> * <p>
* Must be called while holding the FSDirectory read lock. * Must be called while holding the FSDirectory read lock.
* *
* @param inodeAttr INodeAttributes to read. * @param inodeAttr INodeAttributes to read.
* @return List<XAttr> <code>XAttr</code> list. * @return {@code XAttr} list.
*/ */
public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) { public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) {
XAttrFeature f = inodeAttr.getXAttrFeature(); XAttrFeature f = inodeAttr.getXAttrFeature();
@ -61,7 +61,7 @@ public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) {
/** /**
* Update xattrs of inode. * Update xattrs of inode.
* <p/> * <p>
* Must be called while holding the FSDirectory write lock. * Must be called while holding the FSDirectory write lock.
* *
* @param inode INode to update * @param inode INode to update

View File

@ -157,10 +157,10 @@ public final int getLastSnapshotId() {
/** /**
* Find the latest snapshot before a given snapshot. * Find the latest snapshot before a given snapshot.
* @param anchorId The returned snapshot's id must be <= or < this given * @param anchorId The returned snapshot's id must be &lt;= or &lt; this
* snapshot id. * given snapshot id.
* @param exclusive True means the returned snapshot's id must be < the given * @param exclusive True means the returned snapshot's id must be &lt; the
* id, otherwise <=. * given id, otherwise &lt;=.
* @return The id of the latest snapshot before the given snapshot. * @return The id of the latest snapshot before the given snapshot.
*/ */
public final int getPrior(int anchorId, boolean exclusive) { public final int getPrior(int anchorId, boolean exclusive) {

View File

@ -44,15 +44,15 @@
* and created after a particular snapshot and before the next snapshot. The * and created after a particular snapshot and before the next snapshot. The
* sequence will look like this: * sequence will look like this:
* <p> * <p>
* s0->s1->s2->s3->s4->s5->s6->s7->s8->s9. * {@literal s0->s1->s2->s3->s4->s5->s6->s7->s8->s9}.
* <p> * <p>
* Assuming a skip interval of 3, which means a new diff will be added at a * Assuming a skip interval of 3, which means a new diff will be added at a
* level higher than the current level after we have more than 3 snapshots. * level higher than the current level after we have more than 3 snapshots.
* Next level promotion happens after 9 snapshots and so on. * Next level promotion happens after 9 snapshots and so on.
* <p> * <p>
* level 2: s08------------------------------->s9 * level 2: {@literal s08------------------------------->s9}
* level 1: S02------->s35-------->s68-------->s9 * level 1: {@literal S02------->s35-------->s68-------->s9}
* level 0: s0->s1->s2->s3->s4->s5->s6->s7->s8->s9 * level 0: {@literal s0->s1->s2->s3->s4->s5->s6->s7->s8->s9}
* <p> * <p>
* s02 will be created by combining diffs for s0, s1, s2 once s3 gets created. * s02 will be created by combining diffs for s0, s1, s2 once s3 gets created.
* Similarly, s08 will be created by combining s02, s35 and s68 once s9 gets * Similarly, s08 will be created by combining s02, s35 and s68 once s9 gets
@ -143,6 +143,7 @@ final static class SkipListNode implements Comparable<Integer> {
* and level. * and level.
* *
* @param diff The element to be stored in the node. * @param diff The element to be stored in the node.
* @param level
*/ */
SkipListNode(DirectoryDiff diff, int level) { SkipListNode(DirectoryDiff diff, int level) {
this.diff = diff; this.diff = diff;

View File

@ -106,8 +106,8 @@ public synchronized void addAll(long startPath, List<ItemInfo> itemInfoList,
* Add the itemInfo to tracking list for which storage movement expected if * Add the itemInfo to tracking list for which storage movement expected if
* necessary. * necessary.
* *
* @param itemInfoList * @param itemInfo
* - List of child in the directory * - child in the directory
* @param scanCompleted * @param scanCompleted
* -Indicates whether the ItemInfo start id directory has no more * -Indicates whether the ItemInfo start id directory has no more
* elements to scan. * elements to scan.
@ -191,7 +191,6 @@ public synchronized void removeItemTrackInfo(ItemInfo trackInfo,
/** /**
* Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
* and notify to clean up required resources. * and notify to clean up required resources.
* @throws IOException
*/ */
public synchronized void clearQueuesWithNotification() { public synchronized void clearQueuesWithNotification() {
// Remove xAttr from directories // Remove xAttr from directories

View File

@ -69,7 +69,7 @@ public DatanodeCacheManager(Configuration conf) {
/** /**
* Returns the live datanodes and its storage details, which has available * Returns the live datanodes and its storage details, which has available
* space (> 0) to schedule block moves. This will return array of datanodes * space (&gt; 0) to schedule block moves. This will return array of datanodes
* from its local cache. It has a configurable refresh interval in millis and * from its local cache. It has a configurable refresh interval in millis and
* periodically refresh the datanode cache by fetching latest * periodically refresh the datanode cache by fetching latest
* {@link Context#getLiveDatanodeStorageReport()} once it elapsed refresh * {@link Context#getLiveDatanodeStorageReport()} once it elapsed refresh

View File

@ -39,7 +39,7 @@
* configured by the administrator. * configured by the administrator.
* *
* <p> * <p>
* If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
* it won't do anything, just maintains the sps invoked path ids. Administrator * it won't do anything, just maintains the sps invoked path ids. Administrator
* requires to start external sps service explicitly, to fetch the sps invoked * requires to start external sps service explicitly, to fetch the sps invoked
* path ids from namenode, then do necessary computations and block movement in * path ids from namenode, then do necessary computations and block movement in
@ -48,7 +48,7 @@
* external sps service functionality. * external sps service functionality.
* *
* <p> * <p>
* If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then it * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then it
* will disable the sps feature completely by clearing all queued up sps path's * will disable the sps feature completely by clearing all queued up sps path's
* hint. * hint.
* *
@ -88,12 +88,12 @@ public StoragePolicySatisfyManager(Configuration conf,
* This function will do following logic based on the configured sps mode: * This function will do following logic based on the configured sps mode:
* *
* <p> * <p>
* If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
* it won't do anything. Administrator requires to start external sps service * it won't do anything. Administrator requires to start external sps service
* explicitly. * explicitly.
* *
* <p> * <p>
* If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then the * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the
* service is disabled and won't do any action. * service is disabled and won't do any action.
*/ */
public void start() { public void start() {
@ -121,12 +121,12 @@ public void start() {
* This function will do following logic based on the configured sps mode: * This function will do following logic based on the configured sps mode:
* *
* <p> * <p>
* If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
* it won't do anything. Administrator requires to stop external sps service * it won't do anything. Administrator requires to stop external sps service
* explicitly, if needed. * explicitly, if needed.
* *
* <p> * <p>
* If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then the * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the
* service is disabled and won't do any action. * service is disabled and won't do any action.
*/ */
public void stop() { public void stop() {
@ -225,6 +225,7 @@ public Long getNextPathId() {
/** /**
* Verify that satisfier queue limit exceeds allowed outstanding limit. * Verify that satisfier queue limit exceeds allowed outstanding limit.
* @throws IOException
*/ */
public void verifyOutstandingPathQLimit() throws IOException { public void verifyOutstandingPathQLimit() throws IOException {
long size = pathsToBeTraveresed.size(); long size = pathsToBeTraveresed.size();
@ -269,6 +270,7 @@ public void removeAllPathIds() {
/** /**
* Adds the sps path to SPSPathIds list. * Adds the sps path to SPSPathIds list.
* @param id
*/ */
public void addPathId(long id) { public void addPathId(long id) {
synchronized (pathsToBeTraveresed) { synchronized (pathsToBeTraveresed) {

View File

@ -179,7 +179,7 @@ public float getPercentComplete(Phase phase, Step step) {
/** /**
* Returns all phases. * Returns all phases.
* *
* @return Iterable<Phase> containing all phases * @return {@code Iterable<Phase>} containing all phases
*/ */
public Iterable<Phase> getPhases() { public Iterable<Phase> getPhases() {
return EnumSet.allOf(Phase.class); return EnumSet.allOf(Phase.class);
@ -189,7 +189,7 @@ public Iterable<Phase> getPhases() {
* Returns all steps within a phase. * Returns all steps within a phase.
* *
* @param phase Phase to get * @param phase Phase to get
* @return Iterable<Step> all steps * @return {@code Iterable<Step>} all steps
*/ */
public Iterable<Step> getSteps(Phase phase) { public Iterable<Step> getSteps(Phase phase) {
return new TreeSet<Step>(phases.get(phase).steps.keySet()); return new TreeSet<Step>(phases.get(phase).steps.keySet());

View File

@ -47,22 +47,22 @@
/** /**
* The interface to the top metrics. * The interface to the top metrics.
* <p/> * <p>
* Metrics are collected by a custom audit logger, {@link org.apache.hadoop * Metrics are collected by a custom audit logger, {@link org.apache.hadoop
* .hdfs.server.namenode.top.TopAuditLogger}, which calls TopMetrics to * .hdfs.server.namenode.top.TopAuditLogger}, which calls TopMetrics to
* increment per-operation, per-user counts on every audit log call. These * increment per-operation, per-user counts on every audit log call. These
* counts are used to show the top users by NameNode operation as well as * counts are used to show the top users by NameNode operation as well as
* across all operations. * across all operations.
* <p/> * <p>
* TopMetrics maintains these counts for a configurable number of time * TopMetrics maintains these counts for a configurable number of time
* intervals, e.g. 1min, 5min, 25min. Each interval is tracked by a * intervals, e.g. 1min, 5min, 25min. Each interval is tracked by a
* RollingWindowManager. * RollingWindowManager.
* <p/> * <p>
* These metrics are published as a JSON string via {@link org.apache.hadoop * These metrics are published as a JSON string via {@link org.apache.hadoop
* .hdfs.server .namenode.metrics.FSNamesystemMBean#getTopWindows}. This is * .hdfs.server .namenode.metrics.FSNamesystemMBean#getTopWindows}. This is
* done by calling {@link org.apache.hadoop.hdfs.server.namenode.top.window * done by calling {@link org.apache.hadoop.hdfs.server.namenode.top.window
* .RollingWindowManager#snapshot} on each RollingWindowManager. * .RollingWindowManager#snapshot} on each RollingWindowManager.
* <p/> * <p>
* Thread-safe: relies on thread-safety of RollingWindowManager * Thread-safe: relies on thread-safety of RollingWindowManager
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
@ -119,6 +119,13 @@ public List<TopWindow> getTopWindows() {
* log file. This is to be consistent when {@link TopMetrics} is charged with * log file. This is to be consistent when {@link TopMetrics} is charged with
* data read back from log files instead of being invoked directly by the * data read back from log files instead of being invoked directly by the
* FsNamesystem * FsNamesystem
* @param succeeded
* @param userName
* @param addr
* @param cmd
* @param src
* @param dst
* @param status
*/ */
public void report(boolean succeeded, String userName, InetAddress addr, public void report(boolean succeeded, String userName, InetAddress addr,
String cmd, String src, String dst, FileStatus status) { String cmd, String src, String dst, FileStatus status) {
@ -147,6 +154,8 @@ public void report(long currTime, String userName, String cmd) {
* {@link org.apache.hadoop.metrics2.MetricsRecord}s for consumption by * {@link org.apache.hadoop.metrics2.MetricsRecord}s for consumption by
* external metrics systems. Each metrics record added corresponds to the * external metrics systems. Each metrics record added corresponds to the
* reporting period a.k.a window length of the configured rolling windows. * reporting period a.k.a window length of the configured rolling windows.
* @param collector
* @param all
*/ */
@Override @Override
public void getMetrics(MetricsCollector collector, boolean all) { public void getMetrics(MetricsCollector collector, boolean all) {

View File

@ -29,23 +29,24 @@
* Events are reported based on occurrence time. The total number of events in * Events are reported based on occurrence time. The total number of events in
* the last period covered by the rolling window can be retrieved by the * the last period covered by the rolling window can be retrieved by the
* {@link #getSum(long)} method. * {@link #getSum(long)} method.
* <p/> * <p>
* *
* Assumptions: * Assumptions:
* <p/> * <p>
* *
* (1) Concurrent invocation of {@link #incAt} method are possible * (1) Concurrent invocation of {@link #incAt} method are possible
* <p/> * <p>
* *
* (2) The time parameter of two consecutive invocation of {@link #incAt} could * (2) The time parameter of two consecutive invocation of {@link #incAt} could
* be in any given order * be in any given order
* <p/> * <p>
* *
* (3) The buffering delays are not more than the window length, i.e., after two * (3) The buffering delays are not more than the window length, i.e., after two
* consecutive invocation {@link #incAt(long time1, long)} and * consecutive invocation {@link #incAt(long time1, long)} and
* {@link #incAt(long time2, long)}, time1 < time2 || time1 - time2 < windowLenMs. * {@link #incAt(long time2, long)}, time1 &lt; time2 || time1 - time2 &lt;
* windowLenMs.
* This assumption helps avoiding unnecessary synchronizations. * This assumption helps avoiding unnecessary synchronizations.
* <p/> * <p>
* *
* Thread-safety is built in the {@link RollingWindow.Bucket} * Thread-safety is built in the {@link RollingWindow.Bucket}
*/ */
@ -85,7 +86,7 @@ public class RollingWindow {
/** /**
* When an event occurs at the specified time, this method reflects that in * When an event occurs at the specified time, this method reflects that in
* the rolling window. * the rolling window.
* <p/> * <p>
* *
* @param time the time at which the event occurred * @param time the time at which the event occurred
* @param delta the delta that will be added to the window * @param delta the delta that will be added to the window
@ -153,6 +154,7 @@ void safeReset(long time) {
* performed. We do not need to update the {@link #updateTime} because as * performed. We do not need to update the {@link #updateTime} because as
* long as the {@link #updateTime} belongs to the current view of the * long as the {@link #updateTime} belongs to the current view of the
* rolling window, the algorithm works fine. * rolling window, the algorithm works fine.
* @param delta
*/ */
void inc(long delta) { void inc(long delta) {
value.addAndGet(delta); value.addAndGet(delta);
@ -161,7 +163,7 @@ void inc(long delta) {
/** /**
* Get value represented by this window at the specified time * Get value represented by this window at the specified time
* <p/> * <p>
* *
* If time lags behind the latest update time, the new updates are still * If time lags behind the latest update time, the new updates are still
* included in the sum * included in the sum

View File

@ -38,7 +38,7 @@
* A class to manage the set of {@link RollingWindow}s. This class is the * A class to manage the set of {@link RollingWindow}s. This class is the
* interface of metrics system to the {@link RollingWindow}s to retrieve the * interface of metrics system to the {@link RollingWindow}s to retrieve the
* current top metrics. * current top metrics.
* <p/> * <p>
* Thread-safety is provided by each {@link RollingWindow} being thread-safe as * Thread-safety is provided by each {@link RollingWindow} being thread-safe as
* well as {@link ConcurrentHashMap} for the collection of them. * well as {@link ConcurrentHashMap} for the collection of them.
*/ */

View File

@ -30,11 +30,12 @@
* *
* Upon receiving this command, this DataNode pass the array of block movement * Upon receiving this command, this DataNode pass the array of block movement
* details to * details to
* {@link org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker} * {@link org.apache.hadoop.hdfs.server.sps.ExternalSPSBlockMoveTaskHandler}
* service. Later, StoragePolicySatisfyWorker will schedule block movement tasks * service. Later, ExternalSPSBlockMoveTaskHandler will schedule block movement
* for these blocks and monitors the completion of each task. After the block * tasks for these blocks and monitors the completion of each task. After the
* movement attempt is finished(with success or failure) this DataNode will send * block movement attempt is finished(with success or failure) this DataNode
* response back to NameNode about the block movement attempt finished details. * will send response back to NameNode about the block movement attempt
* finished details.
*/ */
public class BlockStorageMovementCommand extends DatanodeCommand { public class BlockStorageMovementCommand extends DatanodeCommand {
private final String blockPoolId; private final String blockPoolId;

View File

@ -158,7 +158,7 @@ public DatanodeCommand blockReport(DatanodeRegistration registration,
* {@link #blockReport(DatanodeRegistration, String, StorageBlockReport[], BlockReportContext)}, * {@link #blockReport(DatanodeRegistration, String, StorageBlockReport[], BlockReportContext)},
* which is used to communicate blocks stored on disk. * which is used to communicate blocks stored on disk.
* *
* @param The datanode registration. * @param registration The datanode registration.
* @param poolId The block pool ID for the blocks. * @param poolId The block pool ID for the blocks.
* @param blockIds A list of block IDs. * @param blockIds A list of block IDs.
* @return The DatanodeCommand. * @return The DatanodeCommand.

View File

@ -73,7 +73,7 @@ public interface NamenodeProtocol {
* @param datanode a data node * @param datanode a data node
* @param size requested size * @param size requested size
* @param minBlockSize each block should be of this minimum Block Size * @param minBlockSize each block should be of this minimum Block Size
* @return a list of blocks & their locations * @return BlocksWithLocations a list of blocks &amp; their locations
* @throws IOException if size is less than or equal to 0 or * @throws IOException if size is less than or equal to 0 or
datanode does not exist datanode does not exist
*/ */
@ -183,7 +183,8 @@ public void endCheckpoint(NamenodeRegistration registration,
/** /**
* Return a structure containing details about all edit logs * Return a structure containing details about all edit logs
* available to be fetched from the NameNode. * available to be fetched from the NameNode.
* @param sinceTxId return only logs that contain transactions >= sinceTxId * @param sinceTxId return only logs that contain transactions {@literal >=}
* sinceTxId
*/ */
@Idempotent @Idempotent
public RemoteEditLogManifest getEditLogManifest(long sinceTxId) public RemoteEditLogManifest getEditLogManifest(long sinceTxId)

View File

@ -36,8 +36,10 @@
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver; import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;

View File

@ -50,14 +50,17 @@
* <p>The tool scans all files and directories, starting from an indicated * <p>The tool scans all files and directories, starting from an indicated
* root path. The following abnormal conditions are detected and handled:</p> * root path. The following abnormal conditions are detected and handled:</p>
* <ul> * <ul>
* <li>files with blocks that are completely missing from all datanodes.<br/> * <li>files with blocks that are completely missing from all datanodes.<br>
* In this case the tool can perform one of the following actions: * In this case the tool can perform one of the following actions:
* <ul> * <ul>
* <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
* <li>move corrupted files to /lost+found directory on DFS * <li>move corrupted files to /lost+found directory on DFS
* ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as a * ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#doMove}).
* block chains, representing longest consecutive series of valid blocks.</li> * Remaining data blocks are saved as a
* <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li> * block chains, representing longest consecutive series of valid blocks.
* </li>
* <li>delete corrupted files
* ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#doDelete})
* </li>
* </ul> * </ul>
* </li> * </li>
* <li>detect files with under-replicated or over-replicated blocks</li> * <li>detect files with under-replicated or over-replicated blocks</li>

View File

@ -121,8 +121,8 @@ public static Options buildOptions() {
/** Process an edit log using the chosen processor or visitor. /** Process an edit log using the chosen processor or visitor.
* *
* @param inputFilename The file to process * @param inputFileName The file to process
* @param outputFilename The output file name * @param outputFileName The output file name
* @param processor If visitor is null, the processor to use * @param processor If visitor is null, the processor to use
* @param visitor If non-null, the visitor to use. * @param visitor If non-null, the visitor to use.
* *

View File

@ -52,7 +52,7 @@ abstract public interface OfflineEditsVisitor {
* Begin visiting an element that encloses another element, such as * Begin visiting an element that encloses another element, such as
* the beginning of the list of blocks that comprise a file. * the beginning of the list of blocks that comprise a file.
* *
* @param value Token being visited * @param op Token being visited
*/ */
abstract void visitOp(FSEditLogOp op) abstract void visitOp(FSEditLogOp op)
throws IOException; throws IOException;

View File

@ -50,9 +50,7 @@ public class StatisticsEditsVisitor implements OfflineEditsVisitor {
* Create a processor that writes to the file named and may or may not * Create a processor that writes to the file named and may or may not
* also output to the screen, as specified. * also output to the screen, as specified.
* *
* @param filename Name of file to write output to * @param out Name of file to write output to
* @param tokenizer Input tokenizer
* @param printToScreen Mirror output to screen?
*/ */
public StatisticsEditsVisitor(OutputStream out) throws IOException { public StatisticsEditsVisitor(OutputStream out) throws IOException {
this.out = new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8)); this.out = new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8));

View File

@ -26,11 +26,13 @@
/** /**
* File name distribution visitor. * File name distribution visitor.
* <p> * <p>
* It analyzes file names in fsimage and prints the following information: * It analyzes file names in fsimage and prints the following information:
* <ul>
* <li>Number of unique file names</li> * <li>Number of unique file names</li>
* <li>Number file names and the corresponding number range of files that use * <li>Number file names and the corresponding number range of files that use
* these same names</li> * these same names</li>
* <li>Heap saved if the file name objects are reused</li> * <li>Heap saved if the file name objects are reused</li>
* </ul>
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class NameDistributionVisitor extends TextWriterImageVisitor { public class NameDistributionVisitor extends TextWriterImageVisitor {

View File

@ -470,23 +470,23 @@ public List<E> apply2Current(final List<E> current) {
* <pre> * <pre>
* 1. For (c, 0) in the posterior diff, check the element in this diff: * 1. For (c, 0) in the posterior diff, check the element in this diff:
* 1.1 (c', 0) in this diff: impossible * 1.1 (c', 0) in this diff: impossible
* 1.2 (0, d') in this diff: put in c-list --> (c, d') * 1.2 (0, d') in this diff: put in c-list --&gt; (c, d')
* 1.3 (c', d') in this diff: impossible * 1.3 (c', d') in this diff: impossible
* 1.4 (0, 0) in this diff: put in c-list --> (c, 0) * 1.4 (0, 0) in this diff: put in c-list --&gt; (c, 0)
* This is the same logic as create(E). * This is the same logic as create(E).
* *
* 2. For (0, d) in the posterior diff, * 2. For (0, d) in the posterior diff,
* 2.1 (c', 0) in this diff: remove from c-list --> (0, 0) * 2.1 (c', 0) in this diff: remove from c-list --&gt; (0, 0)
* 2.2 (0, d') in this diff: impossible * 2.2 (0, d') in this diff: impossible
* 2.3 (c', d') in this diff: remove from c-list --> (0, d') * 2.3 (c', d') in this diff: remove from c-list --&gt; (0, d')
* 2.4 (0, 0) in this diff: put in d-list --> (0, d) * 2.4 (0, 0) in this diff: put in d-list --&gt; (0, d)
* This is the same logic as delete(E). * This is the same logic as delete(E).
* *
* 3. For (c, d) in the posterior diff, * 3. For (c, d) in the posterior diff,
* 3.1 (c', 0) in this diff: replace the element in c-list --> (c, 0) * 3.1 (c', 0) in this diff: replace the element in c-list --&gt; (c, 0)
* 3.2 (0, d') in this diff: impossible * 3.2 (0, d') in this diff: impossible
* 3.3 (c', d') in this diff: replace the element in c-list --> (c, d') * 3.3 (c', d') in this diff: replace the element in c-list --&gt; (c, d')
* 3.4 (0, 0) in this diff: put in c-list and d-list --> (c, d) * 3.4 (0, 0) in this diff: put in c-list and d-list --&gt; (c, d)
* This is the same logic as modify(E, E). * This is the same logic as modify(E, E).
* </pre> * </pre>
* *

View File

@ -116,8 +116,8 @@ private static String codePointToEntityRef(int cp) {
* *
* There are three kinds of code points in XML: * There are three kinds of code points in XML:
* - Those that can be represented normally, * - Those that can be represented normally,
* - Those that have to be escaped (for example, & must be represented * - Those that have to be escaped (for example, &amp; must be represented
* as &amp;) * as {@literal &amp;})
* - Those that cannot be represented at all in XML. * - Those that cannot be represented at all in XML.
* *
* The built-in SAX functions will handle the first two types for us just * The built-in SAX functions will handle the first two types for us just