diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 6dd366f650..8627268d9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -160,7 +160,8 @@ public int compare(DatanodeInfo a, DatanodeInfo b) { /** * Comparator for sorting DataNodeInfo[] based on * stale, decommissioned and entering_maintenance states. - * Order: live -> stale -> entering_maintenance -> decommissioned + * Order: live {@literal ->} stale {@literal ->} entering_maintenance + * {@literal ->} decommissioned */ @InterfaceAudience.Private public static class ServiceAndStaleComparator extends ServiceComparator { @@ -390,7 +391,8 @@ public static String addKeySuffixes(String key, String... suffixes) { * @param conf Configuration * @param nsId the nameservice whose NNs addresses we want. * @param defaultValue default address to return in case key is not found. - * @return A map from nnId -> RPC address of each NN in the nameservice. + * @return A map from nnId {@literal ->} RPC address of each NN in the + * nameservice. */ public static Map getRpcAddressesForNameserviceId( Configuration conf, String nsId, String defaultValue) { @@ -1289,7 +1291,8 @@ static URI trimUri(URI uri) { * @param conf configuration * @param protocol Protocol interface * @param service service that implements the protocol - * @param server RPC server to which the protocol & implementation is added to + * @param server RPC server to which the protocol & implementation is + * added to * @throws IOException */ public static void addPBProtocol(Configuration conf, Class protocol, @@ -1357,7 +1360,8 @@ public static String getNamenodeWebAddr(final Configuration conf, String nsId, * @param conf Configuration * @param nsId the nameservice whose NNs addresses we want. * @param defaultValue default address to return in case key is not found. - * @return A map from nnId -> Web address of each NN in the nameservice. + * @return A map from nnId {@literal ->} Web address of each NN in the + * nameservice. */ public static Map getWebAddressesForNameserviceId( Configuration conf, String nsId, String defaultValue) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java index 8f482e3531..77e40b4389 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java @@ -201,7 +201,7 @@ public static Builder builder(int maxDataLength) { /** * Very efficient encoding of the block report into a ByteString to avoid * the overhead of protobuf repeating fields. Primitive repeating fields - * require re-allocs of an ArrayList and the associated (un)boxing + * require re-allocs of an ArrayList<Long> and the associated (un)boxing * overhead which puts pressure on GC. 
* * The structure of the buffer is as follows: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java index 2ad19da096..61e8fa375c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java @@ -168,7 +168,7 @@ public JournalResponseProto journal(RpcController unused, return VOID_JOURNAL_RESPONSE; } - /** @see JournalProtocol#heartbeat */ + /** @see QJournalProtocol#heartbeat */ @Override public HeartbeatResponseProto heartbeat(RpcController controller, HeartbeatRequestProto req) throws ServiceException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java index 3b2e8d2ddd..52bc52da21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java @@ -220,7 +220,7 @@ public synchronized void addKeys(ExportedBlockKeys exportedKeys) } /** - * Update block keys if update time > update interval. + * Update block keys if update time {@literal >} update interval. * @return true if the keys are updated. */ public synchronized boolean updateKeys(final long updateTime) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index a58e391b01..d21d13cd9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -78,7 +78,7 @@ *

SYNOPSIS *

  * To start:
- *      bin/start-balancer.sh [-threshold <threshold>]
+ *      bin/start-balancer.sh [-threshold {@literal <threshold>}]
  *      Example: bin/ start-balancer.sh 
  *                     start the balancer with a default threshold of 10%
  *               bin/ start-balancer.sh -threshold 5
@@ -113,13 +113,14 @@
  * 
  * <p>A system property that limits the balancer's use of bandwidth is
  * defined in the default configuration file:
  * <pre>
- * <property>
- *   <name>dfs.datanode.balance.bandwidthPerSec</name>
- *   <value>1048576</value>
- *   <description>  Specifies the maximum bandwidth that each datanode 
+ * &lt;property&gt;
+ *   &lt;name&gt;dfs.datanode.balance.bandwidthPerSec&lt;/name&gt;
+ *   &lt;value&gt;1048576&lt;/value&gt;
+ * &lt;description&gt;  Specifies the maximum bandwidth that each datanode
  * can utilize for the balancing purpose in term of the number of bytes 
- * per second. </description>
- * </property>
+ * per second.
+ * &lt;/description&gt;
+ * &lt;/property&gt;
  * </pre>
  *
  * <p>
This property determines the maximum speed at which a block will be diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 5e14247a7f..d74b523981 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -143,12 +143,13 @@ * If any of the replica is in maintenance mode, the safety property * is extended as follows. These property still apply for the case of zero * maintenance replicas, thus we can use these safe property for all scenarios. - * a. # of live replicas >= # of min replication for maintenance. - * b. # of live replicas <= # of expected redundancy. - * c. # of live replicas and maintenance replicas >= # of expected redundancy. + * a. # of live replicas >= # of min replication for maintenance. + * b. # of live replicas <= # of expected redundancy. + * c. # of live replicas and maintenance replicas >= # of expected + * redundancy. * * For regular replication, # of min live replicas for maintenance is determined - * by DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to <= + * by DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to <= * DFS_NAMENODE_REPLICATION_MIN_KEY. * For erasure encoding, # of min live replicas for maintenance is * BlockInfoStriped#getRealDataBlockNum. @@ -305,7 +306,7 @@ public long getTotalECBlockGroups() { private final double storageInfoDefragmentRatio; /** - * Mapping: Block -> { BlockCollection, datanodes, self ref } + * Mapping: Block {@literal ->} { BlockCollection, datanodes, self ref } * Updated only in response to client-sent information. */ final BlocksMap blocksMap; @@ -321,7 +322,9 @@ public long getTotalECBlockGroups() { private final BlockReportProcessingThread blockReportThread = new BlockReportProcessingThread(); - /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */ + /** + * Store blocks {@literal ->} datanodedescriptor(s) map of corrupt replicas. + */ final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap(); /** @@ -2105,7 +2108,7 @@ public DatanodeStorageInfo[] chooseTarget4AdditionalDatanode(String src, * Choose target datanodes for creating a new block. * * @throws IOException - * if the number of targets < minimum replication. + * if the number of targets {@literal <} minimum replication. * @see BlockPlacementPolicy#chooseTarget(String, int, Node, * Set, long, List, BlockStoragePolicy, EnumSet) */ @@ -2487,7 +2490,8 @@ private static class BlockInfoToAdd { /** * The given storage is reporting all its blocks. - * Update the (storage-->block list) and (block-->storage list) maps. + * Update the (storage{@literal -->}block list) and + * (block{@literal -->}storage list) maps. * * @return true if all known storages of the given DN have finished reporting. * @throws IOException @@ -3777,8 +3781,8 @@ private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block, } /** - * Modify (block-->datanode) map. Possibly generate replication tasks, if the - * removed block is still valid. + * Modify (block{@literal -->}datanode) map. Possibly generate replication + * tasks, if the removed block is still valid. 
*/ public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) { blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node); @@ -4341,7 +4345,7 @@ public void checkRedundancy(BlockCollection bc) { } /** - * Get blocks to invalidate for nodeId + * Get blocks to invalidate for {@code nodeId} * in {@link #invalidateBlocks}. * * @return number of blocks scheduled for removal during this iteration. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index 23e3e40486..897bf69471 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -152,7 +152,6 @@ protected abstract void initialize(Configuration conf, FSClusterStats stats, /** * Check if the move is allowed. Used by balancer and other tools. - * @ * * @param candidates all replicas including source and target * @param source source replica of the move diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java index d6a0972064..d607789420 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java @@ -47,13 +47,11 @@ /** * This class manages datanode configuration using a json file. * Please refer to {@link CombinedHostsFileReader} for the json format. - *

- * <p>
+ * <p>
 * Entries may or may not specify a port. If they don't, we consider
 * them to apply to every DataNode on that host. The code canonicalizes the
 * entries into IP addresses.
- * <p>
- * <p>
+ * <p>
* The code ignores all entries that the DNS fails to resolve their IP * addresses. This is okay because by default the NN rejects the registrations * of DNs when it fails to do a forward and reverse lookup. Note that DNS diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java index fe1224cdb0..fc31584ae6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java @@ -38,7 +38,7 @@ * corrupt. While reporting replicas of a Block, we hide any corrupt * copies. These copies are removed once Block is found to have * expected number of good replicas. - * Mapping: Block -> TreeSet + * Mapping: Block {@literal -> TreeSet} */ @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java index a1dff08b5d..abc0f7c331 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java @@ -57,7 +57,7 @@ * Manages decommissioning and maintenance state for DataNodes. A background * monitor thread periodically checks the status of DataNodes that are * decommissioning or entering maintenance state. - *

+ * <p>
 * A DataNode can be decommissioned in a few situations:
 * <ul>
 * <li>If a DN is dead, it is decommissioned immediately.
  • @@ -72,11 +72,11 @@ * determine if they can be DECOMMISSIONED. The monitor also prunes this list * as blocks become replicated, so monitor scans will become more efficient * over time. - *

+ * <p>
 * DECOMMISSION_INPROGRESS nodes that become dead do not progress to
 * DECOMMISSIONED until they become live again. This prevents potential
 * durability loss for singly-replicated blocks (see HDFS-6791).
- * <p/>
+ * <p>
 * DataNodes can also be put under maintenance state for any short duration
 * maintenance operations. Unlike decommissioning, blocks are not always
 * re-replicated for the DataNodes to enter maintenance state. When the
@@ -88,7 +88,7 @@
 * of maintenance expiry time. When DataNodes don't transition or join the
 * cluster back by expiry time, blocks are re-replicated just as in
 * decommissioning case as to avoid read or write performance degradation.
- * <p/>
+ * <p>
    * This class depends on the FSNamesystem lock for synchronization. */ @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java index b7bf6747b1..4ead0ba6f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java @@ -33,17 +33,16 @@ /** * This class manages the include and exclude files for HDFS. - *

+ * <p>
 * These files control which DataNodes the NameNode expects to see in the
 * cluster. Loosely speaking, the include file, if it exists and is not
 * empty, is a list of everything we expect to see. The exclude file is
 * a list of everything we want to ignore if we do see it.
- * <p/>
+ * <p>
 * Entries may or may not specify a port. If they don't, we consider
 * them to apply to every DataNode on that host. The code canonicalizes the
 * entries into IP addresses.
- * <p/>
- * <p/>
+ * <p>
    * The code ignores all entries that the DNS fails to resolve their IP * addresses. This is okay because by default the NN rejects the registrations * of DNs when it fails to do a forward and reverse lookup. Note that DNS diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java index 958557b4f8..cf7cfac95a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java @@ -35,9 +35,9 @@ /** * The HostSet allows efficient queries on matching wildcard addresses. - *

+ * <p>
    * For InetSocketAddress A and B with the same host address, - * we define a partial order between A and B, A <= B iff A.getPort() == B + * we define a partial order between A and B, A <= B iff A.getPort() == B * .getPort() || B.getPort() == 0. */ public class HostSet implements Iterable { @@ -46,7 +46,7 @@ public class HostSet implements Iterable { /** * The function that checks whether there exists an entry foo in the set - * so that foo <= addr. + * so that foo <= addr. */ boolean matchedBy(InetSocketAddress addr) { Collection ports = addrs.get(addr.getAddress()); @@ -56,7 +56,7 @@ boolean matchedBy(InetSocketAddress addr) { /** * The function that checks whether there exists an entry foo in the set - * so that addr <= foo. + * so that addr <= foo. */ boolean match(InetSocketAddress addr) { int port = addr.getPort(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java index 22983ea1b6..03a6918152 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java @@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports; import org.apache.hadoop.util.Timer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -82,7 +83,7 @@ public class SlowPeerTracker { /** * Information about peers that have reported a node as being slow. - * Each outer map entry is a map of (DatanodeId) -> (timestamp), + * Each outer map entry is a map of (DatanodeId) {@literal ->} (timestamp), * mapping reporting nodes to the timestamp of the last report from * that node. * @@ -146,7 +147,7 @@ public Set getReportsForNode(String slowNode) { /** * Retrieve all reports for all nodes. Stale reports are excluded. * - * @return map from SlowNodeId -> (set of nodes reporting peers). + * @return map from SlowNodeId {@literal ->} (set of nodes reporting peers). */ public Map> getReportsForAllDataNodes() { if (allReports.isEmpty()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index 4fc47d8e48..539baf114d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -56,9 +56,9 @@ * block pool id, on this DataNode. * * This class supports the following functionality: - *

+ * <ul>
 * <li> Formatting a new block pool storage
- * <li> Recovering a storage state to a consistent state (if possible>
+ * <li> Recovering a storage state to a consistent state (if possible)
 * <li> Taking a snapshot of the block pool during upgrade
 * <li> Rolling back a block pool to a previous snapshot
 * <li> Finalizing block storage by deletion of a snapshot
      • @@ -139,11 +139,12 @@ public void addStorageDir(StorageDirectory sd) { /** * Load one storage directory. Recover from previous transitions if required. - * - * @param nsInfo namespace information - * @param dataDir the root path of the storage directory - * @param startOpt startup option - * @return the StorageDirectory successfully loaded. + * @param nsInfo namespace information + * @param location the root path of the storage directory + * @param startOpt startup option + * @param callables list of callable storage directory + * @param conf configuration + * @return * @throws IOException */ private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo, @@ -205,8 +206,10 @@ private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo, * data volume. * * @param nsInfo namespace information - * @param dataDirs storage directories of block pool + * @param location storage directories of block pool * @param startOpt startup option + * @param callables list of callable storage directory + * @param conf configuration * @return an array of loaded block pool directories. * @throws IOException on error */ @@ -240,8 +243,10 @@ List loadBpStorageDirectories(NamespaceInfo nsInfo, * data volume. * * @param nsInfo namespace information - * @param dataDirs storage directories of block pool + * @param location storage directories of block pool * @param startOpt startup option + * @param callables list of callable storage directory + * @param conf configuration * @throws IOException on error */ List recoverTransitionRead(NamespaceInfo nsInfo, @@ -348,13 +353,18 @@ protected void setFieldsFromProperties(Properties props, StorageDirectory sd) * Analyze whether a transition of the BP state is required and * perform it if necessary. *
        - * Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime. - * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime Regular - * startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime + * Rollback if: + * previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime. + * Upgrade if: + * this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime + * Regular startup if: + * this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime * - * @param sd storage directory /current/ + * @param sd storage directory @{literal /current/} * @param nsInfo namespace info * @param startOpt startup option + * @param callables list of callable storage directory + * @param conf configuration * @return true if the new properties has been written. */ private boolean doTransition(StorageDirectory sd, NamespaceInfo nsInfo, @@ -416,20 +426,20 @@ private boolean doTransition(StorageDirectory sd, NamespaceInfo nsInfo, } /** - * Upgrade to any release after 0.22 (0.22 included) release e.g. 0.22 => 0.23 + * Upgrade to any release after 0.22 (0.22 included) release + * e.g. 0.22 => 0.23 * Upgrade procedure is as follows: *
- * <li>If /current//previous exists then delete it
- * <li>Rename /current//current to
- * /current/bpid/current/previous.tmp
- * <li>Create new current//current directory
- *
+ * <li>If {@literal /current//previous} exists then delete it
+ * <li>Rename {@literal /current//current} to
+ * {@literal /current/bpid/current/previous.tmp}
+ * <li>Create new {@literal current//current} directory
 * <li>Hard links for block files are created from previous.tmp to current
 * <li>Save new version file in current directory
+ * <li>Rename previous.tmp to previous
 *
- * <li>Rename previous.tmp to previous
        * - * @param bpSd storage directory /current/ + * @param bpSd storage directory {@literal /current/} * @param nsInfo Namespace Info from the namenode * @throws IOException on error */ @@ -777,12 +787,12 @@ private String getTrashDirectory(File blockFile) { } /** - * Get a target subdirectory under current/ for a given block file that is being - * restored from trash. + * Get a target subdirectory under current/ for a given block file that is + * being restored from trash. * * The subdirectory structure under trash/ mirrors that under current/ to keep * implicit memory of where the files are to be restored. - * + * @param blockFile block file that is being restored from trash. * @return the target directory to restore a previously deleted block file. */ @VisibleForTesting @@ -847,6 +857,7 @@ public boolean trashEnabled() { /** * Create a rolling upgrade marker file for each BP storage root, if it * does not exist already. + * @param dnStorageDirs */ public void setRollingUpgradeMarkers(List dnStorageDirs) throws IOException { @@ -872,6 +883,7 @@ public void setRollingUpgradeMarkers(List dnStorageDirs) * Check whether the rolling upgrade marker file exists for each BP storage * root. If it does exist, then the marker file is cleared and more * importantly the layout upgrade is finalized. + * @param dnStorageDirs */ public void clearRollingUpgradeMarkers(List dnStorageDirs) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java index 34f6c33003..fe0c7f7555 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java @@ -338,19 +338,24 @@ void syncBlock(List syncList) throws IOException { /** * blk_0 blk_1 blk_2 blk_3 blk_4 blk_5 blk_6 blk_7 blk_8 - * 64k 64k 64k 64k 64k 64k 64k 64k 64k <-- stripe_0 + * 64k 64k 64k 64k 64k 64k 64k 64k 64k <-- + * stripe_0 * 64k 64k 64k 64k 64k 64k 64k 64k 64k - * 64k 64k 64k 64k 64k 64k 64k 61k <-- startStripeIdx + * 64k 64k 64k 64k 64k 64k 64k 61k <-- + * startStripeIdx * 64k 64k 64k 64k 64k 64k 64k * 64k 64k 64k 64k 64k 64k 59k * 64k 64k 64k 64k 64k 64k - * 64k 64k 64k 64k 64k 64k <-- last full stripe - * 64k 64k 13k 64k 55k 3k <-- target last stripe + * 64k 64k 64k 64k 64k 64k <-- + * last full stripe + * 64k 64k 13k 64k 55k 3k <-- + * target last stripe * 64k 64k 64k 1k * 64k 64k 58k * 64k 64k * 64k 19k - * 64k <-- total visible stripe + * 64k <-- + * total visible stripe * * Due to different speed of streamers, the internal blocks in a block group * could have different lengths when the block group isn't ended normally. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java index 80818957a8..6b1b96fb02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java @@ -189,7 +189,7 @@ public BlockScanner(DataNode datanode, Configuration conf) { } /** - * Returns true if the block scanner is enabled.

        + * Returns true if the block scanner is enabled. * * If the block scanner is disabled, no volume scanners will be created, and * no threads will start. @@ -234,7 +234,7 @@ public synchronized void addVolumeScanner(FsVolumeReference ref) { } /** - * Stops and removes a volume scanner.

        + * Stops and removes a volume scanner. * * This function will block until the volume scanner has stopped. * @@ -260,7 +260,7 @@ public synchronized void removeVolumeScanner(FsVolumeSpi volume) { } /** - * Stops and removes all volume scanners.

        + * Stops and removes all volume scanners. * * This function will block until all the volume scanners have stopped. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 40f80a99c3..99c0a876d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -242,7 +242,7 @@ * DataNodes. * * The DataNode maintains just one critical table: - * block-> stream of bytes (of BLOCK_SIZE or less) + * block{@literal ->} stream of bytes (of BLOCK_SIZE or less) * * This info is stored on a local disk. The DataNode * reports the table's contents to the NameNode upon startup @@ -527,7 +527,7 @@ protected Configuration getNewConf() { } /** - * {@inheritdoc}. + * {@inheritDoc }. */ @Override public String reconfigurePropertyImpl(String property, String newVal) @@ -2713,7 +2713,8 @@ public static List getStorageLocations(Configuration conf) { return locations; } - /** Instantiate & Start a single datanode daemon and wait for it to finish. + /** Instantiate & Start a single datanode daemon and wait for it to + * finish. * If this thread is specifically interrupted, it will stop waiting. */ @VisibleForTesting @@ -2722,7 +2723,8 @@ public static DataNode createDataNode(String args[], return createDataNode(args, conf, null); } - /** Instantiate & Start a single datanode daemon and wait for it to finish. + /** Instantiate & Start a single datanode daemon and wait for it to + * finish. * If this thread is specifically interrupted, it will stop waiting. */ @VisibleForTesting diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index a85ae32fad..a803c0b0b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -215,7 +215,9 @@ public String getTrashDirectoryForReplica(String bpid, ReplicaInfo info) { /** * VolumeBuilder holds the metadata (e.g., the storage directories) of the - * prepared volume returned from {@link prepareVolume()}. Calling {@link build()} + * prepared volume returned from + * {@link #prepareVolume(DataNode, StorageLocation, List)}. + * Calling {@link VolumeBuilder#build()} * to add the metadata to {@link DataStorage} so that this prepared volume can * be active. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java index 7ae9e455d3..445e021204 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java @@ -280,7 +280,6 @@ public String toString() { /** * Create a new directory scanner, but don't cycle it running yet. 
* - * @param datanode the parent datanode * @param dataset the dataset to scan * @param conf the Configuration object */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java index b8e08d0142..6349062a40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java @@ -178,7 +178,6 @@ public void dirSync(@Nullable FsVolumeSpi volume, File dir) * Call sync_file_range on the given file descriptor. * * @param volume target volume. null if unavailable. - * @throws IOException */ public void syncFileRange( @Nullable FsVolumeSpi volume, FileDescriptor outFd, @@ -198,7 +197,6 @@ public void syncFileRange( * Call posix_fadvise on the given file descriptor. * * @param volume target volume. null if unavailable. - * @throws IOException */ public void posixFadvise( @Nullable FsVolumeSpi volume, String identifier, FileDescriptor outFd, @@ -394,7 +392,6 @@ public FileOutputStream getFileOutputStream( * @param volume target volume. null if unavailable. * @param fd File descriptor object. * @return FileOutputStream to the given file object. - * @throws FileNotFoundException */ public FileOutputStream getFileOutputStream( @Nullable FsVolumeSpi volume, FileDescriptor fd) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index 181ef80166..e0afb9b1f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -45,8 +45,8 @@ import org.slf4j.LoggerFactory; /** - * VolumeScanner scans a single volume. Each VolumeScanner has its own thread.

- * They are all managed by the DataNode's BlockScanner.
+ * VolumeScanner scans a single volume. Each VolumeScanner has its own thread.
+ * <p>
        They are all managed by the DataNode's BlockScanner. */ public class VolumeScanner extends Thread { public static final Logger LOG = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java index 2719f71617..ec2b6561ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java @@ -24,7 +24,10 @@ import com.google.common.annotations.GwtCompatible; import com.google.common.base.Preconditions; import static com.google.common.base.Preconditions.checkNotNull; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import com.google.common.util.concurrent.Uninterruptibles; import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater .newUpdater; @@ -52,9 +55,7 @@ * {@link ListeningExecutorService}, and deriving a {@code Future} from an * existing one, typically using methods like {@link Futures#transform * (ListenableFuture, com.google.common.base.Function) Futures.transform} - * and {@link Futures#catching(ListenableFuture, Class, - * com.google.common.base.Function, java.util.concurrent.Executor) - * Futures.catching}. + * and its overloaded versions. *

 * <p>
        This class implements all methods in {@code ListenableFuture}. * Subclasses should provide a way to set the result of the computation @@ -1265,12 +1266,6 @@ private static CancellationException cancellationExceptionWithCause( * r.run(); * } * }}

- *
- * <p>
This should be preferred to {@link #newDirectExecutorService()} - * because implementing the {@link ExecutorService} subinterface - * necessitates significant performance overhead. - * - * @since 18.0 */ public static Executor directExecutor() { return DirectExecutor.INSTANCE; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index f4bf8398ca..bd9ed7f119 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -192,7 +192,7 @@ public void close() throws IOException { FsVolumeReferences getFsVolumeReferences(); /** - * Add a new volume to the FsDataset.

+ * Add a new volume to the FsDataset. * * If the FSDataset supports block scanning, this function registers * the new volume with the block scanner. @@ -226,7 +226,7 @@ StorageReport[] getStorageReports(String bpid) /** @return the volume that contains a replica of the block. */ V getVolume(ExtendedBlock b); - /** @return a volume information map (name => info). */ + /** @return a volume information map (name {@literal =>} info). */ Map getVolumeInfoMap(); /** @@ -273,7 +273,8 @@ LengthInputStream getMetaDataInputStream(ExtendedBlock b /** * Get reference to the replica meta info in the replicasMap. - * To be called from methods that are synchronized on {@link FSDataset} + * To be called from methods that are synchronized on + * implementations of {@link FsDatasetSpi} * @return replica from the replicas map */ @Deprecated @@ -394,7 +395,7 @@ Replica recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen * Finalizes the block previously opened for writing using writeToBlock. * The block size is what is in the parameter b and it must match the amount * of data written - * @param block Block to be finalized + * @param b Block to be finalized * @param fsyncDir whether to sync the directory changes to durable device. * @throws IOException * @throws ReplicaNotFoundException if the replica can not be found when the @@ -488,14 +489,13 @@ void checkBlock(ExtendedBlock b, long minLength, ReplicaState state) /** * Determine if the specified block is cached. * @param bpid Block pool id - * @param blockIds - block id + * @param blockId - block id * @return true if the block is cached */ boolean isCached(String bpid, long blockId); /** * Check if all the data directories are healthy - * @return A set of unhealthy data directories. * @param failedVolumes */ void handleVolumeFailures(Set failedVolumes); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java index 7329ba38e4..be978d75e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java @@ -98,17 +98,17 @@ public interface FsVolumeSpi /** * BlockIterator will return ExtendedBlock entries from a block pool in - * this volume. The entries will be returned in sorted order.

+ * this volume. The entries will be returned in sorted order.

* * BlockIterator objects themselves do not always have internal * synchronization, so they can only safely be used by a single thread at a - * time.

+ * time.

* * Closing the iterator does not save it. You must call save to save it. */ interface BlockIterator extends Closeable { /** - * Get the next block.

+ * Get the next block.

* * Note that this block may be removed in between the time we list it, * and the time the caller tries to use it, or it may represent a stale @@ -146,7 +146,7 @@ interface BlockIterator extends Closeable { void save() throws IOException; /** - * Set the maximum staleness of entries that we will return.

+ * Set the maximum staleness of entries that we will return.

* * A maximum staleness of 0 means we will never return stale entries; a * larger value will allow us to reduce resource consumption in exchange @@ -211,12 +211,12 @@ interface BlockIterator extends Closeable { * Because millions of these structures may be created, we try to save * memory here. So instead of storing full paths, we store path suffixes. * The block file, if it exists, will have a path like this: - * / + * {@literal /} * So we don't need to store the volume path, since we already know what the * volume is. * * The metadata file, if it exists, will have a path like this: - * /_.meta + * {@literal /_.meta} * So if we have a block file, there isn't any need to store the block path * again. * @@ -439,6 +439,7 @@ byte[] loadLastPartialChunkChecksum(File blockFile, File metaFile) * @param bpid block pool id to scan * @param report the list onto which blocks reports are placed * @param reportCompiler + * @throws InterruptedException * @throws IOException */ void compileReport(String bpid, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java index 771a17bb5d..401fc8bd71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java @@ -94,7 +94,8 @@ public OutlierDetector(long minNumResources, long lowThresholdMs) { /** * Return a set of nodes/ disks whose latency is much higher than - * their counterparts. The input is a map of (resource -> aggregate latency) + * their counterparts. The input is a map of (resource {@literal ->} aggregate + * latency) * entries. * * The aggregate may be an arithmetic mean or a percentile e.g. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java index 642cf2186d..7824f95114 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java @@ -60,7 +60,6 @@ public DiskBalancerException(String message, Result result) { /** * Constructs an {@code IOException} with the specified detail message and * cause. - *

 * <p>
Note that the detail message associated with {@code cause} is * not * automatically incorporated into this exception's detail message. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java index 8de19aa28a..1307983c80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java @@ -50,21 +50,20 @@ /** * DiskBalancerCluster represents the nodes that we are working against. - *

+ * <p>
* Please Note : - *

* Semantics of inclusionList and exclusionLists. - *

+ * <p>
* If a non-empty inclusionList is specified then the diskBalancer assumes that * the user is only interested in processing that list of nodes. This node list * is checked against the exclusionList and only the nodes in inclusionList but * not in exclusionList is processed. - *

+ * <p>
* if inclusionList is empty, then we assume that all live nodes in the nodes is * to be processed by diskBalancer. In that case diskBalancer will avoid any * nodes specified in the exclusionList but will process all nodes in the * cluster. - *

+ * <p>
* In other words, an empty inclusionList is means all the nodes otherwise * only a given list is processed and ExclusionList is always honored. */ @@ -291,7 +290,7 @@ public void createSnapshot(String snapShotName) throws IOException { /** * Compute plan takes a node and constructs a planner that creates a plan that * we would like to follow. - *

+ * <p>
* This function creates a thread pool and executes a planner on each node * that we are supposed to plan for. Each of these planners return a NodePlan * that we can persist or schedule for execution with a diskBalancer diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java index a200f4dbe7..6cf244be94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java @@ -186,7 +186,7 @@ public int hashCode() { } /** - * returns NodeDataDensity Metric. + * Returns NodeDataDensity Metric. * * @return float */ @@ -195,8 +195,8 @@ public double getNodeDataDensity() { } /** - * computes nodes data density. - *

+ * Computes nodes data density. + * * This metric allows us to compare different nodes and how well the data is * spread across a set of volumes inside the node. */ @@ -231,8 +231,8 @@ public boolean isBalancingNeeded(double threshold) { /** * Adds a volume to the DataNode. - *

- * it is assumed that we have one thread per node hence this call is not + * + * It is assumed that we have one thread per node hence this call is not * synchronised neither is the map is protected. * * @param volume - volume diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java index fb83eebb30..568c1e62dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java @@ -34,7 +34,7 @@ /** * Greedy Planner is a simple planner that computes the largest possible move at * any point of time given a volumeSet. - *

+ * <p>
* This is done by choosing the disks with largest amount of data above and * below the idealStorage and then a move is scheduled between them. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java index 10ecc23833..fa268c72a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java @@ -156,7 +156,7 @@ public static boolean copyINodeDefaultAcl(INode child) { * * @param inode INode to read * @param snapshotId int ID of snapshot to read - * @return List containing extended inode ACL entries + * @return {@literal List} containing extended inode ACL entries */ public static List readINodeAcl(INode inode, int snapshotId) { AclFeature f = inode.getAclFeature(snapshotId); @@ -167,7 +167,7 @@ public static List readINodeAcl(INode inode, int snapshotId) { * Reads the existing extended ACL entries of an INodeAttribute object. * * @param inodeAttr INode to read - * @return List containing extended inode ACL entries + * @return {@code List} containing extended inode ACL entries */ public static List readINodeAcl(INodeAttributes inodeAttr) { AclFeature f = inodeAttr.getAclFeature(); @@ -175,7 +175,7 @@ public static List readINodeAcl(INodeAttributes inodeAttr) { } /** - * Build list of AclEntries from the AclFeature + * Build list of AclEntries from the {@link AclFeature} * @param aclFeature AclFeature * @return List of entries */ @@ -204,7 +204,7 @@ static ImmutableList getEntriesFromAclFeature(AclFeature aclFeature) { * ACL modification APIs, which always apply a delta on top of current state. * * @param inode INode to read - * @return List containing all logical inode ACL entries + * @return {@code List} containing all logical inode ACL entries */ public static List readINodeLogicalAcl(INode inode) { FsPermission perm = inode.getFsPermission(); @@ -262,7 +262,7 @@ public static List readINodeLogicalAcl(INode inode) { * {@link AclFeature}. * * @param inode INode to update - * @param newAcl List containing new ACL entries + * @param newAcl {@code List} containing new ACL entries * @param snapshotId int latest snapshot ID of inode * @throws AclException if the ACL is invalid for the given inode * @throws QuotaExceededException if quota limit is exceeded @@ -312,8 +312,8 @@ private AclStorage() { /** * Creates an AclFeature from the given ACL entries. * - * @param accessEntries List access ACL entries - * @param defaultEntries List default ACL entries + * @param accessEntries {@code List} access ACL entries + * @param defaultEntries {@code List} default ACL entries * @return AclFeature containing the required ACL entries */ private static AclFeature createAclFeature(List accessEntries, @@ -347,7 +347,7 @@ private static AclFeature createAclFeature(List accessEntries, * POSIX ACLs model, which presents the mask as the permissions of the group * class. * - * @param accessEntries List access ACL entries + * @param accessEntries {@code List} access ACL entries * @param existingPerm FsPermission existing permissions * @return FsPermission new permissions */ @@ -365,7 +365,7 @@ private static FsPermission createFsPermissionForExtendedAcl( * group and other permissions are in order. Also preserve sticky bit and * toggle ACL bit off. 
* - * @param accessEntries List access ACL entries + * @param accessEntries {@code List} access ACL entries * @param existingPerm FsPermission existing permissions * @return FsPermission new permissions */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index 5604a218d0..8fa9578725 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -67,7 +67,7 @@ /** * Manages the list of encryption zones in the filesystem. - *

+ * <p>
* The EncryptionZoneManager has its own lock, but relies on the FSDirectory * lock being held for many operations. The FSDirectory lock should not be * taken if the manager lock is already held. @@ -294,7 +294,7 @@ void stopReencryptThread() { /** * Add a new encryption zone. - *

+ * <p>
* Called while holding the FSDirectory lock. * * @param inodeId of the encryption zone @@ -308,7 +308,7 @@ void addEncryptionZone(Long inodeId, CipherSuite suite, /** * Add a new encryption zone. - *

+ * <p>
* Does not assume that the FSDirectory lock is held. * * @param inodeId of the encryption zone @@ -326,7 +326,7 @@ void unprotectedAddEncryptionZone(Long inodeId, /** * Remove an encryption zone. - *

+ * <p>
* Called while holding the FSDirectory lock. */ void removeEncryptionZone(Long inodeId) { @@ -344,7 +344,7 @@ void removeEncryptionZone(Long inodeId) { /** * Returns true if an IIP is within an encryption zone. - *

+ * <p>
* Called while holding the FSDirectory lock. */ boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException, @@ -355,7 +355,7 @@ boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException, /** * Returns the full path from an INode id. - *

+ * <p>
* Called while holding the FSDirectory lock. */ String getFullPathName(Long nodeId) { @@ -370,7 +370,7 @@ String getFullPathName(Long nodeId) { /** * Get the key name for an encryption zone. Returns null if iip is * not within an encryption zone. - *

+ * <p>
* Called while holding the FSDirectory lock. */ String getKeyName(final INodesInPath iip) throws IOException { @@ -385,7 +385,7 @@ String getKeyName(final INodesInPath iip) throws IOException { /** * Looks up the EncryptionZoneInt for a path within an encryption zone. * Returns null if path is not within an EZ. - *

+ * <p>
* Called while holding the FSDirectory lock. */ private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) @@ -434,7 +434,7 @@ private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) * Looks up the nearest ancestor EncryptionZoneInt that contains the given * path (excluding itself). * Returns null if path is not within an EZ, or the path is the root dir '/' - *

+ * <p>
* Called while holding the FSDirectory lock. */ private EncryptionZoneInt getParentEncryptionZoneForPath(INodesInPath iip) @@ -467,7 +467,7 @@ EncryptionZone getEZINodeForPath(INodesInPath iip) /** * Throws an exception if the provided path cannot be renamed into the * destination because of differing parent encryption zones. - *

+ * <p>
* Called while holding the FSDirectory lock. * * @param srcIIP source IIP @@ -529,7 +529,7 @@ private void checkMoveValidityForReencryption(final String pathName, /** * Create a new encryption zone. - *

+ * <p>
* Called while holding the FSDirectory lock. */ XAttr createEncryptionZone(INodesInPath srcIIP, CipherSuite suite, @@ -573,7 +573,7 @@ XAttr createEncryptionZone(INodesInPath srcIIP, CipherSuite suite, /** * Cursor-based listing of encryption zones. - *

+ * <p>
* Called while holding the FSDirectory lock. */ BatchedListEntries listEncryptionZones(long prevId) @@ -621,6 +621,8 @@ BatchedListEntries listEncryptionZones(long prevId) * @param zoneId * @param zonePath * @return true if path resolve to the id, false if not. + * @throws AccessControlException + * @throws ParentNotDirectoryException * @throws UnresolvedLinkException */ private boolean pathResolvesToId(final long zoneId, final String zonePath) @@ -645,6 +647,9 @@ private boolean pathResolvesToId(final long zoneId, final String zonePath) /** * Re-encrypts the given encryption zone path. If the given path is not the * root of an encryption zone, an exception is thrown. + * @param zoneIIP + * @param keyVersionName + * @throws IOException */ List reencryptEncryptionZone(final INodesInPath zoneIIP, final String keyVersionName) throws IOException { @@ -673,7 +678,9 @@ List reencryptEncryptionZone(final INodesInPath zoneIIP, /** * Cancels the currently-running re-encryption of the given encryption zone. * If the given path is not the root of an encryption zone, - * * an exception is thrown. + * an exception is thrown. + * @param zoneIIP + * @throws IOException */ List cancelReencryptEncryptionZone(final INodesInPath zoneIIP) throws IOException { @@ -693,8 +700,10 @@ List cancelReencryptEncryptionZone(final INodesInPath zoneIIP) /** * Cursor-based listing of zone re-encryption status. - *

+ * <p>
* Called while holding the FSDirectory lock. + * @param prevId + * @throws IOException */ BatchedListEntries listReencryptionStatus( final long prevId) throws IOException { @@ -735,6 +744,10 @@ BatchedListEntries listReencryptionStatus( /** * Return whether an INode is an encryption zone root. + * @param inode + * @param name + * @return true when INode is an encryption zone root else false + * @throws FileNotFoundException */ boolean isEncryptionZoneRoot(final INode inode, final String name) throws FileNotFoundException { @@ -756,6 +769,7 @@ boolean isEncryptionZoneRoot(final INode inode, final String name) * Return whether an INode is an encryption zone root. * * @param inode the zone inode + * @param name * @throws IOException if the inode is not a directory, * or is a directory but not the root of an EZ. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 0140912043..712a327696 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -634,12 +634,10 @@ void disableQuotaChecks() { * no permission checks. * @param src The path to resolve. * @param dirOp The {@link DirOp} that controls additional checks. - * @param resolveLink If false, only ancestor symlinks will be checked. If - * true, the last inode will also be checked. * @return if the path indicates an inode, return path after replacing up to - * with the corresponding path of the inode, else the path - * in {@code src} as is. If the path refers to a path in the "raw" - * directory, return the non-raw pathname. + * {@code } with the corresponding path of the inode, else + * the path in {@code src} as is. If the path refers to a path in + * the "raw" directory, return the non-raw pathname. 
* @throws FileNotFoundException * @throws AccessControlException * @throws ParentNotDirectoryException diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index eda1164f5d..d1904fa910 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -341,10 +341,11 @@ * * This class and its contents keep: * - * 1) Valid fsname --> blocklist (kept on disk, logged) + * 1) Valid fsname {@literal -->} blocklist (kept on disk, logged) * 2) Set of all valid blocks (inverted #1) - * 3) block --> machinelist (kept in memory, rebuilt dynamically from reports) - * 4) machine --> blocklist (inverted #2) + * 3) block {@literal -->} machinelist (kept in memory, rebuilt dynamically + * from reports) + * 4) machine {@literal -->} blocklist (inverted #2) * 5) LRU cache of updated-heartbeat machines */ @InterfaceAudience.Private @@ -1732,11 +1733,12 @@ public boolean isInStandbyState() { } /** - * return a list of blocks & their locations on datanode whose - * total size is size + * return a list of blocks & their locations on {@code datanode} whose + * total size is {@code size} * * @param datanode on which blocks are located * @param size total size of blocks + * @param minimumBlockSize */ public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long minimumBlockSize) throws IOException { @@ -1753,6 +1755,7 @@ public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long /** * Dump all metadata into specified file + * @param filename */ void metaSave(String filename) throws IOException { String operationName = "metaSave"; @@ -1884,6 +1887,8 @@ public FsServerDefaults getServerDefaults() throws StandbyException { ///////////////////////////////////////////////////////// /** * Set permissions for an existing file. + * @param src + * @param permission * @throws IOException */ void setPermission(String src, FsPermission permission) throws IOException { @@ -1908,6 +1913,9 @@ void setPermission(String src, FsPermission permission) throws IOException { /** * Set owner for an existing file. + * @param src + * @param group + * @param username * @throws IOException */ void setOwner(String src, String username, String group) @@ -2188,6 +2196,7 @@ void createSymlink(String target, String link, * @param replication new replication * @return true if successful; * false if file does not exist or is a directory + * @throws IOException */ boolean setReplication(final String src, final short replication) throws IOException { @@ -2219,6 +2228,7 @@ boolean setReplication(final String src, final short replication) * * @param src file/directory path * @param policyName storage policy name + * @throws IOException */ void setStoragePolicy(String src, String policyName) throws IOException { final String operationName = "setStoragePolicy"; @@ -2245,6 +2255,7 @@ void setStoragePolicy(String src, String policyName) throws IOException { * Satisfy the storage policy for a file or a directory. * * @param src file/directory path + * @throws IOException */ void satisfyStoragePolicy(String src, boolean logRetryCache) throws IOException { @@ -2295,6 +2306,7 @@ private void validateStoragePolicySatisfy() * unset storage policy set for a given file or a directory. 
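The FSNamesystem methods whose javadoc is touched in this hunk back the client-side permission, ownership and storage-policy calls. A rough usage sketch (not part of the patch; the path, owner and group are invented, and it assumes the default filesystem is HDFS):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class StoragePolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path dir = new Path("/warehouse/cold");                // made-up path
        try (DistributedFileSystem dfs =
            (DistributedFileSystem) dir.getFileSystem(conf)) { // assumes default FS is HDFS
          dfs.setPermission(dir, new FsPermission((short) 0750));
          dfs.setOwner(dir, "etl", "analytics");   // -> FSNamesystem.setOwner
          dfs.setReplication(dir, (short) 2);      // returns false for a directory, per the javadoc
          dfs.setStoragePolicy(dir, "COLD");       // -> FSNamesystem.setStoragePolicy
          dfs.satisfyStoragePolicy(dir);           // ask the SPS to move already-written blocks
          System.out.println(dfs.getStoragePolicy(dir));
          dfs.unsetStoragePolicy(dir);             // fall back to the inherited policy
        }
      }
    }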
* * @param src file/directory path + * @throws IOException */ void unsetStoragePolicy(String src) throws IOException { final String operationName = "unsetStoragePolicy"; @@ -2321,6 +2333,7 @@ void unsetStoragePolicy(String src) throws IOException { * @param src * file/directory path * @return storage policy object + * @throws IOException */ BlockStoragePolicy getStoragePolicy(String src) throws IOException { checkOperation(OperationCategory.READ); @@ -2336,6 +2349,7 @@ BlockStoragePolicy getStoragePolicy(String src) throws IOException { /** * @return All the existing block storage policies + * @throws IOException */ BlockStoragePolicy[] getStoragePolicies() throws IOException { checkOperation(OperationCategory.READ); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 2123f4ea99..03b1ca3e3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -523,8 +523,8 @@ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) { * 2. For a {@link WithName} node, since the node must be in a snapshot, we * only count the quota usage for those nodes that still existed at the * creation time of the snapshot associated with the {@link WithName} node. - * We do not count in the size of the diff list. - *

+   * We do not count in the size of the diff list.
+   * 
* * @param bsps Block storage policy suite to calculate intended storage type usage * @param blockStoragePolicyId block storage policy id of the current INode diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index e4e14f7303..8655bb332b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -669,10 +669,10 @@ public void cleanSubtree(ReclaimContext reclaimContext, int snapshot, /** * {@inheritDoc} - *
+ *
* To destroy a DstReference node, we first remove its link with the - * referred node. If the reference number of the referred node is <= 0, we - * destroy the subtree of the referred node. Otherwise, we clean the + * referred node. If the reference number of the referred node is <= 0, + * we destroy the subtree of the referred node. Otherwise, we clean the * referred node's subtree and delete everything created after the last * rename operation, i.e., everything outside of the scope of the prior * WithName nodes. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index 04fb50e351..f072220677 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -329,8 +329,8 @@ public int getPathSnapshotId() { } /** - * @return the i-th inode if i >= 0; - * otherwise, i < 0, return the (length + i)-th inode. + * @return the i-th inode if i {@literal >=} 0; + * otherwise, i {@literal <} 0, return the (length + i)-th inode. */ public INode getINode(int i) { return inodes[(i < 0) ? inodes.length + i : i]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java index d6d20945bc..7331676c97 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java @@ -112,7 +112,7 @@ boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, void doRollback() throws IOException; /** - * Discard the segments whose first txid is >= the given txid. + * Discard the segments whose first txid is {@literal >=} the given txid. * @param startTxId The given txid should be right at the segment boundary, * i.e., it should be the first txid of some segment, if segment corresponding * to the txid exists. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index 31fb2bb284..75db8def74 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -208,7 +208,7 @@ private synchronized INode[] getINodesWithLease() { * read or write lock. 
* * @param ancestorDir the ancestor {@link INodeDirectory} - * @return Set + * @return {@code Set} */ public Set getINodeWithLeases(final INodeDirectory ancestorDir) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java index dad5779b68..ead56a9f61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java @@ -52,7 +52,7 @@ public MetaRecoveryContext(int force) { * Display a prompt to the user and get his or her choice. * * @param prompt The prompt to display - * @param default First choice (will be taken if autoChooseDefault is + * @param firstChoice First choice (will be taken if autoChooseDefault is * true) * @param choices Other choies * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 9f82cbd3de..b91e7ba6bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -176,8 +176,8 @@ * is a second backup/failover NameNode, or when using federated NameNodes.) * * The NameNode controls two critical tables: - * 1) filename->blocksequence (namespace) - * 2) block->machinelist ("inodes") + * 1) filename{@literal ->}blocksequence (namespace) + * 2) block{@literal ->}machinelist ("inodes") * * The first table is stored on disk and is very precious. * The second table is rebuilt every time the NameNode comes up. @@ -1111,7 +1111,7 @@ public InetSocketAddress getHttpsAddress() { } /** - * @return NameNodeHttpServer, used by unit tests to ensure a full shutdown, + * NameNodeHttpServer, used by unit tests to ensure a full shutdown, * so that no bind exception is thrown during restart. */ @VisibleForTesting diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 5d664cbb33..56607f08a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -94,14 +94,13 @@ *

The tool scans all files and directories, starting from an indicated * root path. The following abnormal conditions are detected and handled:

*
    - *
  • files with blocks that are completely missing from all datanodes.
    + *
  • files with blocks that are completely missing from all datanodes.
    * In this case the tool can perform one of the following actions: *
      - *
    • none ({@link #FIXING_NONE})
    • *
    • move corrupted files to /lost+found directory on DFS - * ({@link #FIXING_MOVE}). Remaining data blocks are saved as a + * ({@link #doMove}). Remaining data blocks are saved as a * block chains, representing longest consecutive series of valid blocks.
    • - *
    • delete corrupted files ({@link #FIXING_DELETE})
    • + *
    • delete corrupted files ({@link #doDelete})
    • *
    *
  • *
  • detect files with under-replicated or over-replicated blocks
  • @@ -201,7 +200,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory { */ NamenodeFsck(Configuration conf, NameNode namenode, NetworkTopology networktopology, - Map pmap, PrintWriter out, + Map pmap, PrintWriter out, int totalDatanodes, InetAddress remoteAddress) { this.conf = conf; this.namenode = namenode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java index 5e708bea87..a195bf1815 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java @@ -47,7 +47,7 @@ public static Counts newInstance() { /** * Is quota violated? - * The quota is violated if quota is set and usage > quota. + * The quota is violated if quota is set and usage > quota. */ public static boolean isViolated(final long quota, final long usage) { return quota >= 0 && usage > quota; @@ -55,7 +55,8 @@ public static boolean isViolated(final long quota, final long usage) { /** * Is quota violated? - * The quota is violated if quota is set, delta > 0 and usage + delta > quota. + * The quota is violated if quota is set, delta > 0 and + * usage + delta > quota. */ static boolean isViolated(final long quota, final long usage, final long delta) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java index a8acccdd96..2e13df5fd3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java @@ -319,7 +319,7 @@ void addDummyTracker(final long zoneId, ZoneSubmissionTracker zst) { /** * Main loop. It takes at most 1 zone per scan, and executes until the zone * is completed. - * {@see #reencryptEncryptionZoneInt(Long)}. + * {@link #reencryptEncryptionZone(long)}. */ @Override public void run() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index e1bf02755c..b6f4f64ca5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -31,7 +31,7 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; /** - * There are four types of extended attributes defined by the + * There are four types of extended attributes <XAttr> defined by the * following namespaces: *
    * USER - extended user attributes: these can be assigned to files and @@ -56,7 +56,7 @@ * is called on a file or directory in the /.reserved/raw HDFS directory * hierarchy. These attributes can only be accessed by the user who have * read access. - *
    + *
    */ @InterfaceAudience.Private public class XAttrPermissionFilter { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java index 1dab69c4dd..d856f6d142 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java @@ -33,7 +33,7 @@ public class XAttrStorage { /** * Reads the extended attribute of an inode by name with prefix. - *

    + *

    * * @param inode INode to read * @param snapshotId the snapshotId of the requested path @@ -48,11 +48,11 @@ public static XAttr readINodeXAttrByPrefixedName(INode inode, int snapshotId, /** * Reads the existing extended attributes of an inode. - *

    + *

    * Must be called while holding the FSDirectory read lock. * * @param inodeAttr INodeAttributes to read. - * @return List XAttr list. + * @return {@code XAttr} list. */ public static List readINodeXAttrs(INodeAttributes inodeAttr) { XAttrFeature f = inodeAttr.getXAttrFeature(); @@ -61,7 +61,7 @@ public static List readINodeXAttrs(INodeAttributes inodeAttr) { /** * Update xattrs of inode. - *

    + *

    * Must be called while holding the FSDirectory write lock. * * @param inode INode to update diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java index d1156564c2..1ba59a2b5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java @@ -157,10 +157,10 @@ public final int getLastSnapshotId() { /** * Find the latest snapshot before a given snapshot. - * @param anchorId The returned snapshot's id must be <= or < this given - * snapshot id. - * @param exclusive True means the returned snapshot's id must be < the given - * id, otherwise <=. + * @param anchorId The returned snapshot's id must be <= or < this + * given snapshot id. + * @param exclusive True means the returned snapshot's id must be < the + * given id, otherwise <=. * @return The id of the latest snapshot before the given snapshot. */ public final int getPrior(int anchorId, boolean exclusive) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java index 85d9a6d654..705b8d6937 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java @@ -44,15 +44,15 @@ * and created after a particular snapshot and before the next snapshot. The * sequence will look like this: *

    - * s0->s1->s2->s3->s4->s5->s6->s7->s8->s9. + * {@literal s0->s1->s2->s3->s4->s5->s6->s7->s8->s9}. *

* Assuming a skip interval of 3, which means a new diff will be added at a * level higher than the current level after we have more than 3 snapshots. * Next level promotion happens after 9 snapshots and so on. *

    - * level 2: s08------------------------------->s9 - * level 1: S02------->s35-------->s68-------->s9 - * level 0: s0->s1->s2->s3->s4->s5->s6->s7->s8->s9 + * level 2: {@literal s08------------------------------->s9} + * level 1: {@literal S02------->s35-------->s68-------->s9} + * level 0: {@literal s0->s1->s2->s3->s4->s5->s6->s7->s8->s9} *

    * s02 will be created by combining diffs for s0, s1, s2 once s3 gets created. * Similarly, s08 will be created by combining s02, s35 and s68 once s9 gets @@ -143,6 +143,7 @@ final static class SkipListNode implements Comparable { * and level. * * @param diff The element to be stored in the node. + * @param level */ SkipListNode(DirectoryDiff diff, int level) { this.diff = diff; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java index 02b9cffe4f..b3f8de97b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java @@ -106,8 +106,8 @@ public synchronized void addAll(long startPath, List itemInfoList, * Add the itemInfo to tracking list for which storage movement expected if * necessary. * - * @param itemInfoList - * - List of child in the directory + * @param itemInfo + * - child in the directory * @param scanCompleted * -Indicates whether the ItemInfo start id directory has no more * elements to scan. @@ -191,7 +191,6 @@ public synchronized void removeItemTrackInfo(ItemInfo trackInfo, /** * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded * and notify to clean up required resources. - * @throws IOException */ public synchronized void clearQueuesWithNotification() { // Remove xAttr from directories diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java index d4e514b813..be8d01fbb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java @@ -69,7 +69,7 @@ public DatanodeCacheManager(Configuration conf) { /** * Returns the live datanodes and its storage details, which has available - * space (> 0) to schedule block moves. This will return array of datanodes + * space (> 0) to schedule block moves. This will return array of datanodes * from its local cache. It has a configurable refresh interval in millis and * periodically refresh the datanode cache by fetching latest * {@link Context#getLiveDatanodeStorageReport()} once it elapsed refresh diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java index 074eab64ba..14cf05d420 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java @@ -39,7 +39,7 @@ * configured by the administrator. * *

    - * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then + * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then * it won't do anything, just maintains the sps invoked path ids. Administrator * requires to start external sps service explicitly, to fetch the sps invoked * path ids from namenode, then do necessary computations and block movement in @@ -48,7 +48,7 @@ * external sps service functionality. * *

    - * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then it + * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then it * will disable the sps feature completely by clearing all queued up sps path's * hint. * @@ -88,12 +88,12 @@ public StoragePolicySatisfyManager(Configuration conf, * This function will do following logic based on the configured sps mode: * *

    - * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then + * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then * it won't do anything. Administrator requires to start external sps service * explicitly. * *

    - * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then the + * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the * service is disabled and won't do any action. */ public void start() { @@ -121,12 +121,12 @@ public void start() { * This function will do following logic based on the configured sps mode: * *

    - * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then + * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then * it won't do anything. Administrator requires to stop external sps service * explicitly, if needed. * *

    - * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then the + * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the * service is disabled and won't do any action. */ public void stop() { @@ -225,6 +225,7 @@ public Long getNextPathId() { /** * Verify that satisfier queue limit exceeds allowed outstanding limit. + * @throws IOException */ public void verifyOutstandingPathQLimit() throws IOException { long size = pathsToBeTraveresed.size(); @@ -269,6 +270,7 @@ public void removeAllPathIds() { /** * Adds the sps path to SPSPathIds list. + * @param id */ public void addPathId(long id) { synchronized (pathsToBeTraveresed) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java index 3101741d95..a4453a5049 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java @@ -179,7 +179,7 @@ public float getPercentComplete(Phase phase, Step step) { /** * Returns all phases. * - * @return Iterable containing all phases + * @return {@code Iterable} containing all phases */ public Iterable getPhases() { return EnumSet.allOf(Phase.class); @@ -189,7 +189,7 @@ public Iterable getPhases() { * Returns all steps within a phase. * * @param phase Phase to get - * @return Iterable all steps + * @return {@code Iterable} all steps */ public Iterable getSteps(Phase phase) { return new TreeSet(phases.get(phase).steps.keySet()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java index 4d61d0f95b..72ec9f2344 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java @@ -47,22 +47,22 @@ /** * The interface to the top metrics. - *

    + *

    * Metrics are collected by a custom audit logger, {@link org.apache.hadoop * .hdfs.server.namenode.top.TopAuditLogger}, which calls TopMetrics to * increment per-operation, per-user counts on every audit log call. These * counts are used to show the top users by NameNode operation as well as * across all operations. - *

    + *

    * TopMetrics maintains these counts for a configurable number of time * intervals, e.g. 1min, 5min, 25min. Each interval is tracked by a * RollingWindowManager. - *

    + *

    * These metrics are published as a JSON string via {@link org.apache.hadoop * .hdfs.server .namenode.metrics.FSNamesystemMBean#getTopWindows}. This is * done by calling {@link org.apache.hadoop.hdfs.server.namenode.top.window * .RollingWindowManager#snapshot} on each RollingWindowManager. - *

    + *

    * Thread-safe: relies on thread-safety of RollingWindowManager */ @InterfaceAudience.Private @@ -119,6 +119,13 @@ public List getTopWindows() { * log file. This is to be consistent when {@link TopMetrics} is charged with * data read back from log files instead of being invoked directly by the * FsNamesystem + * @param succeeded + * @param userName + * @param addr + * @param cmd + * @param src + * @param dst + * @param status */ public void report(boolean succeeded, String userName, InetAddress addr, String cmd, String src, String dst, FileStatus status) { @@ -147,6 +154,8 @@ public void report(long currTime, String userName, String cmd) { * {@link org.apache.hadoop.metrics2.MetricsRecord}s for consumption by * external metrics systems. Each metrics record added corresponds to the * reporting period a.k.a window length of the configured rolling windows. + * @param collector + * @param all */ @Override public void getMetrics(MetricsCollector collector, boolean all) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java index 63ff12585b..f927106c34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java @@ -29,23 +29,24 @@ * Events are reported based on occurrence time. The total number of events in * the last period covered by the rolling window can be retrieved by the * {@link #getSum(long)} method. - *

    + *

    * * Assumptions: - *

    + *

    * * (1) Concurrent invocation of {@link #incAt} method are possible - *

    + *

    * * (2) The time parameter of two consecutive invocation of {@link #incAt} could * be in any given order - *

    + *

    * * (3) The buffering delays are not more than the window length, i.e., after two * consecutive invocation {@link #incAt(long time1, long)} and - * {@link #incAt(long time2, long)}, time1 < time2 || time1 - time2 < windowLenMs. + * {@link #incAt(long time2, long)}, time1 < time2 || time1 - time2 < + * windowLenMs. * This assumption helps avoiding unnecessary synchronizations. - *

    + *

    * * Thread-safety is built in the {@link RollingWindow.Bucket} */ @@ -85,7 +86,7 @@ public class RollingWindow { /** * When an event occurs at the specified time, this method reflects that in * the rolling window. - *

    + *

    * * @param time the time at which the event occurred * @param delta the delta that will be added to the window @@ -153,6 +154,7 @@ void safeReset(long time) { * performed. We do not need to update the {@link #updateTime} because as * long as the {@link #updateTime} belongs to the current view of the * rolling window, the algorithm works fine. + * @param delta */ void inc(long delta) { value.addAndGet(delta); @@ -161,7 +163,7 @@ void inc(long delta) { /** * Get value represented by this window at the specified time - *

    + *

    * * If time lags behind the latest update time, the new updates are still * included in the sum diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java index bdd0ab01b9..095294e811 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java @@ -38,7 +38,7 @@ * A class to manage the set of {@link RollingWindow}s. This class is the * interface of metrics system to the {@link RollingWindow}s to retrieve the * current top metrics. - *

    + *

    * Thread-safety is provided by each {@link RollingWindow} being thread-safe as * well as {@link ConcurrentHashMap} for the collection of them. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java index e90317da1a..6b0b261731 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java @@ -30,11 +30,12 @@ * * Upon receiving this command, this DataNode pass the array of block movement * details to - * {@link org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker} - * service. Later, StoragePolicySatisfyWorker will schedule block movement tasks - * for these blocks and monitors the completion of each task. After the block - * movement attempt is finished(with success or failure) this DataNode will send - * response back to NameNode about the block movement attempt finished details. + * {@link org.apache.hadoop.hdfs.server.sps.ExternalSPSBlockMoveTaskHandler} + * service. Later, ExternalSPSBlockMoveTaskHandler will schedule block movement + * tasks for these blocks and monitors the completion of each task. After the + * block movement attempt is finished(with success or failure) this DataNode + * will send response back to NameNode about the block movement attempt + * finished details. */ public class BlockStorageMovementCommand extends DatanodeCommand { private final String blockPoolId; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java index 311b68f624..5680ef3162 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java @@ -158,7 +158,7 @@ public DatanodeCommand blockReport(DatanodeRegistration registration, * {@link #blockReport(DatanodeRegistration, String, StorageBlockReport[], BlockReportContext)}, * which is used to communicated blocks stored on disk. * - * @param The datanode registration. + * @param registration The datanode registration. * @param poolId The block pool ID for the blocks. * @param blockIds A list of block IDs. * @return The DatanodeCommand. 
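Since the RollingWindow and RollingWindowManager hunks above lean on the bucketed-window idea, here is a deliberately simplified sketch of that idea. It is not the actual Hadoop class: the real RollingWindow uses per-bucket AtomicLongs and avoids a global lock, while this toy version just synchronizes.

    /**
     * Toy rolling window: numBuckets buckets covering windowLenMs in total.
     * incAt() records events by occurrence time; getSum() returns the total
     * for the window ending at the given time, as the javadoc above describes.
     */
    class RollingWindowSketch {
      private final int numBuckets;
      private final long bucketLenMs;
      private final long[] bucketStart;  // start time each bucket currently covers
      private final long[] counts;

      RollingWindowSketch(long windowLenMs, int numBuckets) {
        this.numBuckets = numBuckets;
        this.bucketLenMs = windowLenMs / numBuckets;
        this.bucketStart = new long[numBuckets];
        this.counts = new long[numBuckets];
      }

      /** Record delta events that occurred at the given time. */
      synchronized void incAt(long time, long delta) {
        long start = (time / bucketLenMs) * bucketLenMs;
        int i = (int) ((time / bucketLenMs) % numBuckets);
        if (bucketStart[i] != start) {   // bucket is from an older cycle: reuse it
          bucketStart[i] = start;
          counts[i] = 0;
        }
        counts[i] += delta;
      }

      /** Total number of events in the window ending at the given time. */
      synchronized long getSum(long time) {
        long sum = 0;
        for (int i = 0; i < numBuckets; i++) {
          if (time - bucketStart[i] < bucketLenMs * numBuckets) {
            sum += counts[i];
          }
        }
        return sum;
      }
    }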
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java index f80477b7f5..5d609de9a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java @@ -73,7 +73,7 @@ public interface NamenodeProtocol { * @param datanode a data node * @param size requested size * @param minBlockSize each block should be of this minimum Block Size - * @return a list of blocks & their locations + * @return BlocksWithLocations a list of blocks & their locations * @throws IOException if size is less than or equal to 0 or datanode does not exist */ @@ -183,7 +183,8 @@ public void endCheckpoint(NamenodeRegistration registration, /** * Return a structure containing details about all edit logs * available to be fetched from the NameNode. - * @param sinceTxId return only logs that contain transactions >= sinceTxId + * @param sinceTxId return only logs that contain transactions {@literal >=} + * sinceTxId */ @Idempotent public RemoteEditLogManifest getEditLogManifest(long sinceTxId) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java index 3ea0294710..64dec8bbc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java @@ -36,8 +36,10 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java index 31714972c9..2afc97cb7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java @@ -50,14 +50,17 @@ *

    The tool scans all files and directories, starting from an indicated * root path. The following abnormal conditions are detected and handled:

    *
      - *
    • files with blocks that are completely missing from all datanodes.
      + *
    • files with blocks that are completely missing from all datanodes.
      * In this case the tool can perform one of the following actions: *
        - *
      • none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})
      • *
      • move corrupted files to /lost+found directory on DFS - * ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as a - * block chains, representing longest consecutive series of valid blocks.
      • - *
      • delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})
      • + * ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#doMove}). + * Remaining data blocks are saved as a + * block chains, representing longest consecutive series of valid blocks. + * + *
      • delete corrupted files + * ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#doDelete}) + *
      • *
      *
    • *
    • detect files with under-replicated or over-replicated blocks
    • diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java index f075ed29ec..43eedf8d58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java @@ -121,8 +121,8 @@ public static Options buildOptions() { /** Process an edit log using the chosen processor or visitor. * - * @param inputFilename The file to process - * @param outputFilename The output file name + * @param inputFileName The file to process + * @param outputFileName The output file name * @param processor If visitor is null, the processor to use * @param visitor If non-null, the visitor to use. * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java index eb477e16e6..1383f4e12b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java @@ -52,7 +52,7 @@ abstract public interface OfflineEditsVisitor { * Begin visiting an element that encloses another element, such as * the beginning of the list of blocks that comprise a file. * - * @param value Token being visited + * @param op Token being visited */ abstract void visitOp(FSEditLogOp op) throws IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java index c84e2edbc5..cc97ea71d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java @@ -50,9 +50,7 @@ public class StatisticsEditsVisitor implements OfflineEditsVisitor { * Create a processor that writes to the file named and may or may not * also output to the screen, as specified. * - * @param filename Name of file to write output to - * @param tokenizer Input tokenizer - * @param printToScreen Mirror output to screen? + * @param out Name of file to write output to */ public StatisticsEditsVisitor(OutputStream out) throws IOException { this.out = new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java index 28bcf103ee..9c7b7f0ac0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java @@ -26,11 +26,13 @@ /** * File name distribution visitor. *

      - * It analyzes file names in fsimage and prints the following information: + * It analyzes file names in fsimage and prints the following information: + *

        *
      • Number of unique file names
      • *
      • Number file names and the corresponding number range of files that use * these same names
      • *
      • Heap saved if the file name objects are reused
      • + *
      */ @InterfaceAudience.Private public class NameDistributionVisitor extends TextWriterImageVisitor { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java index 1f87a7a9b2..188537b756 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java @@ -470,23 +470,23 @@ public List apply2Current(final List current) { *
          * 1. For (c, 0) in the posterior diff, check the element in this diff:
          * 1.1 (c', 0)  in this diff: impossible
      -   * 1.2 (0, d')  in this diff: put in c-list --> (c, d')
      +   * 1.2 (0, d')  in this diff: put in c-list --> (c, d')
          * 1.3 (c', d') in this diff: impossible
      -   * 1.4 (0, 0)   in this diff: put in c-list --> (c, 0)
      +   * 1.4 (0, 0)   in this diff: put in c-list --> (c, 0)
          * This is the same logic as create(E).
          * 
          * 2. For (0, d) in the posterior diff,
      -   * 2.1 (c', 0)  in this diff: remove from c-list --> (0, 0)
      +   * 2.1 (c', 0)  in this diff: remove from c-list --> (0, 0)
          * 2.2 (0, d')  in this diff: impossible
      -   * 2.3 (c', d') in this diff: remove from c-list --> (0, d')
      -   * 2.4 (0, 0)   in this diff: put in d-list --> (0, d)
      +   * 2.3 (c', d') in this diff: remove from c-list --> (0, d')
      +   * 2.4 (0, 0)   in this diff: put in d-list --> (0, d)
          * This is the same logic as delete(E).
          * 
          * 3. For (c, d) in the posterior diff,
      -   * 3.1 (c', 0)  in this diff: replace the element in c-list --> (c, 0)
      +   * 3.1 (c', 0)  in this diff: replace the element in c-list --> (c, 0)
          * 3.2 (0, d')  in this diff: impossible
      -   * 3.3 (c', d') in this diff: replace the element in c-list --> (c, d')
      -   * 3.4 (0, 0)   in this diff: put in c-list and d-list --> (c, d)
      +   * 3.3 (c', d') in this diff: replace the element in c-list --> (c, d')
      +   * 3.4 (0, 0)   in this diff: put in c-list and d-list --> (c, d)
          * This is the same logic as modify(E, E).
          * 
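The case analysis above (1.x, 2.x, 3.x) boils down to plain created-list/deleted-list bookkeeping. A standalone sketch of that bookkeeping, not the real org.apache.hadoop.hdfs.util.Diff, and assuming element equality identifies "the same" element:

    import java.util.ArrayList;
    import java.util.List;

    /** Toy created-list (c-list) / deleted-list (d-list) diff, mirroring the cases above. */
    class ListDiffSketch<E> {
      private final List<E> created = new ArrayList<>();  // c-list
      private final List<E> deleted = new ArrayList<>();  // d-list

      /** Case 1: the posterior diff creates e.  (0,0)->(c,0), (0,d')->(c,d'). */
      void create(E e) {
        created.add(e);
      }

      /** Case 2: the posterior diff deletes e.  (c',0)->(0,0), (c',d')->(0,d'), (0,0)->(0,d). */
      void delete(E e) {
        if (!created.remove(e)) {
          deleted.add(e);
        }
      }

      /** Case 3: the posterior diff modifies oldE into newE. */
      void modify(E oldE, E newE) {
        if (!created.remove(oldE)) {
          deleted.add(oldE);   // (0,0) -> (c,d)
        }
        created.add(newE);     // (c',0) -> (c,0) and (c',d') -> (c,d')
      }
    }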
      * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java index f23b02185a..8c783d873e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java @@ -116,8 +116,8 @@ private static String codePointToEntityRef(int cp) { * * There are three kinds of code points in XML: * - Those that can be represented normally, - * - Those that have to be escaped (for example, & must be represented - * as &) + * - Those that have to be escaped (for example, & must be represented + * as {@literal &}) * - Those that cannot be represented at all in XML. * * The built-in SAX functions will handle the first two types for us just