diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 062b2e2914..25da013fd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -517,6 +517,9 @@ Release 2.8.0 - UNRELEASED
    HDFS-8142. DistributedFileSystem encryption zone commands should resolve
    relative paths. (Rakesh R via wang)

+    HDFS-7863. Missing description of some methods and parameters in javadoc of
+    FSDirDeleteOp. (Brahma Reddy Battula via ozawa)
+
Release 2.7.1 - UNRELEASED

  INCOMPATIBLE CHANGES

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 2fc4711ea1..02eb1de9ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -33,6 +33,7 @@ class FSDirDeleteOp {
/**
* Delete the target directory and collect the blocks under it
*
+ * @param fsd the FSDirectory instance
* @param iip the INodesInPath instance containing all the INodes for the path
* @param collectedBlocks Blocks under the deleted directory
* @param removedINodes INodes that should be removed from inodeMap
@@ -71,6 +72,13 @@ static long delete(
*
* For small directory or file the deletion is done in one shot.
*
+ * @param fsn namespace
+ * @param src path name to be deleted
+ * @param recursive boolean true to apply to all sub-directories recursively
+ * @param logRetryCache whether to record RPC ids in editlog for retry cache
+ * rebuilding
+ * @return blocks collected from the deleted path
+ * @throws IOException
*/
static BlocksMapUpdateInfo delete(
FSNamesystem fsn, String src, boolean recursive, boolean logRetryCache)
@@ -99,6 +107,8 @@ static BlocksMapUpdateInfo delete(
* Note: This is to be used by
* {@link org.apache.hadoop.hdfs.server.namenode.FSEditLog} only.
*
+ *
+ * @param fsd the FSDirectory instance
* @param src a string representation of a path to an inode
* @param mtime the time the inode is removed
*/
@@ -134,6 +144,13 @@ static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
* the {@link org.apache.hadoop.hdfs.server.namenode.FSNamesystem} lock.
*
* For small directory or file the deletion is done in one shot.
+ * @param fsn namespace
+ * @param src path name to be deleted
+ * @param iip the INodesInPath instance containing all the INodes for the path
+ * @param logRetryCache whether to record RPC ids in editlog for retry cache
+ * rebuilding
+ * @return blocks collected from the deleted path
+ * @throws IOException
*/
static BlocksMapUpdateInfo deleteInternal(
FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
@@ -192,6 +209,7 @@ private static boolean deleteAllowed(final INodesInPath iip,
/**
* Delete a path from the name space
* Update the count at each ancestor directory with quota
+ * @param fsd the FSDirectory instance
* @param iip the inodes resolved from the path
* @param collectedBlocks blocks collected from the deleted path
* @param removedINodes inodes that should be removed from inodeMap
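
For context, a caller in FSNamesystem drives the delete() entry point documented above roughly as in the sketch below. Only FSDirDeleteOp.delete() and its documented parameters come from this patch; the surrounding helper names (writeLock/writeUnlock, getEditLog().logSync(), removeBlocks) are assumptions about the caller and are shown purely for illustration.

    // Illustrative sketch only -- not part of this patch. Assumes it runs inside
    // FSNamesystem, which owns the namespace write lock and the edit log.
    BlocksMapUpdateInfo toRemovedBlocks = null;
    writeLock();
    try {
      // 'recursive' must be true to delete a non-empty directory;
      // 'logRetryCache' records the RPC id in the editlog for retry cache rebuilding.
      toRemovedBlocks = FSDirDeleteOp.delete(this, src, recursive, logRetryCache);
    } finally {
      writeUnlock();
    }
    getEditLog().logSync();          // persist the delete edit before acknowledging
    if (toRemovedBlocks != null) {
      removeBlocks(toRemovedBlocks); // incrementally release the collected blocks
    }

In this sketch the collected blocks are released only after the write lock is dropped, which matches the javadoc's wording of "blocks collected from the deleted path" rather than blocks already removed.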