HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in INode.collectSubtreeBlocksAndClear(..). Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406326 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-11-06 21:04:32 +00:00
parent 54b70db347
commit 1734215a10
7 changed files with 89 additions and 31 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -154,6 +154,9 @@ Trunk (Unreleased)
HDFS-4151. Change the methods in FSDirectory to pass INodesInPath instead
of INode[] as a parameter. (szetszwo)
HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in
INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo)
OPTIMIZATIONS
BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -57,6 +57,7 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.util.ByteArray;
@@ -757,7 +758,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
if (removedDst != null) {
INode rmdst = removedDst;
removedDst = null;
List<Block> collectedBlocks = new ArrayList<Block>();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
filesDeleted = rmdst.collectSubtreeBlocksAndClear(collectedBlocks);
getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
}
@@ -996,7 +997,7 @@ public void unprotectedConcat(String target, String [] srcs, long timestamp)
* @param collectedBlocks Blocks under the deleted directory
* @return true on successful deletion; else false
*/
boolean delete(String src, List<Block>collectedBlocks)
boolean delete(String src, BlocksMapUpdateInfo collectedBlocks)
throws UnresolvedLinkException {
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
@@ -1049,7 +1050,7 @@ boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException {
void unprotectedDelete(String src, long mtime)
throws UnresolvedLinkException {
assert hasWriteLock();
List<Block> collectedBlocks = new ArrayList<Block>();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
int filesRemoved = unprotectedDelete(src, collectedBlocks, mtime);
if (filesRemoved > 0) {
getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
@@ -1064,7 +1065,7 @@ void unprotectedDelete(String src, long mtime)
* @param mtime the time the inode is removed
* @return the number of inodes deleted; 0 if no inodes are deleted.
*/
int unprotectedDelete(String src, List<Block> collectedBlocks,
int unprotectedDelete(String src, BlocksMapUpdateInfo collectedBlocks,
long mtime) throws UnresolvedLinkException {
assert hasWriteLock();
src = normalizePath(src);
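
For context, the calling pattern that the FSDirectory changes above follow can be sketched as below. The DeleteFlowSketch class and its collectAndRemove driver are hypothetical illustrations, not part of this patch; INode.collectSubtreeBlocksAndClear and BlocksMapUpdateInfo are the APIs actually changed here. The sketch assumes it is compiled in the org.apache.hadoop.hdfs.server.namenode package so that the package-private INode members are visible.

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;

class DeleteFlowSketch {
  // Hypothetical driver: the caller allocates one BlocksMapUpdateInfo, lets the
  // subtree walk fill it, and then hands the same object on to the code that
  // updates the blocksMap (FSNamesystem.removePathAndBlocks in the real path).
  static int collectAndRemove(INode subtreeRoot) {
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    // Destructively walks the subtree, clearing references and collecting blocks.
    int filesDeleted = subtreeRoot.collectSubtreeBlocksAndClear(collectedBlocks);
    for (Block b : collectedBlocks.getToDeleteList()) {
      System.out.println("would remove " + b + " from the blocksMap");
    }
    collectedBlocks.clear();
    return filesDeleted;
  }
}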

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -17,20 +17,20 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
@@ -159,6 +159,7 @@
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
@@ -2667,7 +2668,7 @@ private boolean deleteInternal(String src, boolean recursive,
boolean enforcePermission)
throws AccessControlException, SafeModeException, UnresolvedLinkException,
IOException {
ArrayList<Block> collectedBlocks = new ArrayList<Block>();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
writeLock();
try {
@@ -2698,21 +2699,26 @@ private boolean deleteInternal(String src, boolean recursive,
return true;
}
/**
* From the given list, incrementally remove the blocks from blockManager
* Writelock is dropped and reacquired every BLOCK_DELETION_INCREMENT to
* ensure that other waiters on the lock can get in. See HDFS-2938
*
* @param blocks
* An instance of {@link BlocksMapUpdateInfo} which contains a list
* of blocks that need to be removed from blocksMap
*/
private void removeBlocks(List<Block> blocks) {
private void removeBlocks(BlocksMapUpdateInfo blocks) {
int start = 0;
int end = 0;
while (start < blocks.size()) {
List<Block> toDeleteList = blocks.getToDeleteList();
while (start < toDeleteList.size()) {
end = BLOCK_DELETION_INCREMENT + start;
end = end > blocks.size() ? blocks.size() : end;
end = end > toDeleteList.size() ? toDeleteList.size() : end;
writeLock();
try {
for (int i = start; i < end; i++) {
blockManager.removeBlock(blocks.get(i));
blockManager.removeBlock(toDeleteList.get(i));
}
} finally {
writeUnlock();
@@ -2721,7 +2727,12 @@ private void removeBlocks(List<Block> blocks) {
}
}
void removePathAndBlocks(String src, List<Block> blocks) {
/**
* Remove leases and blocks related to a given path
* @param src The given path
* @param blocks Containing the list of blocks to be deleted from blocksMap
*/
void removePathAndBlocks(String src, BlocksMapUpdateInfo blocks) {
assert hasWriteLock();
leaseManager.removeLeaseWithPrefixPath(src);
if (blocks == null) {
@@ -2734,7 +2745,7 @@ void removePathAndBlocks(String src, List<Block> blocks) {
boolean trackBlockCounts = isSafeModeTrackingBlocks();
int numRemovedComplete = 0, numRemovedSafe = 0;
for (Block b : blocks) {
for (Block b : blocks.getToDeleteList()) {
if (trackBlockCounts) {
BlockInfo bi = blockManager.getStoredBlock(b);
if (bi.isComplete()) {
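
The removeBlocks() javadoc above describes batched deletion; the following self-contained sketch shows the same pattern of walking a to-delete list in fixed-size slices and releasing the lock between slices so other waiters can make progress. All names here are hypothetical (BatchedRemover, BATCH_SIZE standing in for BLOCK_DELETION_INCREMENT, a println standing in for blockManager.removeBlock); the real code holds the FSNamesystem write lock.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class BatchedRemover {
  private static final int BATCH_SIZE = 2;  // stands in for BLOCK_DELETION_INCREMENT
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void removeAll(List<String> toDelete) {
    int start = 0;
    while (start < toDelete.size()) {
      int end = Math.min(start + BATCH_SIZE, toDelete.size());
      lock.writeLock().lock();
      try {
        for (int i = start; i < end; i++) {
          // stands in for blockManager.removeBlock(toDeleteList.get(i))
          System.out.println("removing " + toDelete.get(i));
        }
      } finally {
        // drop the lock between batches so other waiters can get in
        lock.writeLock().unlock();
      }
      start = end;
    }
  }

  public static void main(String[] args) {
    new BatchedRemover().removeAll(Arrays.asList("blk_1", "blk_2", "blk_3", "blk_4", "blk_5"));
  }
}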

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -188,11 +188,15 @@ public boolean isDirectory() {
}
/**
* Collect all the blocks in all children of this INode.
* Count and return the number of files in the sub tree.
* Also clears references since this INode is deleted.
* Collect all the blocks in all children of this INode. Count and return the
* number of files in the sub tree. Also clears references since this INode is
* deleted.
*
* @param info
* Containing all the blocks collected from the children of this
* INode. These blocks later should be removed from the blocksMap.
*/
abstract int collectSubtreeBlocksAndClear(List<Block> v);
abstract int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info);
/** Compute {@link ContentSummary}. */
public final ContentSummary computeContentSummary() {
@@ -488,4 +492,48 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix) {
out.print(s.substring(s.lastIndexOf(getClass().getSimpleName())));
out.println(")");
}
/**
* Information used for updating the blocksMap when deleting files.
*/
public static class BlocksMapUpdateInfo {
/**
* The list of blocks that need to be removed from blocksMap
*/
private List<Block> toDeleteList;
public BlocksMapUpdateInfo(List<Block> toDeleteList) {
this.toDeleteList = toDeleteList == null ? new ArrayList<Block>()
: toDeleteList;
}
public BlocksMapUpdateInfo() {
toDeleteList = new ArrayList<Block>();
}
/**
* @return The list of blocks that need to be removed from blocksMap
*/
public List<Block> getToDeleteList() {
return toDeleteList;
}
/**
* Add a to-be-deleted block into the
* {@link BlocksMapUpdateInfo#toDeleteList}
* @param toDelete the to-be-deleted block
*/
public void addDeleteBlock(Block toDelete) {
if (toDelete != null) {
toDeleteList.add(toDelete);
}
}
/**
* Clear {@link BlocksMapUpdateInfo#toDeleteList}
*/
public void clear() {
toDeleteList.clear();
}
}
}
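
A minimal usage sketch of the new container follows. It assumes compilation in the same org.apache.hadoop.hdfs.server.namenode package as INode; the BlocksMapUpdateInfoSketch class, its main method, and the block IDs are hypothetical, while addDeleteBlock, getToDeleteList and clear are the methods added above.

package org.apache.hadoop.hdfs.server.namenode;

import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;

public class BlocksMapUpdateInfoSketch {
  public static void main(String[] args) {
    BlocksMapUpdateInfo info = new BlocksMapUpdateInfo();

    // Collect blocks the way INodeFile.collectSubtreeBlocksAndClear does.
    info.addDeleteBlock(new Block(1L));
    info.addDeleteBlock(new Block(2L));
    info.addDeleteBlock(null);  // null is silently ignored by addDeleteBlock

    // Consumers (removeBlocks, removePathAndBlocks) read the accumulated list.
    List<Block> toDelete = info.getToDeleteList();
    System.out.println(toDelete.size() + " block(s) queued for removal from the blocksMap");

    // Reset once the blocks have been handed off.
    info.clear();
  }
}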

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -27,7 +27,6 @@
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import com.google.common.annotations.VisibleForTesting;
@@ -429,13 +428,13 @@ public List<INode> getChildren() {
}
@Override
int collectSubtreeBlocksAndClear(List<Block> v) {
int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
int total = 1;
if (children == null) {
return total;
}
for (INode child : children) {
total += child.collectSubtreeBlocksAndClear(v);
total += child.collectSubtreeBlocksAndClear(info);
}
parent = null;
children = null;

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -19,7 +19,6 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsAction;
@@ -152,11 +151,11 @@ public void setBlocks(BlockInfo[] blocks) {
}
@Override
int collectSubtreeBlocksAndClear(List<Block> v) {
int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
parent = null;
if(blocks != null && v != null) {
if(blocks != null && info != null) {
for (BlockInfo blk : blocks) {
v.add(blk);
info.addDeleteBlock(blk);
blk.setBlockCollection(null);
}
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java

@@ -17,12 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
/**
* An INode representing a symbolic link.
@@ -64,7 +61,7 @@ DirCounts spaceConsumedInTree(DirCounts counts) {
}
@Override
int collectSubtreeBlocksAndClear(List<Block> v) {
int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
return 1;
}