HDFS-10979. Pass IIP for FSDirDeleteOp methods. Contributed by Daryn Sharp.

Kihwal Lee 2016-10-07 14:14:47 -05:00
parent 69620f9559
commit 3565c9af17
3 changed files with 38 additions and 38 deletions
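
The patch threads a caller-resolved INodesInPath (IIP) through the FSDirDeleteOp entry points (delete, deleteInternal, deleteForEditLog, deleteAllowed, checkProtectedDescendants) instead of passing a path String alongside it; where a string is still needed, for logging and the edit-log record, it is recovered with iip.getPath(). The client-visible delete semantics are unchanged. Below is a rough, hypothetical illustration of that unchanged contract, not part of this commit; it assumes the MiniDFSCluster test harness from the hadoop-hdfs test artifact.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DeleteContractSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/dir/child"));
      try {
        // Non-recursive delete of a non-empty directory: on the NameNode,
        // FSDirDeleteOp.delete() sees fsd.isNonEmptyDirectory(iip) with
        // recursive == false and rejects the call.
        fs.delete(new Path("/dir"), false);
        System.out.println("unexpected: non-recursive delete succeeded");
      } catch (IOException e) {
        // Expected; surfaces as a "<path> is non empty" error
        // (PathIsNotEmptyDirectoryException on the server side).
        System.out.println("rejected as expected: " + e.getMessage());
      }
      // Recursive delete of the same directory is allowed.
      System.out.println("recursive delete: " + fs.delete(new Path("/dir"), true));
    } finally {
      cluster.shutdown();
    }
  }
}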

FSDirDeleteOp.java

@@ -55,7 +55,7 @@ static long delete(FSDirectory fsd, INodesInPath iip,
     FSNamesystem fsn = fsd.getFSNamesystem();
     fsd.writeLock();
     try {
-      if (deleteAllowed(iip, iip.getPath()) ) {
+      if (deleteAllowed(iip)) {
         List<INodeDirectory> snapshottableDirs = new ArrayList<>();
         FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
         ReclaimContext context = new ReclaimContext(
@@ -98,20 +98,24 @@ static BlocksMapUpdateInfo delete(
     FSDirectory fsd = fsn.getFSDirectory();
     FSPermissionChecker pc = fsd.getPermissionChecker();
 
-    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
-    src = iip.getPath();
-    if (!recursive && fsd.isNonEmptyDirectory(iip)) {
-      throw new PathIsNotEmptyDirectoryException(src + " is non empty");
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
     }
+
+    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
           FsAction.ALL, true);
     }
-    if (recursive && fsd.isNonEmptyDirectory(iip)) {
-      checkProtectedDescendants(fsd, src);
+    if (fsd.isNonEmptyDirectory(iip)) {
+      if (!recursive) {
+        throw new PathIsNotEmptyDirectoryException(
+            iip.getPath() + " is non empty");
+      }
+      checkProtectedDescendants(fsd, iip);
     }
 
-    return deleteInternal(fsn, src, iip, logRetryCache);
+    return deleteInternal(fsn, iip, logRetryCache);
   }
 
   /**
@@ -126,17 +130,14 @@ static BlocksMapUpdateInfo delete(
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
    */
-  static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
+  static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)
       throws IOException {
     assert fsd.hasWriteLock();
     FSNamesystem fsn = fsd.getFSNamesystem();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<>();
     List<Long> removedUCFiles = new ChunkedArrayList<>();
-
-    final INodesInPath iip = fsd.getINodesInPath4Write(
-        FSDirectory.normalizePath(src), false);
-    if (!deleteAllowed(iip, src)) {
+    if (!deleteAllowed(iip)) {
       return;
     }
     List<INodeDirectory> snapshottableDirs = new ArrayList<>();
@@ -162,7 +163,6 @@ static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
    * <p>
    * For small directory or file the deletion is done in one shot.
    * @param fsn namespace
-   * @param src path name to be deleted
    * @param iip the INodesInPath instance containing all the INodes for the path
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *          rebuilding
@@ -170,15 +170,11 @@ static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
    * @throws IOException
    */
   static BlocksMapUpdateInfo deleteInternal(
-      FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
+      FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
       throws IOException {
     assert fsn.hasWriteLock();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
-    }
-
-    if (FSDirectory.isExactReservedName(src)) {
-      throw new InvalidPathException(src);
+      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
     }
 
     FSDirectory fsd = fsn.getFSDirectory();
@@ -193,14 +189,14 @@ static BlocksMapUpdateInfo deleteInternal(
     if (filesRemoved < 0) {
       return null;
     }
-    fsd.getEditLog().logDelete(src, mtime, logRetryCache);
+    fsd.getEditLog().logDelete(iip.getPath(), mtime, logRetryCache);
     incrDeletedFileCount(filesRemoved);
     fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, true);
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
-          + src +" is removed");
+      NameNode.stateChangeLog.debug(
+          "DIR* Namesystem.delete: " + iip.getPath() +" is removed");
     }
     return collectedBlocks;
   }
@@ -209,19 +205,18 @@ static void incrDeletedFileCount(long count) {
     NameNode.getNameNodeMetrics().incrFilesDeleted(count);
   }
 
-  private static boolean deleteAllowed(final INodesInPath iip,
-      final String src) {
+  private static boolean deleteAllowed(final INodesInPath iip) {
     if (iip.length() < 1 || iip.getLastINode() == null) {
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
             "DIR* FSDirectory.unprotectedDelete: failed to remove "
-                + src + " because it does not exist");
+                + iip.getPath() + " because it does not exist");
       }
       return false;
     } else if (iip.length() == 1) { // src is the root
       NameNode.stateChangeLog.warn(
-          "DIR* FSDirectory.unprotectedDelete: failed to remove " + src +
-          " because the root is not allowed to be deleted");
+          "DIR* FSDirectory.unprotectedDelete: failed to remove " +
+          iip.getPath() + " because the root is not allowed to be deleted");
       return false;
     }
     return true;
@@ -278,15 +273,19 @@ private static boolean unprotectedDelete(FSDirectory fsd, INodesInPath iip,
    * Throw if the given directory has any non-empty protected descendants
    * (including itself).
    *
-   * @param src directory whose descendants are to be checked. The caller
-   *            must ensure src is not terminated with {@link Path#SEPARATOR}.
+   * @param iip directory whose descendants are to be checked.
+   * @throws AccessControlException if a non-empty protected descendant
+   *           was found.
    */
-  private static void checkProtectedDescendants(FSDirectory fsd, String src)
-      throws AccessControlException, UnresolvedLinkException {
+  private static void checkProtectedDescendants(
+      FSDirectory fsd, INodesInPath iip)
+      throws AccessControlException, UnresolvedLinkException {
     final SortedSet<String> protectedDirs = fsd.getProtectedDirectories();
     if (protectedDirs.isEmpty()) {
       return;
     }
+
+    String src = iip.getPath();
     // Is src protected? Caller has already checked it is non-empty.
     if (protectedDirs.contains(src)) {
       throw new AccessControlException(

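checkProtectedDescendants now takes the IIP and derives the textual path itself before consulting fsd.getProtectedDirectories(). The sketch below is a hypothetical, test-style illustration of the protected-directory behavior that check enforces, not part of this commit; it assumes the standard fs.protected.directories property and, again, the MiniDFSCluster harness.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.AccessControlException;

public class ProtectedDirDeleteSketch {
  public static void main(String[] args) throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    // Assumption: the NameNode reads this key at startup to build the
    // protected set returned by fsd.getProtectedDirectories().
    conf.set("fs.protected.directories", "/important");
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/important/data"));   // protected directory is non-empty
      try {
        // Recursive delete of a non-empty protected directory: delete()
        // reaches checkProtectedDescendants(fsd, iip), which rejects it.
        fs.delete(new Path("/important"), true);
        System.out.println("unexpected: protected directory was deleted");
      } catch (AccessControlException e) {
        System.out.println("rejected as expected: " + e.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}
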
FSEditLogLoader.java

@@ -356,7 +356,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
       if (oldFile != null && addCloseOp.overwrite) {
         // This is OP_ADD with overwrite
-        FSDirDeleteOp.deleteForEditLog(fsDir, path, addCloseOp.mtime);
+        FSDirDeleteOp.deleteForEditLog(fsDir, iip, addCloseOp.mtime);
         iip = INodesInPath.replace(iip, iip.length() - 1, null);
         oldFile = null;
       }
@@ -565,10 +565,11 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
     }
     case OP_DELETE: {
       DeleteOp deleteOp = (DeleteOp)op;
-      FSDirDeleteOp.deleteForEditLog(
-          fsDir, renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
-          deleteOp.timestamp);
+      final String src = renameReservedPathsOnUpgrade(
+          deleteOp.path, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src, false);
+      FSDirDeleteOp.deleteForEditLog(fsDir, iip, deleteOp.timestamp);
 
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId);
       }

FSNamesystem.java

@@ -3805,7 +3805,7 @@ private void clearCorruptLazyPersistFiles()
             LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas.");
             BlocksMapUpdateInfo toRemoveBlocks =
                 FSDirDeleteOp.deleteInternal(
-                    FSNamesystem.this, bc.getName(),
+                    FSNamesystem.this,
                     INodesInPath.fromINode((INodeFile) bc), false);
             changed |= toRemoveBlocks != null;
             if (toRemoveBlocks != null) {