diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8150a54bd9..95da136e52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -471,6 +471,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7373. Clean up temporary files after fsimage transfer failures.
     (kihwal)
 
+    HDFS-7543. Avoid path resolution when getting FileStatus for audit logs.
+    (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 0d2b34ce8c..7aaa21c99a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -41,10 +41,10 @@ static HdfsFileStatus modifyAclEntries(
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(
-          FSDirectory.normalizePath(src), true);
+      iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
       fsd.checkOwner(pc, iip);
       INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getLatestSnapshotId();
@@ -56,7 +56,7 @@ static HdfsFileStatus modifyAclEntries(
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus removeAclEntries(
@@ -67,10 +67,10 @@ static HdfsFileStatus removeAclEntries(
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(
-          FSDirectory.normalizePath(src), true);
+      iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
       fsd.checkOwner(pc, iip);
       INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getLatestSnapshotId();
@@ -82,7 +82,7 @@ static HdfsFileStatus removeAclEntries(
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
@@ -92,10 +92,10 @@ static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(
-          FSDirectory.normalizePath(src), true);
+      iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
       fsd.checkOwner(pc, iip);
       INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getLatestSnapshotId();
@@ -107,7 +107,7 @@ static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
@@ -117,16 +117,17 @@
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       unprotectedRemoveAcl(fsd, iip);
     } finally {
       fsd.writeUnlock();
     }
     fsd.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus setAcl(
@@ -137,16 +138,17 @@ static HdfsFileStatus setAcl(
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
     src = fsd.resolvePath(pc, src, pathComponents);
+    INodesInPath iip;
     fsd.writeLock();
     try {
-      INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       List<AclEntry> newAcl = unprotectedSetAcl(fsd, src, aclSpec);
       fsd.getEditLog().logSetAcl(src, newAcl);
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static AclStatus getAclStatus(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 1e3c401af4..6c1890ecc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -50,17 +50,18 @@ static HdfsFileStatus setPermission(
     String src = srcArg;
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    INodesInPath iip;
     fsd.writeLock();
     try {
       src = fsd.resolvePath(pc, src, pathComponents);
-      final INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       unprotectedSetPermission(fsd, src, permission);
     } finally {
       fsd.writeUnlock();
     }
     fsd.getEditLog().logSetPermissions(src, permission);
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus setOwner(
@@ -68,10 +69,11 @@
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    INodesInPath iip;
     fsd.writeLock();
     try {
       src = fsd.resolvePath(pc, src, pathComponents);
-      final INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       if (!pc.isSuperUser()) {
         if (username != null && !pc.getUser().equals(username)) {
@@ -86,7 +88,7 @@ static HdfsFileStatus setOwner(
       fsd.writeUnlock();
     }
     fsd.getEditLog().logSetOwner(src, username, group);
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static HdfsFileStatus setTimes(
@@ -102,10 +104,11 @@
     FSPermissionChecker pc = fsd.getPermissionChecker();
 
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    INodesInPath iip;
     fsd.writeLock();
     try {
       src = fsd.resolvePath(pc, src, pathComponents);
-      final INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
       // Write access is required to set access and modification times
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
@@ -123,7 +126,7 @@ static HdfsFileStatus setTimes(
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static boolean setReplication(
@@ -165,10 +168,11 @@ static HdfsFileStatus setStoragePolicy(
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    INodesInPath iip;
     fsd.writeLock();
     try {
       src = FSDirectory.resolvePath(src, pathComponents, fsd);
-      final INodesInPath iip = fsd.getINodesInPath4Write(src);
+      iip = fsd.getINodesInPath4Write(src);
 
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
@@ -185,7 +189,7 @@
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index f7e57beedd..43d3b205f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -168,7 +168,7 @@ static HdfsFileStatus concat(
       fsd.writeUnlock();
     }
     fsd.getEditLog().logConcat(target, srcs, timestamp, logRetryCache);
-    return fsd.getAuditFileInfo(target, false);
+    return fsd.getAuditFileInfo(trgIip);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 7e62d2c64d..4ea77e6650 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -42,7 +42,6 @@ static HdfsFileStatus mkdirs(
       FSNamesystem fsn, String src, PermissionStatus permissions,
       boolean createParent) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
-    final String srcArg = src;
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
     }
@@ -70,12 +69,12 @@ static HdfsFileStatus mkdirs(
       // heuristic because the mkdirs() operation might need to
       // create multiple inodes.
       fsn.checkFsObjectLimit();
-
-      if (mkdirsRecursively(fsd, iip, permissions, false, now()) == null) {
+      iip = mkdirsRecursively(fsd, iip, permissions, false, now());
+      if (iip == null) {
        throw new IOException("Failed to create directory: " + src);
       }
     }
-    return fsd.getAuditFileInfo(srcArg, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static INode unprotectedMkdir(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 4b4dc8ccb2..4239f46094 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -70,7 +70,8 @@ static RenameOldResult renameToInt(
     @SuppressWarnings("deprecation")
     final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
     if (status) {
-      resultingStat = fsd.getAuditFileInfo(dst, false);
+      INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
+      resultingStat = fsd.getAuditFileInfo(dstIIP);
     }
     return new RenameOldResult(status, resultingStat);
   }
@@ -122,6 +123,7 @@ static void verifyFsLimitsForRename(FSDirectory fsd, INodesInPath srcIIP,
    *
    */
   @Deprecated
+  @SuppressWarnings("deprecation")
   static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
       long timestamp) throws IOException {
     if (fsd.isDir(dst)) {
@@ -246,10 +248,11 @@ static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt(
     src = fsd.resolvePath(pc, src, srcComponents);
     dst = fsd.resolvePath(pc, dst, dstComponents);
     renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
-    HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dst, false);
+    INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
+    HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);
 
-    return new AbstractMap.SimpleImmutableEntry<>(collectedBlocks, resultingStat);
+    return new AbstractMap.SimpleImmutableEntry<>(
+        collectedBlocks, resultingStat);
   }
 
   /**
@@ -357,7 +360,7 @@ static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
     fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
     final INode dstInode = dstIIP.getLastINode();
-    List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
+    List<INodeDirectory> snapshottableDirs = new ArrayList<>();
     if (dstInode != null) { // Destination exists
       validateOverwrite(src, dst, overwrite, srcInode, dstInode);
       FSDirSnapshotOp.checkSnapshot(dstInode, snapshottableDirs);
@@ -419,7 +422,7 @@ static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst,
       if (removedDst != null) {
         undoRemoveDst = false;
         if (removedNum > 0) {
-          List<INode> removedINodes = new ChunkedArrayList<INode>();
+          List<INode> removedINodes = new ChunkedArrayList<>();
           if (!removedDst.isInLatestSnapshot(tx.dstIIP.getLatestSnapshotId())) {
             removedDst.destroyAndCollectBlocks(collectedBlocks, removedINodes);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 6ca30ad0a1..dc0fe1f5f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -264,29 +264,46 @@ private static DirectoryListing getSnapshotsListing(
   /** Get the file info for a specific file.
    * @param fsd FSDirectory
    * @param src The string representation of the path to the file
-   * @param resolveLink whether to throw UnresolvedLinkException
    * @param isRawPath true if a /.reserved/raw pathname was passed by the user
    * @param includeStoragePolicy whether to include storage policy
    * @return object containing information regarding the file
    *         or null if file not found
    */
+  static HdfsFileStatus getFileInfo(
+      FSDirectory fsd, INodesInPath src, boolean isRawPath,
+      boolean includeStoragePolicy)
+      throws IOException {
+    fsd.readLock();
+    try {
+      final INode i = src.getLastINode();
+      byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
+          i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
+      return i == null ? null : createFileStatus(
+          fsd, HdfsFileStatus.EMPTY_NAME, i, policyId,
+          src.getPathSnapshotId(), isRawPath, src);
+    } finally {
+      fsd.readUnlock();
+    }
+  }
+
   static HdfsFileStatus getFileInfo(
       FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
       boolean includeStoragePolicy)
     throws IOException {
     String srcs = FSDirectory.normalizePath(src);
+    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
+      if (fsd.getINode4DotSnapshot(srcs) != null) {
+        return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
+            HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
+            BlockStoragePolicySuite.ID_UNSPECIFIED);
+      }
+      return null;
+    }
+
     fsd.readLock();
     try {
-      if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
-        return getFileInfo4DotSnapshot(fsd, srcs);
-      }
-      final INodesInPath inodesInPath = fsd.getINodesInPath(srcs, resolveLink);
-      final INode i = inodesInPath.getLastINode();
-      byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
-          i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
-      return i == null ? null : createFileStatus(fsd,
-          HdfsFileStatus.EMPTY_NAME, i, policyId,
-          inodesInPath.getPathSnapshotId(), isRawPath, inodesInPath);
+      final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
+      return getFileInfo(fsd, iip, isRawPath, includeStoragePolicy);
     } finally {
       fsd.readUnlock();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index 3380d0af3d..d232b87180 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -52,10 +52,11 @@ static HdfsFileStatus createSymlinkInt(
     FSPermissionChecker pc = fsn.getPermissionChecker();
 
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(link);
+    INodesInPath iip;
     fsd.writeLock();
     try {
       link = fsd.resolvePath(pc, link, pathComponents);
-      final INodesInPath iip = fsd.getINodesInPath4Write(link, false);
+      iip = fsd.getINodesInPath4Write(link, false);
       if (!createParent) {
         fsd.verifyParentDir(iip, link);
       }
@@ -76,7 +77,7 @@ static HdfsFileStatus createSymlinkInt(
       fsd.writeUnlock();
     }
     NameNode.getNameNodeMetrics().incrCreateSymlinkOps();
-    return fsd.getAuditFileInfo(link, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static INodeSymlink unprotectedAddSymlink(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 47a995d8da..45e63f2eff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -66,21 +66,21 @@ static HdfsFileStatus setXAttr(
     FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
-        src);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     src = fsd.resolvePath(pc, src, pathComponents);
-    final INodesInPath iip = fsd.getINodesInPath4Write(src);
-    checkXAttrChangeAccess(fsd, iip, xAttr, pc);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
+    INodesInPath iip;
     fsd.writeLock();
     try {
+      iip = fsd.getINodesInPath4Write(src);
+      checkXAttrChangeAccess(fsd, iip, xAttr, pc);
       unprotectedSetXAttrs(fsd, src, xAttrs, flag);
     } finally {
       fsd.writeUnlock();
     }
     fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static List<XAttr> getXAttrs(FSDirectory fsd, final String srcArg,
@@ -164,14 +164,16 @@ static HdfsFileStatus removeXAttr(
         pc, xAttr, FSDirectory.isReservedRawName(src));
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
         src);
-    src = fsd.resolvePath(pc, src, pathComponents);
-    final INodesInPath iip = fsd.getINodesInPath4Write(src);
-    checkXAttrChangeAccess(fsd, iip, xAttr, pc);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
+    INodesInPath iip;
     fsd.writeLock();
     try {
+      src = fsd.resolvePath(pc, src, pathComponents);
+      iip = fsd.getINodesInPath4Write(src);
+      checkXAttrChangeAccess(fsd, iip, xAttr, pc);
+
       List<XAttr> removedXAttrs = unprotectedRemoveXAttrs(fsd, src, xAttrs);
       if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
         fsd.getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
@@ -182,7 +184,7 @@
     } finally {
       fsd.writeUnlock();
     }
-    return fsd.getAuditFileInfo(src, false);
+    return fsd.getAuditFileInfo(iip);
   }
 
   static List<XAttr> unprotectedRemoveXAttrs(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 9ddc5c0326..c025e01e60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1647,11 +1647,10 @@ void checkPermission(FSPermissionChecker pc, INodesInPath iip,
     }
   }
 
-  HdfsFileStatus getAuditFileInfo(String path, boolean resolveSymlink)
-      throws IOException {
+  HdfsFileStatus getAuditFileInfo(INodesInPath iip)
+      throws IOException {
     return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation())
-        ? FSDirStatAndListingOp.getFileInfo(this, path, resolveSymlink, false,
-        false) : null;
+        ? FSDirStatAndListingOp.getFileInfo(this, iip, false, false) : null;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b411c67cba..bb1c4ed3af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -337,11 +337,6 @@ public boolean isAuditEnabled() {
     return !isDefaultAuditLogger || auditLog.isInfoEnabled();
   }
 
-  private HdfsFileStatus getAuditFileInfo(String path, boolean resolveSymlink)
-      throws IOException {
-    return dir.getAuditFileInfo(path, resolveSymlink);
-  }
-
   private void logAuditEvent(boolean succeeded, String cmd, String src)
       throws IOException {
     logAuditEvent(succeeded, cmd, src, null, null);
@@ -7669,7 +7664,8 @@ private void createEncryptionZoneInt(final String srcArg, String cipher,
       List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
       xAttrs.add(ezXAttr);
       getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-      resultingStat = getAuditFileInfo(src, false);
+      final INodesInPath iip = dir.getINodesInPath4Write(src, false);
+      resultingStat = dir.getAuditFileInfo(iip);
     } finally {
       writeUnlock();
     }
@@ -7703,7 +7699,7 @@ EncryptionZone getEZForPath(final String srcArg)
         dir.checkPathAccess(pc, iip, FsAction.READ);
       }
       final EncryptionZone ret = dir.getEZForPath(iip);
-      resultingStat = getAuditFileInfo(src, false);
+      resultingStat = dir.getAuditFileInfo(iip);
       success = true;
       return ret;
     } finally {
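
Note on the recurring pattern: every hunk above makes the same mechanical change. The INodesInPath resolved while the operation holds the FSDirectory lock is kept in a local variable and passed to the new getAuditFileInfo(INodesInPath) overload, so building the FileStatus for the audit log no longer re-resolves the path string from scratch. The sketch below is a minimal, hypothetical model of that idea in plain Java; AuditSketch, resolve, auditByPath, and auditByHandle are illustrative names, not Hadoop APIs.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class AuditSketch {
  private final Map<String, String> inodes = new ConcurrentHashMap<>();

  // Stand-in for FSDirectory#getINodesInPath4Write: one walk of the path.
  String resolve(String path) {
    return inodes.computeIfAbsent(path, p -> "inode:" + p);
  }

  // Old shape: the audit helper takes the path string and resolves it again.
  String auditByPath(String path) {
    return resolve(path); // redundant second resolution
  }

  // New shape: the audit helper reuses the handle the caller already holds.
  String auditByHandle(String resolved) {
    return resolved; // no extra lookup
  }

  public static void main(String[] args) {
    AuditSketch dir = new AuditSketch();
    String iip = dir.resolve("/user/alice/file"); // resolved once, under the lock
    System.out.println(dir.auditByHandle(iip));   // audit info without re-resolving
  }
}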