From 313dd0250543177752ebbad7f7f6a6bcf3a8ab42 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Sat, 20 Jul 2013 16:22:11 +0000
Subject: [PATCH] HDFS-5010. Reduce the frequency of getCurrentUser() calls from namenode. Contributed by Kihwal Lee.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1505160 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../server/blockmanagement/BlockManager.java  |  6 ++++--
 .../hdfs/server/namenode/FSNamesystem.java    | 20 +++++++++----------
 .../server/namenode/FSPermissionChecker.java  | 12 ++++-------
 .../hadoop/hdfs/server/namenode/NameNode.java |  9 +++++++++
 .../server/namenode/NameNodeRpcServer.java    | 14 +++++++++----
 6 files changed, 39 insertions(+), 25 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ade78d3978..f545f6d2c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -3276,6 +3276,9 @@ Release 0.23.10 - UNRELEASED
 
   IMPROVEMENTS
 
+    HDFS-5010. Reduce the frequency of getCurrentUser() calls from namenode
+    (kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1585dad902..e6025f7827 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -860,8 +860,10 @@ public ExportedBlockKeys getBlockKeys() {
   public void setBlockToken(final LocatedBlock b,
       final BlockTokenSecretManager.AccessMode mode) throws IOException {
     if (isBlockTokenEnabled()) {
-      b.setBlockToken(blockTokenSecretManager.generateToken(b.getBlock(),
-          EnumSet.of(mode)));
+      // Use cached UGI if serving RPC calls.
+      b.setBlockToken(blockTokenSecretManager.generateToken(
+          NameNode.getRemoteUser().getShortUserName(),
+          b.getBlock(), EnumSet.of(mode)));
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2b54007ac7..3fee048250 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -168,6 +168,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
@@ -2943,7 +2944,11 @@ private boolean deleteInt(String src, boolean recursive)
 
   private FSPermissionChecker getPermissionChecker()
       throws AccessControlException {
-    return new FSPermissionChecker(fsOwnerShortUserName, supergroup);
+    try {
+      return new FSPermissionChecker(fsOwnerShortUserName, supergroup, getRemoteUser());
+    } catch (IOException ioe) {
+      throw new AccessControlException(ioe);
+    }
   }
   /**
    * Remove a file/directory from the namespace.
@@ -3153,9 +3158,7 @@ boolean isFileClosed(String src)
       return !INodeFile.valueOf(dir.getINode(src), src).isUnderConstruction();
     } catch (AccessControlException e) {
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(false, UserGroupInformation.getCurrentUser(),
-                      getRemoteIp(),
-                      "isFileClosed", src, null, null);
+        logAuditEvent(false, "isFileClosed", src);
       }
       throw e;
     } finally {
@@ -5825,11 +5828,7 @@ private static InetAddress getRemoteIp() {
   // optimize ugi lookup for RPC operations to avoid a trip through
   // UGI.getCurrentUser which is synch'ed
   private static UserGroupInformation getRemoteUser() throws IOException {
-    UserGroupInformation ugi = null;
-    if (Server.isRpcInvocation()) {
-      ugi = Server.getRemoteUser();
-    }
-    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
+    return NameNode.getRemoteUser();
   }
 
   /**
@@ -6334,8 +6333,7 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      FSPermissionChecker checker = new FSPermissionChecker(
-          fsOwner.getShortUserName(), supergroup);
+      FSPermissionChecker checker = getPermissionChecker();
       final String user = checker.isSuperUser()? null : checker.getUser();
       status = snapshotManager.getSnapshottableDirListing(user);
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index ffba6568dc..a02bc4044d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -56,14 +56,10 @@ private static String toAccessControlString(INode inode) {
   /** A set with group namess. Not synchronized since it is unmodifiable */
   private final Set<String> groups;
   private final boolean isSuper;
-  
+
-  FSPermissionChecker(String fsOwner, String supergroup
-      ) throws AccessControlException{
-    try {
-      ugi = UserGroupInformation.getCurrentUser();
-    } catch (IOException e) {
-      throw new AccessControlException(e);
-    }
+  FSPermissionChecker(String fsOwner, String supergroup,
+      UserGroupInformation callerUgi) {
+    ugi = callerUgi;
     HashSet<String> s = new HashSet<String>(Arrays.asList(ugi.getGroupNames()));
     groups = Collections.unmodifiableSet(s);
     user = ugi.getShortUserName();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 717f07fe87..c4443480ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -431,6 +431,15 @@ NamenodeRegistration setRegistration() {
     return nodeRegistration;
   }
 
+  /* optimize ugi lookup for RPC operations to avoid a trip through
+   * UGI.getCurrentUser which is synch'ed
+   */
+  public static UserGroupInformation getRemoteUser() throws IOException {
+    UserGroupInformation ugi = Server.getRemoteUser();
+    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
+  }
+
+
   /**
    * Login as the configured user for the NameNode.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index ce8121bade..7add68e110 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -89,6 +89,7 @@
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -349,6 +350,11 @@ InetSocketAddress getRpcAddress() {
     return clientRpcAddress;
   }
 
+  private static UserGroupInformation getRemoteUser() throws IOException {
+    return NameNode.getRemoteUser();
+  }
+
+
   /////////////////////////////////////////////////////
   // NamenodeProtocol
   /////////////////////////////////////////////////////
@@ -457,7 +463,7 @@ public HdfsFileStatus create(String src, FsPermission masked,
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
     HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus(
-        UserGroupInformation.getCurrentUser().getShortUserName(), null, masked),
+        getRemoteUser().getShortUserName(), null, masked),
         clientName, clientMachine, flag.get(), createParent, replication,
         blockSize);
     metrics.incrFilesCreated();
@@ -690,7 +696,7 @@ public boolean mkdirs(String src, FsPermission masked, boolean createParent)
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
     return namesystem.mkdirs(src,
-        new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
+        new PermissionStatus(getRemoteUser().getShortUserName(),
             null, masked), createParent);
   }
 
@@ -882,7 +888,7 @@ public void createSymlink(String target, String link, FsPermission dirPerms,
     if ("".equals(target)) {
       throw new IOException("Invalid symlink target");
     }
-    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    final UserGroupInformation ugi = getRemoteUser();
     namesystem.createSymlink(target, link,
         new PermissionStatus(ugi.getShortUserName(), null, dirPerms), createParent);
   }
@@ -1017,7 +1023,7 @@ public void refreshServiceAcl() throws IOException {
   @Override // RefreshAuthorizationPolicyProtocol
   public void refreshUserToGroupsMappings() throws IOException {
     LOG.info("Refreshing all user-to-groups mappings. Requested by user: " +
-             UserGroupInformation.getCurrentUser().getShortUserName());
+             getRemoteUser().getShortUserName());
     Groups.getUserToGroupsMappingService().refresh();
   }
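For reference, below is a minimal standalone sketch (not part of the patch) of the lookup pattern that the new NameNode.getRemoteUser() helper centralizes: prefer the UGI already attached to the active RPC call via Server.getRemoteUser(), and fall back to the synchronized UserGroupInformation.getCurrentUser() only when no RPC call is being served. The class name RemoteUserLookupSketch and its main() driver are illustrative assumptions, not code from the patch.

import java.io.IOException;

import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.UserGroupInformation;

/**
 * Illustrative sketch of the UGI lookup pattern introduced by HDFS-5010:
 * use the caller's UGI cached on the current RPC call when available,
 * otherwise fall back to the synchronized UserGroupInformation.getCurrentUser().
 */
public class RemoteUserLookupSketch {

  /** Mirrors the helper added to NameNode in this patch. */
  public static UserGroupInformation getRemoteUser() throws IOException {
    // Server.getRemoteUser() returns the caller's UGI when invoked from an
    // RPC handler thread, or null when no RPC call is in progress.
    UserGroupInformation ugi = Server.getRemoteUser();
    return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
  }

  public static void main(String[] args) throws IOException {
    // Outside of an RPC handler this falls back to getCurrentUser(),
    // so it prints the local login user's short name.
    System.out.println("Short user name: " + getRemoteUser().getShortUserName());
  }
}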