HDFS-16259. Catch and re-throw sub-classes of AccessControlException thrown by any permission provider plugins (eg Ranger) (#3598)

(cherry picked from commit 2f35cc36cd)

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
Author:    Stephen O'Donnell (2021-11-02 11:14:48 +00:00)
Committer: S O'Donnell
Parent:    5fbd9d333e
Commit:    9cf841b1a6

2 changed files with 90 additions and 25 deletions
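For context, a permission provider plugin (such as Ranger's HDFS authorizer) is loaded into the NameNode and can deny an authorization check by throwing its own subclass of AccessControlException. A minimal, purely hypothetical sketch of such an exception follows; the class name is illustrative and not taken from Ranger or from this patch:

import org.apache.hadoop.security.AccessControlException;

// Hypothetical exception an external enforcer might throw on a denied check.
// The DFS client resolves remote exceptions by exact class name, so a
// subclass like this must be converted back to the plain
// AccessControlException before it leaves the NameNode, which is what the
// change below does.
public class PluginDeniedException extends AccessControlException {
  public PluginDeniedException(String reason) {
    super(reason);
  }
}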

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java

@@ -216,31 +216,41 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
     AccessControlEnforcer enforcer = getAccessControlEnforcer();
     String opType = operationType.get();
-    if (this.authorizeWithContext && opType != null) {
-      INodeAttributeProvider.AuthorizationContext.Builder builder =
-          new INodeAttributeProvider.AuthorizationContext.Builder();
-      builder.fsOwner(fsOwner).
-          supergroup(supergroup).
-          callerUgi(callerUgi).
-          inodeAttrs(inodeAttrs).
-          inodes(inodes).
-          pathByNameArr(components).
-          snapshotId(snapshotId).
-          path(path).
-          ancestorIndex(ancestorIndex).
-          doCheckOwner(doCheckOwner).
-          ancestorAccess(ancestorAccess).
-          parentAccess(parentAccess).
-          access(access).
-          subAccess(subAccess).
-          ignoreEmptyDir(ignoreEmptyDir).
-          operationName(opType).
-          callerContext(CallerContext.getCurrent());
-      enforcer.checkPermissionWithContext(builder.build());
-    } else {
-      enforcer.checkPermission(fsOwner, supergroup, callerUgi, inodeAttrs,
-          inodes, components, snapshotId, path, ancestorIndex, doCheckOwner,
-          ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
-    }
+    try {
+      if (this.authorizeWithContext && opType != null) {
+        INodeAttributeProvider.AuthorizationContext.Builder builder =
+            new INodeAttributeProvider.AuthorizationContext.Builder();
+        builder.fsOwner(fsOwner).
+            supergroup(supergroup).
+            callerUgi(callerUgi).
+            inodeAttrs(inodeAttrs).
+            inodes(inodes).
+            pathByNameArr(components).
+            snapshotId(snapshotId).
+            path(path).
+            ancestorIndex(ancestorIndex).
+            doCheckOwner(doCheckOwner).
+            ancestorAccess(ancestorAccess).
+            parentAccess(parentAccess).
+            access(access).
+            subAccess(subAccess).
+            ignoreEmptyDir(ignoreEmptyDir).
+            operationName(opType).
+            callerContext(CallerContext.getCurrent());
+        enforcer.checkPermissionWithContext(builder.build());
+      } else {
+        enforcer.checkPermission(fsOwner, supergroup, callerUgi, inodeAttrs,
+            inodes, components, snapshotId, path, ancestorIndex, doCheckOwner,
+            ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
+      }
+    } catch (AccessControlException ace) {
+      Class<?> exceptionClass = ace.getClass();
+      if (exceptionClass.equals(AccessControlException.class)
+          || exceptionClass.equals(TraverseAccessControlException.class)) {
+        throw ace;
+      }
+      // Only form a new ACE for subclasses which come from external enforcers
+      throw new AccessControlException(ace);
+    }
   }
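Taken out of context, the rule the new catch block applies can be sketched as a small helper like the one below. The names are illustrative only; the real code additionally lets the checker's package-private TraverseAccessControlException pass through unchanged:

import org.apache.hadoop.security.AccessControlException;

final class AceRethrowSketch {
  // Return an exception that is safe to send back to the client: the base
  // class passes through untouched, while any subclass (typically raised by
  // an external enforcer) is wrapped in a plain AccessControlException that
  // keeps the original as its cause.
  static AccessControlException toClientSafe(AccessControlException ace) {
    if (ace.getClass().equals(AccessControlException.class)) {
      return ace;
    }
    return new AccessControlException(ace);
  }
}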

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java

@@ -47,6 +47,7 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
+import static org.junit.Assert.fail;

 public class TestINodeAttributeProvider {
   private static final Logger LOG =
@@ -57,6 +58,15 @@ public class TestINodeAttributeProvider {
   private static final short HDFS_PERMISSION = 0777;
   private static final short PROVIDER_PERMISSION = 0770;
   private static boolean runPermissionCheck = false;
+  private static boolean shouldThrowAccessException = false;
+
+  public static class MyAuthorizationProviderAccessException
+      extends AccessControlException {
+    public MyAuthorizationProviderAccessException() {
+      super();
+    }
+  };

   public static class MyAuthorizationProvider extends INodeAttributeProvider {
@@ -82,6 +92,9 @@ public void checkPermission(String fsOwner, String supergroup,
               ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
         }
         CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access);
+        if (shouldThrowAccessException) {
+          throw new MyAuthorizationProviderAccessException();
+        }
       }

       @Override
@@ -96,6 +109,9 @@ public void checkPermissionWithContext(
         CALLED.add("checkPermission|" + authzContext.getAncestorAccess()
             + "|" + authzContext.getParentAccess() + "|" + authzContext
             .getAccess());
+        if (shouldThrowAccessException) {
+          throw new MyAuthorizationProviderAccessException();
+        }
       }
     }
@@ -238,6 +254,7 @@ public void cleanUp() throws IOException {
       miniDFS = null;
     }
     runPermissionCheck = false;
+    shouldThrowAccessException = false;
     Assert.assertTrue(CALLED.contains("stop"));
   }
@@ -457,6 +474,44 @@ public Void run() throws Exception {
     });
   }

+  @Test
+  // HDFS-16259 - Ensure AccessControlException subclasses thrown by the
+  // enforcer are caught and re-thrown as plain AccessControlExceptions.
+  public void testSubClassedAccessControlExceptions() throws Exception {
+    FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+    shouldThrowAccessException = true;
+    final Path userPath = new Path("/user");
+    final Path authz = new Path("/user/authz");
+    final Path authzChild = new Path("/user/authz/child2");
+    fs.mkdirs(userPath);
+    fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
+    fs.mkdirs(authz);
+    fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
+    fs.mkdirs(authzChild);
+    fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
+        new String[]{"g1"});
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+        try {
+          fs.access(authzChild, FsAction.ALL);
+          fail("Exception should be thrown");
+          // The DFS client will get a RemoteException containing an
+          // AccessControlException (ACE). If the ACE is a subclass of
+          // AccessControlException, the client does not unwrap it correctly.
+          // The change in HDFS-16259 ensures a plain ACE is always thrown,
+          // rather than a subclass, to avoid this issue.
+        } catch (AccessControlException ace) {
+          Assert.assertEquals(AccessControlException.class, ace.getClass());
+        }
+        return null;
+      }
+    });
+  }
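The comment in the test above refers to how the DFS client converts a server-side failure back into a local exception: RemoteException#unwrapRemoteException matches the remote class name against the requested types exactly, so a subclassed ACE never unwraps to AccessControlException and callers are left holding a RemoteException. A rough, self-contained sketch of that behaviour, using the hypothetical subclass name org.example.PluginDeniedException:

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;

public class UnwrapSketch {
  public static void main(String[] args) {
    // If the server lets a subclass escape, the class name does not match
    // and the exception stays wrapped in a RemoteException.
    RemoteException subclassed = new RemoteException(
        "org.example.PluginDeniedException", "Permission denied");
    IOException unwrapped =
        subclassed.unwrapRemoteException(AccessControlException.class);
    System.out.println(unwrapped.getClass().getSimpleName()); // RemoteException

    // With the plain ACE the patched checker re-throws, unwrapping succeeds.
    RemoteException plain = new RemoteException(
        AccessControlException.class.getName(), "Permission denied");
    System.out.println(plain
        .unwrapRemoteException(AccessControlException.class)
        .getClass().getSimpleName()); // AccessControlException
  }
}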
   @Test
   // HDFS-15165 - ContentSummary calls should use the provider permissions(if