From 24f5f708df0dff0ea16018b511a020559ac54230 Mon Sep 17 00:00:00 2001
From: ahmarsuhail
Date: Wed, 26 Jul 2023 17:26:49 +0100
Subject: [PATCH] HADOOP-18778. Fixes failing tests when CSE is enabled. (#5763)

Contributed By: Ahmar Suhail
---
 .../apache/hadoop/fs/s3a/S3AFileSystem.java    |  4 ++--
 .../hadoop/fs/s3a/auth/RolePolicies.java       |  2 +-
 .../fs/s3a/ITestS3APrefetchingCacheFiles.java  |  1 +
 .../s3a/ITestS3APrefetchingInputStream.java    |  3 +++
 .../hadoop/fs/s3a/ITestS3ARequesterPays.java   |  2 +-
 .../hadoop/fs/s3a/auth/ITestAssumeRole.java    | 18 ++++++------------
 .../ITestAssumedRoleCommitOperations.java      |  3 +--
 .../s3a/auth/ITestRestrictedReadAccess.java    |  3 +--
 .../s3a/impl/ITestPartialRenamesDeletes.java   | 19 ++++++++++++-------
 .../fs/s3a/s3guard/ITestS3GuardTool.java       | 10 ++++++----
 10 files changed, 34 insertions(+), 31 deletions(-)

diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 999186f8cd..2c828a5ef3 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -213,7 +213,7 @@
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
 import static org.apache.hadoop.fs.s3a.Statistic.*;
 import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.INITIALIZE_SPAN;
-import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALLOW_SSE_KMS_RW;
+import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALLOW_KMS_RW;
 import static org.apache.hadoop.fs.s3a.auth.RolePolicies.allowS3Operations;
 import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.TokenIssuingPolicy.NoTokensAvailable;
 import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.hasDelegationTokenBinding;
@@ -4222,7 +4222,7 @@ public List<RoleModel.Statement> listAWSPolicyRules(
     // no attempt is made to qualify KMS access; there's no
     // way to predict read keys, and not worried about granting
     // too much encryption access.
-    statements.add(STATEMENT_ALLOW_SSE_KMS_RW);
+    statements.add(STATEMENT_ALLOW_KMS_RW);
 
     return statements;
   }
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
index 940742c11e..b2da2c8009 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/RolePolicies.java
@@ -80,7 +80,7 @@ private RolePolicies() {
    * Statement to allow KMS R/W access, so full use of
    * SSE-KMS.
    */
-  public static final Statement STATEMENT_ALLOW_SSE_KMS_RW =
+  public static final Statement STATEMENT_ALLOW_KMS_RW =
       statement(true, KMS_ALL_KEYS, KMS_ALL_OPERATIONS);
 
   /**
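The rename drops the "SSE" qualifier because blanket KMS read/write access is needed for client-side encryption (CSE-KMS) as well as SSE-KMS: with CSE the client itself calls kms:GenerateDataKey on writes and kms:Decrypt on reads. As a minimal sketch of how the renamed constant is consumed, mirroring the bindRolePolicyStatements(...) calls changed throughout this patch (the surrounding test-setup names are taken from the hunks below):

```java
// Bind an assumed-role policy combining blanket KMS R/W with S3 access,
// so the same role works whether the bucket uses SSE-KMS or CSE-KMS.
Configuration conf = createAssumedRoleConfig();
bindRolePolicyStatements(conf,
    STATEMENT_ALLOW_KMS_RW,   // statement(true, KMS_ALL_KEYS, KMS_ALL_OPERATIONS)
    statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS));
roleFS = (S3AFileSystem) methodPath().getFileSystem(conf);
```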
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java
index 6ad8ef58a7..e678df700b 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java
@@ -105,6 +105,7 @@ public synchronized void teardown() throws Exception {
   @Test
   public void testCacheFileExistence() throws Throwable {
     describe("Verify that FS cache files exist on local FS");
+    skipIfClientSideEncryption();
 
     try (FSDataInputStream in = fs.open(testFile)) {
       byte[] buffer = new byte[prefetchBlockSize];
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java
index a7b59bb5d4..4998cbc946 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java
@@ -106,6 +106,7 @@ private static int calculateNumBlocks(long largeFileSize, int blockSize) {
   @Test
   public void testReadLargeFileFully() throws Throwable {
     describe("read a large file fully, uses S3ACachingInputStream");
+    skipIfClientSideEncryption();
     IOStatistics ioStats;
     createLargeFile();
 
@@ -139,6 +140,7 @@ public void testReadLargeFileFully() throws Throwable {
   public void testReadLargeFileFullyLazySeek() throws Throwable {
     describe("read a large file using readFully(position,buffer,offset,length),"
         + " uses S3ACachingInputStream");
+    skipIfClientSideEncryption();
     IOStatistics ioStats;
     createLargeFile();
 
@@ -170,6 +172,7 @@ public void testReadLargeFileFullyLazySeek() throws Throwable {
   @Test
   public void testRandomReadLargeFile() throws Throwable {
     describe("random read on a large file, uses S3ACachingInputStream");
+    skipIfClientSideEncryption();
     IOStatistics ioStats;
     createLargeFile();
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARequesterPays.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARequesterPays.java
index d3925d35a9..c58f13efbf 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARequesterPays.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ARequesterPays.java
@@ -59,7 +59,7 @@ protected Configuration createConfiguration() {
   @Test
   public void testRequesterPaysOptionSuccess() throws Throwable {
     describe("Test requester pays enabled case by reading last then first byte");
-
+    skipIfClientSideEncryption();
     Configuration conf = this.createConfiguration();
     conf.setBoolean(ALLOW_REQUESTER_PAYS, true);
     // Enable bucket exists check, the first failure point people may encounter
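These tests are skipped rather than fixed because their assertions depend on the raw object on S3 matching the plaintext: the prefetching stream's block and cache-file accounting assumes the object length equals the file length, which no longer holds once CSE adds encryption padding. A hedged sketch of the guard's assumed shape follows; the real helper is provided by the S3A test base class, and isCSEEnabled() on S3AFileSystem is an assumption here:

```java
// Skip the current test case when the target filesystem has
// client-side encryption enabled (JUnit's Assume marks it skipped,
// not failed).
protected void skipIfClientSideEncryption() {
  org.junit.Assume.assumeTrue("Skipping test as client-side encryption is enabled",
      !getFileSystem().isCSEEnabled());
}
```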
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
index 9fb09b4ced..658c81cd8f 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumeRole.java
@@ -426,8 +426,7 @@ public void testAssumeRolePoliciesOverrideRolePerms() throws Throwable {
     bindRolePolicy(conf,
         policy(
             statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT_TORRENT),
-            ALLOW_S3_GET_BUCKET_LOCATION,
-            STATEMENT_ALLOW_SSE_KMS_RW));
+            ALLOW_S3_GET_BUCKET_LOCATION, STATEMENT_ALLOW_KMS_RW));
     Path path = path("testAssumeRoleStillIncludesRolePerms");
     roleFS = (S3AFileSystem) path.getFileSystem(conf);
     assertTouchForbidden(roleFS, path);
@@ -447,8 +446,7 @@ public void testReadOnlyOperations() throws Throwable {
     bindRolePolicy(conf,
         policy(
             statement(false, S3_ALL_BUCKETS, S3_PATH_WRITE_OPERATIONS),
-            STATEMENT_ALL_S3,
-            STATEMENT_ALLOW_SSE_KMS_READ));
+            STATEMENT_ALL_S3, STATEMENT_ALLOW_KMS_RW));
     Path path = methodPath();
     roleFS = (S3AFileSystem) path.getFileSystem(conf);
     // list the root path, expect happy
@@ -495,8 +493,7 @@ public void testRestrictedWriteSubdir() throws Throwable {
     Configuration conf = createAssumedRoleConfig();
 
     bindRolePolicyStatements(conf,
-        STATEMENT_ALL_BUCKET_READ_ACCESS,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+        STATEMENT_ALL_BUCKET_READ_ACCESS, STATEMENT_ALLOW_KMS_RW,
         new Statement(Effects.Allow)
             .addActions(S3_ALL_OPERATIONS)
             .addResources(directory(restrictedDir)));
@@ -563,8 +560,7 @@ public void testRestrictedCommitActions() throws Throwable {
     fs.delete(basePath, true);
     fs.mkdirs(readOnlyDir);
 
-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS,
         new Statement(Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -714,8 +710,7 @@ public void executePartialDelete(final Configuration conf,
     S3AFileSystem fs = getFileSystem();
     fs.delete(destDir, true);
 
-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
         new Statement(Effects.Deny)
             .addActions(S3_PATH_WRITE_OPERATIONS)
@@ -746,8 +741,7 @@ public void testBucketLocationForbidden() throws Throwable {
     describe("Restrict role to read only");
     Configuration conf = createAssumedRoleConfig();
 
-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
         statement(false, S3_ALL_BUCKETS, S3_GET_BUCKET_LOCATION));
     Path path = methodPath();
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
index dabc0abc2a..2dc8497d61 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestAssumedRoleCommitOperations.java
@@ -61,8 +61,7 @@ public void setup() throws Exception {
     restrictedDir = super.path("restricted");
     Configuration conf = newAssumedRoleConfig(getConfiguration(),
         getAssumedRoleARN());
-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_BUCKET_READ_OPERATIONS),
         new RoleModel.Statement(RoleModel.Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
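Note the second hunk: testReadOnlyOperations previously bound STATEMENT_ALLOW_SSE_KMS_READ, but under CSE even a role whose S3 access is read-only needs full KMS access, so the test now binds the R/W statement. The hunks all follow the same layering pattern, shown here as a fragment assembled from the diff above (all names are the ones the test already uses):

```java
// Layer a deny statement over broad allows to produce a restricted role:
// S3 writes are denied, other S3 calls allowed, and KMS stays R/W so that
// CSE can still decrypt on read and generate data keys on write.
bindRolePolicy(conf,
    policy(
        statement(false, S3_ALL_BUCKETS, S3_PATH_WRITE_OPERATIONS), // deny writes
        STATEMENT_ALL_S3,           // allow the remaining S3 operations
        STATEMENT_ALLOW_KMS_RW));   // KMS R/W for encryption round-trips
```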
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java
index a16e1b5e49..7151c38ad3 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/ITestRestrictedReadAccess.java
@@ -260,8 +260,7 @@ public void initNoReadAccess() throws Throwable {
     // it still has write access, which can be explored in the final
     // step to delete files and directories.
     roleConfig = createAssumedRoleConfig();
-    bindRolePolicyStatements(roleConfig,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(roleConfig, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
         new Statement(Effects.Deny)
             .addActions(S3_ALL_GET)
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java
index 378f4a7043..24f5ddf6d8 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java
@@ -56,6 +56,7 @@
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.Effects;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.Statement;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.directory;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.resource;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement;
 import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
 import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.bindRolePolicyStatements;
@@ -144,6 +145,11 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase {
    */
   private Path writableDir;
 
+  /**
+   * Instruction file created when using CSE, required to be added to policies.
+   */
+  private Path writableDirInstructionFile;
+
   /**
    * A directory to which restricted roles have only read access.
    */
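The new field exists because the encryption client can store per-object encryption metadata in a sidecar "instruction file" object, named by appending ".instruction" to the key. A role policy scoped to directory(writableDir) does not cover that sibling object, so it must be granted explicitly. A sketch combining the setup() and policy changes that follow; resource(path, isDirectory, addWildcard) is the RoleModel helper this patch imports, and the meaning given for its boolean arguments here is an assumption:

```java
// Grant path-scoped R/W on the writable directory plus its CSE
// instruction-file sibling, on top of bucket-wide read access.
Path writableDirInstructionFile = new Path(basePath, "writableDir.instruction");
bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
    STATEMENT_ALL_BUCKET_READ_ACCESS,
    new Statement(Effects.Allow)
        .addActions(S3_PATH_RW_OPERATIONS)
        .addResources(directory(writableDir))
        .addResources(resource(writableDirInstructionFile, false, false)));
```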
@@ -216,6 +222,7 @@ public void setup() throws Exception {
     basePath = uniquePath();
     readOnlyDir = new Path(basePath, "readonlyDir");
     writableDir = new Path(basePath, "writableDir");
+    writableDirInstructionFile = new Path(basePath, "writableDir.instruction");
     readOnlyChild = new Path(readOnlyDir, "child");
     noReadDir = new Path(basePath, "noReadDir");
     // the full FS
@@ -225,8 +232,7 @@ public void setup() throws Exception {
 
     // create the baseline assumed role
     assumedRoleConfig = createAssumedRoleConfig();
-    bindRolePolicyStatements(assumedRoleConfig,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(assumedRoleConfig, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS,  // root:     r-x
         new Statement(Effects.Allow)       // dest:     rwx
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -365,13 +371,13 @@ public void testMultiDeleteOptionPropagated() throws Throwable {
   public void testRenameParentPathNotWriteable() throws Throwable {
     describe("rename with parent paths not writeable; multi=%s", multiDelete);
     final Configuration conf = createAssumedRoleConfig();
-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS,
         new Statement(Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
             .addResources(directory(readOnlyDir))
-            .addResources(directory(writableDir)));
+            .addResources(directory(writableDir))
+            .addResources(resource(writableDirInstructionFile, false, false)));
     roleFS = (S3AFileSystem) readOnlyDir.getFileSystem(conf);
 
     S3AFileSystem fs = getFileSystem();
@@ -733,8 +739,7 @@ public void testRenamePermissionRequirements() throws Throwable {
     // s3:DeleteObjectVersion permission, and attempt rename
     // and then delete.
     Configuration roleConfig = createAssumedRoleConfig();
-    bindRolePolicyStatements(roleConfig,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(roleConfig, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS,  // root:     r-x
         new Statement(Effects.Allow)       // dest:     rwx
             .addActions(S3_PATH_RW_OPERATIONS)
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardTool.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardTool.java
index 23b14fd379..f7b9ad4f24 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardTool.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestS3GuardTool.java
@@ -70,6 +70,7 @@ public void testLandsatBucketRequireGuarded() throws Throwable {
 
   @Test
   public void testLandsatBucketRequireUnencrypted() throws Throwable {
+    skipIfClientSideEncryption();
     run(BucketInfo.NAME,
         "-" + BucketInfo.ENCRYPTION_FLAG, "none",
         getLandsatCSVFile(getConfiguration()));
@@ -178,8 +179,9 @@ public void testUploadListByAge() throws Throwable {
     // least a second old
     describe("Sleeping 1 second then confirming upload still there");
     Thread.sleep(1000);
-    LambdaTestUtils.eventually(5000, 1000,
-        () -> { assertNumUploadsAge(path, 1, 1); });
+    LambdaTestUtils.eventually(5000, 1000, () -> {
+      assertNumUploadsAge(path, 1, 1);
+    });
 
     // 7. Assert deletion works when age filter matches
     describe("Doing aged deletion");
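The eventually() change above is purely a reformat: the lambda body moves onto its own lines. For readers unfamiliar with the helper, LambdaTestUtils.eventually(timeout, interval, closure) repeatedly invokes the closure, sleeping between attempts, until it stops throwing or the timeout expires, which absorbs lag in the multipart-upload listing. The values below are the ones the test uses:

```java
// Retry the assertion for up to 5s, probing every 1s, until exactly one
// multipart upload at least 1 second old is listed under the path.
LambdaTestUtils.eventually(5000, 1000, () -> {
  assertNumUploadsAge(path, 1, 1);
});
```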
@@ -231,8 +233,8 @@ private void assertNumDeleted(S3AFileSystem fs, Path path, int numDeleted)
    * search all parts
    * @throws Exception on failure
    */
-  private void uploadCommandAssertCount(S3AFileSystem fs, String options[],
-      Path path, int numUploads, int ageSeconds)
+  private void uploadCommandAssertCount(S3AFileSystem fs, String[] options, Path path,
+      int numUploads, int ageSeconds)
       throws Exception {
     List<String> allOptions = new ArrayList<>();
     List<String> output = new ArrayList<>();
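This last hunk is a style cleanup: the C-style array declaration String options[] becomes the idiomatic String[] options, with the parameter wrapping normalized. For context, all of the tests touched by this patch change behavior when the test bucket enables client-side encryption; a hedged sketch of such a configuration, using the fs.s3a encryption option names with placeholder values:

```java
// Enable CSE-KMS for the S3A test run; the key ARN is a placeholder and
// would normally come from auth-keys.xml or the bucket's test settings.
Configuration conf = new Configuration();
conf.set("fs.s3a.encryption.algorithm", "CSE-KMS");
conf.set("fs.s3a.encryption.key", "arn:aws:kms:eu-west-1:111122223333:key/placeholder");
```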