HADOOP-18778. Fixes failing tests when CSE is enabled. (#5763)

Contributed By: Ahmar Suhail <ahmarsu@amazon.co.uk>
Author: ahmarsuhail, 2023-07-26 17:26:49 +01:00, committed by GitHub
parent 068d8c7e4d
commit 24f5f708df
10 changed files with 34 additions and 31 deletions
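
This change follows one recurring pattern: tests that cannot work against a bucket configured for S3 client-side encryption (CSE) now call skipIfClientSideEncryption() before doing any S3 I/O, and the KMS role-policy statement is renamed so it reads as covering CSE-KMS as well as SSE-KMS. Below is a minimal stand-alone sketch of such a guard; the "CSE-KMS" value matches the fs.s3a.encryption.algorithm setting for client-side encryption, but the helper body and the environment variable are illustrative assumptions, not Hadoop's implementation.

// Sketch of the JUnit "assume"-style guard used throughout this commit.
// Hadoop's real skipIfClientSideEncryption() lives in the S3A test base classes.
import static org.junit.Assume.assumeTrue;

public class CseTestGuardSketch {

  /** Skip the calling test when the bucket under test uses CSE-KMS. */
  static void skipIfClientSideEncryption(String encryptionAlgorithm) {
    assumeTrue("Skipping test: client-side encryption is enabled",
        !"CSE-KMS".equals(encryptionAlgorithm));
  }

  public static void main(String[] args) {
    // Hypothetical environment variable, used only to make the sketch runnable.
    skipIfClientSideEncryption(System.getenv("FS_S3A_ENCRYPTION_ALGORITHM"));
    System.out.println("test body would run here");
  }
}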

View File

@@ -213,7 +213,7 @@
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
 import static org.apache.hadoop.fs.s3a.Statistic.*;
 import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.INITIALIZE_SPAN;
-import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALLOW_SSE_KMS_RW;
+import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALLOW_KMS_RW;
 import static org.apache.hadoop.fs.s3a.auth.RolePolicies.allowS3Operations;
 import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.TokenIssuingPolicy.NoTokensAvailable;
 import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.hasDelegationTokenBinding;
@@ -4222,7 +4222,7 @@ public List<RoleModel.Statement> listAWSPolicyRules(
     // no attempt is made to qualify KMS access; there's no
     // way to predict read keys, and not worried about granting
     // too much encryption access.
-    statements.add(STATEMENT_ALLOW_SSE_KMS_RW);
+    statements.add(STATEMENT_ALLOW_KMS_RW);
     return statements;
   }

View File

@@ -80,7 +80,7 @@ private RolePolicies() {
    * Statement to allow KMS R/W access access, so full use of
    * SSE-KMS.
    */
-  public static final Statement STATEMENT_ALLOW_SSE_KMS_RW =
+  public static final Statement STATEMENT_ALLOW_KMS_RW =
       statement(true, KMS_ALL_KEYS, KMS_ALL_OPERATIONS);
   /**
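
The statement itself is unchanged by the rename; dropping "SSE" from the name reflects that KMS read/write access is needed for any KMS-backed encryption, client-side (CSE-KMS) as well as server-side (SSE-KMS). As a rough, stand-alone illustration of what an "allow KMS R/W on all keys" statement amounts to (the type and the action list below are assumptions for the sketch, not Hadoop's RoleModel or the expansion of KMS_ALL_OPERATIONS):

import java.util.List;

public final class KmsStatementSketch {

  // Hypothetical stand-in for a policy statement; not Hadoop's RoleModel.Statement.
  record KmsStatement(String effect, List<String> actions, String resources) { }

  static final String ALL_KMS_KEYS = "*"; // every key the role can reach
  static final List<String> KMS_RW_ACTIONS = List.of(
      "kms:Decrypt", "kms:GenerateDataKey", "kms:GenerateDataKeyWithoutPlaintext");

  public static void main(String[] args) {
    System.out.println(new KmsStatement("Allow", KMS_RW_ACTIONS, ALL_KMS_KEYS));
  }
}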

View File

@@ -105,6 +105,7 @@ public synchronized void teardown() throws Exception {
   @Test
   public void testCacheFileExistence() throws Throwable {
     describe("Verify that FS cache files exist on local FS");
+    skipIfClientSideEncryption();
     try (FSDataInputStream in = fs.open(testFile)) {
       byte[] buffer = new byte[prefetchBlockSize];

View File

@@ -106,6 +106,7 @@ private static int calculateNumBlocks(long largeFileSize, int blockSize) {
   @Test
   public void testReadLargeFileFully() throws Throwable {
     describe("read a large file fully, uses S3ACachingInputStream");
+    skipIfClientSideEncryption();
     IOStatistics ioStats;
     createLargeFile();
@@ -139,6 +140,7 @@ public void testReadLargeFileFully() throws Throwable {
   public void testReadLargeFileFullyLazySeek() throws Throwable {
     describe("read a large file using readFully(position,buffer,offset,length),"
         + " uses S3ACachingInputStream");
+    skipIfClientSideEncryption();
     IOStatistics ioStats;
     createLargeFile();
@@ -170,6 +172,7 @@ public void testReadLargeFileFullyLazySeek() throws Throwable {
   @Test
   public void testRandomReadLargeFile() throws Throwable {
     describe("random read on a large file, uses S3ACachingInputStream");
+    skipIfClientSideEncryption();
     IOStatistics ioStats;
     createLargeFile();

View File

@@ -59,7 +59,7 @@ protected Configuration createConfiguration() {
   @Test
   public void testRequesterPaysOptionSuccess() throws Throwable {
     describe("Test requester pays enabled case by reading last then first byte");
+    skipIfClientSideEncryption();
     Configuration conf = this.createConfiguration();
     conf.setBoolean(ALLOW_REQUESTER_PAYS, true);
     // Enable bucket exists check, the first failure point people may encounter

View File

@ -426,8 +426,7 @@ public void testAssumeRolePoliciesOverrideRolePerms() throws Throwable {
bindRolePolicy(conf, bindRolePolicy(conf,
policy( policy(
statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT_TORRENT), statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT_TORRENT),
ALLOW_S3_GET_BUCKET_LOCATION, ALLOW_S3_GET_BUCKET_LOCATION, STATEMENT_ALLOW_KMS_RW));
STATEMENT_ALLOW_SSE_KMS_RW));
Path path = path("testAssumeRoleStillIncludesRolePerms"); Path path = path("testAssumeRoleStillIncludesRolePerms");
roleFS = (S3AFileSystem) path.getFileSystem(conf); roleFS = (S3AFileSystem) path.getFileSystem(conf);
assertTouchForbidden(roleFS, path); assertTouchForbidden(roleFS, path);
@ -447,8 +446,7 @@ public void testReadOnlyOperations() throws Throwable {
bindRolePolicy(conf, bindRolePolicy(conf,
policy( policy(
statement(false, S3_ALL_BUCKETS, S3_PATH_WRITE_OPERATIONS), statement(false, S3_ALL_BUCKETS, S3_PATH_WRITE_OPERATIONS),
STATEMENT_ALL_S3, STATEMENT_ALL_S3, STATEMENT_ALLOW_KMS_RW));
STATEMENT_ALLOW_SSE_KMS_READ));
Path path = methodPath(); Path path = methodPath();
roleFS = (S3AFileSystem) path.getFileSystem(conf); roleFS = (S3AFileSystem) path.getFileSystem(conf);
// list the root path, expect happy // list the root path, expect happy
@ -495,8 +493,7 @@ public void testRestrictedWriteSubdir() throws Throwable {
Configuration conf = createAssumedRoleConfig(); Configuration conf = createAssumedRoleConfig();
bindRolePolicyStatements(conf, bindRolePolicyStatements(conf,
STATEMENT_ALL_BUCKET_READ_ACCESS, STATEMENT_ALL_BUCKET_READ_ACCESS, STATEMENT_ALLOW_KMS_RW,
STATEMENT_ALLOW_SSE_KMS_RW,
new Statement(Effects.Allow) new Statement(Effects.Allow)
.addActions(S3_ALL_OPERATIONS) .addActions(S3_ALL_OPERATIONS)
.addResources(directory(restrictedDir))); .addResources(directory(restrictedDir)));
@ -563,8 +560,7 @@ public void testRestrictedCommitActions() throws Throwable {
fs.delete(basePath, true); fs.delete(basePath, true);
fs.mkdirs(readOnlyDir); fs.mkdirs(readOnlyDir);
bindRolePolicyStatements(conf, bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
STATEMENT_ALLOW_SSE_KMS_RW,
STATEMENT_ALL_BUCKET_READ_ACCESS, STATEMENT_ALL_BUCKET_READ_ACCESS,
new Statement(Effects.Allow) new Statement(Effects.Allow)
.addActions(S3_PATH_RW_OPERATIONS) .addActions(S3_PATH_RW_OPERATIONS)
@ -714,8 +710,7 @@ public void executePartialDelete(final Configuration conf,
S3AFileSystem fs = getFileSystem(); S3AFileSystem fs = getFileSystem();
fs.delete(destDir, true); fs.delete(destDir, true);
bindRolePolicyStatements(conf, bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
STATEMENT_ALLOW_SSE_KMS_RW,
statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS), statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
new Statement(Effects.Deny) new Statement(Effects.Deny)
.addActions(S3_PATH_WRITE_OPERATIONS) .addActions(S3_PATH_WRITE_OPERATIONS)
@ -746,8 +741,7 @@ public void testBucketLocationForbidden() throws Throwable {
describe("Restrict role to read only"); describe("Restrict role to read only");
Configuration conf = createAssumedRoleConfig(); Configuration conf = createAssumedRoleConfig();
bindRolePolicyStatements(conf, bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
STATEMENT_ALLOW_SSE_KMS_RW,
statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS), statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
statement(false, S3_ALL_BUCKETS, S3_GET_BUCKET_LOCATION)); statement(false, S3_ALL_BUCKETS, S3_GET_BUCKET_LOCATION));
Path path = methodPath(); Path path = methodPath();

View File

@@ -61,8 +61,7 @@ public void setup() throws Exception {
     restrictedDir = super.path("restricted");
     Configuration conf = newAssumedRoleConfig(getConfiguration(),
         getAssumedRoleARN());
-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_BUCKET_READ_OPERATIONS),
         new RoleModel.Statement(RoleModel.Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)

View File

@@ -260,8 +260,7 @@ public void initNoReadAccess() throws Throwable {
     // it still has write access, which can be explored in the final
     // step to delete files and directories.
     roleConfig = createAssumedRoleConfig();
-    bindRolePolicyStatements(roleConfig,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(roleConfig, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
         new Statement(Effects.Deny)
             .addActions(S3_ALL_GET)

View File

@@ -56,6 +56,7 @@
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.Effects;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.Statement;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.directory;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.resource;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement;
 import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
 import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.bindRolePolicyStatements;
@@ -144,6 +145,11 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase {
    */
   private Path writableDir;
+
+  /**
+   * Instruction file created when using CSE, required to be added to policies.
+   */
+  private Path writableDirInstructionFile;
   /**
    * A directory to which restricted roles have only read access.
    */
@@ -216,6 +222,7 @@ public void setup() throws Exception {
     basePath = uniquePath();
     readOnlyDir = new Path(basePath, "readonlyDir");
     writableDir = new Path(basePath, "writableDir");
+    writableDirInstructionFile = new Path(basePath, "writableDir.instruction");
     readOnlyChild = new Path(readOnlyDir, "child");
     noReadDir = new Path(basePath, "noReadDir");
     // the full FS
@@ -225,8 +232,7 @@ public void setup() throws Exception {
     // create the baseline assumed role
     assumedRoleConfig = createAssumedRoleConfig();
-    bindRolePolicyStatements(assumedRoleConfig,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(assumedRoleConfig, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS, // root: r-x
         new Statement(Effects.Allow) // dest: rwx
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -365,13 +371,13 @@ public void testMultiDeleteOptionPropagated() throws Throwable {
   public void testRenameParentPathNotWriteable() throws Throwable {
     describe("rename with parent paths not writeable; multi=%s", multiDelete);
     final Configuration conf = createAssumedRoleConfig();
-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS,
         new Statement(Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
             .addResources(directory(readOnlyDir))
-            .addResources(directory(writableDir)));
+            .addResources(directory(writableDir))
+            .addResources(resource(writableDirInstructionFile, false, false)));
     roleFS = (S3AFileSystem) readOnlyDir.getFileSystem(conf);
     S3AFileSystem fs = getFileSystem();
@@ -733,8 +739,7 @@ public void testRenamePermissionRequirements() throws Throwable {
     // s3:DeleteObjectVersion permission, and attempt rename
     // and then delete.
     Configuration roleConfig = createAssumedRoleConfig();
-    bindRolePolicyStatements(roleConfig,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(roleConfig, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS, // root: r-x
         new Statement(Effects.Allow) // dest: rwx
             .addActions(S3_PATH_RW_OPERATIONS)
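
The new writableDirInstructionFile path is needed because, when client-side encryption keeps its metadata in instruction files, a sidecar object named "<key>.instruction" is written alongside the data object, and the restricted role policy must grant access to it too. A small sketch of deriving that sidecar path from a data path, mirroring the "writableDir" / "writableDir.instruction" pairing in setup() above (the ".instruction" suffix comes from the diff; the helper itself is hypothetical):

import org.apache.hadoop.fs.Path;

public final class InstructionFileSketch {

  /** Hypothetical helper: the sidecar sits beside the object, with ".instruction" appended. */
  static Path instructionFileFor(Path object) {
    return new Path(object.getParent(), object.getName() + ".instruction");
  }

  public static void main(String[] args) {
    Path writableDir = new Path("s3a://example-bucket/test/writableDir");
    System.out.println(instructionFileFor(writableDir)); // .../writableDir.instruction
  }
}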

View File

@@ -70,6 +70,7 @@ public void testLandsatBucketRequireGuarded() throws Throwable {
   @Test
   public void testLandsatBucketRequireUnencrypted() throws Throwable {
+    skipIfClientSideEncryption();
     run(BucketInfo.NAME,
         "-" + BucketInfo.ENCRYPTION_FLAG, "none",
         getLandsatCSVFile(getConfiguration()));
@@ -178,8 +179,9 @@ public void testUploadListByAge() throws Throwable {
     // least a second old
     describe("Sleeping 1 second then confirming upload still there");
     Thread.sleep(1000);
-    LambdaTestUtils.eventually(5000, 1000,
-        () -> { assertNumUploadsAge(path, 1, 1); });
+    LambdaTestUtils.eventually(5000, 1000, () -> {
+      assertNumUploadsAge(path, 1, 1);
+    });
     // 7. Assert deletion works when age filter matches
     describe("Doing aged deletion");
@@ -231,8 +233,8 @@ private void assertNumDeleted(S3AFileSystem fs, Path path, int numDeleted)
    * search all parts
    * @throws Exception on failure
    */
-  private void uploadCommandAssertCount(S3AFileSystem fs, String options[],
-      Path path, int numUploads, int ageSeconds)
+  private void uploadCommandAssertCount(S3AFileSystem fs, String[] options, Path path,
+      int numUploads, int ageSeconds)
       throws Exception {
     List<String> allOptions = new ArrayList<>();
     List<String> output = new ArrayList<>();