HADOOP-18778. Fixes failing tests when CSE is enabled. (#5763)
Contributed By: Ahmar Suhail <ahmarsu@amazon.co.uk>
commit 24f5f708df
parent 068d8c7e4d
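The recurring fix below is a one-line guard at the top of each incompatible test. As a minimal illustrative sketch (not part of this commit; the class, the configuration flag, and the helper body are assumptions — only the helper's name comes from the hunks below), the pattern looks like this in plain JUnit 4:

import org.junit.Assume;
import org.junit.Test;

public class ExampleCseGuardTest {

  // Stand-in for the S3A test helper of the same name; the real one
  // lives in the S3A test hierarchy and inspects the FS configuration.
  private void skipIfClientSideEncryption() {
    boolean cseEnabled = Boolean.getBoolean("test.cse.enabled"); // hypothetical flag
    Assume.assumeFalse("Skipping test: CSE is enabled", cseEnabled);
  }

  @Test
  public void testRawObjectBytes() throws Throwable {
    // JUnit reports the test as skipped, not failed, when the assumption fails
    skipIfClientSideEncryption();
    // ... assertions that only hold when objects are stored unencrypted ...
  }
}

These tests assert on raw object bytes, sizes, or encryption settings, none of which hold once CSE transforms the stored object, so skipping them under CSE is the intended behaviour rather than a workaround.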
@@ -213,7 +213,7 @@
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
 import static org.apache.hadoop.fs.s3a.Statistic.*;
 import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.INITIALIZE_SPAN;
-import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALLOW_SSE_KMS_RW;
+import static org.apache.hadoop.fs.s3a.auth.RolePolicies.STATEMENT_ALLOW_KMS_RW;
 import static org.apache.hadoop.fs.s3a.auth.RolePolicies.allowS3Operations;
 import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.TokenIssuingPolicy.NoTokensAvailable;
 import static org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens.hasDelegationTokenBinding;
@@ -4222,7 +4222,7 @@ public List<RoleModel.Statement> listAWSPolicyRules(
     // no attempt is made to qualify KMS access; there's no
     // way to predict read keys, and not worried about granting
     // too much encryption access.
-    statements.add(STATEMENT_ALLOW_SSE_KMS_RW);
+    statements.add(STATEMENT_ALLOW_KMS_RW);

     return statements;
   }
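The comment in this hunk records the design decision behind the rename: S3 permissions can be scoped per bucket, but the KMS keys a reader will need cannot be predicted, so the policy generator grants KMS broadly. A hypothetical fragment of the method tail (only the statements.add(...) call and the return are confirmed by the hunk; the surrounding list handling is assumed shape):

// assumed shape of the end of listAWSPolicyRules():
List<RoleModel.Statement> statements = new ArrayList<>();
// ... bucket-scoped S3 statements are appended first (not shown in the hunk) ...
// KMS access is deliberately left unscoped: read keys cannot be predicted,
// and over-granting encryption access is the accepted trade-off.
statements.add(STATEMENT_ALLOW_KMS_RW);
return statements;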
@@ -80,7 +80,7 @@ private RolePolicies() {
    * Statement to allow KMS R/W access access, so full use of
    * SSE-KMS.
    */
-  public static final Statement STATEMENT_ALLOW_SSE_KMS_RW =
+  public static final Statement STATEMENT_ALLOW_KMS_RW =
       statement(true, KMS_ALL_KEYS, KMS_ALL_OPERATIONS);

   /**
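This rename is the heart of the patch: the grant itself is unchanged, but it now also backs client-side encryption, so the SSE-specific name was misleading. Every assumed-role test updated below binds the renamed constant the same way; a composite sketch of that call shape, assembled from the hunks that follow rather than copied from any one of them:

Configuration conf = createAssumedRoleConfig();
bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
    STATEMENT_ALL_BUCKET_READ_ACCESS,   // read access across the bucket
    new Statement(Effects.Allow)        // write access under one directory
        .addActions(S3_PATH_RW_OPERATIONS)
        .addResources(directory(writableDir)));
roleFS = (S3AFileSystem) writableDir.getFileSystem(conf);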
@@ -105,6 +105,7 @@ public synchronized void teardown() throws Exception {
   @Test
   public void testCacheFileExistence() throws Throwable {
     describe("Verify that FS cache files exist on local FS");
+    skipIfClientSideEncryption();

     try (FSDataInputStream in = fs.open(testFile)) {
       byte[] buffer = new byte[prefetchBlockSize];
@@ -106,6 +106,7 @@ private static int calculateNumBlocks(long largeFileSize, int blockSize) {
   @Test
   public void testReadLargeFileFully() throws Throwable {
     describe("read a large file fully, uses S3ACachingInputStream");
+    skipIfClientSideEncryption();
     IOStatistics ioStats;
     createLargeFile();

@@ -139,6 +140,7 @@ public void testReadLargeFileFully() throws Throwable {
   public void testReadLargeFileFullyLazySeek() throws Throwable {
     describe("read a large file using readFully(position,buffer,offset,length),"
         + " uses S3ACachingInputStream");
+    skipIfClientSideEncryption();
     IOStatistics ioStats;
     createLargeFile();

@@ -170,6 +172,7 @@ public void testReadLargeFileFullyLazySeek() throws Throwable {
   @Test
   public void testRandomReadLargeFile() throws Throwable {
     describe("random read on a large file, uses S3ACachingInputStream");
+    skipIfClientSideEncryption();
     IOStatistics ioStats;
     createLargeFile();

@@ -59,7 +59,7 @@ protected Configuration createConfiguration() {
   @Test
   public void testRequesterPaysOptionSuccess() throws Throwable {
     describe("Test requester pays enabled case by reading last then first byte");
-
+    skipIfClientSideEncryption();
     Configuration conf = this.createConfiguration();
     conf.setBoolean(ALLOW_REQUESTER_PAYS, true);
     // Enable bucket exists check, the first failure point people may encounter
@@ -426,8 +426,7 @@ public void testAssumeRolePoliciesOverrideRolePerms() throws Throwable {
     bindRolePolicy(conf,
         policy(
             statement(false, S3_ALL_BUCKETS, S3_GET_OBJECT_TORRENT),
-            ALLOW_S3_GET_BUCKET_LOCATION,
-            STATEMENT_ALLOW_SSE_KMS_RW));
+            ALLOW_S3_GET_BUCKET_LOCATION, STATEMENT_ALLOW_KMS_RW));
     Path path = path("testAssumeRoleStillIncludesRolePerms");
     roleFS = (S3AFileSystem) path.getFileSystem(conf);
     assertTouchForbidden(roleFS, path);
@@ -447,8 +446,7 @@ public void testReadOnlyOperations() throws Throwable {
     bindRolePolicy(conf,
         policy(
             statement(false, S3_ALL_BUCKETS, S3_PATH_WRITE_OPERATIONS),
-            STATEMENT_ALL_S3,
-            STATEMENT_ALLOW_SSE_KMS_READ));
+            STATEMENT_ALL_S3, STATEMENT_ALLOW_KMS_RW));
     Path path = methodPath();
     roleFS = (S3AFileSystem) path.getFileSystem(conf);
     // list the root path, expect happy
@@ -495,8 +493,7 @@ public void testRestrictedWriteSubdir() throws Throwable {
     Configuration conf = createAssumedRoleConfig();

     bindRolePolicyStatements(conf,
-        STATEMENT_ALL_BUCKET_READ_ACCESS,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+        STATEMENT_ALL_BUCKET_READ_ACCESS, STATEMENT_ALLOW_KMS_RW,
         new Statement(Effects.Allow)
             .addActions(S3_ALL_OPERATIONS)
             .addResources(directory(restrictedDir)));
@@ -563,8 +560,7 @@ public void testRestrictedCommitActions() throws Throwable {
     fs.delete(basePath, true);
     fs.mkdirs(readOnlyDir);

-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS,
         new Statement(Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -714,8 +710,7 @@ public void executePartialDelete(final Configuration conf,
     S3AFileSystem fs = getFileSystem();
     fs.delete(destDir, true);

-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
         new Statement(Effects.Deny)
             .addActions(S3_PATH_WRITE_OPERATIONS)
@@ -746,8 +741,7 @@ public void testBucketLocationForbidden() throws Throwable {
     describe("Restrict role to read only");
     Configuration conf = createAssumedRoleConfig();

-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
         statement(false, S3_ALL_BUCKETS, S3_GET_BUCKET_LOCATION));
     Path path = methodPath();
@@ -61,8 +61,7 @@ public void setup() throws Exception {
     restrictedDir = super.path("restricted");
     Configuration conf = newAssumedRoleConfig(getConfiguration(),
         getAssumedRoleARN());
-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_BUCKET_READ_OPERATIONS),
         new RoleModel.Statement(RoleModel.Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -260,8 +260,7 @@ public void initNoReadAccess() throws Throwable {
     // it still has write access, which can be explored in the final
     // step to delete files and directories.
     roleConfig = createAssumedRoleConfig();
-    bindRolePolicyStatements(roleConfig,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(roleConfig, STATEMENT_ALLOW_KMS_RW,
         statement(true, S3_ALL_BUCKETS, S3_ALL_OPERATIONS),
         new Statement(Effects.Deny)
             .addActions(S3_ALL_GET)
@@ -56,6 +56,7 @@
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.Effects;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.Statement;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.directory;
+import static org.apache.hadoop.fs.s3a.auth.RoleModel.resource;
 import static org.apache.hadoop.fs.s3a.auth.RoleModel.statement;
 import static org.apache.hadoop.fs.s3a.auth.RolePolicies.*;
 import static org.apache.hadoop.fs.s3a.auth.RoleTestUtils.bindRolePolicyStatements;
@@ -144,6 +145,11 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase {
    */
   private Path writableDir;

+  /**
+   * Instruction file created when using CSE, required to be added to policies.
+   */
+  private Path writableDirInstructionFile;
+
   /**
    * A directory to which restricted roles have only read access.
    */
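The new field documents the CSE detail driving the later policy changes: the encryption client can write a sidecar "instruction file" next to the encrypted object (here writableDir.instruction), and a policy scoped to explicit resources must cover that object too, or operations on the directory can fail with access denied. A sketch of a CSE-aware write statement (helper names are those imported above; treating resource()'s two boolean arguments as isDirectory and addWildcard is an assumption):

// Hypothetical helper: an Allow statement covering both a writable
// directory and the CSE instruction file stored beside it.
private RoleModel.Statement cseAwareWriteStatement(Path dir, Path instructionFile) {
  return new RoleModel.Statement(RoleModel.Effects.Allow)
      .addActions(S3_PATH_RW_OPERATIONS)
      .addResources(RoleModel.directory(dir))
      // the sidecar is a single object, hence resource() rather than directory()
      .addResources(RoleModel.resource(instructionFile, false, false));
}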
@@ -216,6 +222,7 @@ public void setup() throws Exception {
     basePath = uniquePath();
     readOnlyDir = new Path(basePath, "readonlyDir");
     writableDir = new Path(basePath, "writableDir");
+    writableDirInstructionFile = new Path(basePath, "writableDir.instruction");
     readOnlyChild = new Path(readOnlyDir, "child");
     noReadDir = new Path(basePath, "noReadDir");
     // the full FS
@@ -225,8 +232,7 @@ public void setup() throws Exception {

     // create the baseline assumed role
     assumedRoleConfig = createAssumedRoleConfig();
-    bindRolePolicyStatements(assumedRoleConfig,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(assumedRoleConfig, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS, // root: r-x
         new Statement(Effects.Allow) // dest: rwx
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -365,13 +371,13 @@ public void testMultiDeleteOptionPropagated() throws Throwable {
   public void testRenameParentPathNotWriteable() throws Throwable {
     describe("rename with parent paths not writeable; multi=%s", multiDelete);
     final Configuration conf = createAssumedRoleConfig();
-    bindRolePolicyStatements(conf,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(conf, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS,
         new Statement(Effects.Allow)
             .addActions(S3_PATH_RW_OPERATIONS)
             .addResources(directory(readOnlyDir))
-            .addResources(directory(writableDir)));
+            .addResources(directory(writableDir))
+            .addResources(resource(writableDirInstructionFile, false, false)));
     roleFS = (S3AFileSystem) readOnlyDir.getFileSystem(conf);

     S3AFileSystem fs = getFileSystem();
@@ -733,8 +739,7 @@ public void testRenamePermissionRequirements() throws Throwable {
     // s3:DeleteObjectVersion permission, and attempt rename
     // and then delete.
     Configuration roleConfig = createAssumedRoleConfig();
-    bindRolePolicyStatements(roleConfig,
-        STATEMENT_ALLOW_SSE_KMS_RW,
+    bindRolePolicyStatements(roleConfig, STATEMENT_ALLOW_KMS_RW,
         STATEMENT_ALL_BUCKET_READ_ACCESS, // root: r-x
         new Statement(Effects.Allow) // dest: rwx
             .addActions(S3_PATH_RW_OPERATIONS)
@@ -70,6 +70,7 @@ public void testLandsatBucketRequireGuarded() throws Throwable {

   @Test
   public void testLandsatBucketRequireUnencrypted() throws Throwable {
+    skipIfClientSideEncryption();
     run(BucketInfo.NAME,
         "-" + BucketInfo.ENCRYPTION_FLAG, "none",
         getLandsatCSVFile(getConfiguration()));
@@ -178,8 +179,9 @@ public void testUploadListByAge() throws Throwable {
     // least a second old
     describe("Sleeping 1 second then confirming upload still there");
     Thread.sleep(1000);
-    LambdaTestUtils.eventually(5000, 1000,
-        () -> { assertNumUploadsAge(path, 1, 1); });
+    LambdaTestUtils.eventually(5000, 1000, () -> {
+      assertNumUploadsAge(path, 1, 1);
+    });

     // 7. Assert deletion works when age filter matches
     describe("Doing aged deletion");
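Aside from the CSE skips, this hunk only reflows an eventually() call into the multi-line lambda form. The call shape is (timeoutMillis, intervalMillis, closure): LambdaTestUtils re-runs the closure until it stops throwing or the timeout elapses, which lets the assertion tolerate listing lag after the one-second sleep:

// retry the assertion every second, for up to five seconds in total;
// the last failure is rethrown if the timeout is exceeded
LambdaTestUtils.eventually(5000, 1000, () -> {
  assertNumUploadsAge(path, 1, 1);
});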
@@ -231,8 +233,8 @@ private void assertNumDeleted(S3AFileSystem fs, Path path, int numDeleted)
    * search all parts
    * @throws Exception on failure
    */
-  private void uploadCommandAssertCount(S3AFileSystem fs, String options[],
-      Path path, int numUploads, int ageSeconds)
+  private void uploadCommandAssertCount(S3AFileSystem fs, String[] options, Path path,
+      int numUploads, int ageSeconds)
       throws Exception {
     List<String> allOptions = new ArrayList<>();
     List<String> output = new ArrayList<>();