diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index 19dc32a896..22274ee774 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -479,6 +479,20 @@ public String getUsage() {
     public int run(String[] args, PrintStream out) throws Exception {
       List<String> paths = parseArgs(args);
       Map<String, String> options = new HashMap<>();
+      String s3Path = paths.get(0);
+
+      // Check if DynamoDB url is set from arguments.
+      String metadataStoreUri = getCommandFormat().getOptValue(META_FLAG);
+      if(metadataStoreUri == null || metadataStoreUri.isEmpty()) {
+        // If not set, check if filesystem is guarded by creating an
+        // S3AFileSystem and check if hasMetadataStore is true
+        try (S3AFileSystem s3AFileSystem = (S3AFileSystem)
+            S3AFileSystem.newInstance(toUri(s3Path), getConf())){
+          Preconditions.checkState(s3AFileSystem.hasMetadataStore(),
+              "The S3 bucket is unguarded. " + getName()
+                  + " can not be used on an unguarded bucket.");
+        }
+      }
 
       String readCap = getCommandFormat().getOptValue(READ_FLAG);
       if (StringUtils.isNotEmpty(readCap)) {
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 96aac15fc0..242f61681e 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -29,6 +29,7 @@
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 
@@ -53,6 +54,7 @@
 import org.apache.hadoop.util.StringUtils;
 
 import static org.apache.hadoop.fs.s3a.Constants.METADATASTORE_AUTHORITATIVE;
+import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_CREATE_KEY;
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_NAME_KEY;
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_METASTORE_NULL;
 import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
@@ -296,7 +298,7 @@ public void testPruneCommandConf() throws Exception {
   }
 
   @Test
-  public void testSetCapacityFailFast() throws Exception{
+  public void testSetCapacityFailFastOnReadWriteOfZero() throws Exception{
     Configuration conf = getConfiguration();
     conf.set(S3GUARD_DDB_TABLE_NAME_KEY, getFileSystem().getBucket());
 
@@ -312,6 +314,21 @@ public void testSetCapacityFailFast() throws Exception{
         S3GuardTool.SetCapacity.WRITE_CAP_INVALID, () -> cmdW.run(argsW));
   }
 
+  @Test
+  public void testSetCapacityFailFastIfNotGuarded() throws Exception{
+    Configuration conf = getConfiguration();
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, UUID.randomUUID().toString());
+    conf.set(S3GUARD_DDB_TABLE_CREATE_KEY, Boolean.FALSE.toString());
+    conf.set(S3_METADATA_STORE_IMPL, S3GUARD_METASTORE_NULL);
+
+    S3GuardTool.SetCapacity cmdR = new S3GuardTool.SetCapacity(conf);
+    String[] argsR = new String[]{cmdR.getName(),
+        "s3a://" + getFileSystem().getBucket()};
+
+    intercept(IllegalStateException.class, "unguarded",
+        () -> run(argsR));
+  }
+
   @Test
   public void testDestroyNoBucket() throws Throwable {
     intercept(FileNotFoundException.class,