HDDS-761. Create S3 subcommand to run S3 related operations.

Closes #579
Vivek Ratnavel Subramanian 2019-03-14 21:16:59 -07:00 committed by Bharat Viswanadham
parent 2627dad333
commit 9001508485
13 changed files with 364 additions and 137 deletions

View File

@@ -122,7 +122,7 @@ The above command gives user bilbo read/write permission to the bucket.
The bucket command to provide ozone mapping for s3 bucket (Created via aws cli)
{{< highlight bash >}}
-ozone sh bucket path <<s3Bucket>>
+ozone s3 path <<s3Bucket>>
{{< /highlight >}}
The above command will print VolumeName and the mapping created for s3Bucket.
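For illustration, running the new subcommand against an existing S3 bucket might look like this (the volume name is a generated placeholder here, and the exact output wording may differ):
{{< highlight bash >}}
ozone s3 path bucket1
# Volume name for S3Bucket is : <volumeName>
# o3://bucket1.<volumeName>
{{< /highlight >}}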

View File

@@ -94,7 +94,7 @@ Security is not yet implemented, you can *use* any AWS_ACCESS_KEY_ID and AWS_SEC
Note: Ozone has a notion for 'volumes' which is missing from the S3 Rest endpoint. Under the hood S3 bucket names are mapped to Ozone 'volume/bucket' locations (depending on the given authentication information).
-To show the storage location of a S3 bucket, use the `ozone sh bucket path <bucketname>` command.
+To show the storage location of a S3 bucket, use the `ozone s3 path <bucketname>` command.
```
aws s3api --endpoint-url http://localhost:9878 create-bucket --bucket=bucket1
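# Hypothetical follow-up (not part of this diff): once the bucket exists,
# the backing Ozone volume can be looked up with the new subcommand:
ozone s3 path bucket1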

View File

@@ -32,7 +32,7 @@ User should get the kerberos ticket before using this option.
{{< highlight bash >}}
-ozone sh s3 getkey
+ozone s3 getsecret
{{< /highlight >}}
Prints the AWS_SECRET_ACCESS_KEY and AWS_ACCESS_KEY_ID for the current user.
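A sketch of the expected output on a secured cluster (placeholder values, not real credentials; the key names match what the acceptance tests below parse):
{{< highlight bash >}}
ozone s3 getsecret
# awsAccessKey=testuser/host@EXAMPLE.COM
# awsSecret=<generated-secret>
{{< /highlight >}}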

View File

@@ -118,7 +118,7 @@ Download any text file and put it to the `/tmp/alice.txt` first.
kubectl port-forward s3g-0 9878:9878
aws s3api --endpoint http://localhost:9878 create-bucket --bucket=test
aws s3api --endpoint http://localhost:9878 put-object --bucket test --key alice.txt --body /tmp/alice.txt
-kubectl exec -it scm-0 ozone sh bucket path test
+kubectl exec -it scm-0 ozone s3 path test
```
The output of the last command is something like this:
@@ -138,13 +138,13 @@ kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount
```
## Execute the job
-Execute the following spar-submit command, but change at least the following values:
+Execute the following spark-submit command, but change at least the following values:
* the kubernetes master url (you can check your ~/.kube/config to find the actual value)
* the kubernetes namespace (yournamespace in this example)
* serviceAccountName (you can use the _spark_ value if you followed the previous steps)
* container.image (in this example this is myrepo/spark-ozone. This is pushed to the registry in the previous steps)
-* location of the input file (o3fs://...), use the string which is identified earlier with the `ozone sh bucket path` command
+* location of the input file (o3fs://...), use the string which is identified earlier with the `ozone s3 path <bucketname>` command
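The o3fs:// location in the last bullet can be composed from the `ozone s3 path` output; a rough sketch (bucket, volume, and key names are placeholders) before the actual spark-submit command below:
```
ozone s3 path test
# if the reported volume name is <volumeName>, the input URI for key
# alice.txt in bucket test has the form:
#   o3fs://test.<volumeName>/alice.txt
```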
```
bin/spark-submit \

View File

@@ -48,6 +48,7 @@ function hadoop_usage
hadoop_add_subcommand "s3g" daemon "run the S3 compatible REST gateway"
hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager"
hadoop_add_subcommand "sh" client "command line interface for object store operations"
+hadoop_add_subcommand "s3" client "command line interface for s3 related operations"
hadoop_add_subcommand "version" client "print the version"
hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
@@ -129,6 +130,10 @@ function ozonecmd_case
      HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell
      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager"
    ;;
+    s3)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.s3.S3Shell
+      OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager"
+    ;;
scm)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.server.StorageContainerManager'
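With the dispatch above wired in, the new top-level subcommand is reachable from the `ozone` launcher. A quick smoke check might look like this (assuming a running cluster; behavior and output abbreviated):
```
ozone s3                     # no subcommand: prints usage for getsecret and path
ozone s3 getsecret           # needs a secure (Kerberos-enabled) cluster
ozone s3 path <bucketname>
```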

View File

@@ -37,7 +37,7 @@ Install aws cli
Setup credentials
${hostname}= Execute hostname
Execute kinit -k testuser/${hostname}@EXAMPLE.COM -t /etc/security/keytabs/testuser.keytab
-${result} = Execute ozone sh s3 getsecret
+${result} = Execute ozone s3 getsecret
${accessKey} = Get Regexp Matches ${result} (?<=awsAccessKey=).*
${secret} = Get Regexp Matches ${result} (?<=awsSecret=).*
Execute aws configure set default.s3.signature_version s3v4
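The extracted values would then typically be handed to the AWS CLI configuration. A hedged sketch of that step (placeholder values; the remaining keywords of the acceptance test are not shown in this hunk):
```
aws configure set aws_access_key_id <accessKey>
aws configure set aws_secret_access_key <secret>
```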

View File

@@ -76,13 +76,11 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.S3_BUCKET_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
-import static org.apache.hadoop.ozone.web.ozShell.s3.GetS3SecretHandler.OZONE_GETS3SECRET_ERROR;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@@ -1176,67 +1174,6 @@ public void testListKey() throws Exception {
    executeWithError(shell, args, "the length should be a positive number");
  }
-  @Test
-  public void testS3BucketMapping() throws IOException {
-    String setOmAddress =
-        "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
-    String s3Bucket = "bucket1";
-    String commandOutput;
-    createS3Bucket("ozone", s3Bucket);
-    //WHEN
-    String[] args =
-        new String[] {setOmAddress, "bucket",
-            "path", s3Bucket};
-    execute(shell, args);
-    //THEN
-    commandOutput = out.toString();
-    String volumeName = client.getOzoneVolumeName(s3Bucket);
-    assertTrue(commandOutput.contains("Volume name for S3Bucket is : " +
-        volumeName));
-    assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME + "://" +
-        s3Bucket + "." + volumeName));
-    out.reset();
-    //Trying to get map for an unknown bucket
-    args = new String[] {setOmAddress, "bucket", "path",
-        "unknownbucket"};
-    executeWithError(shell, args, S3_BUCKET_NOT_FOUND);
-    // No bucket name
-    args = new String[] {setOmAddress, "bucket", "path"};
-    executeWithError(shell, args, "Missing required parameter");
-    // Invalid bucket name
-    args = new String[] {setOmAddress, "bucket", "path", "/asd/multipleslash"};
-    executeWithError(shell, args, S3_BUCKET_NOT_FOUND);
-  }
-  @Test
-  public void testS3Secret() throws Exception {
-    String setOmAddress =
-        "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
-    String output;
-    String[] args = new String[] {setOmAddress, "s3", "getsecret"};
-    execute(shell, args);
-    // Get the first line of output
-    output = out.toString().split("\n")[0];
-    assertTrue(output.equals(OZONE_GETS3SECRET_ERROR));
-  }
-  private void createS3Bucket(String userName, String s3Bucket) {
-    try {
-      client.createS3Bucket("ozone", s3Bucket);
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex);
-    }
-  }
private OzoneVolume creatVolume() throws OzoneException, IOException {
String volumeName = RandomStringUtils.randomNumeric(5) + "volume";
VolumeArgs volumeArgs = VolumeArgs.newBuilder()

View File

@@ -0,0 +1,292 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.ozShell;
import com.google.common.base.Strings;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
import org.apache.hadoop.ozone.web.ozShell.s3.S3Shell;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import picocli.CommandLine;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.IExceptionHandler2;
import picocli.CommandLine.ParameterException;
import picocli.CommandLine.ParseResult;
import picocli.CommandLine.RunLast;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.List;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.S3_BUCKET_NOT_FOUND;
import static org.apache.hadoop.ozone.web.ozShell.s3.GetS3SecretHandler.OZONE_GETS3SECRET_ERROR;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
 * Test class for testing the Ozone s3Shell command.
 */
public class TestS3Shell {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3Shell.class);
/**
* Set the timeout for every test.
*/
@Rule
public Timeout testTimeout = new Timeout(300000);
private static String url;
private static File baseDir;
private static OzoneConfiguration conf = null;
private static MiniOzoneCluster cluster = null;
private static ClientProtocol client = null;
private static S3Shell s3Shell = null;
private final ByteArrayOutputStream out = new ByteArrayOutputStream();
private final ByteArrayOutputStream err = new ByteArrayOutputStream();
private static final PrintStream OLD_OUT = System.out;
private static final PrintStream OLD_ERR = System.err;
/**
 * Create a MiniOzoneCluster for testing, using the distributed Ozone
 * handler type.
 *
 * @throws Exception
 */
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
String path = GenericTestUtils.getTempPath(
TestS3Shell.class.getSimpleName());
baseDir = new File(path);
baseDir.mkdirs();
s3Shell = new S3Shell();
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.build();
conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue());
conf.setQuietMode(false);
client = new RpcClient(conf);
cluster.waitForClusterToBeReady();
}
/**
* shutdown MiniOzoneCluster.
*/
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
if (baseDir != null) {
FileUtil.fullyDelete(baseDir, true);
}
}
@Before
public void setup() {
System.setOut(new PrintStream(out));
System.setErr(new PrintStream(err));
url = "o3://" + getOmAddress();
}
@After
public void reset() {
// reset stream after each unit test
out.reset();
err.reset();
// restore system streams
System.setOut(OLD_OUT);
System.setErr(OLD_ERR);
}
@Test
public void testS3BucketMapping() throws IOException {
String setOmAddress =
"--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
String s3Bucket = "bucket1";
String commandOutput;
createS3Bucket("ozone", s3Bucket);
// WHEN
String[] args =
new String[] {setOmAddress, "path", s3Bucket};
execute(s3Shell, args);
// THEN
commandOutput = out.toString();
String volumeName = client.getOzoneVolumeName(s3Bucket);
assertTrue(commandOutput.contains("Volume name for S3Bucket is : " +
volumeName));
assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME + "://" +
s3Bucket + "." + volumeName));
out.reset();
// Trying to get map for an unknown bucket
args = new String[] {setOmAddress, "path", "unknownbucket"};
executeWithError(s3Shell, args, S3_BUCKET_NOT_FOUND);
// No bucket name
args = new String[] {setOmAddress, "path"};
executeWithError(s3Shell, args, "Missing required parameter");
// Invalid bucket name
args = new String[] {setOmAddress, "path", "/asd/multipleslash"};
executeWithError(s3Shell, args, S3_BUCKET_NOT_FOUND);
}
@Test
public void testS3SecretUnsecuredCluster() throws Exception {
String setOmAddress =
"--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
String output;
String[] args = new String[] {setOmAddress, "getsecret"};
execute(s3Shell, args);
// Get the first line of output
output = out.toString().split("\n")[0];
assertTrue(output.equals(OZONE_GETS3SECRET_ERROR));
}
private void createS3Bucket(String userName, String s3Bucket) {
  try {
    // Note: the bucket is always created under the "ozone" user;
    // the userName parameter is currently unused.
    client.createS3Bucket("ozone", s3Bucket);
  } catch (IOException ex) {
    GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex);
  }
}
private void execute(S3Shell shell, String[] args) {
LOG.info("Executing s3Shell command with args {}", Arrays.asList(args));
CommandLine cmd = shell.getCmd();
IExceptionHandler2<List<Object>> exceptionHandler =
new IExceptionHandler2<List<Object>>() {
@Override
public List<Object> handleParseException(ParameterException ex,
String[] args) {
throw ex;
}
@Override
public List<Object> handleExecutionException(ExecutionException ex,
ParseResult parseRes) {
throw ex;
}
};
cmd.parseWithHandlers(new RunLast(),
exceptionHandler, args);
}
/**
 * Execute the command and assert that it fails with the expected
 * OMException result code.
 */
private void executeWithError(S3Shell shell, String[] args,
OMException.ResultCodes code) {
try {
execute(shell, args);
fail("Exception is expected from command execution " + Arrays
.asList(args));
} catch (Exception ex) {
Assert.assertEquals(OMException.class, ex.getCause().getClass());
Assert.assertEquals(code, ((OMException) ex.getCause()).getResult());
}
}
/**
 * Execute the command and assert that the resulting error message
 * contains the expected text.
 */
private void executeWithError(S3Shell shell, String[] args,
String expectedError) {
if (Strings.isNullOrEmpty(expectedError)) {
execute(shell, args);
} else {
try {
execute(shell, args);
fail("Exception is expected from command execution " + Arrays
.asList(args));
} catch (Exception ex) {
if (!Strings.isNullOrEmpty(expectedError)) {
Throwable exceptionToCheck = ex;
if (exceptionToCheck.getCause() != null) {
exceptionToCheck = exceptionToCheck.getCause();
}
Assert.assertTrue(
String.format(
"Error of s3Shell code doesn't contain the " +
"exception [%s] in [%s]",
expectedError, exceptionToCheck.getMessage()),
exceptionToCheck.getMessage().contains(expectedError));
}
}
}
}
private String getOmAddress() {
List<ServiceInfo> services;
try {
services = cluster.getOzoneManager().getServiceList();
} catch (IOException e) {
fail("Could not get service list from OM");
return null;
}
return services.stream()
.filter(a -> HddsProtos.NodeType.OM.equals(a.getNodeType()))
.findFirst()
.map(s -> s.getServiceAddress(ServicePort.Type.RPC))
.orElseThrow(IllegalStateException::new);
}
}

View File

@@ -24,7 +24,6 @@
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.web.ozShell.bucket.BucketCommands;
import org.apache.hadoop.ozone.web.ozShell.keys.KeyCommands;
-import org.apache.hadoop.ozone.web.ozShell.s3.S3Commands;
import org.apache.hadoop.ozone.web.ozShell.token.TokenCommands;
import org.apache.hadoop.ozone.web.ozShell.volume.VolumeCommands;
@@ -46,8 +45,7 @@
    VolumeCommands.class,
    BucketCommands.class,
    KeyCommands.class,
-    TokenCommands.class,
-    S3Commands.class
+    TokenCommands.class
},
versionProvider = HddsVersionProvider.class,
mixinStandardHelpOptions = true)

View File

@@ -39,8 +39,7 @@
    ListBucketHandler.class,
    CreateBucketHandler.class,
    UpdateBucketHandler.class,
-    DeleteBucketHandler.class,
-    S3BucketMapping.class
+    DeleteBucketHandler.class
},
mixinStandardHelpOptions = true,
versionProvider = HddsVersionProvider.class)

View File

@@ -30,7 +30,7 @@
 * Executes getsecret calls.
 */
@Command(name = "getsecret",
-    description = "returns s3 secret for current user")
+    description = "Returns s3 secret for current user")
public class GetS3SecretHandler extends Handler {
public static final String OZONE_GETS3SECRET_ERROR = "This command is not" +

View File

@@ -1,60 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.ozShell.s3;
import org.apache.hadoop.hdds.cli.GenericParentCommand;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.cli.MissingSubcommandException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.web.ozShell.Shell;
import picocli.CommandLine.Command;
import picocli.CommandLine.ParentCommand;
import java.util.concurrent.Callable;
/**
* Subcommand to group s3 related operations.
*/
@Command(name = "s3",
description = "S3 specific operations",
subcommands = {
GetS3SecretHandler.class
},
mixinStandardHelpOptions = true,
versionProvider = HddsVersionProvider.class)
public class S3Commands implements GenericParentCommand, Callable<Void> {
@ParentCommand
private Shell shell;
@Override
public Void call() throws Exception {
throw new MissingSubcommandException(
this.shell.getCmd().getSubcommands().get("s3").getUsageMessage());
}
@Override
public boolean isVerbose() {
return shell.isVerbose();
}
@Override
public OzoneConfiguration createOzoneConfiguration() {
return shell.createOzoneConfiguration();
}
}

View File

@@ -0,0 +1,56 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.web.ozShell.s3;
import io.opentracing.Scope;
import io.opentracing.util.GlobalTracer;
import org.apache.hadoop.hdds.tracing.TracingUtil;
import org.apache.hadoop.ozone.web.ozShell.Shell;
import org.apache.hadoop.ozone.web.ozShell.bucket.S3BucketMapping;
import picocli.CommandLine.Command;
/**
* Shell for s3 related operations.
*/
@Command(name = "ozone s3",
description = "Shell for S3 specific operations",
subcommands = {
GetS3SecretHandler.class,
S3BucketMapping.class
})
public class S3Shell extends Shell {
@Override
public void execute(String[] argv) {
TracingUtil.initTracing("s3shell");
try (Scope scope = GlobalTracer.get().buildSpan("main").startActive(true)) {
super.execute(argv);
}
}
/**
* Main for the S3Shell Command handling.
*
* @param argv - System Args Strings[]
* @throws Exception
*/
public static void main(String[] argv) throws Exception {
new S3Shell().run(argv);
}
}