HDDS-657. Remove {volume} path segments from all the remaining rest endpoints. Contributed by Elek, Marton.

Márton Elek 2018-10-16 15:14:05 +02:00
parent 0bf8a110a5
commit 0c2914e582
38 changed files with 906 additions and 833 deletions
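The gist of the change, as reconstructed from the @Path annotations in this diff (class names as in the patch):

```
Before:  /{volume}                    -> ListBucket
         /{volume}/{bucket}           -> ListObject
         /{volume}/{bucket}/{path:.+} -> DeleteObject, HeadObject
After:   /                            -> RootEndpoint
         /{bucket}                    -> BucketEndpoint
         /{bucket}/{path:.+}          -> ObjectEndpoint
```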


@@ -0,0 +1,27 @@
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
## Ozone S3 Gateway Acceptance Tests
Note: the aws-cli-based acceptance tests can be cross-checked against the original AWS S3 endpoint.

You need to:

1. Create a bucket
2. Configure your local aws cli
3. Set the bucket/endpoint URL during the robot test execution, for example:
```
robot -v bucket:ozonetest -v OZONE_S3_SET_CREDENTIALS:false -v ENDPOINT_URL:https://s3.us-east-2.amazonaws.com smoketest/s3
```
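For steps 1 and 2 against real AWS, a minimal sketch might be (the bucket name is a placeholder; any name matching the `bucket` variable above works):

```
aws configure                               # enter access key, secret key and region interactively
aws s3 mb s3://ozonetest --region us-east-2 # create the test bucket
```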


@@ -0,0 +1,21 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*** Settings ***
Library OperatingSystem
Library String
Resource ../commonlib.robot
Resource ./commonawslib.robot
Test Setup Setup s3 tests


@@ -18,46 +18,30 @@ Documentation S3 gateway test with aws cli
Library OperatingSystem
Library String
Resource ../commonlib.robot
Resource ./commonawslib.robot
Suite Setup Setup s3 tests
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
*** Keywords ***
Execute AWSCli
[Arguments] ${command}
${output} = Execute aws s3 --endpoint-url ${ENDPOINT_URL}/${VOLUME} ${command}
[return] ${output}
${BUCKET} generated
*** Test Cases ***
Create volume and bucket for the tests
${postfix} = Generate Random String 5 [NUMBERS]
Set Suite Variable ${BUCKET} bucket-${postfix}
Set Suite Variable ${VOLUME} vol-${postfix}
Log Testing s3 commands in /${VOLUME}/${BUCKET}
${result} = Execute ozone sh volume create /${VOLUME} --user hadoop
${result} = Execute ozone sh bucket create /${VOLUME}/${BUCKET}
Install aws s3 cli
Execute sudo apt-get install -y awscli
Set Environment Variable AWS_ACCESS_KEY_ID ANYID
Set Environment Variable AWS_SECRET_ACCESS_KEY ANYKEY
File upload and directory list
Execute date > /tmp/testfile
${result} = Execute AWSCli cp /tmp/testfile s3://${BUCKET}
${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}
Should contain ${result} upload
${result} = Execute AWSCli cp /tmp/testfile s3://${BUCKET}/dir1/dir2/file
${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/dir1/dir2/file
Should contain ${result} upload
${result} = Execute AWSCli ls s3://${BUCKET}
${result} = Execute AWSS3Cli ls s3://${BUCKET}
Should contain ${result} testfile
Should contain ${result} dir1
Should not contain ${result} dir2
${result} = Execute AWSCli ls s3://${BUCKET}/dir1/
${result} = Execute AWSS3Cli ls s3://${BUCKET}/dir1/
Should not contain ${result} testfile
Should not contain ${result} dir1
Should contain ${result} dir2
${result} = Execute AWSCli ls s3://${BUCKET}/dir1/dir2/
${result} = Execute AWSS3Cli ls s3://${BUCKET}/dir1/dir2/
Should not contain ${result} testfile
Should not contain ${result} dir1
Should contain ${result} file


@@ -0,0 +1,34 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*** Settings ***
Documentation S3 gateway test with aws cli
Library OperatingSystem
Library String
Resource ../commonlib.robot
Resource commonawslib.robot
Test Setup Setup s3 tests
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
${BUCKET} generated
*** Test Cases ***
Head Bucket not existent
${result} = Execute AWSS3APICli head-bucket --bucket ${BUCKET}
${result} = Execute AWSS3APICli and checkrc head-bucket --bucket ozonenosuchbucketqqweqwe 255
Should contain ${result} Bad Request
Should contain ${result} 400


@@ -0,0 +1,32 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*** Settings ***
Documentation S3 gateway test with aws cli
Library OperatingSystem
Library String
Resource ../commonlib.robot
Resource commonawslib.robot
Test Setup Setup s3 tests
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
${BUCKET} generated
*** Test Cases ***
List buckets
${result} = Execute AWSS3APICli list-buckets | jq -r '.Buckets[].Name'
Should contain ${result} ${BUCKET}


@@ -1,66 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*** Settings ***
Documentation S3 gateway test with aws cli for bucket operations
Library String
Library OperatingSystem
Resource commonawslib.robot
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
${OZONE_TEST} true
${BUCKET} generated
${NONEXIST-BUCKET} generated1
*** Keywords ***
Install aws s3 cli
Execute sudo apt-get install -y awscli
Set Environment Variable AWS_ACCESS_KEY_ID default
Set Environment Variable AWS_SECRET_ACCESS_KEY defaultsecret
${postfix1} = Generate Random String 5 [NUMBERS]
Set Suite Variable ${BUCKET} bucket-${postfix1}
Check Volume
# The bucket-to-volume mapping is known: the backing volume name is "s3" + AWS_ACCESS_KEY_ID
${result} = Execute ozone sh volume info /s3default
Should contain ${result} s3default
Should not contain ${result} VOLUME_NOT_FOUND
*** Test Cases ***
Setup s3 Tests
Run Keyword if '${OZONE_TEST}' == 'true' Install aws s3 cli
Create Bucket
${result} = Execute AWSS3APICli create-bucket --bucket ${BUCKET}
Should contain ${result} ${BUCKET}
Should contain ${result} Location
# create an already existing bucket
${result} = Execute AWSS3APICli create-bucket --bucket ${BUCKET}
Should contain ${result} ${BUCKET}
Should contain ${result} Location
Run Keyword if '${OZONE_TEST}' == 'true' Check Volume
Head Bucket
${result} = Execute AWSS3APICli head-bucket --bucket ${BUCKET}
${result} = Execute AWSS3APICli and checkrc head-bucket --bucket ${NONEXIST-BUCKET} 255
Should contain ${result} Not Found
Should contain ${result} 404
Delete Bucket
${result} = Execute AWSS3APICli head-bucket --bucket ${BUCKET}
${result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${NONEXIST-BUCKET} 255
Should contain ${result} NoSuchBucket


@@ -1,71 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*** Settings ***
Documentation S3 gateway test with aws cli for bucket operations
Library String
Library OperatingSystem
Resource commonawslib.robot
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
${OZONE_TEST} true
${BUCKET} generated
${NONEXIST-BUCKET} generated1
*** Keywords ***
Install aws s3 cli
Execute sudo apt-get install -y awscli
Remove Environment Variable AWS_ACCESS_KEY_ID
Remove Environment Variable AWS_SECRET_ACCESS_KEY
Execute aws configure set default.s3.signature_version s3v4
Execute aws configure set aws_access_key_id default1
Execute aws configure set aws_secret_access_key defaultsecret
Execute aws configure set region us-west-1
${postfix1} = Generate Random String 5 [NUMBERS]
Set Suite Variable ${BUCKET} bucket-${postfix1}
Check Volume
# The bucket-to-volume mapping is known: the backing volume name is "s3" + AWS_ACCESS_KEY_ID
${result} = Execute ozone sh volume info /s3default1
Should contain ${result} s3default1
Should not contain ${result} VOLUME_NOT_FOUND
*** Test Cases ***
Setup s3 Tests
Run Keyword if '${OZONE_TEST}' == 'true' Install aws s3 cli
Create Bucket
${result} = Execute AWSS3APICli create-bucket --bucket ${BUCKET}
Should contain ${result} ${BUCKET}
Should contain ${result} Location
# create an already existing bucket
${result} = Execute AWSS3APICli create-bucket --bucket ${BUCKET}
Should contain ${result} ${BUCKET}
Should contain ${result} Location
Run Keyword if '${OZONE_TEST}' == 'true' Check Volume
Head Bucket
${result} = Execute AWSS3APICli head-bucket --bucket ${BUCKET}
${result} = Execute AWSS3APICli and checkrc head-bucket --bucket ${NONEXIST-BUCKET} 255
Should contain ${result} Not Found
Should contain ${result} 404
Delete Bucket
${result} = Execute AWSS3APICli head-bucket --bucket ${BUCKET}
${result} = Execute AWSS3APICli and checkrc delete-bucket --bucket ${NONEXIST-BUCKET} 255
Should contain ${result} NoSuchBucket


@@ -16,6 +16,10 @@
*** Settings ***
Resource ../commonlib.robot
*** Variables ***
${OZONE_S3_HEADER_VERSION} v2
${OZONE_S3_SET_CREDENTIALS} true
*** Keywords ***
Execute AWSS3APICli
[Arguments] ${command}
@@ -23,6 +27,41 @@ Execute AWSS3APICli
[return] ${output}
Execute AWSS3APICli and checkrc
[Arguments] ${command} ${expected_error_code}
[Arguments] ${command} ${expected_error_code}
${output} = Execute and checkrc aws s3api --endpoint-url ${ENDPOINT_URL} ${command} ${expected_error_code}
[return] ${output}
[return] ${output}
Execute AWSS3Cli
[Arguments] ${command}
${output} = Execute aws s3 --endpoint-url ${ENDPOINT_URL} ${command}
[return] ${output}
Install aws cli
${rc} ${output} = Run And Return Rc And Output which apt-get
Run Keyword if '${rc}' == '0' Install aws cli s3 debian
Install aws cli s3 debian
Execute sudo apt-get install -y awscli
Setup v2 headers
Set Environment Variable AWS_ACCESS_KEY_ID ANYID
Set Environment Variable AWS_SECRET_ACCESS_KEY ANYKEY
Setup v4 headers
Execute aws configure set default.s3.signature_version s3v4
Execute aws configure set aws_access_key_id default1
Execute aws configure set aws_secret_access_key defaultsecret
Execute aws configure set region us-west-1
Create bucket
${postfix} = Generate Random String 5 [NUMBERS]
Set Suite Variable ${BUCKET} bucket-${postfix}
Execute AWSS3APICli create-bucket --bucket ${BUCKET}
Setup credentials
Run Keyword if '${OZONE_S3_HEADER_VERSION}' == 'v4' Setup v4 headers
Run Keyword if '${OZONE_S3_HEADER_VERSION}' != 'v4' Setup v2 headers
Setup s3 tests
Run Keyword Install aws cli
Run Keyword if '${OZONE_S3_SET_CREDENTIALS}' == 'true' Setup credentials
Run Keyword if '${BUCKET}' == 'generated' Create bucket


@@ -0,0 +1,72 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*** Settings ***
Documentation S3 gateway test with aws cli
Library OperatingSystem
Library String
Resource ../commonlib.robot
Resource commonawslib.robot
Test Setup Setup s3 tests
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
${BUCKET} generated
*** Test Cases ***
Delete file with s3api
Execute date > /tmp/testfile
${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key deletetestapi/f1 --body /tmp/testfile
${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix deletetestapi/
Should contain ${result} f1
${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapi/f1
${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix deletetestapi/
Should not contain ${result} f1
#In case of HTTP 500, the error code is printed out to the console.
Should not contain ${result} 500
Delete file with s3api, file doesn't exist
${result} = Execute AWSS3Cli ls s3://${BUCKET}/
Should not contain ${result} thereisnosuchfile
${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key thereisnosuchfile
${result} = Execute AWSS3Cli ls s3://${BUCKET}/
Should not contain ${result} thereisnosuchfile
Delete dir with s3api
Execute date > /tmp/testfile
${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/deletetestapidir/f1
${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapidir/
Should contain ${result} f1
${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapidir/
${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapidir/
Should contain ${result} f1
${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapidir/f1
Delete file with s3api, file doesn't exist, prefix of a real file
Execute date > /tmp/testfile
${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/deletetestapiprefix/filefile
${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapiprefix/
Should contain ${result} filefile
${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapiprefix/file
${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapiprefix/
Should contain ${result} filefile
${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapiprefix/filefile
Delete file with s3api, bucket doesn't exist
${result} = Execute AWSS3APICli and checkrc delete-object --bucket ${BUCKET}-nosuchbucket --key f1 255
Should contain ${result} NoSuchBucket


@@ -0,0 +1,42 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*** Settings ***
Documentation S3 gateway test with aws cli
Library OperatingSystem
Library String
Resource ../commonlib.robot
Resource commonawslib.robot
Test Setup Setup s3 tests
*** Variables ***
${ENDPOINT_URL} http://s3g:9878
${OZONE_TEST} true
${BUCKET} generated
*** Test Cases ***
Put object to s3
Execute date > /tmp/testfile
${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key putobject/f1 --body /tmp/testfile
${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix putobject/
Should contain ${result} f1
# This test depends on the previous test case. It can't be executed alone.
Get object from s3
${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 /tmp/testfile.result
${checksumbefore} = Execute md5sum /tmp/testfile | awk '{print $1}'
${checksumafter} = Execute md5sum /tmp/testfile.result | awk '{print $1}'
Should Be Equal ${checksumbefore} ${checksumafter}


@@ -43,9 +43,10 @@ execute_tests(){
for TEST in "${TESTS[@]}"; do
TITLE="Ozone $TEST tests with $COMPOSE_DIR cluster"
set +e
docker-compose -f "$COMPOSE_FILE" exec datanode python -m robot --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "smoketest/$RESULT_DIR/robot-$COMPOSE_DIR-${TEST//\//_/}.xml" --logtitle "$TITLE" --reporttitle "$TITLE" "smoketest/$TEST"
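# Note: ${TEST//\//_} replaces every '/' in the test name with '_'; the old
# pattern ${TEST//\//_/} replaced '/' with '_/', leaving separators in the file name.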
OUTPUT_NAME="$COMPOSE_DIR-${TEST//\//_}"
docker-compose -f "$COMPOSE_FILE" exec datanode python -m robot --log NONE --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "smoketest/$RESULT_DIR/robot-$OUTPUT_NAME.xml" --logtitle "$TITLE" --reporttitle "$TITLE" "smoketest/$TEST"
set -e
docker-compose -f "$COMPOSE_FILE" logs > "$DIR/$RESULT_DIR/docker-$COMPOSE_DIR-${TEST//\//_/}.log"
docker-compose -f "$COMPOSE_FILE" logs > "$DIR/$RESULT_DIR/docker-$OUTPUT_NAME.log"
done
if [ "$KEEP_RUNNING" = false ]; then
docker-compose -f "$COMPOSE_FILE" down


@@ -1,67 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.bucket;
import javax.ws.rs.DELETE;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import org.apache.hadoop.ozone.s3.EndpointBase;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.http.HttpStatus;
/**
* Delete a bucket.
*/
@Path("/{bucket}")
public class DeleteBucket extends EndpointBase {
@DELETE
@Produces(MediaType.APPLICATION_XML)
public Response delete(@PathParam("bucket") String bucketName)
throws IOException, OS3Exception {
try {
deleteS3Bucket(bucketName);
} catch (IOException ex) {
if (ex.getMessage().contains("BUCKET_NOT_EMPTY")) {
OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
.BUCKET_NOT_EMPTY, S3ErrorTable.Resource.BUCKET);
throw os3Exception;
} else if (ex.getMessage().contains("BUCKET_NOT_FOUND")) {
OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
.NO_SUCH_BUCKET, S3ErrorTable.Resource.BUCKET);
throw os3Exception;
} else {
throw ex;
}
}
return Response
.status(HttpStatus.SC_NO_CONTENT)
.build();
}
}


@@ -1,61 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.bucket;
import javax.ws.rs.HEAD;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.core.Response;
import java.io.IOException;
import org.apache.hadoop.ozone.s3.EndpointBase;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable.Resource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Checks whether the bucket exists.
*/
@Path("/{bucket}")
public class HeadBucket extends EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(HeadBucket.class);
@HEAD
public Response head(@PathParam("bucket") String bucketName)
throws Exception {
try {
getVolume(getOzoneVolumeName(bucketName)).getBucket(bucketName);
} catch (IOException ex) {
LOG.error("Exception occurred in headBucket", ex);
if (ex.getMessage().contains("NOT_FOUND")) {
OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
.NO_SUCH_BUCKET, Resource.BUCKET);
throw os3Exception;
} else {
throw ex;
}
}
return Response.ok().build();
}
}


@@ -1,79 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.bucket;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response;
import java.io.IOException;
import org.apache.hadoop.ozone.s3.EndpointBase;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV2;
import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV4;
import org.apache.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Create new bucket.
*/
@Path("/{bucket}")
public class PutBucket extends EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(PutBucket.class);
@PUT
public Response put(@PathParam("bucket") String bucketName, @Context
HttpHeaders httpHeaders) throws IOException, OS3Exception {
String auth = httpHeaders.getHeaderString("Authorization");
LOG.info("Auth header string {}", auth);
if (auth == null) {
throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, S3ErrorTable
.Resource.HEADER);
}
String userName;
if (auth.startsWith("AWS4")) {
LOG.info("V4 Header {}", auth);
AuthorizationHeaderV4 authorizationHeader = new AuthorizationHeaderV4(
auth);
userName = authorizationHeader.getAccessKeyID().toLowerCase();
} else {
LOG.info("V2 Header {}", auth);
AuthorizationHeaderV2 authorizationHeader = new AuthorizationHeaderV2(
auth);
userName = authorizationHeader.getAccessKeyID().toLowerCase();
}
String location = createS3Bucket(userName, bucketName);
LOG.info("Location is {}", location);
return Response.status(HttpStatus.SC_OK).header("Location", location)
.build();
}
}


@@ -1,30 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Rest endpoint implementation for the bucket specific methods.
*/
@javax.xml.bind.annotation.XmlSchema(
namespace = "http://s3.amazonaws"
+ ".com/doc/2006-03-01/", elementFormDefault =
javax.xml.bind.annotation.XmlNsForm.QUALIFIED,
xmlns = {
@javax.xml.bind.annotation.XmlNs(namespaceURI = "http://s3.amazonaws"
+ ".com/doc/2006-03-01/", prefix = "")})
package org.apache.hadoop.ozone.s3.bucket;


@@ -31,7 +31,7 @@ public class IsoDateAdapter extends XmlAdapter<String, Instant> {
public IsoDateAdapter() {
iso8861Formatter =
DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mmX")
DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX")
.withZone(ZoneOffset.UTC);
}


@@ -1,4 +1,4 @@
/*
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -6,50 +6,61 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.object;
package org.apache.hadoop.ozone.s3.endpoint;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.HEAD;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import java.io.IOException;
import java.time.Instant;
import java.util.Iterator;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.s3.EndpointBase;
import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* List Object Rest endpoint.
* Bucket level rest endpoints.
*/
@Path("/{volume}/{bucket}")
public class ListObject extends EndpointBase {
@Path("/{bucket}")
public class BucketEndpoint extends EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(BucketEndpoint.class);
/**
* Rest endpoint to list objects in a specific bucket.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
* for more details.
*/
@GET
@Produces(MediaType.APPLICATION_XML)
public ListObjectResponse get(
@PathParam("volume") String volumeName,
public ListObjectResponse list(
@PathParam("bucket") String bucketName,
@QueryParam("delimiter") String delimiter,
@QueryParam("encoding-type") String encodingType,
@@ -65,8 +76,7 @@ public ListObjectResponse get(
prefix = "";
}
OzoneVolume volume = getVolume(volumeName);
OzoneBucket bucket = getBucket(volume, bucketName);
OzoneBucket bucket = getBucket(bucketName);
Iterator<? extends OzoneKey> ozoneKeyIterator = bucket.listKeys(prefix);
@@ -113,7 +123,77 @@ public ListObjectResponse get(
response.addKey(keyMetadata);
}
}
response.setKeyCount(
response.getCommonPrefixes().size() + response.getContents().size());
return response;
}
@PUT
public Response put(@PathParam("bucket") String bucketName, @Context
HttpHeaders httpHeaders) throws IOException, OS3Exception {
String userName = parseUsername(httpHeaders);
String location = createS3Bucket(userName, bucketName);
LOG.info("Location is {}", location);
return Response.status(HttpStatus.SC_OK).header("Location", location)
.build();
}
/**
* Rest endpoint to check the existence of a bucket.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html
* for more details.
*/
@HEAD
public Response head(@PathParam("bucket") String bucketName)
throws OS3Exception, IOException {
try {
getBucket(bucketName);
} catch (OS3Exception ex) {
LOG.error("Exception occurred in headBucket", ex);
//TODO: use a subclass of OS3Exception and catch it here.
if (ex.getCode().contains("NoSuchBucket")) {
return Response.status(Status.BAD_REQUEST).build();
} else {
throw ex;
}
}
return Response.ok().build();
}
/**
* Rest endpoint to delete a specific bucket.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETE.html
* for more details.
*/
@DELETE
public Response delete(@PathParam("bucket") String bucketName)
throws IOException, OS3Exception {
try {
deleteS3Bucket(bucketName);
} catch (IOException ex) {
if (ex.getMessage().contains("BUCKET_NOT_EMPTY")) {
OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
.BUCKET_NOT_EMPTY, S3ErrorTable.Resource.BUCKET);
throw os3Exception;
} else if (ex.getMessage().contains("BUCKET_NOT_FOUND")) {
OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
.NO_SUCH_BUCKET, S3ErrorTable.Resource.BUCKET);
throw os3Exception;
} else {
throw ex;
}
}
return Response
.status(HttpStatus.SC_NO_CONTENT)
.build();
}
}


@@ -15,10 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3;
package org.apache.hadoop.ozone.s3.endpoint;
import javax.inject.Inject;
import javax.ws.rs.NotFoundException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import java.io.IOException;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -27,6 +29,8 @@
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable.Resource;
import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV2;
import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV4;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
@@ -39,6 +43,7 @@ public class EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(EndpointBase.class);
@Inject
private OzoneClient client;
@@ -65,6 +70,25 @@ protected OzoneBucket getBucket(OzoneVolume volume, String bucketName)
return bucket;
}
protected OzoneBucket getBucket(String bucketName)
throws OS3Exception, IOException {
OzoneBucket bucket;
try {
OzoneVolume volume = getVolume(getOzoneVolumeName(bucketName));
bucket = volume.getBucket(bucketName);
} catch (IOException ex) {
LOG.error("Error occurred is {}", ex);
if (ex.getMessage().contains("NOT_FOUND")) {
OS3Exception oex =
S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, Resource.BUCKET);
throw oex;
} else {
throw ex;
}
}
return bucket;
}
protected OzoneVolume getVolume(String volumeName) throws IOException {
OzoneVolume volume = null;
try {
@@ -149,6 +173,37 @@ public String getOzoneBucketName(String s3BucketName) throws IOException {
return client.getObjectStore().getOzoneBucketName(s3BucketName);
}
/**
* Retrieve the username based on the authorization header.
*
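* Illustrative header values (standard AWS formats, not taken from this patch):
* V2: {@code AWS <accessKeyID>:<signature>}
* V4: {@code AWS4-HMAC-SHA256 Credential=<accessKeyID>/<date>/<region>/s3/aws4_request, ...}
*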
* @param httpHeaders HTTP headers of the incoming request
* @return Identified username
* @throws OS3Exception
*/
public String parseUsername(
@Context HttpHeaders httpHeaders) throws OS3Exception {
String auth = httpHeaders.getHeaderString("Authorization");
LOG.info("Auth header string {}", auth);
if (auth == null) {
throw S3ErrorTable
.newError(S3ErrorTable.MALFORMED_HEADER, Resource.HEADER);
}
String userName;
if (auth.startsWith("AWS4")) {
LOG.info("V4 Header {}", auth);
AuthorizationHeaderV4 authorizationHeader = new AuthorizationHeaderV4(
auth);
userName = authorizationHeader.getAccessKeyID().toLowerCase();
} else {
LOG.info("V2 Header {}", auth);
AuthorizationHeaderV2 authorizationHeader = new AuthorizationHeaderV2(
auth);
userName = authorizationHeader.getAccessKeyID().toLowerCase();
}
return userName;
}
@VisibleForTesting
public void setClient(OzoneClient ozoneClient) {


@@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.bucket;
package org.apache.hadoop.ozone.s3.endpoint;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata;


@@ -16,7 +16,7 @@
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.object;
package org.apache.hadoop.ozone.s3.endpoint;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
@@ -48,6 +48,9 @@ public class ListObjectResponse {
@XmlElement(name = "MaxKeys")
private int maxKeys;
@XmlElement(name = "KeyCount")
private int keyCount;
@XmlElement(name = "Delimiter")
private String delimiter = "/";
@@ -144,4 +147,12 @@ public void addKey(KeyMetadata keyMetadata) {
public void addPrefix(String relativeKeyName) {
commonPrefixes.add(new CommonPrefix(relativeKeyName));
}
public int getKeyCount() {
return keyCount;
}
public void setKeyCount(int keyCount) {
this.keyCount = keyCount;
}
}


@@ -0,0 +1,222 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.endpoint;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.HEAD;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.StreamingOutput;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.commons.io.IOUtils;
import org.apache.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Key level rest endpoints.
*/
@Path("/{bucket}/{path:.+}")
public class ObjectEndpoint extends EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(ObjectEndpoint.class);
private List<String> customizableGetHeaders = new ArrayList<>();
public ObjectEndpoint() {
customizableGetHeaders.add("Content-Type");
customizableGetHeaders.add("Content-Language");
customizableGetHeaders.add("Expires");
customizableGetHeaders.add("Cache-Control");
customizableGetHeaders.add("Content-Disposition");
customizableGetHeaders.add("Content-Encoding");
}
/**
* Rest endpoint to upload object to a bucket.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
* more details.
*/
@PUT
public Response put(
@Context HttpHeaders headers,
@PathParam("bucket") String bucketName,
@PathParam("path") String keyPath,
@DefaultValue("STAND_ALONE") @QueryParam("replicationType")
ReplicationType replicationType,
@DefaultValue("ONE") @QueryParam("replicationFactor")
ReplicationFactor replicationFactor,
@DefaultValue("32 * 1024 * 1024") @QueryParam("chunkSize")
String chunkSize,
@HeaderParam("Content-Length") long length,
InputStream body) throws IOException, OS3Exception {
try {
Configuration config = new OzoneConfiguration();
config.set(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, chunkSize);
OzoneBucket bucket = getBucket(bucketName);
OzoneOutputStream output = bucket
.createKey(keyPath, length, replicationType, replicationFactor);
IOUtils.copy(body, output);
output.close();
return Response.ok().status(HttpStatus.SC_OK)
.build();
} catch (IOException ex) {
LOG.error("Exception occurred in PutObject", ex);
throw ex;
}
}
/**
* Rest endpoint to download object from a bucket.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html for
* more details.
*/
@GET
public Response get(
@Context HttpHeaders headers,
@PathParam("bucket") String bucketName,
@PathParam("path") String keyPath,
InputStream body) throws IOException, OS3Exception {
try {
OzoneBucket bucket = getBucket(bucketName);
OzoneInputStream key = bucket
.readKey(keyPath);
StreamingOutput output = dest -> IOUtils.copy(key, dest);
ResponseBuilder responseBuilder = Response.ok(output);
for (String responseHeader : customizableGetHeaders) {
String headerValue = headers.getHeaderString(responseHeader);
if (headerValue != null) {
responseBuilder.header(responseHeader, headerValue);
}
}
return responseBuilder.build();
} catch (IOException ex) {
if (ex.getMessage().contains("NOT_FOUND")) {
OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
.NO_SUCH_OBJECT, S3ErrorTable.Resource.OBJECT);
throw os3Exception;
} else {
throw ex;
}
}
}
/**
* Rest endpoint to check existence of an object in a bucket.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
* for more details.
*/
@HEAD
public Response head(
@PathParam("bucket") String bucketName,
@PathParam("path") String keyPath) throws Exception {
OzoneKeyDetails key;
try {
key = getBucket(bucketName).getKey(keyPath);
// TODO: return the specified range bytes of this object.
} catch (IOException ex) {
LOG.error("Exception occurred in HeadObject", ex);
if (ex.getMessage().contains("KEY_NOT_FOUND")) {
OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
.NO_SUCH_OBJECT, S3ErrorTable.Resource.OBJECT);
throw os3Exception;
} else {
throw ex;
}
}
return Response.ok().status(HttpStatus.SC_OK)
.header("Last-Modified", key.getModificationTime())
.header("ETag", "" + key.getModificationTime())
.header("Content-Length", key.getDataSize())
.header("Content-Type", "binary/octet-stream")
.build();
}
/**
* Delete a specific object from a bucket.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
* for more details.
*/
@DELETE
public Response delete(
@PathParam("bucket") String bucketName,
@PathParam("path") String keyPath) throws IOException, OS3Exception {
try {
OzoneBucket bucket = getBucket(bucketName);
bucket.getKey(keyPath);
bucket.deleteKey(keyPath);
} catch (IOException ex) {
if (ex.getMessage().contains("BUCKET_NOT_FOUND")) {
throw S3ErrorTable.newError(S3ErrorTable
.NO_SUCH_BUCKET, S3ErrorTable.Resource.BUCKET);
} else if (!ex.getMessage().contains("NOT_FOUND")) {
throw ex;
}
//NOT_FOUND is not a problem, AWS doesn't throw an exception for missing
// keys. Just return 204.
}
return Response
.status(Status.NO_CONTENT)
.build();
}
}


@@ -1,4 +1,4 @@
/*
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -6,61 +6,69 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.bucket;
package org.apache.hadoop.ozone.s3.endpoint;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.s3.EndpointBase;
import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.GET;
import javax.ws.rs.NotFoundException;
import javax.ws.rs.Path;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import java.io.IOException;
import java.time.Instant;
import java.util.Iterator;
/**
* List Object Rest endpoint.
*/
@Path("/{volume}")
public class ListBucket extends EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(ListBucket.class);
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Top level rest endpoint.
*/
@Path("/")
public class RootEndpoint extends EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(RootEndpoint.class);
/**
* Rest endpoint to list all the buckets of the current user.
*
* See https://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html
* for more details.
*/
@GET
@Produces(MediaType.APPLICATION_XML)
public ListBucketResponse get(@PathParam("volume") String volumeName)
public ListBucketResponse get(@Context HttpHeaders headers)
throws OS3Exception, IOException {
OzoneVolume volume;
ListBucketResponse response = new ListBucketResponse();
String volumeName = "s3" + parseUsername(headers).toLowerCase();
try {
//TODO: we need a specific s3bucketlist endpoint instead
// of reimplement the naming convention here
volume = getVolume(volumeName);
} catch (NotFoundException ex) {
LOG.error("Exception occurred in ListBucket: volume {} not found.",
volumeName, ex);
OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
.NO_SUCH_VOLUME, S3ErrorTable.Resource.VOLUME);
throw os3Exception;
return response;
} catch (IOException e) {
throw e;
}
Iterator<? extends OzoneBucket> volABucketIter = volume.listBuckets(null);
ListBucketResponse response = new ListBucketResponse();
while(volABucketIter.hasNext()) {
while (volABucketIter.hasNext()) {
OzoneBucket next = volABucketIter.next();
BucketMetadata bucketMetadata = new BucketMetadata();
bucketMetadata.setName(next.getName());

View File

@@ -17,7 +17,7 @@
*/
/**
* Rest endpoint implementation for the Object specific methods.
* Rest endpoint implementation for the s3 gateway.
*/
@javax.xml.bind.annotation.XmlSchema(
namespace = "http://s3.amazonaws"
@@ -26,4 +26,5 @@
xmlns = {
@javax.xml.bind.annotation.XmlNs(namespaceURI = "http://s3.amazonaws"
+ ".com/doc/2006-03-01/", prefix = "")})
package org.apache.hadoop.ozone.s3.object;
package org.apache.hadoop.ozone.s3.endpoint;


@@ -1,51 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.object;
import javax.ws.rs.DELETE;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.s3.EndpointBase;
/**
* Delete Object rest endpoint.
*/
@Path("/{volume}/{bucket}/{path:.+}")
public class DeleteObject extends EndpointBase {
@DELETE
@Produces(MediaType.APPLICATION_XML)
public Response delete(
@PathParam("volume") String volumeName,
@PathParam("bucket") String bucketName,
@PathParam("path") String keyPath) throws IOException {
OzoneBucket bucket = getBucket(volumeName, bucketName);
bucket.deleteKey(keyPath);
return Response.
ok()
.build();
}
}


@@ -1,74 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.object;
import javax.ws.rs.HEAD;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.s3.EndpointBase;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Get object info rest endpoint.
*/
@Path("/{volume}/{bucket}/{path:.+}")
public class HeadObject extends EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(HeadObject.class);
@HEAD
@Produces(MediaType.APPLICATION_XML)
public Response head(
@PathParam("volume") String volumeName,
@PathParam("bucket") String bucketName,
@PathParam("path") String keyPath) throws Exception {
OzoneKeyDetails key;
try {
key = getVolume(volumeName).getBucket(bucketName).getKey(keyPath);
// TODO: return the specified range bytes of this object.
} catch (IOException ex) {
LOG.error("Exception occurred in HeadObject", ex);
if (ex.getMessage().contains("KEY_NOT_FOUND")) {
OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
.NO_SUCH_OBJECT, S3ErrorTable.Resource.OBJECT);
throw os3Exception;
} else {
throw ex;
}
}
return Response.ok().status(HttpStatus.SC_OK)
.header("Last-Modified", key.getModificationTime())
.header("ETag", "" + key.getModificationTime())
.header("Content-Length", key.getDataSize())
.header("Content-Type", "binary/octet-stream")
.build();
}
}


@@ -1,92 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.object;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.HeaderParam;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.s3.EndpointBase;
import org.apache.commons.io.IOUtils;
import org.apache.http.HttpStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* File upload.
*/
@Path("/{bucket}/{path:.+}")
public class PutObject extends EndpointBase {
private static final Logger LOG =
LoggerFactory.getLogger(PutObject.class);
@PUT
@Produces(MediaType.APPLICATION_XML)
public Response put(
@Context HttpHeaders headers,
@PathParam("bucket") String bucketName,
@PathParam("path") String keyPath,
@DefaultValue("STAND_ALONE" ) @QueryParam("replicationType")
ReplicationType replicationType,
@DefaultValue("ONE") @QueryParam("replicationFactor")
ReplicationFactor replicationFactor,
@DefaultValue("32 * 1024 * 1024") @QueryParam("chunkSize")
String chunkSize,
@HeaderParam("Content-Length") long length,
InputStream body) throws IOException {
try {
Configuration config = new OzoneConfiguration();
config.set(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, chunkSize);
OzoneBucket bucket = getVolume(getOzoneVolumeName(bucketName))
.getBucket(bucketName);
OzoneOutputStream output = bucket
.createKey(keyPath, length, replicationType, replicationFactor);
IOUtils.copy(body, output);
output.close();
return Response.ok().status(HttpStatus.SC_OK)
.header("Content-Length", length)
.build();
} catch (IOException ex) {
LOG.error("Exception occurred in PutObject", ex);
throw ex;
}
}
}


@@ -82,7 +82,8 @@ public Iterator<? extends OzoneBucket> listBuckets(String bucketPrefix) {
return bucket.getName().startsWith(bucketPrefix);
} else {
return true;
}})
}
})
.collect(Collectors.toList())
.iterator();
}


@@ -1,21 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Unit tests for the bucket related rest endpoints.
*/
package org.apache.hadoop.ozone.s3.bucket;


@@ -18,30 +18,31 @@
*
*/
package org.apache.hadoop.ozone.s3.bucket;
package org.apache.hadoop.ozone.s3.endpoint;
import javax.ws.rs.core.Response;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.ObjectStoreStub;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.http.HttpStatus;
import org.junit.Before;
import org.junit.Test;
import javax.ws.rs.core.Response;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import org.junit.Before;
import org.junit.Test;
/**
* This class tests delete bucket functionality.
*/
public class TestDeleteBucket {
public class TestBucketDelete {
private String bucketName = "myBucket";
private OzoneClientStub clientStub;
private ObjectStore objectStoreStub;
private DeleteBucket deleteBucket;
private BucketEndpoint bucketEndpoint;
@Before
public void setup() throws Exception {
@@ -53,15 +54,15 @@ public void setup() throws Exception {
objectStoreStub.createS3Bucket("ozone", bucketName);
// Create BucketEndpoint and set its client to OzoneClientStub
deleteBucket = new DeleteBucket();
deleteBucket.setClient(clientStub);
bucketEndpoint = new BucketEndpoint();
bucketEndpoint.setClient(clientStub);
}
@Test
public void testDeleteBucket() throws Exception {
Response response = deleteBucket.delete(bucketName);
public void testBucketEndpoint() throws Exception {
Response response = bucketEndpoint.delete(bucketName);
assertEquals(response.getStatus(), HttpStatus.SC_NO_CONTENT);
}
@@ -69,7 +70,7 @@ public void testDeleteBucket() throws Exception {
@Test
public void testDeleteWithNoSuchBucket() throws Exception {
try {
deleteBucket.delete("unknownbucket");
bucketEndpoint.delete("unknownbucket");
} catch (OS3Exception ex) {
assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), ex.getCode());
assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(),
@ -87,7 +88,7 @@ public void testDeleteWithBucketNotEmpty() throws Exception {
objectStoreStub.createS3Bucket("ozone1", bucket);
ObjectStoreStub stub = (ObjectStoreStub) objectStoreStub;
stub.setBucketEmptyStatus(bucket, false);
deleteBucket.delete(bucket);
bucketEndpoint.delete(bucket);
} catch (OS3Exception ex) {
assertEquals(S3ErrorTable.BUCKET_NOT_EMPTY.getCode(), ex.getCode());
assertEquals(S3ErrorTable.BUCKET_NOT_EMPTY.getErrorMessage(),

View File

@ -1,4 +1,4 @@
/*
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -17,7 +17,7 @@
* under the License.
*
*/
package org.apache.hadoop.ozone.s3.bucket;
package org.apache.hadoop.ozone.s3.endpoint;
import java.io.IOException;
@ -25,8 +25,6 @@
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.object.ListObject;
import org.apache.hadoop.ozone.s3.object.ListObjectResponse;
import org.junit.Assert;
import org.junit.Test;
@ -34,19 +32,19 @@
/**
* Testing basic object list browsing.
*/
public class TestGetBucket {
public class TestBucketGet {
@Test
public void listRoot() throws OS3Exception, IOException {
ListObject getBucket = new ListObject();
BucketEndpoint getBucket = new BucketEndpoint();
OzoneClient client = createClientWithKeys("file1", "dir1/file2");
getBucket.setClient(client);
ListObjectResponse getBucketResponse =
getBucket.get("vol1", "b1", "/", null, null, 100, "", null);
getBucket.list("b1", "/", null, null, 100, "", null);
Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
Assert.assertEquals("dir1/",
@ -61,14 +59,14 @@ public void listRoot() throws OS3Exception, IOException {
@Test
public void listDir() throws OS3Exception, IOException {
ListObject getBucket = new ListObject();
BucketEndpoint getBucket = new BucketEndpoint();
OzoneClient client = createClientWithKeys("dir1/file2", "dir1/dir2/file2");
getBucket.setClient(client);
ListObjectResponse getBucketResponse =
getBucket.get("vol1", "b1", "/", null, null, 100, "dir1", null);
getBucket.list("b1", "/", null, null, 100, "dir1", null);
Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
Assert.assertEquals("dir1/",
@ -81,14 +79,15 @@ public void listDir() throws OS3Exception, IOException {
@Test
public void listSubDir() throws OS3Exception, IOException {
ListObject getBucket = new ListObject();
BucketEndpoint getBucket = new BucketEndpoint();
OzoneClient ozoneClient =
createClientWithKeys("dir1/file2", "dir1/dir2/file2");
getBucket.setClient(ozoneClient);
ListObjectResponse getBucketResponse =
getBucket.get("vol1", "b1", "/", null, null, 100, "dir1/", null);
getBucket.list("b1", "/", null, null, 100, "dir1/", null);
Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
Assert.assertEquals("dir1/dir2/",
@ -102,10 +101,12 @@ public void listSubDir() throws OS3Exception, IOException {
private OzoneClient createClientWithKeys(String... keys) throws IOException {
OzoneClient client = new OzoneClientStub();
client.getObjectStore().createVolume("vol1");
client.getObjectStore().getVolume("vol1").createBucket("b1");
client.getObjectStore().createS3Bucket("bilbo", "b1");
String volume = client.getObjectStore().getOzoneVolumeName("b1");
client.getObjectStore().getVolume(volume).createBucket("b1");
OzoneBucket bucket =
client.getObjectStore().getVolume("vol1").getBucket("b1");
client.getObjectStore().getVolume(volume).getBucket("b1");
for (String key : keys) {
bucket.createKey(key, 0).close();
}

View File

@ -18,30 +18,28 @@
*
*/
package org.apache.hadoop.ozone.s3.bucket;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.junit.Before;
import org.junit.Test;
package org.apache.hadoop.ozone.s3.endpoint;
import javax.ws.rs.core.Response;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.junit.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import org.junit.Before;
import org.junit.Test;
/**
* This class tests head bucket functionality.
*/
public class TestHeadBucket {
public class TestBucketHead {
private String bucketName = "myBucket";
private String userName = "ozone";
private OzoneClientStub clientStub;
private ObjectStore objectStoreStub;
private HeadBucket headBucket;
private BucketEndpoint bucketEndpoint;
@Before
public void setup() throws Exception {
@ -53,33 +51,21 @@ public void setup() throws Exception {
objectStoreStub.createS3Bucket(userName, bucketName);
// Create BucketEndpoint and set client to OzoneClientStub
headBucket = new HeadBucket();
headBucket.setClient(clientStub);
bucketEndpoint = new BucketEndpoint();
bucketEndpoint.setClient(clientStub);
}
@Test
public void testHeadBucket() throws Exception {
Response response = headBucket.head(bucketName);
Response response = bucketEndpoint.head(bucketName);
assertEquals(200, response.getStatus());
}
@Test
public void testHeadFail() {
try {
headBucket.head("unknownbucket");
} catch (Exception ex) {
if (ex instanceof OS3Exception) {
assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(),
((OS3Exception) ex).getCode());
assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(), (
(OS3Exception) ex).getErrorMessage());
} else {
fail("testHeadFail failed");
}
return;
}
fail("testHeadFail failed");
public void testHeadFail() throws Exception {
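// HEAD on a missing bucket now yields a plain 400 response instead of an OS3Exception.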
Response response = bucketEndpoint.head("unknownbucket");
Assert.assertEquals(400, response.getStatus());
}
}

View File

@ -17,13 +17,11 @@
* under the License.
*
*/
package org.apache.hadoop.ozone.s3.bucket;
package org.apache.hadoop.ozone.s3.endpoint;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import org.apache.hadoop.ozone.s3.object.ListObjectResponse;
import org.junit.Test;
/**

View File

@ -17,13 +17,14 @@
* under the License.
*
*/
package org.apache.hadoop.ozone.s3.object;
package org.apache.hadoop.ozone.s3.endpoint;
import java.io.IOException;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.junit.Assert;
import org.junit.Test;
@ -31,23 +32,26 @@
/**
* Test delete object.
*/
public class TestDeleteObject {
public class TestObjectDelete {
@Test
public void delete() throws IOException {
public void delete() throws IOException, OS3Exception {
//GIVEN
OzoneClient client = new OzoneClientStub();
client.getObjectStore().createVolume("vol1");
client.getObjectStore().getVolume("vol1").createBucket("b1");
client.getObjectStore().createS3Bucket("bilbo", "b1");
String volumeName = client.getObjectStore().getOzoneVolumeName("b1");
OzoneBucket bucket =
client.getObjectStore().getVolume("vol1").getBucket("b1");
client.getObjectStore().getVolume(volumeName).getBucket("b1");
bucket.createKey("key1", 0).close();
DeleteObject rest = new DeleteObject();
ObjectEndpoint rest = new ObjectEndpoint();
rest.setClient(client);
//WHEN
rest.delete("vol1", "b1", "key1");
rest.delete("b1", "key1");
//THEN
Assert.assertFalse("Bucket Should not contain any key after delete",

View File

@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.hadoop.ozone.s3.endpoint;
import javax.ws.rs.core.HttpHeaders;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.Charset;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.commons.io.IOUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
/**
* Test get object.
*/
public class TestObjectGet {
public static final String CONTENT = "0123456789";
@Test
public void get() throws IOException, OS3Exception {
//GIVEN
OzoneClientStub client = new OzoneClientStub();
client.getObjectStore().createS3Bucket("bilbo", "b1");
String volumeName = client.getObjectStore().getOzoneVolumeName("b1");
OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
volume.createBucket("b1");
OzoneBucket bucket =
volume.getBucket("b1");
OzoneOutputStream keyStream =
bucket.createKey("key1", CONTENT.getBytes().length);
keyStream.write(CONTENT.getBytes());
keyStream.close();
ObjectEndpoint rest = new ObjectEndpoint();
rest.setClient(client);
HttpHeaders headers = Mockito.mock(HttpHeaders.class);
ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
//WHEN
rest.get(headers, "b1", "key1", body);
//THEN
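// Read the key back through the native client and compare it with the original content.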
OzoneInputStream ozoneInputStream =
volume.getBucket("b1")
.readKey("key1");
String keyContent =
IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8"));
Assert.assertEquals(CONTENT, keyContent);
}
}

View File

@ -17,32 +17,33 @@
* under the License.
*
*/
package org.apache.hadoop.ozone.s3.object;
package org.apache.hadoop.ozone.s3.endpoint;
import javax.ws.rs.core.Response;
import java.io.IOException;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.*;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
import org.apache.commons.lang3.RandomStringUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import javax.ws.rs.core.Response;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import java.io.IOException;
import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
/**
* Test head object.
*/
public class TestHeadObject {
private String volName = "vol1";
public class TestObjectHead {
private String bucketName = "b1";
private OzoneClientStub clientStub;
private ObjectStore objectStoreStub;
private HeadObject headObject;
private ObjectEndpoint keyEndpoint;
private OzoneBucket bucket;
@Before
@ -52,14 +53,14 @@ public void setup() throws IOException {
objectStoreStub = clientStub.getObjectStore();
// Create volume and bucket
objectStoreStub.createVolume(volName);
OzoneVolume volumeStub = objectStoreStub.getVolume(volName);
volumeStub.createBucket(bucketName);
objectStoreStub.createS3Bucket("bilbo", bucketName);
String volName = objectStoreStub.getOzoneVolumeName(bucketName);
bucket = objectStoreStub.getVolume(volName).getBucket(bucketName);
// Create ObjectEndpoint and set client to OzoneClientStub
headObject = new HeadObject();
headObject.setClient(clientStub);
keyEndpoint = new ObjectEndpoint();
keyEndpoint.setClient(clientStub);
}
@Test
@ -73,7 +74,7 @@ public void testHeadObject() throws Exception {
out.close();
//WHEN
Response response = headObject.head(volName, bucketName, "key1");
Response response = keyEndpoint.head(bucketName, "key1");
//THEN
Assert.assertEquals(200, response.getStatus());
@ -85,7 +86,7 @@ public void testHeadObject() throws Exception {
public void testHeadFailByBadName() throws Exception {
//Head an object that doesn't exist.
try {
headObject.head(volName, bucketName, "badKeyName");
keyEndpoint.head(bucketName, "badKeyName");
} catch (OS3Exception ex) {
Assert.assertTrue(ex.getCode().contains("NoSuchObject"));
Assert.assertTrue(ex.getErrorMessage().contains("object does not exist"));

View File

@ -18,7 +18,7 @@
*
*/
package org.apache.hadoop.ozone.s3.object;
package org.apache.hadoop.ozone.s3.endpoint;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response;
@ -26,11 +26,12 @@
import java.io.IOException;
import java.nio.charset.Charset;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.commons.io.IOUtils;
import org.junit.Assert;
@ -48,7 +49,7 @@ public class TestPutObject {
private String keyName = "key1";
private OzoneClientStub clientStub;
private ObjectStore objectStoreStub;
private PutObject putObject;
private ObjectEndpoint objectEndpoint;
@Before
public void setup() throws IOException {
@ -60,23 +61,24 @@ public void setup() throws IOException {
objectStoreStub.createS3Bucket(userName, bucketName);
// Create ObjectEndpoint and set client to OzoneClientStub
putObject = new PutObject();
putObject.setClient(clientStub);
objectEndpoint = new ObjectEndpoint();
objectEndpoint.setClient(clientStub);
}
@Test
public void testPutObject() throws IOException {
public void testPutObject() throws IOException, OS3Exception {
//GIVEN
HttpHeaders headers = Mockito.mock(HttpHeaders.class);
ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
//WHEN
Response response = putObject.put(headers, bucketName, keyName,
Response response = objectEndpoint.put(headers, bucketName, keyName,
ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "32 * 1024 * 1024",
CONTENT.length(), body);
//THEN
String volumeName = clientStub.getObjectStore().getOzoneVolumeName(bucketName);
String volumeName = clientStub.getObjectStore()
.getOzoneVolumeName(bucketName);
OzoneInputStream ozoneInputStream =
clientStub.getObjectStore().getVolume(volumeName).getBucket(bucketName)
.readKey(keyName);

View File

@ -18,32 +18,31 @@
*
*/
package org.apache.hadoop.ozone.s3.bucket;
package org.apache.hadoop.ozone.s3.endpoint;
import javax.ws.rs.core.HttpHeaders;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
import org.apache.commons.lang3.RandomStringUtils;
import static org.junit.Assert.assertEquals;
import org.junit.Before;
import org.junit.Test;
import javax.ws.rs.core.Response;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import org.mockito.Mockito;
import static org.mockito.Mockito.when;
/**
* This class tests the root endpoint (bucket list) functionality.
*/
public class TestListBucket {
public class TestRootList {
private String volumeName = "vol1";
private OzoneClientStub clientStub;
private ObjectStore objectStoreStub;
OzoneVolume volumeStub;
private ListBucket listBucket;
private OzoneVolume volumeStub;
private RootEndpoint rootEndpoint;
@Before
public void setup() throws Exception {
@ -51,22 +50,21 @@ public void setup() throws Exception {
//Create client stub and object store stub.
clientStub = new OzoneClientStub();
objectStoreStub = clientStub.getObjectStore();
// Create the backing volume
objectStoreStub.createVolume(volumeName);
volumeStub = objectStoreStub.getVolume(volumeName);
//volumeStub.createBucket(bucketName);
objectStoreStub.createVolume("s3key");
volumeStub = objectStoreStub.getVolume("s3key");
// Create RootEndpoint and set client to OzoneClientStub
listBucket = new ListBucket();
listBucket.setClient(clientStub);
rootEndpoint = new RootEndpoint();
rootEndpoint.setClient(clientStub);
}
@Test
public void testListBucket() throws Exception {
HttpHeaders headers = Mockito.mock(HttpHeaders.class);
when(headers.getHeaderString("Authorization")).thenReturn("AWS key:secret");
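// The root endpoint appears to derive the backing volume from the access key in the Authorization header ("key" -> volume "s3key").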
// The list operation should succeed even when there is no bucket.
ListBucketResponse response = listBucket.get(volumeName);
ListBucketResponse response = rootEndpoint.get(headers);
assertEquals(0, response.getBucketsNum());
String bucketBaseName = "bucket-";
@ -74,24 +72,8 @@ public void testListBucket() throws Exception {
volumeStub.createBucket(
bucketBaseName + RandomStringUtils.randomNumeric(3));
}
response = listBucket.get(volumeName);
response = rootEndpoint.get(headers);
assertEquals(10, response.getBucketsNum());
}
@Test
public void testListBucketFail() {
try {
listBucket.get("badVolumeName");
} catch (Exception ex) {
if (ex instanceof OS3Exception) {
assertEquals(S3ErrorTable.NO_SUCH_VOLUME.getCode(),
((OS3Exception) ex).getCode());
assertEquals(S3ErrorTable.NO_SUCH_VOLUME.getErrorMessage(), (
(OS3Exception) ex).getErrorMessage());
} else {
fail("testHeadFail failed");
}
return;
}
}
}

View File

@ -16,6 +16,6 @@
* limitations under the License.
*/
/**
* Unit tests for the object related rest endpoints.
* Unit tests for the rest endpoint implementations.
*/
package org.apache.hadoop.ozone.s3.object;
package org.apache.hadoop.ozone.s3.endpoint;