HDDS-1942. Support copy during S3 multipart upload part creation

Signed-off-by: Anu Engineer <aengineer@apache.org>
Márton Elek 2019-08-11 14:45:02 +02:00 committed by Anu Engineer
parent addfb7ff7d
commit 2fcd0da7dc
7 changed files with 483 additions and 20 deletions
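
The feature added here is S3's UploadPartCopy: an upload-part request that carries an x-amz-copy-source header (and optionally x-amz-copy-source-range), so the part is filled from an existing object instead of the request body. Below is a minimal client-side sketch using the AWS SDK for Java v1; the endpoint, region, bucket and key names are illustrative assumptions, and the SDK's CopyPartResult model class is unrelated to the server-side class of the same name added by this commit.

import java.util.Arrays;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CopyPartRequest;
import com.amazonaws.services.s3.model.CopyPartResult;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;

public class UploadPartCopyExample {
  public static void main(String[] args) {
    // Assumed Ozone s3g address; any S3-compatible endpoint works the same.
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
            "http://localhost:9878", "us-east-1"))
        .enablePathStyleAccess()
        .build();

    InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest("bucket1", "dest-key"));

    // Fill part 1 of dest-key from bytes 0..4 of an existing object; the SDK
    // sends x-amz-copy-source and x-amz-copy-source-range under the hood.
    CopyPartResult part = s3.copyPart(new CopyPartRequest()
        .withUploadId(init.getUploadId())
        .withSourceBucketName("bucket1")
        .withSourceKey("source-key")
        .withDestinationBucketName("bucket1")
        .withDestinationKey("dest-key")
        .withPartNumber(1)
        .withFirstByte(0L)
        .withLastByte(4L));

    s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
        "bucket1", "dest-key", init.getUploadId(),
        Arrays.asList(part.getPartETag())));
  }
}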

View File

@@ -200,3 +200,55 @@ Test Multipart Upload with the simplified aws s3 cp API
    Execute AWSS3Cli    cp s3://${BUCKET}/mpyawscli /tmp/part1.result
    Execute AWSS3Cli    rm s3://${BUCKET}/mpyawscli
    Compare files    /tmp/part1    /tmp/part1.result

Test Multipart Upload Put With Copy
    Run Keyword    Create Random file    5
    ${result} =    Execute AWSS3APICli    put-object --bucket ${BUCKET} --key copytest/source --body /tmp/part1
    ${result} =    Execute AWSS3APICli    create-multipart-upload --bucket ${BUCKET} --key copytest/destination
    ${uploadID} =    Execute and checkrc    echo '${result}' | jq -r '.UploadId'    0
    Should contain    ${result}    ${BUCKET}
    Should contain    ${result}    UploadId
    ${result} =    Execute AWSS3APICli    upload-part-copy --bucket ${BUCKET} --key copytest/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copytest/source
    Should contain    ${result}    ${BUCKET}
    Should contain    ${result}    ETag
    Should contain    ${result}    LastModified
    ${eTag1} =    Execute and checkrc    echo '${result}' | jq -r '.CopyPartResult.ETag'    0
    Execute AWSS3APICli    complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key copytest/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1}]'
    Execute AWSS3APICli    get-object --bucket ${BUCKET} --key copytest/destination /tmp/part-result
    Compare files    /tmp/part1    /tmp/part-result

Test Multipart Upload Put With Copy and range
    Run Keyword    Create Random file    10
    ${result} =    Execute AWSS3APICli    put-object --bucket ${BUCKET} --key copyrange/source --body /tmp/part1
    ${result} =    Execute AWSS3APICli    create-multipart-upload --bucket ${BUCKET} --key copyrange/destination
    ${uploadID} =    Execute and checkrc    echo '${result}' | jq -r '.UploadId'    0
    Should contain    ${result}    ${BUCKET}
    Should contain    ${result}    UploadId
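    # The two copies below split the 10 MB source at byte 10485758: part 1
    # takes [0, 10485758) and part 2 takes [10485758, 10485760), because this
    # implementation treats the range end offset as exclusive.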
    ${result} =    Execute AWSS3APICli    upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=0-10485758
    Should contain    ${result}    ${BUCKET}
    Should contain    ${result}    ETag
    Should contain    ${result}    LastModified
    ${eTag1} =    Execute and checkrc    echo '${result}' | jq -r '.CopyPartResult.ETag'    0
    ${result} =    Execute AWSS3APICli    upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 2 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=10485758-10485760
    Should contain    ${result}    ${BUCKET}
    Should contain    ${result}    ETag
    Should contain    ${result}    LastModified
    ${eTag2} =    Execute and checkrc    echo '${result}' | jq -r '.CopyPartResult.ETag'    0
    Execute AWSS3APICli    complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key copyrange/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]'
    Execute AWSS3APICli    get-object --bucket ${BUCKET} --key copyrange/destination /tmp/part-result
    Compare files    /tmp/part1    /tmp/part-result

View File

@@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.s3.endpoint;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import java.time.Instant;

import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter;

/**
 * Copy part result, returned as the body of an upload-part-copy response.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlRootElement(name = "CopyPartResult",
    namespace = "http://s3.amazonaws.com/doc/2006-03-01/")
public class CopyPartResult {

  @XmlJavaTypeAdapter(IsoDateAdapter.class)
  @XmlElement(name = "LastModified")
  private Instant lastModified;

  @XmlElement(name = "ETag")
  private String eTag;

  public CopyPartResult() {
  }

  public CopyPartResult(String eTag) {
    this.eTag = eTag;
    this.lastModified = Instant.now();
  }

  public Instant getLastModified() {
    return lastModified;
  }

  public void setLastModified(Instant lastModified) {
    this.lastModified = lastModified;
  }

  public String getETag() {
    return eTag;
  }

  public void setETag(String tag) {
    this.eTag = tag;
  }
}
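
As a quick illustration of the wire format, this sketch marshals the class above with JAXB; the printed XML shape is what clients parse out of an upload-part-copy response, though the exact timestamp rendering comes from IsoDateAdapter and the namespace prefix and values here are illustrative.

import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;

public class CopyPartResultDemo {
  public static void main(String[] args) throws JAXBException {
    Marshaller marshaller = JAXBContext.newInstance(CopyPartResult.class)
        .createMarshaller();
    marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
    // Prints roughly (values illustrative):
    // <CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    //   <LastModified>2019-08-11T12:45:02.000Z</LastModified>
    //   <ETag>part-1-etag</ETag>
    // </CopyPartResult>
    marshaller.marshal(new CopyPartResult("part-1-etag"), System.out);
  }
}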

View File

@@ -76,11 +76,13 @@
import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.tuple.Pair;
import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ENTITY_TOO_SMALL;
import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD;
import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE;
import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT;
import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
@@ -537,12 +539,45 @@ private Response createMultipartKey(String bucket, String key, long length,
      OzoneBucket ozoneBucket = getBucket(bucket);
      OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(
          key, length, partNumber, uploadID);

      String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
      if (copyHeader != null) {
        Pair<String, String> result = parseSourceHeader(copyHeader);
        String sourceBucket = result.getLeft();
        String sourceKey = result.getRight();

        try (OzoneInputStream sourceObject =
            getBucket(sourceBucket).readKey(sourceKey)) {

          String range =
              headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
          if (range != null) {
            RangeHeader rangeHeader =
                RangeHeaderParserUtil.parseRangeHeader(range, 0);
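            // The copied length is endOffset - startOffset, i.e. the range
            // end offset is treated as exclusive here.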
            IOUtils.copyLarge(sourceObject, ozoneOutputStream,
                rangeHeader.getStartOffset(),
                rangeHeader.getEndOffset() - rangeHeader.getStartOffset());
          } else {
            IOUtils.copy(sourceObject, ozoneOutputStream);
          }
        }
      } else {
        IOUtils.copy(body, ozoneOutputStream);
      }
      ozoneOutputStream.close();

      OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
          ozoneOutputStream.getCommitUploadPartInfo();
      String eTag = omMultipartCommitUploadPartInfo.getPartName();

      if (copyHeader != null) {
        return Response.ok(new CopyPartResult(eTag)).build();
      } else {
        return Response.ok().header("ETag", eTag).build();
      }
    } catch (OMException ex) {
      if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
@@ -628,20 +663,10 @@ private CopyObjectResponse copyObject(String copyHeader,
      boolean storageTypeDefault)
      throws OS3Exception, IOException {

    Pair<String, String> result = parseSourceHeader(copyHeader);

    String sourceBucket = result.getLeft();
    String sourceKey = result.getRight();

    OzoneInputStream sourceInputStream = null;
    OzoneOutputStream destOutputStream = null;
    boolean closed = false;
@@ -720,4 +745,26 @@ private CopyObjectResponse copyObject(String copyHeader,
      }
    }
  }

  /**
   * Parse the bucket and key name from the copy-source header.
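   * For example, both "bucket1/key1" and "/bucket1/key1" parse to the
   * pair (bucket1, key1).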
   */
  @VisibleForTesting
  public static Pair<String, String> parseSourceHeader(String copyHeader)
      throws OS3Exception {
    String header = copyHeader;
    if (header.startsWith("/")) {
      header = copyHeader.substring(1);
    }
    int pos = header.indexOf("/");
    if (pos == -1) {
      OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
          .INVALID_ARGUMENT, header);
      ex.setErrorMessage("Copy Source must mention the source bucket and " +
          "key: sourcebucket/sourcekey");
      throw ex;
    }

    return Pair.of(header.substring(0, pos), header.substring(pos + 1));
  }
}

View File

@@ -34,6 +34,8 @@ private S3Consts() {
  }

  public static final String COPY_SOURCE_HEADER = "x-amz-copy-source";
  public static final String COPY_SOURCE_HEADER_RANGE =
      "x-amz-copy-source-range";
  public static final String STORAGE_CLASS_HEADER = "x-amz-storage-class";

  public static final String ENCODING_TYPE = "url";

View File

@@ -210,16 +210,23 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key,
      }
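      // Rebuild the key by concatenating the recorded content of every
      // part, in part-number order.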
      int count = 1;
      ByteArrayOutputStream output = new ByteArrayOutputStream();
      for (Map.Entry<Integer, String> part : partsMap.entrySet()) {
        Part recordedPart = partsList.get(part.getKey());
        if (part.getKey() != count) {
          throw new OMException(ResultCodes.MISSING_UPLOAD_PARTS);
        } else {
          if (!part.getValue().equals(recordedPart.getPartName())) {
            throw new OMException(ResultCodes.MISMATCH_MULTIPART_LIST);
          } else {
            count++;
            output.write(recordedPart.getContent());
          }
        }
      }
      keyContents.put(key, output.toByteArray());
    }

    return new OmMultipartUploadCompleteInfo(getVolumeName(), getName(), key,

View File

@@ -0,0 +1,233 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.hadoop.ozone.s3.endpoint;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE;
import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import org.junit.Assert;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import static org.mockito.Mockito.when;
/**
 * Tests multipart upload where parts are created with the copy header.
 */
public class TestMultipartUploadWithCopy {

  private final static ObjectEndpoint REST = new ObjectEndpoint();

  private final static String BUCKET = "s3bucket";
  private final static String KEY = "key2";
  private final static String EXISTING_KEY = "key1";
  private static final String EXISTING_KEY_CONTENT = "testkey";
  private final static OzoneClientStub CLIENT = new OzoneClientStub();
  private static final int RANGE_FROM = 2;
  private static final int RANGE_TO = 4;

  @BeforeClass
  public static void setUp() throws Exception {
    ObjectStore objectStore = CLIENT.getObjectStore();
    objectStore.createS3Bucket("ozone", BUCKET);

    OzoneBucket bucket = getOzoneBucket(objectStore, BUCKET);

    byte[] keyContent = EXISTING_KEY_CONTENT.getBytes();
    try (OutputStream stream = bucket
        .createKey(EXISTING_KEY, keyContent.length, ReplicationType.RATIS,
            ReplicationFactor.THREE, new HashMap<>())) {
      stream.write(keyContent);
    }

    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
        "STANDARD");

    REST.setHeaders(headers);
    REST.setClient(CLIENT);
  }

  @Test
  public void testMultipart() throws Exception {

    // Initiate multipart upload
    String uploadID = initiateMultipartUpload(KEY);

    List<Part> partsList = new ArrayList<>();

    // Upload parts
    String content = "Multipart Upload 1";
    int partNumber = 1;

    Part part1 = uploadPart(KEY, uploadID, partNumber, content);
    partsList.add(part1);

    partNumber = 2;
    Part part2 =
        uploadPartWithCopy(KEY, uploadID, partNumber,
            BUCKET + "/" + EXISTING_KEY, null);
    partsList.add(part2);

    partNumber = 3;
    Part part3 =
        uploadPartWithCopy(KEY, uploadID, partNumber,
            BUCKET + "/" + EXISTING_KEY,
            "bytes=" + RANGE_FROM + "-" + RANGE_TO);
    partsList.add(part3);

    // complete multipart upload
    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
        CompleteMultipartUploadRequest();
    completeMultipartUploadRequest.setPartList(partsList);

    completeMultipartUpload(KEY, completeMultipartUploadRequest,
        uploadID);

    OzoneBucket bucket = getOzoneBucket(CLIENT.getObjectStore(), BUCKET);
    try (InputStream is = bucket.readKey(KEY)) {
      String keyContent = new Scanner(is).useDelimiter("\\A").next();
      Assert.assertEquals(content + EXISTING_KEY_CONTENT + EXISTING_KEY_CONTENT
          .substring(RANGE_FROM, RANGE_TO), keyContent);
    }
  }

  private String initiateMultipartUpload(String key) throws IOException,
      OS3Exception {
    setHeaders();
    Response response = REST.initializeMultipartUpload(BUCKET, key);
    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
        (MultipartUploadInitiateResponse) response.getEntity();
    assertNotNull(multipartUploadInitiateResponse.getUploadID());
    String uploadID = multipartUploadInitiateResponse.getUploadID();

    assertEquals(response.getStatus(), 200);

    return uploadID;
  }

  private Part uploadPart(String key, String uploadID, int partNumber, String
      content) throws IOException, OS3Exception {
    setHeaders();
    ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes());
    Response response = REST.put(BUCKET, key, content.length(), partNumber,
        uploadID, body);
    assertEquals(response.getStatus(), 200);
    assertNotNull(response.getHeaderString("ETag"));

    Part part = new Part();
    part.seteTag(response.getHeaderString("ETag"));
    part.setPartNumber(partNumber);

    return part;
  }

  private Part uploadPartWithCopy(String key, String uploadID, int partNumber,
      String keyOrigin, String range) throws IOException, OS3Exception {
    Map<String, String> additionalHeaders = new HashMap<>();
    additionalHeaders.put(COPY_SOURCE_HEADER, keyOrigin);
    if (range != null) {
      additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, range);
    }
    setHeaders(additionalHeaders);

    ByteArrayInputStream body = new ByteArrayInputStream("".getBytes());
    Response response = REST.put(BUCKET, key, 0, partNumber,
        uploadID, body);
    assertEquals(response.getStatus(), 200);

    CopyPartResult result = (CopyPartResult) response.getEntity();
    assertNotNull(result.getETag());
    assertNotNull(result.getLastModified());

    Part part = new Part();
    part.seteTag(result.getETag());
    part.setPartNumber(partNumber);

    return part;
  }

  private void completeMultipartUpload(String key,
      CompleteMultipartUploadRequest completeMultipartUploadRequest,
      String uploadID) throws IOException, OS3Exception {
    setHeaders();
    Response response = REST.completeMultipartUpload(BUCKET, key, uploadID,
        completeMultipartUploadRequest);

    assertEquals(response.getStatus(), 200);

    CompleteMultipartUploadResponse completeMultipartUploadResponse =
        (CompleteMultipartUploadResponse) response.getEntity();

    assertEquals(completeMultipartUploadResponse.getBucket(), BUCKET);
    assertEquals(completeMultipartUploadResponse.getKey(), KEY);
    assertEquals(completeMultipartUploadResponse.getLocation(), BUCKET);
    assertNotNull(completeMultipartUploadResponse.getETag());
  }

  private void setHeaders(Map<String, String> additionalHeaders) {
    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
        "STANDARD");

    additionalHeaders
        .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v));

    REST.setHeaders(headers);
  }

  private void setHeaders() {
    setHeaders(new HashMap<>());
  }

  private static OzoneBucket getOzoneBucket(ObjectStore objectStore,
      String bucketName)
      throws IOException {
    String ozoneBucketName = objectStore.getOzoneBucketName(bucketName);
    String ozoneVolumeName = objectStore.getOzoneVolumeName(bucketName);
    return objectStore.getVolume(ozoneVolumeName).getBucket(ozoneBucketName);
  }
}

View File

@@ -0,0 +1,53 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.hadoop.ozone.s3.endpoint;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.Assert;
import org.junit.Test;
/**
* Test static utility methods of the ObjectEndpoint.
*/
public class TestObjectEndpoint {

  @Test
  public void parseSourceHeader() throws OS3Exception {
    Pair<String, String> bucketKey =
        ObjectEndpoint.parseSourceHeader("bucket1/key1");

    Assert.assertEquals("bucket1", bucketKey.getLeft());
    Assert.assertEquals("key1", bucketKey.getRight());
  }

  @Test
  public void parseSourceHeaderWithPrefix() throws OS3Exception {
    Pair<String, String> bucketKey =
        ObjectEndpoint.parseSourceHeader("/bucket1/key1");

    Assert.assertEquals("bucket1", bucketKey.getLeft());
    Assert.assertEquals("key1", bucketKey.getRight());
  }
}