diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index cc908fc425..1d80f97a9b 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -26,9 +26,11 @@
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .VolumeList;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
@@ -251,7 +253,7 @@ List<OmVolumeArgs> listVolumes(String userName, String prefix,
    *
    * @return Deleted Table.
    */
-  Table<String, OmKeyInfo> getDeletedTable();
+  Table<String, RepeatedOmKeyInfo> getDeletedTable();
 
   /**
    * Gets the OpenKeyTable.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
new file mode 100644
index 0000000000..a0ef4a5753
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.ozone.om.codec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RepeatedKeyInfo;
+
+import java.io.IOException;
+
+/**
+ * Codec to encode RepeatedOmKeyInfo as a byte array.
+ */
+public class RepeatedOmKeyInfoCodec implements Codec<RepeatedOmKeyInfo> {
+  @Override
+  public byte[] toPersistedFormat(RepeatedOmKeyInfo object)
+      throws IOException {
+    Preconditions.checkNotNull(object,
+        "Null object can't be converted to byte array.");
+    return object.getProto().toByteArray();
+  }
+
+  @Override
+  public RepeatedOmKeyInfo fromPersistedFormat(byte[] rawData)
+      throws IOException {
+    Preconditions.checkNotNull(rawData,
+        "Null byte array can't be converted to real object.");
+    try {
+      return RepeatedOmKeyInfo.getFromProto(RepeatedKeyInfo.parseFrom(rawData));
+    } catch (InvalidProtocolBufferException e) {
+      throw new IllegalArgumentException(
+          "Can't decode the raw data from the byte array", e);
+    }
+  }
+}
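For reference, a quick round trip through the new codec — an illustrative sketch, not part of the patch; it assumes a populated OmKeyInfo instance named keyInfo:

    // Serialize one delete instance and read it back through the codec.
    RepeatedOmKeyInfoCodec codec = new RepeatedOmKeyInfoCodec();
    RepeatedOmKeyInfo original = new RepeatedOmKeyInfo(keyInfo);

    byte[] persisted = codec.toPersistedFormat(original); // proto bytes
    RepeatedOmKeyInfo decoded = codec.fromPersistedFormat(persisted);

    assert decoded.getOmKeyInfoList().size() == 1;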
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
new file mode 100644
index 0000000000..c28c2c8abc
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RepeatedKeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .KeyInfo;
+
+/**
+ * Args for deleted keys. This is written to om metadata deletedTable.
+ * Once a key is deleted, it is moved to om metadata deletedTable. Having a
+ * {label: List<OmKeyInfo>} ensures that if users create & delete keys with
+ * the exact same uri multiple times, all the delete instances are bundled
+ * under the same key name. This is useful as part of GDPR compliance where
+ * an admin wants to confirm if a given key is deleted from deletedTable
+ * metadata.
+ */
+public class RepeatedOmKeyInfo {
+  private List<OmKeyInfo> omKeyInfoList;
+
+  public RepeatedOmKeyInfo(List<OmKeyInfo> omKeyInfos) {
+    this.omKeyInfoList = omKeyInfos;
+  }
+
+  public RepeatedOmKeyInfo(OmKeyInfo omKeyInfo) {
+    this.omKeyInfoList = new ArrayList<>();
+    this.omKeyInfoList.add(omKeyInfo);
+  }
+
+  public void addOmKeyInfo(OmKeyInfo info) {
+    this.omKeyInfoList.add(info);
+  }
+
+  public List<OmKeyInfo> getOmKeyInfoList() {
+    return omKeyInfoList;
+  }
+
+  public static RepeatedOmKeyInfo getFromProto(
+      RepeatedKeyInfo repeatedKeyInfo) {
+    List<OmKeyInfo> list = new ArrayList<>();
+    for (KeyInfo k : repeatedKeyInfo.getKeyInfoList()) {
+      list.add(OmKeyInfo.getFromProtobuf(k));
+    }
+    return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build();
+  }
+
+  public RepeatedKeyInfo getProto() {
+    List<KeyInfo> list = new ArrayList<>();
+    for (OmKeyInfo k : omKeyInfoList) {
+      list.add(k.getProtobuf());
+    }
+
+    RepeatedKeyInfo.Builder builder = RepeatedKeyInfo.newBuilder()
+        .addAllKeyInfo(list);
+    return builder.build();
+  }
+
+  /**
+   * Builder of RepeatedOmKeyInfo.
+   */
+  public static class Builder {
+    private List<OmKeyInfo> omKeyInfos;
+
+    public Builder() {
+    }
+
+    public Builder setOmKeyInfos(List<OmKeyInfo> infoList) {
+      this.omKeyInfos = infoList;
+      return this;
+    }
+
+    public RepeatedOmKeyInfo build() {
+      return new RepeatedOmKeyInfo(omKeyInfos);
+    }
+  }
+}
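The bundling behavior the javadoc above describes, as a minimal sketch (assumes two OmKeyInfo instances, firstDelete and secondDelete, for the same URI):

    // Two deletes of the same /volume/bucket/key URI share one record.
    RepeatedOmKeyInfo bundle = new RepeatedOmKeyInfo(firstDelete);
    bundle.addOmKeyInfo(secondDelete);

    // Both delete instances survive the proto round trip.
    RepeatedKeyInfo proto = bundle.getProto();
    assert RepeatedOmKeyInfo.getFromProto(proto)
        .getOmKeyInfoList().size() == 2;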
+ */ + public static class Builder { + private List omKeyInfos; + + public Builder(){} + + public Builder setOmKeyInfos(List infoList) { + this.omKeyInfos = infoList; + return this; + } + + public RepeatedOmKeyInfo build() { + return new RepeatedOmKeyInfo(omKeyInfos); + } + } +} diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto index 0fd02cee2a..61e9f0f7f4 100644 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto @@ -687,6 +687,10 @@ message KeyInfo { repeated OzoneAclInfo acls = 13; } +message RepeatedKeyInfo { + repeated KeyInfo keyInfo = 1; +} + message OzoneFileStatusProto { required hadoop.fs.FileStatusProto status = 1; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 5b6ac422e4..526274c239 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -85,6 +85,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.security.acl.OzoneObj; @@ -781,9 +782,17 @@ public void deleteKey(OmKeyArgs args) throws IOException { return; } } - metadataManager.getStore().move(objectKey, - metadataManager.getKeyTable(), - metadataManager.getDeletedTable()); + //Check if key with same keyName exists in deletedTable and then + // insert/update accordingly. + RepeatedOmKeyInfo repeatedOmKeyInfo = + metadataManager.getDeletedTable().get(objectKey); + if(repeatedOmKeyInfo == null) { + repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo); + } else { + repeatedOmKeyInfo.addOmKeyInfo(keyInfo); + } + metadataManager.getKeyTable().delete(objectKey); + metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo); } catch (OMException ex) { throw ex; } catch (IOException ex) { @@ -1003,7 +1012,14 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( // will not be garbage collected, so move this part to delete table // and throw error // Move this part to delete table. - metadataManager.getDeletedTable().put(partName, keyInfo); + RepeatedOmKeyInfo repeatedOmKeyInfo = + metadataManager.getDeletedTable().get(partName); + if(repeatedOmKeyInfo == null) { + repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo); + } else { + repeatedOmKeyInfo.addOmKeyInfo(keyInfo); + } + metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo); throw new OMException("No such Multipart upload is with specified " + "uploadId " + uploadID, ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); } else { @@ -1031,9 +1047,19 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( // Add the new entry in to the list of part keys. DBStore store = metadataManager.getStore(); try (BatchOperation batch = store.initBatchOperation()) { + RepeatedOmKeyInfo repeatedOmKeyInfo = metadataManager. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 5b6ac422e4..526274c239 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -85,6 +85,7 @@
 import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
@@ -781,9 +782,17 @@ public void deleteKey(OmKeyArgs args) throws IOException {
           return;
         }
       }
-      metadataManager.getStore().move(objectKey,
-          metadataManager.getKeyTable(),
-          metadataManager.getDeletedTable());
+      // Check if a key with the same keyName already exists in deletedTable
+      // and insert/update accordingly.
+      RepeatedOmKeyInfo repeatedOmKeyInfo =
+          metadataManager.getDeletedTable().get(objectKey);
+      if (repeatedOmKeyInfo == null) {
+        repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
+      } else {
+        repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
+      }
+      metadataManager.getKeyTable().delete(objectKey);
+      metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo);
     } catch (OMException ex) {
       throw ex;
     } catch (IOException ex) {
@@ -1003,7 +1012,14 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
         // will not be garbage collected, so move this part to delete table
         // and throw error
         // Move this part to delete table.
-        metadataManager.getDeletedTable().put(partName, keyInfo);
+        RepeatedOmKeyInfo repeatedOmKeyInfo =
+            metadataManager.getDeletedTable().get(partName);
+        if (repeatedOmKeyInfo == null) {
+          repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
+        } else {
+          repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
+        }
+        metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
         throw new OMException("No such Multipart upload is with specified " +
             "uploadId " + uploadID, ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
       } else {
@@ -1031,9 +1047,19 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
         // Add the new entry in to the list of part keys.
         DBStore store = metadataManager.getStore();
         try (BatchOperation batch = store.initBatchOperation()) {
+          RepeatedOmKeyInfo repeatedOmKeyInfo = metadataManager.
+              getDeletedTable().get(oldPartKeyInfo.getPartName());
+          if (repeatedOmKeyInfo == null) {
+            repeatedOmKeyInfo = new RepeatedOmKeyInfo(
+                OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
+          } else {
+            repeatedOmKeyInfo.addOmKeyInfo(
+                OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
+          }
+          metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
           metadataManager.getDeletedTable().putWithBatch(batch,
               oldPartKeyInfo.getPartName(),
-              OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
+              repeatedOmKeyInfo);
           metadataManager.getOpenKeyTable().deleteWithBatch(batch, openKey);
           metadataManager.getMultipartInfoTable().putWithBatch(batch,
               multipartKey, multipartKeyInfo);
@@ -1252,8 +1278,17 @@ public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException {
           PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
           OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(
               partKeyInfo.getPartKeyInfo());
+
+          RepeatedOmKeyInfo repeatedOmKeyInfo = metadataManager.
+              getDeletedTable().get(partKeyInfo.getPartName());
+          if (repeatedOmKeyInfo == null) {
+            repeatedOmKeyInfo = new RepeatedOmKeyInfo(currentKeyPartInfo);
+          } else {
+            repeatedOmKeyInfo.addOmKeyInfo(currentKeyPartInfo);
+          }
+
           metadataManager.getDeletedTable().putWithBatch(batch,
-              partKeyInfo.getPartName(), currentKeyPartInfo);
+              partKeyInfo.getPartName(), repeatedOmKeyInfo);
         }
         // Finally delete the entry from the multipart info table and open
         // key table
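The same get-or-create-then-append sequence now appears at every site that writes to deletedTable. A hypothetical helper (not in this patch) that captures the invariant in one place:

    // Hypothetical utility: fetch any existing tombstone list for the key,
    // append the new delete instance, and return the merged value for the
    // caller to write back to deletedTable.
    static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo,
        RepeatedOmKeyInfo existing) {
      if (existing == null) {
        return new RepeatedOmKeyInfo(keyInfo);
      }
      existing.addOmKeyInfo(keyInfo);
      return existing;
    }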
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 618b6aa660..59158e23a4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
+import org.apache.hadoop.ozone.om.codec.RepeatedOmKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec;
 import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
 import org.apache.hadoop.ozone.om.codec.VolumeListCodec;
@@ -57,6 +58,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
@@ -87,31 +89,31 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * OM DB stores metadata as KV pairs in different column families.
    *
    * OM DB Schema:
-   * |-------------------------------------------------------------------|
-   * |  Column Family     |        VALUE                                 |
-   * |-------------------------------------------------------------------|
-   * | userTable          | user->VolumeList                             |
-   * |-------------------------------------------------------------------|
-   * | volumeTable        | /volume->VolumeInfo                          |
-   * |-------------------------------------------------------------------|
-   * | bucketTable        | /volume/bucket-> BucketInfo                  |
-   * |-------------------------------------------------------------------|
-   * | keyTable           | /volumeName/bucketName/keyName->KeyInfo      |
-   * |-------------------------------------------------------------------|
-   * | deletedTable       | /volumeName/bucketName/keyName->KeyInfo      |
-   * |-------------------------------------------------------------------|
-   * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo   |
-   * |-------------------------------------------------------------------|
-   * | s3Table            | s3BucketName -> /volumeName/bucketName       |
-   * |-------------------------------------------------------------------|
-   * | s3SecretTable      | s3g_access_key_id -> s3Secret                |
-   * |-------------------------------------------------------------------|
-   * | dTokenTable        | s3g_access_key_id -> s3Secret                |
-   * |-------------------------------------------------------------------|
-   * | prefixInfoTable    | prefix -> PrefixInfo                         |
-   * |-------------------------------------------------------------------|
-   * | multipartInfoTable | /volumeName/bucketName/keyName/uploadId ->...|
-   * |-------------------------------------------------------------------|
+   * |----------------------------------------------------------------------|
+   * |  Column Family     |        VALUE                                    |
+   * |----------------------------------------------------------------------|
+   * | userTable          | user->VolumeList                                |
+   * |----------------------------------------------------------------------|
+   * | volumeTable        | /volume->VolumeInfo                             |
+   * |----------------------------------------------------------------------|
+   * | bucketTable        | /volume/bucket-> BucketInfo                     |
+   * |----------------------------------------------------------------------|
+   * | keyTable           | /volumeName/bucketName/keyName->KeyInfo         |
+   * |----------------------------------------------------------------------|
+   * | deletedTable       | /volumeName/bucketName/keyName->RepeatedKeyInfo |
+   * |----------------------------------------------------------------------|
+   * | openKey            | /volumeName/bucketName/keyName/id->KeyInfo      |
+   * |----------------------------------------------------------------------|
+   * | s3Table            | s3BucketName -> /volumeName/bucketName          |
+   * |----------------------------------------------------------------------|
+   * | s3SecretTable      | s3g_access_key_id -> s3Secret                   |
+   * |----------------------------------------------------------------------|
+   * | dTokenTable        | s3g_access_key_id -> s3Secret                   |
+   * |----------------------------------------------------------------------|
+   * | prefixInfoTable    | prefix -> PrefixInfo                            |
+   * |----------------------------------------------------------------------|
+   * | multipartInfoTable | /volumeName/bucketName/keyName/uploadId ->...   |
+   * |----------------------------------------------------------------------|
    */
 
   public static final String USER_TABLE = "userTable";
@@ -192,7 +194,7 @@ public Table<String, OmKeyInfo> getKeyTable() {
   }
 
   @Override
-  public Table<String, OmKeyInfo> getDeletedTable() {
+  public Table<String, RepeatedOmKeyInfo> getDeletedTable() {
     return deletedTable;
   }
 
@@ -261,6 +263,7 @@ protected DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) {
         .addTable(PREFIX_TABLE)
         .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
         .addCodec(OmKeyInfo.class, new OmKeyInfoCodec())
+        .addCodec(RepeatedOmKeyInfo.class, new RepeatedOmKeyInfoCodec())
         .addCodec(OmBucketInfo.class, new OmBucketInfoCodec())
         .addCodec(OmVolumeArgs.class, new OmVolumeArgsCodec())
         .addCodec(VolumeList.class, new VolumeListCodec())
@@ -296,8 +299,8 @@ protected void initializeOmTables() throws IOException {
     keyTable =
         this.store.getTable(KEY_TABLE, String.class, OmKeyInfo.class);
     checkTableStatus(keyTable, KEY_TABLE);
 
-    deletedTable =
-        this.store.getTable(DELETED_TABLE, String.class, OmKeyInfo.class);
+    deletedTable = this.store.getTable(DELETED_TABLE, String.class,
+        RepeatedOmKeyInfo.class);
     checkTableStatus(deletedTable, DELETED_TABLE);
 
     openKeyTable =
@@ -765,25 +768,26 @@ private VolumeList getVolumesByUser(String userNameKey)
   public List<BlockGroup> getPendingDeletionKeys(final int keyCount)
       throws IOException {
     List<BlockGroup> keyBlocksList = Lists.newArrayList();
-    try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> keyIter =
-        getDeletedTable()
-        .iterator()) {
+    try (TableIterator<String, ? extends KeyValue<String, RepeatedOmKeyInfo>>
+        keyIter = getDeletedTable().iterator()) {
       int currentCount = 0;
       while (keyIter.hasNext() && currentCount < keyCount) {
-        KeyValue<String, OmKeyInfo> kv = keyIter.next();
+        KeyValue<String, RepeatedOmKeyInfo> kv = keyIter.next();
         if (kv != null) {
-          OmKeyInfo info = kv.getValue();
+          RepeatedOmKeyInfo infoList = kv.getValue();
           // Get block keys as a list.
-          OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
-          List<BlockID> item = latest.getLocationList().stream()
-              .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))
-              .collect(Collectors.toList());
-          BlockGroup keyBlocks = BlockGroup.newBuilder()
-              .setKeyName(kv.getKey())
-              .addAllBlockIDs(item)
-              .build();
-          keyBlocksList.add(keyBlocks);
-          currentCount++;
+          for (OmKeyInfo info : infoList.getOmKeyInfoList()) {
+            OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
+            List<BlockID> item = latest.getLocationList().stream()
+                .map(b -> new BlockID(b.getContainerID(), b.getLocalID()))
+                .collect(Collectors.toList());
+            BlockGroup keyBlocks = BlockGroup.newBuilder()
+                .setKeyName(kv.getKey())
+                .addAllBlockIDs(item)
+                .build();
+            keyBlocksList.add(keyBlocks);
+            currentCount++;
+          }
         }
       }
     }
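Reading a deletedTable entry after this change means looping over every bundled instance, exactly as getPendingDeletionKeys now does — a consumer-side sketch, assuming a metadata manager wired as in OmMetadataManagerImpl:

    // One row may now carry several delete instances of the same key.
    RepeatedOmKeyInfo tombstones =
        metadataManager.getDeletedTable().get("/vol1/bucket1/key1");
    long totalBlocks = 0;
    if (tombstones != null) {
      for (OmKeyInfo info : tombstones.getOmKeyInfoList()) {
        totalBlocks +=
            info.getLatestVersionLocations().getLocationList().size();
      }
    }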
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index 33e5fef001..eb366adfcf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -139,13 +139,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       // TODO: Revisit if we need it later.
 
       omClientResponse = new OMKeyDeleteResponse(omKeyInfo,
-          deleteKeyArgs.getModificationTime(),
           omResponse.setDeleteKeyResponse(
               DeleteKeyResponse.newBuilder()).build());
 
     } catch (IOException ex) {
       exception = ex;
-      omClientResponse = new OMKeyDeleteResponse(null, 0,
+      omClientResponse = new OMKeyDeleteResponse(null,
          createErrorOMResponse(omResponse, exception));
     } finally {
       if (omClientResponse != null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
index 940ba7d142..f176879a09 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
@@ -133,14 +133,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       }
 
       omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
-          keyArgs.getModificationTime(), multipartKeyInfo,
+          multipartKeyInfo,
           omResponse.setAbortMultiPartUploadResponse(
               MultipartUploadAbortResponse.newBuilder()).build());
     } catch (IOException ex) {
       exception = ex;
       omClientResponse = new S3MultipartUploadAbortResponse(multipartKey,
-          keyArgs.getModificationTime(), multipartKeyInfo,
-          createErrorOMResponse(omResponse, exception));
+          multipartKeyInfo, createErrorOMResponse(omResponse, exception));
     } finally {
       if (omClientResponse != null) {
         omClientResponse.setFlushFuture(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index f9e1338a0f..0992fe0980 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -188,13 +188,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       omResponse.setCommitMultiPartUploadResponse(
           MultipartCommitUploadPartResponse.newBuilder().setPartName(partName));
       omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
-          openKey, keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
+          openKey, omKeyInfo, multipartKeyInfo,
           oldPartKeyInfo, omResponse.build());
     } catch (IOException ex) {
       exception = ex;
       omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey,
-          openKey, keyArgs.getModificationTime(), omKeyInfo, multipartKeyInfo,
+          openKey, omKeyInfo, multipartKeyInfo,
           oldPartKeyInfo, createErrorOMResponse(omResponse, exception));
     } finally {
       if (omClientResponse != null) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
index a3dfb28441..9a0c936797 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
@@ -18,10 +18,10 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -35,13 +35,10 @@
  */
 public class OMKeyDeleteResponse extends OMClientResponse {
   private OmKeyInfo omKeyInfo;
-  private long deleteTimestamp;
 
-  public OMKeyDeleteResponse(OmKeyInfo omKeyInfo, long deletionTime,
-      OMResponse omResponse) {
+  public OMKeyDeleteResponse(OmKeyInfo omKeyInfo, OMResponse omResponse) {
     super(omResponse);
     this.omKeyInfo = omKeyInfo;
-    this.deleteTimestamp = deletionTime;
   }
 
   @Override
@@ -60,12 +57,22 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
       if (!isKeyEmpty(omKeyInfo)) {
         // If a deleted key is put in the table where a key with the same
         // name already exists, then the old deleted key information would be
-        // lost. To differentiate between keys with same name in
-        // deletedTable, we add the timestamp to the key name.
-        String deleteKeyName = OmUtils.getDeletedKeyName(
-            ozoneKey, deleteTimestamp);
+        // lost. To avoid this, first check if a key with the same name
+        // exists. deletedTable in OM metadata stores
+        // <KeyName, RepeatedOmKeyInfo>. RepeatedOmKeyInfo is the structure
+        // that allows us to store a list of OmKeyInfo tied to the same key
+        // name. If the RepeatedOmKeyInfo for a keyName is null, we create a
+        // new instance; if it is not null, we simply add to the list and
+        // store this instance in deletedTable.
+        RepeatedOmKeyInfo repeatedOmKeyInfo =
+            omMetadataManager.getDeletedTable().get(ozoneKey);
+        if (repeatedOmKeyInfo == null) {
+          repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
+        } else {
+          repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
+        }
         omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            deleteKeyName, omKeyInfo);
+            ozoneKey, repeatedOmKeyInfo);
       }
     }
   }
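With the plain ozoneKey as the row key, the GDPR-style audit the RepeatedOmKeyInfo javadoc motivates becomes a point lookup instead of a scan for timestamp-suffixed variants — a sketch, assuming an initialized OMMetadataManager:

    String ozoneKey = omMetadataManager.getOzoneKey("vol1", "bucket1", "key1");
    RepeatedOmKeyInfo deleted =
        omMetadataManager.getDeletedTable().get(ozoneKey);
    if (deleted != null) {
      // Every delete instance of this key is visible from one lookup.
      System.out.println("Pending delete instances: "
          + deleted.getOmKeyInfoList().size());
    }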
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
index 26df30082b..cd454c5ffd 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
@@ -18,10 +18,10 @@
 
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -40,16 +40,12 @@
 public class S3MultipartUploadAbortResponse extends OMClientResponse {
 
   private String multipartKey;
-  private long timeStamp;
   private OmMultipartKeyInfo omMultipartKeyInfo;
 
   public S3MultipartUploadAbortResponse(String multipartKey,
-      long timeStamp,
-      OmMultipartKeyInfo omMultipartKeyInfo,
-      OMResponse omResponse) {
+      OmMultipartKeyInfo omMultipartKeyInfo, OMResponse omResponse) {
     super(omResponse);
     this.multipartKey = multipartKey;
-    this.timeStamp = timeStamp;
     this.omMultipartKeyInfo = omMultipartKeyInfo;
   }
 
@@ -73,9 +69,18 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
         PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
         OmKeyInfo currentKeyPartInfo =
             OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
+
+        RepeatedOmKeyInfo repeatedOmKeyInfo =
+            omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName());
+        if (repeatedOmKeyInfo == null) {
+          repeatedOmKeyInfo = new RepeatedOmKeyInfo(currentKeyPartInfo);
+        } else {
+          repeatedOmKeyInfo.addOmKeyInfo(currentKeyPartInfo);
+        }
+
         omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            OmUtils.getDeletedKeyName(partKeyInfo.getPartName(), timeStamp),
-            currentKeyPartInfo);
+            partKeyInfo.getPartName(),
+            repeatedOmKeyInfo);
       }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
index e010c199c5..08c443e593 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
@@ -18,10 +18,10 @@
 
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -41,21 +41,19 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
 
   private String multipartKey;
   private String openKey;
-  private long deleteTimeStamp;
   private OmKeyInfo deletePartKeyInfo;
   private OmMultipartKeyInfo omMultipartKeyInfo;
   private OzoneManagerProtocolProtos.PartKeyInfo oldMultipartKeyInfo;
 
   public S3MultipartUploadCommitPartResponse(String multipartKey,
-      String openKey, long deleteTimeStamp,
-      OmKeyInfo deletePartKeyInfo, OmMultipartKeyInfo omMultipartKeyInfo,
+      String openKey, OmKeyInfo deletePartKeyInfo,
+      OmMultipartKeyInfo omMultipartKeyInfo,
       OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo,
       OMResponse omResponse) {
     super(omResponse);
     this.multipartKey = multipartKey;
     this.openKey = openKey;
-    this.deleteTimeStamp = deleteTimeStamp;
     this.deletePartKeyInfo = deletePartKeyInfo;
     this.omMultipartKeyInfo = omMultipartKeyInfo;
     this.oldMultipartKeyInfo = oldPartKeyInfo;
@@ -69,9 +67,16 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
     if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) {
       // Means by the time we try to commit part, someone has aborted this
       // multipart upload. So, delete this part information.
+      RepeatedOmKeyInfo repeatedOmKeyInfo =
+          omMetadataManager.getDeletedTable().get(openKey);
+      if (repeatedOmKeyInfo == null) {
+        repeatedOmKeyInfo = new RepeatedOmKeyInfo(deletePartKeyInfo);
+      } else {
+        repeatedOmKeyInfo.addOmKeyInfo(deletePartKeyInfo);
+      }
       omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-          OmUtils.getDeletedKeyName(openKey, deleteTimeStamp),
-          deletePartKeyInfo);
+          openKey,
+          repeatedOmKeyInfo);
     }
 
     if (getOMResponse().getStatus() == OK) {
@@ -85,10 +90,19 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
       // This means for this multipart upload part upload, we have an old
       // part information, so delete it.
       if (oldMultipartKeyInfo != null) {
+        RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.
+            getDeletedTable().get(oldMultipartKeyInfo.getPartName());
+        if (repeatedOmKeyInfo == null) {
+          repeatedOmKeyInfo = new RepeatedOmKeyInfo(
+              OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
+        } else {
+          repeatedOmKeyInfo.addOmKeyInfo(
+              OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
+        }
+
         omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-            OmUtils.getDeletedKeyName(oldMultipartKeyInfo.getPartName(),
-                deleteTimeStamp),
-            OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
+            oldMultipartKeyInfo.getPartName(),
+            repeatedOmKeyInfo);
       }
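These response paths merge with a plain get(), which sees the last flushed value, and then stage the merged row in the batch so it lands atomically with the response's other mutations. Condensed, the sequence is (sketch; assumes partName and an OmKeyInfo named deletedPart in scope):

    try (BatchOperation batch =
             omMetadataManager.getStore().initBatchOperation()) {
      RepeatedOmKeyInfo merged =
          omMetadataManager.getDeletedTable().get(partName);
      if (merged == null) {
        merged = new RepeatedOmKeyInfo(deletedPart);
      } else {
        merged.addOmKeyInfo(deletedPart);
      }
      omMetadataManager.getDeletedTable().putWithBatch(batch, partName,
          merged);
      omMetadataManager.getStore().commitBatchOperation(batch);
    }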
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index cd2b665473..72e049bb42 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -28,13 +28,13 @@
 
 import com.google.common.base.Optional;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -370,10 +370,16 @@ public static String deleteKey(String ozoneKey,
 
     // Delete key from KeyTable and put in DeletedKeyTable
     omMetadataManager.getKeyTable().delete(ozoneKey);
 
-    String deletedKeyName = OmUtils.getDeletedKeyName(ozoneKey, Time.now());
-    omMetadataManager.getDeletedTable().put(deletedKeyName, omKeyInfo);
+    RepeatedOmKeyInfo repeatedOmKeyInfo =
+        omMetadataManager.getDeletedTable().get(ozoneKey);
+    if (repeatedOmKeyInfo == null) {
+      repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
+    } else {
+      repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
+    }
+    omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo);
 
-    return deletedKeyName;
+    return ozoneKey;
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
index da96e0c1cb..ba2b738a3c 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java
@@ -21,9 +21,7 @@
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -52,15 +50,11 @@ public void testAddToDBBatch() throws Exception {
         .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
         .build();
 
-    long deletionTime = Time.now();
-
     OMKeyDeleteResponse omKeyDeleteResponse =
-        new OMKeyDeleteResponse(omKeyInfo, deletionTime, omResponse);
+        new OMKeyDeleteResponse(omKeyInfo, omResponse);
 
     String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
         keyName);
-    String deletedOzoneKeyName = OmUtils.getDeletedKeyName(
-        ozoneKey, deletionTime);
 
     TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
         clientID, replicationType, replicationFactor, omMetadataManager);
@@ -76,7 +70,7 @@ public void testAddToDBBatch() throws Exception {
     // As default key entry does not have any blocks, it should not be in
     // deletedKeyTable.
     Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(
-        deletedOzoneKeyName));
+        ozoneKey));
   }
 
   @Test
@@ -117,13 +111,9 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
         .setStatus(OzoneManagerProtocolProtos.Status.OK)
         .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey)
         .build();
-    long deletionTime = Time.now();
 
     OMKeyDeleteResponse omKeyDeleteResponse =
-        new OMKeyDeleteResponse(omKeyInfo, deletionTime, omResponse);
-
-    String deletedOzoneKeyName = OmUtils.getDeletedKeyName(
-        ozoneKey, deletionTime);
+        new OMKeyDeleteResponse(omKeyInfo, omResponse);
 
     Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
     omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -135,7 +125,7 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception {
 
     // Key has blocks, so it should be present in deletedKeyTable.
     Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(
-        deletedOzoneKeyName));
+        ozoneKey));
   }
 
 
@@ -152,7 +142,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
         .build();
 
     OMKeyDeleteResponse omKeyDeleteResponse =
-        new OMKeyDeleteResponse(omKeyInfo, Time.now(), omResponse);
+        new OMKeyDeleteResponse(omKeyInfo, omResponse);
 
     String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
         keyName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 634ffaf542..09b028bef4 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -113,8 +113,7 @@ public S3MultipartUploadAbortResponse createS3AbortMPUResponse(
         .setAbortMultiPartUploadResponse(
             MultipartUploadAbortResponse.newBuilder().build()).build();
 
-    return new S3MultipartUploadAbortResponse(multipartKey, timeStamp,
-        omMultipartKeyInfo,
+    return new S3MultipartUploadAbortResponse(multipartKey, omMultipartKeyInfo,
         omResponse);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
index b6707ed76d..60aacd5a33 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
@@ -20,10 +20,10 @@
 
 import java.util.UUID;
 
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.junit.Assert;
 import org.junit.Test;
 
-import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -124,24 +124,25 @@ public void testAddDBToBatchWithParts() throws Exception {
     Assert.assertTrue(omMetadataManager.countRowsInTable(
         omMetadataManager.getDeletedTable()) == 2);
 
-    String part1DeletedKeyName = OmUtils.getDeletedKeyName(
-        omMultipartKeyInfo.getPartKeyInfo(1).getPartName(),
-        timeStamp);
+    String part1DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
 
-    String part2DeletedKeyName = OmUtils.getDeletedKeyName(
-        omMultipartKeyInfo.getPartKeyInfo(2).getPartName(),
-        timeStamp);
+    String part2DeletedKeyName =
+        omMultipartKeyInfo.getPartKeyInfo(2).getPartName();
 
     Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
         part1DeletedKeyName));
     Assert.assertNotNull(omMetadataManager.getDeletedTable().get(
         part2DeletedKeyName));
 
+    RepeatedOmKeyInfo ro =
+        omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
     Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()),
-        omMetadataManager.getDeletedTable().get(part1DeletedKeyName));
+        ro.getOmKeyInfoList().get(0));
 
+    ro = omMetadataManager.getDeletedTable().get(part2DeletedKeyName);
     Assert.assertEquals(OmKeyInfo.getFromProtobuf(part2.getPartKeyInfo()),
-        omMetadataManager.getDeletedTable().get(part2DeletedKeyName));
+        ro.getOmKeyInfoList().get(0));
   }
 }
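Closing sketch of the property these tests circle around — repeated deletes of one key accumulate in a single row (illustrative only, not part of this patch; assumes an initialized omMetadataManager and two OmKeyInfo instances):

    // Simulate two deletes of the same key, committing between them so
    // the second read-merge sees the first instance.
    omMetadataManager.getDeletedTable().put(ozoneKey,
        new RepeatedOmKeyInfo(firstDelete));

    RepeatedOmKeyInfo existing =
        omMetadataManager.getDeletedTable().get(ozoneKey);
    existing.addOmKeyInfo(secondDelete);
    omMetadataManager.getDeletedTable().put(ozoneKey, existing);

    Assert.assertEquals(2, omMetadataManager.getDeletedTable()
        .get(ozoneKey).getOmKeyInfoList().size());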