HDDS-2174. Delete GDPR Encryption Key from metadata when a Key is deleted

Signed-off-by: Anu Engineer <aengineer@apache.org>
This commit is contained in:
dchitlangia 2019-09-24 23:39:34 -04:00 committed by Anu Engineer
parent 2adcc3c932
commit c55ac6a1c7
7 changed files with 158 additions and 69 deletions

View File

@ -46,6 +46,9 @@
import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
@ -498,13 +501,36 @@ public static File createOMDir(String dirPath) {
}
/**
* Returns the DB key name of a deleted key in OM metadata store. The
* deleted key name is the <keyName>_<deletionTimestamp>.
* @param key Original key name
* @param timestamp timestamp of deletion
* @return Deleted key name
* Prepares key info to be moved to deletedTable.
* 1. It strips GDPR metadata from key info
* 2. For given object key, if the repeatedOmKeyInfo instance is null, it
* implies that no entry for the object key exists in deletedTable so we
* create a new instance to include this key, else we update the existing
* repeatedOmKeyInfo instance.
* @param keyInfo args supplied by client
* @param repeatedOmKeyInfo key details from deletedTable
* @return {@link RepeatedOmKeyInfo}
* @throws IOException if I/O Errors when checking for key
*/
public static String getDeletedKeyName(String key, long timestamp) {
return key + "_" + timestamp;
public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo,
RepeatedOmKeyInfo repeatedOmKeyInfo) throws IOException{
// If this key is in a GDPR enforced bucket, then before moving
// KeyInfo to deletedTable, remove the GDPR related metadata from
// KeyInfo.
if(Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
keyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG);
keyInfo.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM);
keyInfo.getMetadata().remove(OzoneConsts.GDPR_SECRET);
}
if(repeatedOmKeyInfo == null) {
//The key doesn't exist in deletedTable, so create a new instance.
repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
} else {
//The key exists in deletedTable, so update existing instance.
repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
}
return repeatedOmKeyInfo;
}
}

View File

@ -86,6 +86,7 @@
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.s3.util.OzoneS3Util;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
@ -2667,7 +2668,7 @@ private void completeMultipartUpload(OzoneBucket bucket, String keyName,
* @throws Exception
*/
@Test
public void testGDPR() throws Exception {
public void testKeyReadWriteForGDPR() throws Exception {
//Step 1
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
@ -2733,4 +2734,77 @@ public void testGDPR() throws Exception {
Assert.assertNotEquals(text, new String(fileContent));
}
/**
 * Tests deletedKey for GDPR.
 * 1. Create GDPR Enabled bucket.
 * 2. Create a Key in this bucket so it gets encrypted via GDPRSymmetricKey.
 * 3. Read key and validate the content/metadata is as expected because the
 * readKey will decrypt using the GDPR Symmetric Key with details from
 * KeyInfo Metadata.
 * 4. Delete this key in GDPR enabled bucket.
 * 5. Confirm the deleted key metadata in deletedTable does not contain the
 * GDPR encryption details (flag, secret, algorithm).
 * @throws Exception
 */
@Test
public void testDeletedKeyForGDPR() throws Exception {
  //Step 1: create a GDPR enforced bucket.
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String keyName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  BucketArgs args = BucketArgs.newBuilder()
      .addMetadata(OzoneConsts.GDPR_FLAG, "true").build();
  volume.createBucket(bucketName, args);
  OzoneBucket bucket = volume.getBucket(bucketName);
  Assert.assertEquals(bucketName, bucket.getName());
  Assert.assertNotNull(bucket.getMetadata());
  Assert.assertEquals("true",
      bucket.getMetadata().get(OzoneConsts.GDPR_FLAG));

  //Step 2: write the key. try-with-resources guarantees the stream is
  // closed (and the key committed) even if write() throws.
  String text = "hello world";
  Map<String, String> keyMetadata = new HashMap<>();
  keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
  try (OzoneOutputStream out = bucket.createKey(keyName,
      text.getBytes().length, STAND_ALONE, ONE, keyMetadata)) {
    out.write(text.getBytes());
  }

  //Step 3: read back and validate GDPR metadata and decrypted content.
  OzoneKeyDetails key = bucket.getKey(keyName);
  Assert.assertEquals(keyName, key.getName());
  Assert.assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG));
  Assert.assertEquals("AES",
      key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM));
  Assert.assertNotNull(key.getMetadata().get(OzoneConsts.GDPR_SECRET));

  byte[] fileContent = new byte[text.getBytes().length];
  // Close the input stream (was leaked before) and assert the full
  // expected length was read, so a short read fails with a clear message
  // rather than a confusing content mismatch below.
  try (OzoneInputStream is = bucket.readKey(keyName)) {
    Assert.assertEquals(fileContent.length, is.read(fileContent));
  }
  Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
      keyName, STAND_ALONE,
      ONE));
  Assert.assertEquals(text, new String(fileContent));

  //Step 4: delete the key from the GDPR enforced bucket.
  bucket.deleteKey(keyName);

  //Step 5: the deletedTable entry must not retain GDPR encryption details.
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
      keyName);
  RepeatedOmKeyInfo deletedKeys =
      omMetadataManager.getDeletedTable().get(objectKey);
  Map<String, String> deletedKeyMetadata =
      deletedKeys.getOmKeyInfoList().get(0).getMetadata();
  Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_FLAG));
  Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_SECRET));
  Assert.assertFalse(
      deletedKeyMetadata.containsKey(OzoneConsts.GDPR_ALGORITHM));
}
}

View File

@ -60,6 +60,7 @@
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.BlockGroup;
@ -782,15 +783,10 @@ public void deleteKey(OmKeyArgs args) throws IOException {
return;
}
}
//Check if key with same keyName exists in deletedTable and then
// insert/update accordingly.
RepeatedOmKeyInfo repeatedOmKeyInfo =
metadataManager.getDeletedTable().get(objectKey);
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
}
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(keyInfo,
repeatedOmKeyInfo);
metadataManager.getKeyTable().delete(objectKey);
metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo);
} catch (OMException ex) {
@ -1014,11 +1010,8 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
// Move this part to delete table.
RepeatedOmKeyInfo repeatedOmKeyInfo =
metadataManager.getDeletedTable().get(partName);
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
}
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
keyInfo, repeatedOmKeyInfo);
metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
throw new OMException("No such Multipart upload is with specified " +
"uploadId " + uploadID, ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
@ -1047,15 +1040,16 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
// Add the new entry in to the list of part keys.
DBStore store = metadataManager.getStore();
try (BatchOperation batch = store.initBatchOperation()) {
RepeatedOmKeyInfo repeatedOmKeyInfo = metadataManager.
getDeletedTable().get(oldPartKeyInfo.getPartName());
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(
OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
} else {
repeatedOmKeyInfo.addOmKeyInfo(
OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
}
OmKeyInfo partKey = OmKeyInfo.getFromProtobuf(
oldPartKeyInfo.getPartKeyInfo());
RepeatedOmKeyInfo repeatedOmKeyInfo =
metadataManager.getDeletedTable()
.get(oldPartKeyInfo.getPartName());
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
partKey, repeatedOmKeyInfo);
metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
metadataManager.getDeletedTable().putWithBatch(batch,
oldPartKeyInfo.getPartName(),
@ -1279,13 +1273,12 @@ public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException {
OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(
partKeyInfo.getPartKeyInfo());
RepeatedOmKeyInfo repeatedOmKeyInfo = metadataManager.
getDeletedTable().get(partKeyInfo.getPartName());
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(currentKeyPartInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(currentKeyPartInfo);
}
RepeatedOmKeyInfo repeatedOmKeyInfo =
metadataManager.getDeletedTable()
.get(partKeyInfo.getPartName());
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
currentKeyPartInfo, repeatedOmKeyInfo);
metadataManager.getDeletedTable().putWithBatch(batch,
partKeyInfo.getPartName(), repeatedOmKeyInfo);

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.om.response.key;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
@ -69,11 +70,8 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
// instance in deletedTable.
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable().get(ozoneKey);
if (repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
}
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
omKeyInfo, repeatedOmKeyInfo);
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
ozoneKey, repeatedOmKeyInfo);
}

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.om.response.s3.multipart;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@ -75,11 +76,9 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName());
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(currentKeyPartInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(currentKeyPartInfo);
}
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
currentKeyPartInfo, repeatedOmKeyInfo);
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
partKeyInfo.getPartName(),

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.om.response.s3.multipart;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@ -66,17 +67,16 @@ public S3MultipartUploadCommitPartResponse(String multipartKey,
public void addToDBBatch(OMMetadataManager omMetadataManager,
BatchOperation batchOperation) throws IOException {
if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) {
// Means by the time we try to commit part, some one has aborted this
// multipart upload. So, delete this part information.
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable().get(openKey);
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(deletePartKeyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(deletePartKeyInfo);
}
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
deletePartKeyInfo, repeatedOmKeyInfo);
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
openKey,
repeatedOmKeyInfo);
@ -86,6 +86,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
// If we have old part info:
// Need to do 3 steps:
// 0. Strip GDPR related metadata from multipart info
// 1. add old part to delete table
// 2. Commit multipart info which has information about this new part.
// 3. delete this new part entry from open key table.
@ -93,22 +94,21 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
// This means for this multipart upload part upload, we have an old
// part information, so delete it.
if (oldMultipartKeyInfo != null) {
RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.
getDeletedTable().get(oldMultipartKeyInfo.getPartName());
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(
OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
} else {
repeatedOmKeyInfo.addOmKeyInfo(
OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
}
OmKeyInfo partKey =
OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo());
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable()
.get(oldMultipartKeyInfo.getPartName());
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(partKey,
repeatedOmKeyInfo);
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
oldMultipartKeyInfo.getPartName(),
repeatedOmKeyInfo);
}
omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
multipartKey, omMultipartKeyInfo);
@ -116,8 +116,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
// safely delete part key info from open key table.
omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
openKey);
}
}

View File

@ -28,6 +28,7 @@
import com.google.common.base.Optional;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@ -374,13 +375,13 @@ public static String deleteKey(String ozoneKey,
// Delete key from KeyTable and put in DeletedKeyTable
omMetadataManager.getKeyTable().delete(ozoneKey);
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable().get(ozoneKey);
if(repeatedOmKeyInfo == null) {
repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
} else {
repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
}
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(omKeyInfo,
repeatedOmKeyInfo);
omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo);
return ozoneKey;