HDDS-1761. Fix class hierarchy for KeyRequest and FileRequest classes. (#1052)

parent 141151325b
commit 585f4d5c64
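Before this change, each key and file write request extended OMClientRequest and separately implemented the OMKeyRequest interface, whose shared logic lived in default methods; the diff below turns OMKeyRequest into an abstract class that itself extends OMClientRequest, so the request classes inherit the shared helpers instead. A minimal sketch of the resulting hierarchy, with member bodies elided (only the shapes implied by the diff):

    // Shared base class for key/file write requests (sketch, not the full class).
    public abstract class OMKeyRequest extends OMClientRequest {

      public OMKeyRequest(OMRequest omRequest) {
        super(omRequest);   // forwards the protobuf request to OMClientRequest
      }

      // Shared helpers such as prepareCreateKeyResponse(), createKeyInfo(),
      // prepareKeyInfo() and getFileEncryptionInfo() now live here.
    }

    // Concrete requests simply extend the base instead of implementing an interface:
    //   public class OMKeyCreateRequest extends OMKeyRequest { ... }
    //   public class OMFileCreateRequest extends OMKeyRequest { ... }
    //   public class OMDirectoryCreateRequest extends OMKeyRequest { ... }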
@@ -41,7 +41,6 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponse;
@@ -73,8 +72,7 @@
 /**
  * Handle create directory request.
  */
-public class OMDirectoryCreateRequest extends OMClientRequest
-    implements OMKeyRequest {
+public class OMDirectoryCreateRequest extends OMKeyRequest {

   private static final Logger LOG =
       LoggerFactory.getLogger(OMDirectoryCreateRequest.class);
@@ -214,8 +212,8 @@ private OmKeyInfo createDirectoryKeyInfo(OzoneManager ozoneManager,
       OmBucketInfo omBucketInfo, String volumeName, String bucketName,
       String keyName, KeyArgs keyArgs)
       throws IOException {
-    FileEncryptionInfo encryptionInfo = getFileEncryptionInfo(ozoneManager,
-        omBucketInfo);
+    Optional<FileEncryptionInfo> encryptionInfo =
+        getFileEncryptionInfo(ozoneManager, omBucketInfo);
     String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);

     return new OmKeyInfo.Builder()
@@ -229,7 +227,7 @@ private OmKeyInfo createDirectoryKeyInfo(OzoneManager ozoneManager,
         .setDataSize(0)
         .setReplicationType(HddsProtos.ReplicationType.RATIS)
         .setReplicationFactor(HddsProtos.ReplicationFactor.ONE)
-        .setFileEncryptionInfo(encryptionInfo)
+        .setFileEncryptionInfo(encryptionInfo.orNull())
         .setAcls(keyArgs.getAclsList())
         .build();
   }
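The other recurring change in this commit is that getFileEncryptionInfo() now returns a Guava Optional<FileEncryptionInfo> instead of a nullable reference; call sites start from Optional.absent() and unwrap with orNull() where a plain, possibly null value is still expected, as in setFileEncryptionInfo(encryptionInfo.orNull()) above. A self-contained toy example of that pattern, with the encryption info simplified to a String (the real code wraps org.apache.hadoop.fs.FileEncryptionInfo):

    import com.google.common.base.Optional;

    public class GuavaOptionalSketch {

      // Stand-in for getFileEncryptionInfo(...): absent when the bucket has no
      // encryption key configured. (Hypothetical method, for illustration only.)
      static Optional<String> lookupEncryptionInfo(boolean bucketEncrypted) {
        return bucketEncrypted ? Optional.of("edek-material") : Optional.absent();
      }

      public static void main(String[] args) {
        Optional<String> encryptionInfo = lookupEncryptionInfo(false);

        // orNull() hands a plain, possibly null value to APIs that still expect one,
        // mirroring setFileEncryptionInfo(encryptionInfo.orNull()) in the diff.
        String plain = encryptionInfo.orNull();

        System.out.println("present=" + encryptionInfo.isPresent() + " value=" + plain);
      }
    }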
@@ -28,6 +28,7 @@
 import java.util.stream.Collectors;
 import javax.annotation.Nonnull;

+import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,7 +44,6 @@
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -72,8 +72,7 @@
 /**
  * Handles create file request.
  */
-public class OMFileCreateRequest extends OMKeyCreateRequest
-    implements OMKeyRequest {
+public class OMFileCreateRequest extends OMKeyRequest {

   private static final Logger LOG =
       LoggerFactory.getLogger(OMFileCreateRequest.class);
@@ -171,7 +170,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

     boolean acquiredLock = false;
     IOException exception = null;
-    FileEncryptionInfo encryptionInfo = null;
+    Optional<FileEncryptionInfo> encryptionInfo = Optional.absent();
     OmKeyInfo omKeyInfo = null;

     final List<OmKeyLocationInfo> locations = new ArrayList<>();
@@ -263,7 +262,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
       encryptionInfo = getFileEncryptionInfo(ozoneManager, bucketInfo);
       omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs,
           omMetadataManager.getOzoneKey(volumeName, bucketName,
-              keyName), keyArgs.getDataSize(), locations, encryptionInfo);
+              keyName), keyArgs.getDataSize(), locations,
+          encryptionInfo.orNull());

     } catch (IOException ex) {
       exception = ex;
@@ -275,7 +275,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
     }

     return prepareCreateKeyResponse(keyArgs, omKeyInfo, locations,
-        encryptionInfo, exception, createFileRequest.getClientID(),
+        encryptionInfo.orNull(), exception, createFileRequest.getClientID(),
         transactionLogIndex, volumeName, bucketName, keyName, ozoneManager,
         OMAction.CREATE_FILE);
   }
@@ -39,7 +39,6 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -65,8 +64,7 @@
 /**
  * Handles allocate block request.
  */
-public class OMAllocateBlockRequest extends OMClientRequest
-    implements OMKeyRequest {
+public class OMAllocateBlockRequest extends OMKeyRequest {

   private static final Logger LOG =
       LoggerFactory.getLogger(OMAllocateBlockRequest.class);
@@ -36,7 +36,6 @@
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
@@ -61,8 +60,7 @@
 /**
  * Handles CommitKey request.
  */
-public class OMKeyCommitRequest extends OMClientRequest
-    implements OMKeyRequest {
+public class OMKeyCommitRequest extends OMKeyRequest {

   private static final Logger LOG =
       LoggerFactory.getLogger(OMKeyCommitRequest.class);
@@ -20,12 +20,8 @@

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
 import javax.annotation.Nonnull;
-import javax.annotation.Nullable;

 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
@@ -43,39 +39,24 @@
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .CreateKeyResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .CreateKeyRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .KeyArgs;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.UniqueId;
-import org.apache.hadoop.utils.db.cache.CacheKey;
-import org.apache.hadoop.utils.db.cache.CacheValue;

 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateKey;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateFile;
 /**
  * Handles CreateKey request.
  */

-public class OMKeyCreateRequest extends OMClientRequest
-    implements OMKeyRequest {
+public class OMKeyCreateRequest extends OMKeyRequest {
   private static final Logger LOG =
       LoggerFactory.getLogger(OMKeyCreateRequest.class);

@@ -175,7 +156,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
     OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
     OmKeyInfo omKeyInfo = null;
     final List< OmKeyLocationInfo > locations = new ArrayList<>();
-    FileEncryptionInfo encryptionInfo = null;
+    Optional<FileEncryptionInfo> encryptionInfo = Optional.absent();
     IOException exception = null;
     boolean acquireLock = false;
     try {
@@ -200,7 +181,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

       omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs,
           omMetadataManager.getOzoneKey(volumeName, bucketName, keyName),
-          keyArgs.getDataSize(), locations, encryptionInfo);
+          keyArgs.getDataSize(), locations, encryptionInfo.orNull());

     } catch (IOException ex) {
       LOG.error("Key open failed for volume:{} bucket:{} key:{}",
@@ -214,245 +195,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
     }

     return prepareCreateKeyResponse(keyArgs, omKeyInfo, locations,
-        encryptionInfo, exception, createKeyRequest.getClientID(),
+        encryptionInfo.orNull(), exception, createKeyRequest.getClientID(),
         transactionLogIndex, volumeName, bucketName, keyName, ozoneManager,
         OMAction.ALLOCATE_KEY);
   }

-  /**
-   * Prepare the response returned to the client.
-   * @param keyArgs
-   * @param omKeyInfo
-   * @param locations
-   * @param encryptionInfo
-   * @param exception
-   * @param clientID
-   * @param transactionLogIndex
-   * @param volumeName
-   * @param bucketName
-   * @param keyName
-   * @param ozoneManager
-   * @return OMClientResponse
-   */
-  @SuppressWarnings("parameternumber")
-  protected OMClientResponse prepareCreateKeyResponse(@Nonnull KeyArgs keyArgs,
-      OmKeyInfo omKeyInfo, @Nonnull List<OmKeyLocationInfo> locations,
-      FileEncryptionInfo encryptionInfo, @Nullable IOException exception,
-      long clientID, long transactionLogIndex, @Nonnull String volumeName,
-      @Nonnull String bucketName, @Nonnull String keyName,
-      @Nonnull OzoneManager ozoneManager, @Nonnull OMAction omAction) {
-
-    OMResponse.Builder omResponse = OMResponse.newBuilder().setStatus(
-        OzoneManagerProtocolProtos.Status.OK);
-    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
-
-    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
-
-    OMClientResponse omClientResponse = null;
-    if (exception == null) {
-      if (omKeyInfo == null) {
-        // the key does not exist, create a new object, the new blocks are the
-        // version 0
-        omKeyInfo = createKeyInfo(keyArgs, locations, keyArgs.getFactor(),
-            keyArgs.getType(), keyArgs.getDataSize(),
-            encryptionInfo);
-      }
-
-      long openVersion = omKeyInfo.getLatestVersionLocations().getVersion();
-
-      // Append blocks
-      try {
-        omKeyInfo.appendNewBlocks(keyArgs.getKeyLocationsList().stream()
-            .map(OmKeyLocationInfo::getFromProtobuf)
-            .collect(Collectors.toList()), false);
-
-      } catch (IOException ex) {
-        exception = ex;
-      }
-
-      if (exception != null) {
-        LOG.error("{} failed for Key: {} in volume/bucket:{}/{}",
-            omAction.getAction(), keyName, bucketName, volumeName, exception);
-        omClientResponse = createKeyErrorResponse(ozoneManager.getMetrics(),
-            omAction, exception, omResponse);
-      } else {
-        String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName,
-            bucketName, keyName, clientID);
-
-        // Add to cache entry can be done outside of lock for this openKey.
-        // Even if bucket gets deleted, when commitKey we shall identify if
-        // bucket gets deleted.
-        omMetadataManager.getOpenKeyTable().addCacheEntry(
-            new CacheKey<>(dbOpenKeyName),
-            new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
-
-        LOG.debug("{} for Key: {} in volume/bucket: {}/{}",
-            omAction.getAction(), keyName, volumeName, bucketName);
-
-
-        if (omAction == OMAction.CREATE_FILE) {
-          ozoneManager.getMetrics().incNumCreateFile();
-          omResponse.setCreateFileResponse(
-              OzoneManagerProtocolProtos.CreateFileResponse.newBuilder()
-                  .setKeyInfo(omKeyInfo.getProtobuf())
-                  .setID(clientID)
-                  .setOpenVersion(openVersion).build());
-          omResponse.setCmdType(OzoneManagerProtocolProtos.Type.CreateFile);
-          omClientResponse = new OMFileCreateResponse(omKeyInfo, clientID,
-              omResponse.build());
-        } else {
-          ozoneManager.getMetrics().incNumKeyAllocates();
-          omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
-              .setKeyInfo(omKeyInfo.getProtobuf())
-              .setID(clientID).setOpenVersion(openVersion)
-              .build());
-          omResponse.setCmdType(OzoneManagerProtocolProtos.Type.CreateKey);
-          omClientResponse = new OMKeyCreateResponse(omKeyInfo, clientID,
-              omResponse.build());
-        }
-      }
-
-    } else {
-      LOG.error("{} failed for Key: {} in volume/bucket:{}/{}",
-          omAction.getAction(), keyName, volumeName, bucketName, exception);
-      omClientResponse = createKeyErrorResponse(ozoneManager.getMetrics(),
-          omAction, exception, omResponse);
-    }
-    // audit log
-    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(omAction,
-        auditMap, exception, getOmRequest().getUserInfo()));
-    return omClientResponse;
-  }
-
-  private OMClientResponse createKeyErrorResponse(@Nonnull OMMetrics omMetrics,
-      @Nonnull OMAction omAction, @Nonnull IOException exception,
-      @Nonnull OMResponse.Builder omResponse) {
-    if (omAction == OMAction.CREATE_FILE) {
-      omMetrics.incNumCreateFileFails();
-      omResponse.setCmdType(CreateFile);
-      return new OMFileCreateResponse(null, -1L,
-          createErrorOMResponse(omResponse, exception));
-    } else {
-      omMetrics.incNumKeyAllocateFails();
-      omResponse.setCmdType(CreateKey);
-      return new OMKeyCreateResponse(null, -1L,
-          createErrorOMResponse(omResponse, exception));
-    }
-  }
-
-  /**
-   * Prepare OmKeyInfo which will be persisted to openKeyTable.
-   * @param omMetadataManager
-   * @param keyArgs
-   * @param dbKeyName
-   * @param size
-   * @param locations
-   * @param encInfo
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  protected OmKeyInfo prepareKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs keyArgs, @Nonnull String dbKeyName, long size,
-      @Nonnull List<OmKeyLocationInfo> locations, FileEncryptionInfo encInfo)
-      throws IOException {
-    OmKeyInfo keyInfo = null;
-    if (keyArgs.getIsMultipartKey()) {
-      keyInfo = prepareMultipartKeyInfo(omMetadataManager, keyArgs, size,
-          locations, encInfo);
-      //TODO args.getMetadata
-    } else if (omMetadataManager.getKeyTable().isExist(dbKeyName)) {
-      // TODO: Need to be fixed, as when key already exists, we are
-      // appending new blocks to existing key.
-      keyInfo = omMetadataManager.getKeyTable().get(dbKeyName);
-      // the key already exist, the new blocks will be added as new version
-      // when locations.size = 0, the new version will have identical blocks
-      // as its previous version
-      keyInfo.addNewVersion(locations, false);
-      keyInfo.setDataSize(size + keyInfo.getDataSize());
-      // The modification time is set in preExecute, use the same as
-      // modification time when key already exists.
-      keyInfo.setModificationTime(keyArgs.getModificationTime());
-    }
-    return keyInfo;
-  }
-
-  /**
-   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
-   * to openKeyTable.
-   * @param omMetadataManager
-   * @param args
-   * @param size
-   * @param locations
-   * @param encInfo
-   * @return OmKeyInfo
-   * @throws IOException
-   */
-  private OmKeyInfo prepareMultipartKeyInfo(
-      @Nonnull OMMetadataManager omMetadataManager,
-      @Nonnull KeyArgs args, long size,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      FileEncryptionInfo encInfo) throws IOException {
-    HddsProtos.ReplicationFactor factor;
-    HddsProtos.ReplicationType type;
-
-    Preconditions.checkArgument(args.getMultipartNumber() > 0,
-        "PartNumber Should be greater than zero");
-    // When key is multipart upload part key, we should take replication
-    // type and replication factor from original key which has done
-    // initiate multipart upload. If we have not found any such, we throw
-    // error no such multipart upload.
-    String uploadID = args.getMultipartUploadID();
-    Preconditions.checkNotNull(uploadID);
-    String multipartKey = omMetadataManager
-        .getMultipartKey(args.getVolumeName(), args.getBucketName(),
-            args.getKeyName(), uploadID);
-    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
-        multipartKey);
-    if (partKeyInfo == null) {
-      throw new OMException("No such Multipart upload is with specified " +
-          "uploadId " + uploadID,
-          OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
-    } else {
-      factor = partKeyInfo.getFactor();
-      type = partKeyInfo.getType();
-    }
-    // For this upload part we don't need to check in KeyTable. As this
-    // is not an actual key, it is a part of the key.
-    return createKeyInfo(args, locations, factor, type, size, encInfo);
-  }
-
-  /**
-   * Create OmKeyInfo object.
-   * @param keyArgs
-   * @param locations
-   * @param factor
-   * @param type
-   * @param size
-   * @param encInfo
-   * @return OmKeyInfo
-   */
-  private OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs,
-      @Nonnull List<OmKeyLocationInfo> locations,
-      @Nonnull HddsProtos.ReplicationFactor factor,
-      @Nonnull HddsProtos.ReplicationType type, long size,
-      FileEncryptionInfo encInfo) {
-    OmKeyInfo.Builder builder = new OmKeyInfo.Builder()
-        .setVolumeName(keyArgs.getVolumeName())
-        .setBucketName(keyArgs.getBucketName())
-        .setKeyName(keyArgs.getKeyName())
-        .setOmKeyLocationInfos(Collections.singletonList(
-            new OmKeyLocationInfoGroup(0, locations)))
-        .setCreationTime(keyArgs.getModificationTime())
-        .setModificationTime(keyArgs.getModificationTime())
-        .setDataSize(size)
-        .setReplicationType(type)
-        .setReplicationFactor(factor)
-        .setFileEncryptionInfo(encInfo);
-    if(keyArgs.getAclsList() != null) {
-      builder.setAcls(keyArgs.getAclsList());
-    }
-    return builder.build();
-  }
-
 }
@@ -22,6 +22,8 @@
 import java.util.Map;

 import com.google.common.base.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
@@ -30,7 +32,6 @@
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse;
@@ -45,8 +46,6 @@
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.utils.db.cache.CacheKey;
 import org.apache.hadoop.utils.db.cache.CacheValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
     .KEY_NOT_FOUND;
@@ -55,8 +54,7 @@
 /**
  * Handles DeleteKey request.
  */
-public class OMKeyDeleteRequest extends OMClientRequest
-    implements OMKeyRequest {
+public class OMKeyDeleteRequest extends OMKeyRequest {

   private static final Logger LOG =
       LoggerFactory.getLogger(OMKeyDeleteRequest.class);
@@ -18,8 +18,14 @@

 package org.apache.hadoop.ozone.om.request.key;

+import java.io.IOException;
+import java.util.Map;
+
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -27,7 +33,6 @@
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
@@ -44,11 +49,6 @@
 import org.apache.hadoop.utils.db.Table;
 import org.apache.hadoop.utils.db.cache.CacheKey;
 import org.apache.hadoop.utils.db.cache.CacheValue;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Map;

 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
@@ -56,8 +56,7 @@
 /**
  * Handles rename key request.
  */
-public class OMKeyRenameRequest extends OMClientRequest
-    implements OMKeyRequest {
+public class OMKeyRenameRequest extends OMKeyRequest {

   private static final Logger LOG =
       LoggerFactory.getLogger(OMKeyRenameRequest.class);
@@ -18,7 +18,23 @@

 package org.apache.hadoop.ozone.om.request.key;

+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension
     .EncryptedKeyVersion;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -29,38 +45,56 @@
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.ScmClient;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CreateFileResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CreateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMResponse;
 import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.security.GeneralSecurityException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.List;
+import org.apache.hadoop.utils.db.cache.CacheKey;
+import org.apache.hadoop.utils.db.cache.CacheValue;

 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
     .BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
     .VOLUME_NOT_FOUND;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateFile;
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateKey;
 import static org.apache.hadoop.util.Time.monotonicNow;

 /**
  * Interface for key write requests.
  */
-public interface OMKeyRequest {
+public abstract class OMKeyRequest extends OMClientRequest {

-  Logger LOG = LoggerFactory.getLogger(OMKeyRequest.class);
+  private static final Logger LOG = LoggerFactory.getLogger(OMKeyRequest.class);

+  public OMKeyRequest(OMRequest omRequest) {
+    super(omRequest);
+  }
+
   /**
    * This methods avoids multiple rpc calls to SCM by allocating multiple blocks
@@ -68,7 +102,7 @@ public interface OMKeyRequest {
    * @throws IOException
    */
   @SuppressWarnings("parameternumber")
-  default List< OmKeyLocationInfo > allocateBlock(ScmClient scmClient,
+  protected List< OmKeyLocationInfo > allocateBlock(ScmClient scmClient,
       OzoneBlockTokenSecretManager secretManager,
       HddsProtos.ReplicationType replicationType,
       HddsProtos.ReplicationFactor replicationFactor,
@@ -113,7 +147,7 @@ default List< OmKeyLocationInfo > allocateBlock(ScmClient scmClient,
   /* Optimize ugi lookup for RPC operations to avoid a trip through
    * UGI.getCurrentUser which is synch'ed.
    */
-  default UserGroupInformation getRemoteUser() throws IOException {
+  private UserGroupInformation getRemoteUser() throws IOException {
     UserGroupInformation ugi = Server.getRemoteUser();
     return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
   }
@@ -123,7 +157,7 @@ default UserGroupInformation getRemoteUser() throws IOException {
    * @param user
    *
    * */
-  default EnumSet< HddsProtos.BlockTokenSecretProto.AccessModeProto>
+  private EnumSet< HddsProtos.BlockTokenSecretProto.AccessModeProto>
       getAclForUser(String user) {
     // TODO: Return correct acl for user.
     return EnumSet.allOf(
@@ -137,7 +171,7 @@ default UserGroupInformation getRemoteUser() throws IOException {
    * @param bucketName
    * @throws IOException
    */
-  default void validateBucketAndVolume(OMMetadataManager omMetadataManager,
+  public void validateBucketAndVolume(OMMetadataManager omMetadataManager,
       String volumeName, String bucketName)
       throws IOException {
     String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
@@ -157,9 +191,9 @@ default void validateBucketAndVolume(OMMetadataManager omMetadataManager,
     }
   }

-  default FileEncryptionInfo getFileEncryptionInfo(
+  protected Optional<FileEncryptionInfo> getFileEncryptionInfo(
       OzoneManager ozoneManager, OmBucketInfo bucketInfo) throws IOException {
-    FileEncryptionInfo encInfo = null;
+    Optional<FileEncryptionInfo> encInfo = Optional.absent();
     BucketEncryptionKeyInfo ezInfo = bucketInfo.getEncryptionKeyInfo();
     if (ezInfo != null) {
       if (ozoneManager.getKmsProvider() == null) {
@@ -170,15 +204,16 @@ default FileEncryptionInfo getFileEncryptionInfo(

       final String ezKeyName = ezInfo.getKeyName();
       EncryptedKeyVersion edek = generateEDEK(ozoneManager, ezKeyName);
-      encInfo = new FileEncryptionInfo(ezInfo.getSuite(), ezInfo.getVersion(),
+      encInfo = Optional.of(new FileEncryptionInfo(ezInfo.getSuite(),
+          ezInfo.getVersion(),
           edek.getEncryptedKeyVersion().getMaterial(),
           edek.getEncryptedKeyIv(), ezKeyName,
-          edek.getEncryptionKeyVersionName());
+          edek.getEncryptionKeyVersionName()));
     }
     return encInfo;
   }

-  default EncryptedKeyVersion generateEDEK(OzoneManager ozoneManager,
+  private EncryptedKeyVersion generateEDEK(OzoneManager ozoneManager,
       String ezKeyName) throws IOException {
     if (ezKeyName == null) {
       return null;
@@ -202,4 +237,213 @@ public EncryptedKeyVersion run() throws IOException {
     return edek;
   }

+  /**
+   * Prepare the response returned to the client.
+   * @return OMClientResponse
+   */
+  @SuppressWarnings("parameternumber")
+  protected OMClientResponse prepareCreateKeyResponse(@Nonnull KeyArgs keyArgs,
+      OmKeyInfo omKeyInfo, @Nonnull List<OmKeyLocationInfo> locations,
+      FileEncryptionInfo encryptionInfo, @Nullable IOException exception,
+      long clientID, long transactionLogIndex, @Nonnull String volumeName,
+      @Nonnull String bucketName, @Nonnull String keyName,
+      @Nonnull OzoneManager ozoneManager, @Nonnull OMAction omAction) {
+
+    OMResponse.Builder omResponse = OMResponse.newBuilder()
+        .setStatus(OzoneManagerProtocolProtos.Status.OK);
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
+
+    OMClientResponse omClientResponse = null;
+    if (exception == null) {
+      if (omKeyInfo == null) {
+        // the key does not exist, create a new object, the new blocks are the
+        // version 0
+        omKeyInfo = createKeyInfo(keyArgs, locations, keyArgs.getFactor(),
+            keyArgs.getType(), keyArgs.getDataSize(),
+            encryptionInfo);
+      }
+
+      long openVersion = omKeyInfo.getLatestVersionLocations().getVersion();
+
+      // Append blocks
+      try {
+        omKeyInfo.appendNewBlocks(keyArgs.getKeyLocationsList().stream()
+            .map(OmKeyLocationInfo::getFromProtobuf)
+            .collect(Collectors.toList()), false);
+
+      } catch (IOException ex) {
+        exception = ex;
+      }
+
+      if (exception != null) {
+        LOG.error("{} failed for Key: {} in volume/bucket:{}/{}",
+            omAction.getAction(), keyName, bucketName, volumeName, exception);
+        omClientResponse = createKeyErrorResponse(ozoneManager.getMetrics(),
+            omAction, exception, omResponse);
+      } else {
+        String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName,
+            bucketName, keyName, clientID);
+
+        // Add to cache entry can be done outside of lock for this openKey.
+        // Even if bucket gets deleted, when commitKey we shall identify if
+        // bucket gets deleted.
+        omMetadataManager.getOpenKeyTable().addCacheEntry(
+            new CacheKey<>(dbOpenKeyName),
+            new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
+
+        LOG.debug("{} for Key: {} in volume/bucket: {}/{}",
+            omAction.getAction(), keyName, volumeName, bucketName);
+
+
+        if (omAction == OMAction.CREATE_FILE) {
+          ozoneManager.getMetrics().incNumCreateFile();
+          omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
+              .setKeyInfo(omKeyInfo.getProtobuf())
+              .setID(clientID)
+              .setOpenVersion(openVersion).build());
+          omResponse.setCmdType(CreateFile);
+          omClientResponse = new OMFileCreateResponse(omKeyInfo, clientID,
+              omResponse.build());
+        } else {
+          ozoneManager.getMetrics().incNumKeyAllocates();
+          omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder()
+              .setKeyInfo(omKeyInfo.getProtobuf())
+              .setID(clientID).setOpenVersion(openVersion)
+              .build());
+          omResponse.setCmdType(CreateKey);
+          omClientResponse = new OMKeyCreateResponse(omKeyInfo, clientID,
+              omResponse.build());
+        }
+      }
+
+    } else {
+      LOG.error("{} failed for Key: {} in volume/bucket:{}/{}",
+          omAction.getAction(), keyName, volumeName, bucketName, exception);
+      omClientResponse = createKeyErrorResponse(ozoneManager.getMetrics(),
+          omAction, exception, omResponse);
+    }
+    // audit log
+    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(omAction,
+        auditMap, exception, getOmRequest().getUserInfo()));
+    return omClientResponse;
+  }
+
+  /**
+   * Create OmKeyInfo object.
+   * @return OmKeyInfo
+   */
+  protected OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs,
+      @Nonnull List<OmKeyLocationInfo> locations,
+      @Nonnull HddsProtos.ReplicationFactor factor,
+      @Nonnull HddsProtos.ReplicationType type, long size,
+      @Nullable FileEncryptionInfo encInfo) {
+    OmKeyInfo.Builder builder = new OmKeyInfo.Builder()
+        .setVolumeName(keyArgs.getVolumeName())
+        .setBucketName(keyArgs.getBucketName())
+        .setKeyName(keyArgs.getKeyName())
+        .setOmKeyLocationInfos(Collections.singletonList(
+            new OmKeyLocationInfoGroup(0, locations)))
+        .setCreationTime(keyArgs.getModificationTime())
+        .setModificationTime(keyArgs.getModificationTime())
+        .setDataSize(size)
+        .setReplicationType(type)
+        .setReplicationFactor(factor)
+        .setFileEncryptionInfo(encInfo);
+    if(keyArgs.getAclsList() != null) {
+      builder.setAcls(keyArgs.getAclsList());
+    }
+    return builder.build();
+  }
+
+  /**
+   * Prepare OmKeyInfo which will be persisted to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  protected OmKeyInfo prepareKeyInfo(
+      @Nonnull OMMetadataManager omMetadataManager,
+      @Nonnull KeyArgs keyArgs, @Nonnull String dbKeyName, long size,
+      @Nonnull List<OmKeyLocationInfo> locations,
+      @Nullable FileEncryptionInfo encInfo)
+      throws IOException {
+    OmKeyInfo keyInfo = null;
+    if (keyArgs.getIsMultipartKey()) {
+      keyInfo = prepareMultipartKeyInfo(omMetadataManager, keyArgs, size,
+          locations, encInfo);
+      //TODO args.getMetadata
+    } else if (omMetadataManager.getKeyTable().isExist(dbKeyName)) {
+      // TODO: Need to be fixed, as when key already exists, we are
+      // appending new blocks to existing key.
+      keyInfo = omMetadataManager.getKeyTable().get(dbKeyName);
+      // the key already exist, the new blocks will be added as new version
+      // when locations.size = 0, the new version will have identical blocks
+      // as its previous version
+      keyInfo.addNewVersion(locations, false);
+      keyInfo.setDataSize(size + keyInfo.getDataSize());
+      // The modification time is set in preExecute, use the same as
+      // modification time when key already exists.
+      keyInfo.setModificationTime(keyArgs.getModificationTime());
+    }
+    return keyInfo;
+  }
+
+  /**
+   * Prepare OmKeyInfo for multi-part upload part key which will be persisted
+   * to openKeyTable.
+   * @return OmKeyInfo
+   * @throws IOException
+   */
+  private OmKeyInfo prepareMultipartKeyInfo(
+      @Nonnull OMMetadataManager omMetadataManager,
+      @Nonnull KeyArgs args, long size,
+      @Nonnull List<OmKeyLocationInfo> locations,
+      FileEncryptionInfo encInfo) throws IOException {
+    HddsProtos.ReplicationFactor factor;
+    HddsProtos.ReplicationType type;
+
+    Preconditions.checkArgument(args.getMultipartNumber() > 0,
+        "PartNumber Should be greater than zero");
+    // When key is multipart upload part key, we should take replication
+    // type and replication factor from original key which has done
+    // initiate multipart upload. If we have not found any such, we throw
+    // error no such multipart upload.
+    String uploadID = args.getMultipartUploadID();
+    Preconditions.checkNotNull(uploadID);
+    String multipartKey = omMetadataManager
+        .getMultipartKey(args.getVolumeName(), args.getBucketName(),
+            args.getKeyName(), uploadID);
+    OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
+        multipartKey);
+    if (partKeyInfo == null) {
+      throw new OMException("No such Multipart upload is with specified " +
+          "uploadId " + uploadID,
+          OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
+    } else {
+      factor = partKeyInfo.getFactor();
+      type = partKeyInfo.getType();
+    }
+    // For this upload part we don't need to check in KeyTable. As this
+    // is not an actual key, it is a part of the key.
+    return createKeyInfo(args, locations, factor, type, size, encInfo);
+  }
+
+
+  private OMClientResponse createKeyErrorResponse(@Nonnull OMMetrics omMetrics,
+      @Nonnull OMAction omAction, @Nonnull IOException exception,
+      @Nonnull OMResponse.Builder omResponse) {
+    if (omAction == OMAction.CREATE_FILE) {
+      omMetrics.incNumCreateFileFails();
+      omResponse.setCmdType(CreateFile);
+      return new OMFileCreateResponse(null, -1L,
+          createErrorOMResponse(omResponse, exception));
+    } else {
+      omMetrics.incNumKeyAllocateFails();
+      omResponse.setCmdType(CreateKey);
+      return new OMKeyCreateResponse(null, -1L,
+          createErrorOMResponse(omResponse, exception));
+    }
+  }
+
 }
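With the helpers consolidated in the abstract base, a concrete request now passes the protobuf request up through the constructor and calls the inherited members rather than interface default methods. A hypothetical subclass sketch (the class name and trimmed body are illustrative, not part of the commit; it is declared abstract here only so the sketch can omit the request-specific overrides such as validateAndUpdateCache):

    // Illustrative only: how a subclass uses the members inherited from OMKeyRequest.
    public abstract class OMExampleWriteRequest extends OMKeyRequest {

      public OMExampleWriteRequest(OMRequest omRequest) {
        super(omRequest);   // OMKeyRequest forwards this to OMClientRequest
      }

      // Inside its validateAndUpdateCache(...) a subclass can now call, for example:
      //   validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
      //   Optional<FileEncryptionInfo> encInfo =
      //       getFileEncryptionInfo(ozoneManager, bucketInfo);
      //   OmKeyInfo keyInfo = prepareKeyInfo(omMetadataManager, keyArgs, dbKeyName,
      //       size, locations, encInfo.orNull());
      //   return prepareCreateKeyResponse(..., encInfo.orNull(), ...);
    }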