HADOOP-17912. ABFS: Support for Encryption Context (#6221)
Contributed by Pranav Saxena and others.
parent 9edcf42c78
commit 0b43026cab

hadoop-tools/hadoop-azure/.gitignore (vendored)
@@ -1,5 +1,6 @@
 .checkstyle
 bin/
+src/test/resources/combinationConfigFiles
 src/test/resources/abfs-combination-test-configs.xml
 dev-support/testlogs
 src/test/resources/accountSettings/*
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsConfiguration.java

@@ -49,6 +49,7 @@
 import org.apache.hadoop.fs.azurebfs.diagnostics.StringConfigurationBasicValidator;
 import org.apache.hadoop.fs.azurebfs.enums.Trilean;
 import org.apache.hadoop.fs.azurebfs.extensions.CustomTokenProviderAdaptee;
+import org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider;
 import org.apache.hadoop.fs.azurebfs.extensions.SASTokenProvider;
 import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider;
 import org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider;
@@ -337,6 +338,10 @@ public class AbfsConfiguration{
       FS_AZURE_ABFS_RENAME_RESILIENCE, DefaultValue = DEFAULT_ENABLE_ABFS_RENAME_RESILIENCE)
   private boolean renameResilience;
 
+  private String clientProvidedEncryptionKey;
+
+  private String clientProvidedEncryptionKeySHA;
+
   public AbfsConfiguration(final Configuration rawConfig, String accountName)
       throws IllegalAccessException, InvalidConfigurationValueException, IOException {
     this.rawConfig = ProviderUtils.excludeIncompatibleCredentialProviders(
@@ -957,6 +962,32 @@ public SASTokenProvider getSASTokenProvider() throws AzureBlobFileSystemException
     }
   }
 
+  public EncryptionContextProvider createEncryptionContextProvider() {
+    try {
+      String configKey = FS_AZURE_ENCRYPTION_CONTEXT_PROVIDER_TYPE;
+      if (get(configKey) == null) {
+        return null;
+      }
+      Class<? extends EncryptionContextProvider> encryptionContextClass =
+          getAccountSpecificClass(configKey, null,
+              EncryptionContextProvider.class);
+      Preconditions.checkArgument(encryptionContextClass != null, String.format(
+          "The configuration value for %s is invalid, or config key is not account-specific",
+          configKey));
+
+      EncryptionContextProvider encryptionContextProvider =
+          ReflectionUtils.newInstance(encryptionContextClass, rawConfig);
+      Preconditions.checkArgument(encryptionContextProvider != null,
+          String.format("Failed to initialize %s", encryptionContextClass));
+
+      LOG.trace("{} init complete", encryptionContextClass.getName());
+      return encryptionContextProvider;
+    } catch (Exception e) {
+      throw new IllegalArgumentException(
+          "Unable to load encryption context provider class: ", e);
+    }
+  }
+
   public boolean isReadAheadEnabled() {
     return this.enabledReadAhead;
   }
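The provider class above is resolved reflectively from configuration. A minimal, hedged sketch of enabling it (the provider class name is hypothetical; only the config key comes from this commit):

    // Assumes org.example.KmsEncryptionContextProvider implements
    // org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider.
    Configuration conf = new Configuration();
    conf.set("fs.azure.encryption.context.provider.type",
        "org.example.KmsEncryptionContextProvider");
    // createEncryptionContextProvider() instantiates the class via
    // ReflectionUtils.newInstance, or returns null when the key is unset.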
@@ -1068,9 +1099,22 @@ public boolean enableAbfsListIterator() {
     return this.enableAbfsListIterator;
   }
 
-  public String getClientProvidedEncryptionKey() {
-    String accSpecEncKey = accountConf(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY);
-    return rawConfig.get(accSpecEncKey, null);
+  public String getEncodedClientProvidedEncryptionKey() {
+    if (clientProvidedEncryptionKey == null) {
+      String accSpecEncKey = accountConf(
+          FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY);
+      clientProvidedEncryptionKey = rawConfig.get(accSpecEncKey, null);
+    }
+    return clientProvidedEncryptionKey;
+  }
+
+  public String getEncodedClientProvidedEncryptionKeySHA() {
+    if (clientProvidedEncryptionKeySHA == null) {
+      String accSpecEncKey = accountConf(
+          FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY_SHA);
+      clientProvidedEncryptionKeySHA = rawConfig.get(accSpecEncKey, null);
+    }
+    return clientProvidedEncryptionKeySHA;
   }
 
   @VisibleForTesting
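These getters lazily cache the account-scoped global key and its SHA-256 hash, both Base64-encoded. A minimal sketch of producing a matching pair with plain JDK APIs (variable names illustrative; exception handling omitted):

    byte[] key = new byte[32];                        // 256-bit key material
    new java.security.SecureRandom().nextBytes(key);
    String encodedKey = java.util.Base64.getEncoder().encodeToString(key);
    byte[] sha = java.security.MessageDigest.getInstance("SHA-256").digest(key);
    String encodedSha = java.util.Base64.getEncoder().encodeToString(sha);
    conf.set("fs.azure.encryption.encoded.client-provided-key", encodedKey);
    conf.set("fs.azure.encryption.encoded.client-provided-key-sha", encodedSha);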
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java

@@ -55,7 +55,15 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider;
+import org.apache.hadoop.fs.azurebfs.security.ContextProviderEncryptionAdapter;
+import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
+import org.apache.hadoop.fs.azurebfs.security.NoContextEncryptionAdapter;
+import org.apache.hadoop.fs.azurebfs.utils.EncryptionType;
+import org.apache.hadoop.fs.azurebfs.utils.NamespaceUtil;
 import org.apache.hadoop.fs.impl.BackReference;
+import org.apache.hadoop.fs.PathIOException;
+
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
@@ -149,6 +157,7 @@
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_ABFS_ENDPOINT;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_BUFFERED_PREAD_DISABLE;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_IDENTITY_TRANSFORM_CLASS;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_ENCRYPTION_CONTEXT;
 
 /**
  * Provides the bridging logic between Hadoop's abstract filesystem and Azure Storage.
@@ -362,25 +371,7 @@ public boolean getIsNamespaceEnabled(TracingContext tracingContext)
           + " getAcl server call", e);
     }
 
-    LOG.debug("Get root ACL status");
-    try (AbfsPerfInfo perfInfo = startTracking("getIsNamespaceEnabled",
-        "getAclStatus")) {
-      AbfsRestOperation op = client
-          .getAclStatus(AbfsHttpConstants.ROOT_PATH, tracingContext);
-      perfInfo.registerResult(op.getResult());
-      isNamespaceEnabled = Trilean.getTrilean(true);
-      perfInfo.registerSuccess(true);
-    } catch (AbfsRestOperationException ex) {
-      // Get ACL status is a HEAD request, its response doesn't contain
-      // errorCode
-      // So can only rely on its status code to determine its account type.
-      if (HttpURLConnection.HTTP_BAD_REQUEST != ex.getStatusCode()) {
-        throw ex;
-      }
-
-      isNamespaceEnabled = Trilean.getTrilean(false);
-    }
-
+    isNamespaceEnabled = Trilean.getTrilean(NamespaceUtil.isNamespaceEnabled(client, tracingContext));
     return isNamespaceEnabled.toBoolean();
   }
 
@@ -469,16 +460,22 @@ public void setFilesystemProperties(
   }
 
   public Hashtable<String, String> getPathStatus(final Path path,
-      TracingContext tracingContext) throws AzureBlobFileSystemException {
+      TracingContext tracingContext) throws IOException {
     try (AbfsPerfInfo perfInfo = startTracking("getPathStatus", "getPathStatus")){
       LOG.debug("getPathStatus for filesystem: {} path: {}",
               client.getFileSystem(),
               path);
 
       final Hashtable<String, String> parsedXmsProperties;
+      final String relativePath = getRelativePath(path);
+      final ContextEncryptionAdapter contextEncryptionAdapter
+          = createEncryptionAdapterFromServerStoreContext(relativePath,
+          tracingContext);
       final AbfsRestOperation op = client
-          .getPathStatus(getRelativePath(path), true, tracingContext);
+          .getPathStatus(relativePath, true, tracingContext,
+              contextEncryptionAdapter);
       perfInfo.registerResult(op.getResult());
+      contextEncryptionAdapter.destroy();
 
       final String xMsProperties = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_PROPERTIES);
@@ -490,9 +487,52 @@ public Hashtable<String, String> getPathStatus(final Path path,
     }
   }
 
+  /**
+   * Creates an object of {@link ContextEncryptionAdapter}
+   * from a file path. It calls {@link org.apache.hadoop.fs.azurebfs.services.AbfsClient
+   * #getPathStatus(String, boolean, TracingContext, EncryptionAdapter)} method to get
+   * contextValue (x-ms-encryption-context) from the server. The contextValue is passed
+   * to the constructor of EncryptionAdapter to create the required object of
+   * EncryptionAdapter.
+   * @param path Path of the file for which the object of EncryptionAdapter is required.
+   * @return <ul>
+   * <li>
+   * {@link NoContextEncryptionAdapter}: if encryptionType is not of type
+   * {@link org.apache.hadoop.fs.azurebfs.utils.EncryptionType#ENCRYPTION_CONTEXT}.
+   * </li>
+   * <li>
+   * new object of {@link ContextProviderEncryptionAdapter} containing required encryptionKeys for the give file:
+   * if encryptionType is of type {@link org.apache.hadoop.fs.azurebfs.utils.EncryptionType#ENCRYPTION_CONTEXT}.
+   * </li>
+   * </ul>
+   */
+  private ContextEncryptionAdapter createEncryptionAdapterFromServerStoreContext(final String path,
+      final TracingContext tracingContext) throws IOException {
+    if (client.getEncryptionType() != EncryptionType.ENCRYPTION_CONTEXT) {
+      return NoContextEncryptionAdapter.getInstance();
+    }
+    final String responseHeaderEncryptionContext = client.getPathStatus(path,
+        false, tracingContext, null).getResult()
+        .getResponseHeader(X_MS_ENCRYPTION_CONTEXT);
+    if (responseHeaderEncryptionContext == null) {
+      throw new PathIOException(path,
+          "EncryptionContext not present in GetPathStatus response");
+    }
+    byte[] encryptionContext = responseHeaderEncryptionContext.getBytes(
+        StandardCharsets.UTF_8);
+
+    try {
+      return new ContextProviderEncryptionAdapter(client.getEncryptionContextProvider(),
+          new Path(path).toUri().getPath(), encryptionContext);
+    } catch (IOException e) {
+      LOG.debug("Could not initialize EncryptionAdapter");
+      throw e;
+    }
+  }
+
   public void setPathProperties(final Path path,
       final Hashtable<String, String> properties, TracingContext tracingContext)
-      throws AzureBlobFileSystemException {
+      throws IOException {
     try (AbfsPerfInfo perfInfo = startTracking("setPathProperties", "setPathProperties")){
       LOG.debug("setPathProperties for filesystem: {} path: {} with properties: {}",
           client.getFileSystem(),
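Once built from the server-stored context, the adapter hands back the Base64 key material attached to subsequent requests. A hedged usage sketch (`provider` stands for some EncryptionContextProvider instance; the context string is whatever x-ms-encryption-context returned):

    ContextEncryptionAdapter adapter = new ContextProviderEncryptionAdapter(
        provider, "/dir/file.txt",
        "server-context-token".getBytes(StandardCharsets.UTF_8));
    String encodedKey = adapter.getEncodedKey();        // Base64 encryption key
    String encodedKeySha = adapter.getEncodedKeySHA();  // Base64 SHA-256 of the key
    adapter.destroy();  // wipe key material once the request completes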
@@ -505,9 +545,14 @@ public void setPathProperties(final Path path,
       } catch (CharacterCodingException ex) {
         throw new InvalidAbfsRestOperationException(ex);
       }
+      final String relativePath = getRelativePath(path);
+      final ContextEncryptionAdapter contextEncryptionAdapter
+          = createEncryptionAdapterFromServerStoreContext(relativePath,
+          tracingContext);
       final AbfsRestOperation op = client
           .setPathProperties(getRelativePath(path), commaSeparatedProperties,
-              tracingContext);
+              tracingContext, contextEncryptionAdapter);
+      contextEncryptionAdapter.destroy();
       perfInfo.registerResult(op.getResult()).registerSuccess(true);
     }
   }
@@ -563,23 +608,30 @@ public OutputStream createFile(final Path path,
         triggerConditionalCreateOverwrite = true;
       }
 
+      final ContextEncryptionAdapter contextEncryptionAdapter;
+      if (client.getEncryptionType() == EncryptionType.ENCRYPTION_CONTEXT) {
+        contextEncryptionAdapter = new ContextProviderEncryptionAdapter(
+            client.getEncryptionContextProvider(), getRelativePath(path));
+      } else {
+        contextEncryptionAdapter = NoContextEncryptionAdapter.getInstance();
+      }
       AbfsRestOperation op;
       if (triggerConditionalCreateOverwrite) {
         op = conditionalCreateOverwriteFile(relativePath,
             statistics,
-            isNamespaceEnabled ? getOctalNotation(permission) : null,
-            isNamespaceEnabled ? getOctalNotation(umask) : null,
+            new Permissions(isNamespaceEnabled, permission, umask),
             isAppendBlob,
+            contextEncryptionAdapter,
             tracingContext
         );
 
       } else {
         op = client.createPath(relativePath, true,
             overwrite,
-            isNamespaceEnabled ? getOctalNotation(permission) : null,
-            isNamespaceEnabled ? getOctalNotation(umask) : null,
+            new Permissions(isNamespaceEnabled, permission, umask),
             isAppendBlob,
             null,
+            contextEncryptionAdapter,
             tracingContext);
 
       }
@@ -595,6 +647,7 @@ public OutputStream createFile(final Path path,
               statistics,
               relativePath,
               0,
+              contextEncryptionAdapter,
               tracingContext));
     }
   }
@@ -604,32 +657,31 @@ public OutputStream createFile(final Path path,
    * only if there is match for eTag of existing file.
    * @param relativePath
    * @param statistics
-   * @param permission
-   * @param umask
+   * @param permissions contains permission and umask
    * @param isAppendBlob
    * @return
    * @throws AzureBlobFileSystemException
    */
   private AbfsRestOperation conditionalCreateOverwriteFile(final String relativePath,
       final FileSystem.Statistics statistics,
-      final String permission,
-      final String umask,
+      final Permissions permissions,
       final boolean isAppendBlob,
-      TracingContext tracingContext) throws AzureBlobFileSystemException {
+      final ContextEncryptionAdapter contextEncryptionAdapter,
+      final TracingContext tracingContext) throws IOException {
     AbfsRestOperation op;
 
     try {
       // Trigger a create with overwrite=false first so that eTag fetch can be
       // avoided for cases when no pre-existing file is present (major portion
       // of create file traffic falls into the case of no pre-existing file).
-      op = client.createPath(relativePath, true, false, permission, umask,
-          isAppendBlob, null, tracingContext);
+      op = client.createPath(relativePath, true, false, permissions,
+          isAppendBlob, null, contextEncryptionAdapter, tracingContext);
 
     } catch (AbfsRestOperationException e) {
       if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT) {
         // File pre-exists, fetch eTag
         try {
-          op = client.getPathStatus(relativePath, false, tracingContext);
+          op = client.getPathStatus(relativePath, false, tracingContext, null);
         } catch (AbfsRestOperationException ex) {
           if (ex.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) {
             // Is a parallel access case, as file which was found to be
@@ -647,8 +699,8 @@ private AbfsRestOperation conditionalCreateOverwriteFile(final String relativePath,
 
         try {
           // overwrite only if eTag matches with the file properties fetched befpre
-          op = client.createPath(relativePath, true, true, permission, umask,
-              isAppendBlob, eTag, tracingContext);
+          op = client.createPath(relativePath, true, true, permissions,
+              isAppendBlob, eTag, contextEncryptionAdapter, tracingContext);
         } catch (AbfsRestOperationException ex) {
           if (ex.getStatusCode() == HttpURLConnection.HTTP_PRECON_FAILED) {
             // Is a parallel access case, as file with eTag was just queried
@@ -691,6 +743,7 @@ private AbfsOutputStreamContext populateAbfsOutputStreamContext(
       FileSystem.Statistics statistics,
       String path,
       long position,
+      ContextEncryptionAdapter contextEncryptionAdapter,
       TracingContext tracingContext) {
     int bufferSize = abfsConfiguration.getWriteBufferSize();
     if (isAppendBlob && bufferSize > FileSystemConfigurations.APPENDBLOB_MAX_WRITE_BUFFER_SIZE) {
@@ -707,6 +760,7 @@ private AbfsOutputStreamContext populateAbfsOutputStreamContext(
         .withWriteMaxConcurrentRequestCount(abfsConfiguration.getWriteMaxConcurrentRequestCount())
         .withMaxWriteRequestsToQueue(abfsConfiguration.getMaxWriteRequestsToQueue())
         .withLease(lease)
+        .withEncryptionAdapter(contextEncryptionAdapter)
         .withBlockFactory(getBlockFactory())
         .withBlockOutputActiveBlocks(blockOutputActiveBlocks)
         .withClient(client)
@@ -722,7 +776,7 @@ private AbfsOutputStreamContext populateAbfsOutputStreamContext(
 
   public void createDirectory(final Path path, final FsPermission permission,
       final FsPermission umask, TracingContext tracingContext)
-      throws AzureBlobFileSystemException {
+      throws IOException {
     try (AbfsPerfInfo perfInfo = startTracking("createDirectory", "createPath")) {
       boolean isNamespaceEnabled = getIsNamespaceEnabled(tracingContext);
       LOG.debug("createDirectory filesystem: {} path: {} permission: {} umask: {} isNamespaceEnabled: {}",
@@ -734,11 +788,10 @@ public void createDirectory(final Path path, final FsPermission permission,
 
       boolean overwrite =
           !isNamespaceEnabled || abfsConfiguration.isEnabledMkdirOverwrite();
+      Permissions permissions = new Permissions(isNamespaceEnabled,
+          permission, umask);
       final AbfsRestOperation op = client.createPath(getRelativePath(path),
-          false, overwrite,
-          isNamespaceEnabled ? getOctalNotation(permission) : null,
-          isNamespaceEnabled ? getOctalNotation(umask) : null, false, null,
-          tracingContext);
+          false, overwrite, permissions, false, null, null, tracingContext);
       perfInfo.registerResult(op.getResult()).registerSuccess(true);
     }
   }
@@ -764,7 +817,19 @@ public AbfsInputStream openFileForRead(Path path,
     String relativePath = getRelativePath(path);
     String resourceType, eTag;
     long contentLength;
-    if (fileStatus instanceof VersionedFileStatus) {
+    ContextEncryptionAdapter contextEncryptionAdapter = NoContextEncryptionAdapter.getInstance();
+    /*
+     * GetPathStatus API has to be called in case of:
+     * 1. fileStatus is null or not an object of VersionedFileStatus: as eTag
+     * would not be there in the fileStatus object.
+     * 2. fileStatus is an object of VersionedFileStatus and the object doesn't
+     * have encryptionContext field when client's encryptionType is
+     * ENCRYPTION_CONTEXT.
+     */
+    if ((fileStatus instanceof VersionedFileStatus) && (
+        client.getEncryptionType() != EncryptionType.ENCRYPTION_CONTEXT
+        || ((VersionedFileStatus) fileStatus).getEncryptionContext()
+        != null)) {
       path = path.makeQualified(this.uri, path);
       Preconditions.checkArgument(fileStatus.getPath().equals(path),
           String.format(
@@ -773,19 +838,37 @@ public AbfsInputStream openFileForRead(Path path,
       resourceType = fileStatus.isFile() ? FILE : DIRECTORY;
       contentLength = fileStatus.getLen();
       eTag = ((VersionedFileStatus) fileStatus).getVersion();
-    } else {
-      if (fileStatus != null) {
-        LOG.debug(
-            "Fallback to getPathStatus REST call as provided filestatus "
-                + "is not of type VersionedFileStatus");
+      final String encryptionContext
+          = ((VersionedFileStatus) fileStatus).getEncryptionContext();
+      if (client.getEncryptionType() == EncryptionType.ENCRYPTION_CONTEXT) {
+        contextEncryptionAdapter = new ContextProviderEncryptionAdapter(
+            client.getEncryptionContextProvider(), getRelativePath(path),
+            encryptionContext.getBytes(StandardCharsets.UTF_8));
       }
+    } else {
       AbfsHttpOperation op = client.getPathStatus(relativePath, false,
-          tracingContext).getResult();
+          tracingContext, null).getResult();
       resourceType = op.getResponseHeader(
           HttpHeaderConfigurations.X_MS_RESOURCE_TYPE);
       contentLength = Long.parseLong(
           op.getResponseHeader(HttpHeaderConfigurations.CONTENT_LENGTH));
       eTag = op.getResponseHeader(HttpHeaderConfigurations.ETAG);
+      /*
+       * For file created with ENCRYPTION_CONTEXT, client shall receive
+       * encryptionContext from header field: X_MS_ENCRYPTION_CONTEXT.
+       */
+      if (client.getEncryptionType() == EncryptionType.ENCRYPTION_CONTEXT) {
+        final String fileEncryptionContext = op.getResponseHeader(
+            HttpHeaderConfigurations.X_MS_ENCRYPTION_CONTEXT);
+        if (fileEncryptionContext == null) {
+          LOG.debug("EncryptionContext missing in GetPathStatus response");
+          throw new PathIOException(path.toString(),
+              "EncryptionContext not present in GetPathStatus response headers");
+        }
+        contextEncryptionAdapter = new ContextProviderEncryptionAdapter(
+            client.getEncryptionContextProvider(), getRelativePath(path),
+            fileEncryptionContext.getBytes(StandardCharsets.UTF_8));
+      }
     }
 
     if (parseIsDirectory(resourceType)) {
@@ -801,13 +884,14 @@ public AbfsInputStream openFileForRead(Path path,
       // Add statistics for InputStream
       return new AbfsInputStream(client, statistics, relativePath,
           contentLength, populateAbfsInputStreamContext(
-          parameters.map(OpenFileParameters::getOptions)),
+          parameters.map(OpenFileParameters::getOptions),
+              contextEncryptionAdapter),
           eTag, tracingContext);
     }
   }
 
   private AbfsInputStreamContext populateAbfsInputStreamContext(
-      Optional<Configuration> options) {
+      Optional<Configuration> options, ContextEncryptionAdapter contextEncryptionAdapter) {
     boolean bufferedPreadDisabled = options
         .map(c -> c.getBoolean(FS_AZURE_BUFFERED_PREAD_DISABLE, false))
         .orElse(false);
@@ -824,6 +908,7 @@ private AbfsInputStreamContext populateAbfsInputStreamContext(
             abfsConfiguration.shouldReadBufferSizeAlways())
         .withReadAheadBlockSize(abfsConfiguration.getReadAheadBlockSize())
         .withBufferedPreadDisabled(bufferedPreadDisabled)
+        .withEncryptionAdapter(contextEncryptionAdapter)
         .withAbfsBackRef(fsBackRef)
         .build();
   }
@@ -840,7 +925,7 @@ public OutputStream openFileForWrite(final Path path,
       String relativePath = getRelativePath(path);
 
       final AbfsRestOperation op = client
-          .getPathStatus(relativePath, false, tracingContext);
+          .getPathStatus(relativePath, false, tracingContext, null);
       perfInfo.registerResult(op.getResult());
 
       final String resourceType = op.getResult().getResponseHeader(HttpHeaderConfigurations.X_MS_RESOURCE_TYPE);
@@ -864,6 +949,21 @@ public OutputStream openFileForWrite(final Path path,
       }
 
       AbfsLease lease = maybeCreateLease(relativePath, tracingContext);
+      final ContextEncryptionAdapter contextEncryptionAdapter;
+      if (client.getEncryptionType() == EncryptionType.ENCRYPTION_CONTEXT) {
+        final String encryptionContext = op.getResult()
+            .getResponseHeader(
+                HttpHeaderConfigurations.X_MS_ENCRYPTION_CONTEXT);
+        if (encryptionContext == null) {
+          throw new PathIOException(path.toString(),
+              "File doesn't have encryptionContext.");
+        }
+        contextEncryptionAdapter = new ContextProviderEncryptionAdapter(
+            client.getEncryptionContextProvider(), getRelativePath(path),
+            encryptionContext.getBytes(StandardCharsets.UTF_8));
+      } else {
+        contextEncryptionAdapter = NoContextEncryptionAdapter.getInstance();
+      }
 
       return new AbfsOutputStream(
           populateAbfsOutputStreamContext(
@@ -873,6 +973,7 @@ public OutputStream openFileForWrite(final Path path,
               statistics,
               relativePath,
               offset,
+              contextEncryptionAdapter,
               tracingContext));
     }
   }
@@ -906,7 +1007,7 @@ public boolean rename(final Path source,
       final Path destination,
       final TracingContext tracingContext,
       final String sourceEtag) throws
-      AzureBlobFileSystemException {
+      IOException {
     final Instant startAggregate = abfsPerfTracker.getLatencyInstant();
     long countAggregate = 0;
     boolean shouldContinue;
@@ -1005,7 +1106,7 @@ public FileStatus getFileStatus(final Path path,
       }
     } else {
       perfInfo.registerCallee("getPathStatus");
-      op = client.getPathStatus(getRelativePath(path), false, tracingContext);
+      op = client.getPathStatus(getRelativePath(path), false, tracingContext, null);
     }
 
     perfInfo.registerResult(op.getResult());
@@ -1015,6 +1116,7 @@ public FileStatus getFileStatus(final Path path,
       String eTag = extractEtagHeader(result);
       final String lastModified = result.getResponseHeader(HttpHeaderConfigurations.LAST_MODIFIED);
       final String permissions = result.getResponseHeader((HttpHeaderConfigurations.X_MS_PERMISSIONS));
+      final String encryptionContext = op.getResult().getResponseHeader(X_MS_ENCRYPTION_CONTEXT);
       final boolean hasAcl = AbfsPermission.isExtendedAcl(permissions);
       final long contentLength;
       final boolean resourceIsDir;
@@ -1051,7 +1153,8 @@ public FileStatus getFileStatus(final Path path,
           blockSize,
           DateTimeUtils.parseLastModifiedTime(lastModified),
           path,
-          eTag);
+          eTag,
+          encryptionContext);
     }
   }
 
@@ -1129,6 +1232,7 @@ public String listStatus(final Path path, final String startFrom,
       for (ListResultEntrySchema entry : retrievedSchema.paths()) {
         final String owner = identityTransformer.transformIdentityForGetRequest(entry.owner(), true, userName);
         final String group = identityTransformer.transformIdentityForGetRequest(entry.group(), false, primaryUserGroup);
+        final String encryptionContext = entry.getXMsEncryptionContext();
         final FsPermission fsPermission = entry.permissions() == null
             ? new AbfsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)
             : AbfsPermission.valueOf(entry.permissions());
@@ -1157,7 +1261,8 @@ public String listStatus(final Path path, final String startFrom,
             blockSize,
             lastModifiedMillis,
             entryPath,
-            entry.eTag()));
+            entry.eTag(),
+            encryptionContext));
       }
 
       perfInfo.registerSuccess(true);
@@ -1627,16 +1732,38 @@ private void initializeClient(URI uri, String fileSystemName,
           abfsConfiguration.getRawConfiguration());
     }
+
+    // Encryption setup
+    EncryptionContextProvider encryptionContextProvider = null;
+    if (isSecure) {
+      encryptionContextProvider =
+          abfsConfiguration.createEncryptionContextProvider();
+      if (encryptionContextProvider != null) {
+        if (abfsConfiguration.getEncodedClientProvidedEncryptionKey() != null) {
+          throw new PathIOException(uri.getPath(),
+              "Both global key and encryption context are set, only one allowed");
+        }
+        encryptionContextProvider.initialize(
+            abfsConfiguration.getRawConfiguration(), accountName,
+            fileSystemName);
+      } else if (abfsConfiguration.getEncodedClientProvidedEncryptionKey() != null) {
+        if (abfsConfiguration.getEncodedClientProvidedEncryptionKeySHA() == null) {
+          throw new PathIOException(uri.getPath(),
+              "Encoded SHA256 hash must be provided for global encryption");
+        }
+      }
+    }
+
     LOG.trace("Initializing AbfsClient for {}", baseUrl);
     if (tokenProvider != null) {
       this.client = new AbfsClient(baseUrl, creds, abfsConfiguration,
-          tokenProvider,
+          tokenProvider, encryptionContextProvider,
           populateAbfsClientContext());
     } else {
       this.client = new AbfsClient(baseUrl, creds, abfsConfiguration,
-          sasTokenProvider,
+          sasTokenProvider, encryptionContextProvider,
           populateAbfsClientContext());
     }
 
     LOG.trace("AbfsClient init complete");
   }
 
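The two encryption modes are mutually exclusive, as the setup block above enforces. A hedged configuration sketch (the provider class name is hypothetical; the keys are the ones this commit defines):

    Configuration conf = new Configuration();
    // Mode A: per-file keys via an encryption context provider
    conf.set("fs.azure.encryption.context.provider.type",
        "org.example.KmsEncryptionContextProvider");   // hypothetical class
    // Mode B: account-wide client-provided key (global CPK)
    // conf.set("fs.azure.encryption.encoded.client-provided-key", encodedKey);
    // conf.set("fs.azure.encryption.encoded.client-provided-key-sha", encodedSha);
    // Configuring both A and B makes initializeClient() throw a PathIOException,
    // as does configuring the global key without its SHA-256 hash.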
@@ -1654,12 +1781,7 @@ private AbfsClientContext populateAbfsClientContext() {
         .build();
   }
 
-  private String getOctalNotation(FsPermission fsPermission) {
-    Preconditions.checkNotNull(fsPermission, "fsPermission");
-    return String.format(AbfsHttpConstants.PERMISSION_FORMAT, fsPermission.toOctal());
-  }
-
-  private String getRelativePath(final Path path) {
+  public String getRelativePath(final Path path) {
     Preconditions.checkNotNull(path, "path");
     String relPath = path.toUri().getPath();
     if (relPath.isEmpty()) {
@@ -1682,7 +1804,14 @@ private boolean parseIsDirectory(final String resourceType) {
         && resourceType.equalsIgnoreCase(AbfsHttpConstants.DIRECTORY);
   }
 
-  private String convertXmsPropertiesToCommaSeparatedString(final Hashtable<String, String> properties) throws
+  /**
+   * Convert properties stored in a Map into a comma separated string. For map
+   * <key1:value1; key2:value2: keyN:valueN>, method would convert to:
+   * key1=value1,key2=value,...,keyN=valueN
+   * */
+  @VisibleForTesting
+  String convertXmsPropertiesToCommaSeparatedString(final Map<String,
+      String> properties) throws
       CharacterCodingException {
     StringBuilder commaSeparatedProperties = new StringBuilder();
 
@@ -1780,7 +1909,7 @@ private AbfsPerfInfo startTracking(String callerName, String calleeName) {
    * in a LIST or HEAD request.
    * The etag is included in the java serialization.
    */
-  private static final class VersionedFileStatus extends FileStatus
+  static final class VersionedFileStatus extends FileStatus
       implements EtagSource {
 
     /**
@@ -1795,11 +1924,13 @@ private static final class VersionedFileStatus extends FileStatus
      */
     private String version;
 
+    private String encryptionContext;
+
     private VersionedFileStatus(
         final String owner, final String group, final FsPermission fsPermission, final boolean hasAcl,
         final long length, final boolean isdir, final int blockReplication,
         final long blocksize, final long modificationTime, final Path path,
-        String version) {
+        final String version, final String encryptionContext) {
       super(length, isdir, blockReplication, blocksize, modificationTime, 0,
           fsPermission,
           owner,
@@ -1809,6 +1940,7 @@ private VersionedFileStatus(
           hasAcl, false, false);
 
       this.version = version;
+      this.encryptionContext = encryptionContext;
     }
 
     /** Compare if this object is equal to another object.
@@ -1861,6 +1993,10 @@ public String getEtag() {
       return getVersion();
     }
 
+    public String getEncryptionContext() {
+      return encryptionContext;
+    }
+
     @Override
     public String toString() {
       final StringBuilder sb = new StringBuilder(
@@ -1872,6 +2008,54 @@ public String toString() {
     }
   }
 
+  /**
+   * Permissions class contain provided permission and umask in octalNotation.
+   * If the object is created for namespace-disabled account, the permission and
+   * umask would be null.
+   * */
+  public static final class Permissions {
+    private final String permission;
+    private final String umask;
+
+    Permissions(boolean isNamespaceEnabled, FsPermission permission,
+        FsPermission umask) {
+      if (isNamespaceEnabled) {
+        this.permission = getOctalNotation(permission);
+        this.umask = getOctalNotation(umask);
+      } else {
+        this.permission = null;
+        this.umask = null;
+      }
+    }
+
+    private String getOctalNotation(FsPermission fsPermission) {
+      Preconditions.checkNotNull(fsPermission, "fsPermission");
+      return String.format(AbfsHttpConstants.PERMISSION_FORMAT, fsPermission.toOctal());
+    }
+
+    public Boolean hasPermission() {
+      return permission != null && !permission.isEmpty();
+    }
+
+    public Boolean hasUmask() {
+      return umask != null && !umask.isEmpty();
+    }
+
+    public String getPermission() {
+      return permission;
+    }
+
+    public String getUmask() {
+      return umask;
+    }
+
+    @Override
+    public String toString() {
+      return String.format("{\"permission\":%s, \"umask\":%s}", permission,
+          umask);
+    }
+  }
+
   /**
    * A builder class for AzureBlobFileSystemStore.
    */
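A short usage sketch of the new Permissions holder (values illustrative; assumes FsPermission's string constructor parsing octal modes):

    Permissions hns = new Permissions(true,
        new FsPermission("755"), new FsPermission("027"));
    hns.hasPermission();    // true: octal strings are materialized for HNS accounts
    Permissions flat = new Permissions(false,
        new FsPermission("755"), new FsPermission("027"));
    flat.hasPermission();   // false: both fields stay null when namespace is disabled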
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/AbfsHttpConstants.java

@@ -119,6 +119,9 @@ public final class AbfsHttpConstants {
   public static final char CHAR_EQUALS = '=';
   public static final char CHAR_STAR = '*';
   public static final char CHAR_PLUS = '+';
+  public static final String DECEMBER_2019_API_VERSION = "2019-12-12";
+  public static final String APRIL_2021_API_VERSION = "2021-04-10";
+
   /**
    * Value that differentiates categories of the http_status.
    * <pre>
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/ConfigurationKeys.java

@@ -203,8 +203,14 @@ public final class ConfigurationKeys {
 
   /** Setting this true will make the driver use it's own RemoteIterator implementation */
   public static final String FS_AZURE_ENABLE_ABFS_LIST_ITERATOR = "fs.azure.enable.abfslistiterator";
-  /** Server side encryption key */
-  public static final String FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY = "fs.azure.client-provided-encryption-key";
+  /** Server side encryption key encoded in Base64format {@value}.*/
+  public static final String FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY =
+      "fs.azure.encryption.encoded.client-provided-key";
+  /** SHA256 hash of encryption key encoded in Base64format */
+  public static final String FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY_SHA =
+      "fs.azure.encryption.encoded.client-provided-key-sha";
+  /** Custom EncryptionContextProvider type */
+  public static final String FS_AZURE_ENCRYPTION_CONTEXT_PROVIDER_TYPE = "fs.azure.encryption.context.provider.type";
 
   /** End point of ABFS account: {@value}. */
   public static final String AZURE_ABFS_ENDPOINT = "fs.azure.abfs.endpoint";
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/constants/HttpHeaderConfigurations.java

@@ -65,6 +65,7 @@ public final class HttpHeaderConfigurations {
   public static final String X_MS_ENCRYPTION_ALGORITHM = "x-ms-encryption-algorithm";
   public static final String X_MS_REQUEST_SERVER_ENCRYPTED = "x-ms-request-server-encrypted";
   public static final String X_MS_SERVER_ENCRYPTED = "x-ms-server-encrypted";
+  public static final String X_MS_ENCRYPTION_CONTEXT = "x-ms-encryption-context";
   public static final String X_MS_LEASE_ACTION = "x-ms-lease-action";
   public static final String X_MS_LEASE_DURATION = "x-ms-lease-duration";
   public static final String X_MS_LEASE_ID = "x-ms-lease-id";
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/services/ListResultEntrySchema.java

@@ -77,6 +77,18 @@ public class ListResultEntrySchema {
   @JsonProperty(value = "permissions")
   private String permissions;
 
+  /**
+   * The encryption context property
+   */
+  @JsonProperty(value = "EncryptionContext")
+  private String xMsEncryptionContext;
+
+  /**
+   * The customer-provided encryption-256 value
+   * */
+  @JsonProperty(value = "CustomerProvidedKeySha256")
+  private String customerProvidedKeySha256;
+
   /**
    * Get the name value.
    *
@@ -238,4 +250,19 @@ public ListResultEntrySchema withPermissions(final String permissions) {
     return this;
   }
 
+  /**
+   * Get the x-ms-encryption-context value.
+   * @return the x-ms-encryption-context value.
+   * */
+  public String getXMsEncryptionContext() {
+    return xMsEncryptionContext;
+  }
+
+  /**
+   * Get the customer-provided sha-256 key
+   * @return the x-ms-encryption-key-sha256 value used by client.
+   * */
+  public String getCustomerProvidedKeySha256() {
+    return customerProvidedKeySha256;
+  }
 }
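The two new fields bind by the exact JSON names in the annotations above. A hedged round-trip sketch (sample payload invented; exception handling omitted):

    ObjectMapper mapper = new ObjectMapper();
    ListResultEntrySchema entry = mapper.readValue(
        "{\"name\":\"dir/file.txt\","
            + "\"EncryptionContext\":\"ctx-token\","
            + "\"CustomerProvidedKeySha256\":\"base64-sha\"}",
        ListResultEntrySchema.class);
    entry.getXMsEncryptionContext();      // "ctx-token"
    entry.getCustomerProvidedKeySha256(); // "base64-sha"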
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/extensions/EncryptionContextProvider.java (new file)

@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs.extensions;
+
+import javax.security.auth.Destroyable;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.azurebfs.security.ABFSKey;
+
+/**
+ * This interface has two roles:<br>
+ * <ul>
+ * <li>
+ * To create new encryptionContext from a given path: To be used in case of
+ * create file as there is no encryptionContext in remote server to refer to
+ * for encryptionKey creation.
+ * </li>
+ * <li>To create encryptionKey using encryptionContext.</li>
+ * </ul>
+ */
+public interface EncryptionContextProvider extends Destroyable {
+  /**
+   * Initialize instance.
+   *
+   * @param configuration rawConfig instance
+   * @param accountName Account Name (with domain)
+   * @param fileSystem container name
+   * @throws IOException error in initialization
+   */
+  void initialize(Configuration configuration, String accountName, String fileSystem) throws IOException;
+
+  /**
+   * Fetch encryption context for a given path.
+   *
+   * @param path file path from filesystem root
+   * @return encryptionContext key
+   * @throws IOException error in fetching encryption context
+   */
+  ABFSKey getEncryptionContext(String path) throws IOException;
+
+  /**
+   * Fetch encryption key in-exchange for encryption context.
+   *
+   * @param path file path from filesystem root
+   * @param encryptionContext encryptionContext fetched from server
+   * @return Encryption key
+   * @throws IOException error in fetching encryption key
+   */
+  ABFSKey getEncryptionKey(String path, ABFSKey encryptionContext) throws IOException;
+}
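A toy implementation, purely to show the contract's shape (entirely illustrative: it derives the key as SHA-256 of the context, which is not a secure KMS design; the class name is hypothetical):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.azurebfs.security.ABFSKey;

    public class ToyEncryptionContextProvider implements EncryptionContextProvider {
      @Override
      public void initialize(Configuration configuration, String accountName,
          String fileSystem) throws IOException {
        // no-op for this toy example; a real provider would bootstrap its KMS client
      }

      @Override
      public ABFSKey getEncryptionContext(String path) throws IOException {
        // mint a fresh context for a file being created
        byte[] context = UUID.randomUUID().toString()
            .getBytes(StandardCharsets.UTF_8);
        return new ABFSKey(context);
      }

      @Override
      public ABFSKey getEncryptionKey(String path, ABFSKey encryptionContext)
          throws IOException {
        try {
          MessageDigest digest = MessageDigest.getInstance("SHA-256");
          return new ABFSKey(digest.digest(encryptionContext.getEncoded()));
        } catch (NoSuchAlgorithmException e) {
          throw new IOException(e);
        }
      }
    }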
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/security/ABFSKey.java (new file)

@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs.security;
+
+import javax.crypto.SecretKey;
+import java.util.Arrays;
+
+/**
+ * Implementation of SecretKey that would be used by EncryptionAdapter object,
+ * implementations of encryptionContextProvider to maintain the byteArrays of
+ * encryptionContext and encryptionKey.
+ */
+public final class ABFSKey implements SecretKey {
+  private byte[] bytes;
+
+  public ABFSKey(byte[] bytes) {
+    if (bytes != null) {
+      this.bytes = bytes.clone();
+    }
+  }
+
+  @Override
+  public String getAlgorithm() {
+    return null;
+  }
+
+  @Override
+  public String getFormat() {
+    return null;
+  }
+
+  /**
+   * This method to be called by implementations of EncryptionContextProvider interface.
+   * Method returns clone of the original bytes array to prevent findbugs flags.
+   */
+  @Override
+  public byte[] getEncoded() {
+    if (bytes == null) {
+      return null;
+    }
+    return bytes.clone();
+  }
+
+  @Override
+  public void destroy() {
+    Arrays.fill(bytes, (byte) 0);
+  }
+}
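A brief usage sketch of the defensive-copy behavior above (values illustrative):

    ABFSKey key = new ABFSKey("ctx".getBytes(StandardCharsets.UTF_8));
    byte[] copy = key.getEncoded();
    copy[0] = 0;       // only the returned copy changes, not the key's bytes
    key.destroy();     // the internal array is overwritten with zeros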
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/security/ContextEncryptionAdapter.java (new file)

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs.security;
+
+/**
+ * Provides APIs to get encryptionKey from encryptionContext for a given path.
+ */
+public abstract class ContextEncryptionAdapter {
+
+  /**
+   * @return computed encryptionKey from server provided encryptionContext
+   */
+  public abstract String getEncodedKey();
+
+  /**
+   * @return computed encryptionKeySHA from server provided encryptionContext
+   */
+  public abstract String getEncodedKeySHA();
+
+  /**
+   * @return encryptionContext to be supplied in createPath API
+   */
+  public abstract String getEncodedContext();
+
+  /**
+   * Destroys all the encapsulated fields which are used for creating keys.
+   */
+  public abstract void destroy();
+}
@@ -0,0 +1,121 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.azurebfs.security;

import java.io.IOException;
import java.util.Arrays;
import java.util.Base64;
import java.util.Objects;

import org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider;

import static org.apache.hadoop.fs.azurebfs.security.EncodingHelper.getBase64EncodedString;

/**
 * Manages the encryptionContext and encryptionKey that need to be added to
 * the request headers sent to the server when Customer-Encryption-Context is
 * enabled in the configuration.
 * <br>
 * For file creation, the object helps create the encryptionContext through
 * the implementation of EncryptionContextProvider.
 * <br>
 * For all operations, the object helps convert the encryptionContext to an
 * encryptionKey through the same implementation.
 */
public class ContextProviderEncryptionAdapter extends ContextEncryptionAdapter {
  private final String path;
  private final ABFSKey encryptionContext;
  private ABFSKey encryptionKey;
  private final EncryptionContextProvider provider;

  /**
   * Called when the encryptionContext of the file is already known. The server
   * sends the encryptionContext as a Base64-encoded byte array; the constructor
   * decodes it, and the resulting bytes are used by the implementation of
   * EncryptionContextProvider to compute the byte array of the encryptionKey.
   * @param provider developer's implementation of {@link EncryptionContextProvider}
   * @param path path for which encryptionContext and encryptionKeys are to be
   * stored in the object
   * @param encryptionContext encryptionContext for the path stored in the backend
   * @throws IOException rethrows the exception it receives from the
   * {@link ContextProviderEncryptionAdapter#computeKeys()} method call.
   */
  public ContextProviderEncryptionAdapter(EncryptionContextProvider provider, String path,
      byte[] encryptionContext) throws IOException {
    this.provider = provider;
    this.path = path;
    Objects.requireNonNull(encryptionContext,
        "Encryption context should not be null.");
    this.encryptionContext = new ABFSKey(Base64.getDecoder().decode(encryptionContext));
    Arrays.fill(encryptionContext, (byte) 0);
    computeKeys();
  }

  /**
   * Called in the createPath case. Since the path does not yet exist on the
   * server, there is no encryptionContext for it; the implementation of
   * EncryptionContextProvider is used to create an encryptionContext from
   * the path.
   * @param provider developer's implementation of {@link EncryptionContextProvider}
   * @param path file path for which encryptionContext and encryptionKeys are
   * to be created and stored
   * @throws IOException rethrows the exception it receives from the method call
   * to the {@link EncryptionContextProvider} object.
   */
  public ContextProviderEncryptionAdapter(EncryptionContextProvider provider, String path)
      throws IOException {
    this.provider = provider;
    this.path = path;
    encryptionContext = provider.getEncryptionContext(path);
    Objects.requireNonNull(encryptionContext,
        "Encryption context should not be null.");
    computeKeys();
  }

  private void computeKeys() throws IOException {
    encryptionKey = provider.getEncryptionKey(path, encryptionContext);
    Objects.requireNonNull(encryptionKey, "Encryption key should not be null.");
  }

  @Override
  public String getEncodedKey() {
    return getBase64EncodedString(encryptionKey.getEncoded());
  }

  @Override
  public String getEncodedKeySHA() {
    return getBase64EncodedString(EncodingHelper.getSHA256Hash(encryptionKey.getEncoded()));
  }

  @Override
  public String getEncodedContext() {
    return getBase64EncodedString(encryptionContext.getEncoded());
  }

  @Override
  public void destroy() {
    if (encryptionContext != null) {
      encryptionContext.destroy();
    }
    if (encryptionKey != null) {
      encryptionKey.destroy();
    }
  }
}
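For orientation, here is a deliberately simplified sketch of a provider that could sit behind this adapter. Everything below is hypothetical: the class name, the hard-coded master secret, and the SHA-256 derivation stand in for what would normally be a call to a key-management service, and the method signatures are inferred from the adapter's call sites in this patch (the real EncryptionContextProvider interface may declare more, since AbfsConfiguration instantiates providers reflectively from configuration).

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;

import org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider;
import org.apache.hadoop.fs.azurebfs.security.ABFSKey;
import org.apache.hadoop.fs.azurebfs.security.EncodingHelper;

public class DemoEncryptionContextProvider implements EncryptionContextProvider {
  // placeholder secret; a real provider would never hard-code this
  private final byte[] masterSecret =
      "demo-master-secret".getBytes(StandardCharsets.UTF_8);

  @Override
  public ABFSKey getEncryptionContext(String path) throws IOException {
    byte[] context = new byte[16];          // minted once per new file,
    new SecureRandom().nextBytes(context);  // persisted server-side with it
    return new ABFSKey(context);
  }

  @Override
  public ABFSKey getEncryptionKey(String path, ABFSKey encryptionContext)
      throws IOException {
    // derive key = SHA-256(secret || context): deterministic, so a later read
    // of the file recovers the same key from the stored context
    byte[] context = encryptionContext.getEncoded();
    byte[] material = new byte[masterSecret.length + context.length];
    System.arraycopy(masterSecret, 0, material, 0, masterSecret.length);
    System.arraycopy(context, 0, material, masterSecret.length, context.length);
    return new ABFSKey(EncodingHelper.getSHA256Hash(material)); // 32 bytes
  }
}
```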
@@ -0,0 +1,50 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.azurebfs.security;

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

/**
 * Utility class for hashing byte arrays and converting bytes to Base64 strings.
 */
public final class EncodingHelper {

  private EncodingHelper() {
  }

  public static byte[] getSHA256Hash(byte[] key) {
    try {
      final MessageDigest digester = MessageDigest.getInstance("SHA-256");
      return digester.digest(key);
    } catch (NoSuchAlgorithmException noSuchAlgorithmException) {
      /* This should never happen: SHA-256 is a standard algorithm that every
      JVM is required to provide, and the algorithm name is a constant for all
      method calls. Rethrow as unchecked so a broken JVM fails fast. */
      throw new RuntimeException("SHA-256 algorithm not found in MessageDigest",
          noSuchAlgorithmException);
    }
  }

  public static String getBase64EncodedString(byte[] bytes) {
    return Base64.getEncoder().encodeToString(bytes);
  }
}
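To make the encoding convention concrete: for the global customer-provided key, the two header values are the Base64 of the raw key bytes and the Base64 of their SHA-256 digest. A small sketch, with an invented key (real deployments take the key from configuration):

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.azurebfs.security.EncodingHelper;

public class KeyEncodingDemo {
  public static void main(String[] args) {
    // hypothetical 256-bit key; illustration only
    byte[] key = "0123456789abcdef0123456789abcdef"
        .getBytes(StandardCharsets.UTF_8);

    String encodedKey = EncodingHelper.getBase64EncodedString(key);
    String encodedKeySHA = EncodingHelper.getBase64EncodedString(
        EncodingHelper.getSHA256Hash(key));

    System.out.println("encryption key header value: " + encodedKey);
    System.out.println("key SHA-256 header value:    " + encodedKeySHA);
  }
}
```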
@@ -0,0 +1,52 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.azurebfs.security;

public final class NoContextEncryptionAdapter extends ContextEncryptionAdapter {

  private static final NoContextEncryptionAdapter
      INSTANCE = new NoContextEncryptionAdapter();

  private NoContextEncryptionAdapter() {
  }

  public static NoContextEncryptionAdapter getInstance() {
    return INSTANCE;
  }

  @Override
  public String getEncodedKey() {
    return null;
  }

  @Override
  public String getEncodedKeySHA() {
    return null;
  }

  @Override
  public String getEncodedContext() {
    return null;
  }

  @Override
  public void destroy() {
  }
}
@@ -25,11 +25,7 @@
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLEncoder;
-import java.nio.charset.StandardCharsets;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
-import java.util.Base64;
 import java.util.List;
 import java.util.Locale;
 import java.util.UUID;
@@ -38,8 +34,13 @@
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.fs.azurebfs.utils.NamespaceUtil;
 import org.apache.hadoop.fs.store.LogExactlyOnce;
-import org.apache.hadoop.util.Preconditions;
+import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystemStore.Permissions;
+import org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider;
+import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
+import org.apache.hadoop.fs.azurebfs.utils.EncryptionType;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
@@ -48,6 +49,7 @@
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningScheduledExecutorService;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.util.Preconditions;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -65,7 +67,6 @@
 import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters;
 import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.ssl.DelegatingSSLSocketFactory;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 
@@ -91,23 +92,27 @@ public class AbfsClient implements Closeable {
 
   private final URL baseUrl;
   private final SharedKeyCredentials sharedKeyCredentials;
-  private final String xMsVersion = "2019-12-12";
+  private String xMsVersion = DECEMBER_2019_API_VERSION;
   private final ExponentialRetryPolicy retryPolicy;
   private final String filesystem;
   private final AbfsConfiguration abfsConfiguration;
   private final String userAgent;
   private final AbfsPerfTracker abfsPerfTracker;
-  private final String clientProvidedEncryptionKey;
-  private final String clientProvidedEncryptionKeySHA;
+  private String clientProvidedEncryptionKey = null;
+  private String clientProvidedEncryptionKeySHA = null;
 
   private final String accountName;
   private final AuthType authType;
   private AccessTokenProvider tokenProvider;
   private SASTokenProvider sasTokenProvider;
   private final AbfsCounters abfsCounters;
+  private EncryptionContextProvider encryptionContextProvider = null;
+  private EncryptionType encryptionType = EncryptionType.NONE;
   private final AbfsThrottlingIntercept intercept;
 
   private final ListeningScheduledExecutorService executorService;
+  private Boolean isNamespaceEnabled;
+
   private boolean renameResilience;
 
@@ -116,10 +121,11 @@ public class AbfsClient implements Closeable {
    */
   private static final LogExactlyOnce ABFS_METADATA_INCOMPLETE_RENAME_FAILURE = new LogExactlyOnce(LOG);
 
-  private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials,
+  private AbfsClient(final URL baseUrl,
+      final SharedKeyCredentials sharedKeyCredentials,
       final AbfsConfiguration abfsConfiguration,
-      final AbfsClientContext abfsClientContext)
-      throws IOException {
+      final EncryptionContextProvider encryptionContextProvider,
+      final AbfsClientContext abfsClientContext) throws IOException {
     this.baseUrl = baseUrl;
     this.sharedKeyCredentials = sharedKeyCredentials;
     String baseUrlString = baseUrl.toString();
@@ -131,15 +137,16 @@ private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials,
     this.intercept = AbfsThrottlingInterceptFactory.getInstance(accountName, abfsConfiguration);
     this.renameResilience = abfsConfiguration.getRenameResilience();
 
-    String encryptionKey = this.abfsConfiguration
-        .getClientProvidedEncryptionKey();
-    if (encryptionKey != null) {
-      this.clientProvidedEncryptionKey = getBase64EncodedString(encryptionKey);
-      this.clientProvidedEncryptionKeySHA = getBase64EncodedString(
-          getSHA256Hash(encryptionKey));
-    } else {
-      this.clientProvidedEncryptionKey = null;
-      this.clientProvidedEncryptionKeySHA = null;
+    if (encryptionContextProvider != null) {
+      this.encryptionContextProvider = encryptionContextProvider;
+      xMsVersion = APRIL_2021_API_VERSION; // will be default once server change deployed
+      encryptionType = EncryptionType.ENCRYPTION_CONTEXT;
+    } else if (abfsConfiguration.getEncodedClientProvidedEncryptionKey() != null) {
+      clientProvidedEncryptionKey =
+          abfsConfiguration.getEncodedClientProvidedEncryptionKey();
+      this.clientProvidedEncryptionKeySHA =
+          abfsConfiguration.getEncodedClientProvidedEncryptionKeySHA();
+      encryptionType = EncryptionType.GLOBAL_KEY;
     }
 
     String sslProviderName = null;
@@ -170,42 +177,30 @@ private AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials,
   public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials,
       final AbfsConfiguration abfsConfiguration,
       final AccessTokenProvider tokenProvider,
+      final EncryptionContextProvider encryptionContextProvider,
       final AbfsClientContext abfsClientContext)
       throws IOException {
-    this(baseUrl, sharedKeyCredentials, abfsConfiguration, abfsClientContext);
+    this(baseUrl, sharedKeyCredentials, abfsConfiguration,
+        encryptionContextProvider, abfsClientContext);
     this.tokenProvider = tokenProvider;
   }
 
   public AbfsClient(final URL baseUrl, final SharedKeyCredentials sharedKeyCredentials,
       final AbfsConfiguration abfsConfiguration,
       final SASTokenProvider sasTokenProvider,
+      final EncryptionContextProvider encryptionContextProvider,
       final AbfsClientContext abfsClientContext)
       throws IOException {
-    this(baseUrl, sharedKeyCredentials, abfsConfiguration, abfsClientContext);
+    this(baseUrl, sharedKeyCredentials, abfsConfiguration,
+        encryptionContextProvider, abfsClientContext);
     this.sasTokenProvider = sasTokenProvider;
   }
 
-  private byte[] getSHA256Hash(String key) throws IOException {
-    try {
-      final MessageDigest digester = MessageDigest.getInstance("SHA-256");
-      return digester.digest(key.getBytes(StandardCharsets.UTF_8));
-    } catch (NoSuchAlgorithmException e) {
-      throw new IOException(e);
-    }
-  }
-
-  private String getBase64EncodedString(String key) {
-    return getBase64EncodedString(key.getBytes(StandardCharsets.UTF_8));
-  }
-
-  private String getBase64EncodedString(byte[] bytes) {
-    return Base64.getEncoder().encodeToString(bytes);
-  }
-
   @Override
   public void close() throws IOException {
     if (tokenProvider instanceof Closeable) {
-      IOUtils.cleanupWithLogger(LOG, (Closeable) tokenProvider);
+      IOUtils.cleanupWithLogger(LOG,
+          (Closeable) tokenProvider);
     }
     HadoopExecutors.shutdown(executorService, LOG, 0, TimeUnit.SECONDS);
   }
@@ -226,6 +221,14 @@ SharedKeyCredentials getSharedKeyCredentials() {
     return sharedKeyCredentials;
   }
 
+  public void setEncryptionType(EncryptionType encryptionType) {
+    this.encryptionType = encryptionType;
+  }
+
+  public EncryptionType getEncryptionType() {
+    return encryptionType;
+  }
+
   AbfsThrottlingIntercept getIntercept() {
     return intercept;
   }
@@ -242,16 +245,56 @@ List<AbfsHttpHeader> createDefaultHeaders() {
     return requestHeaders;
   }
 
-  private void addCustomerProvidedKeyHeaders(
-      final List<AbfsHttpHeader> requestHeaders) {
-    if (clientProvidedEncryptionKey != null) {
-      requestHeaders.add(
-          new AbfsHttpHeader(X_MS_ENCRYPTION_KEY, clientProvidedEncryptionKey));
-      requestHeaders.add(new AbfsHttpHeader(X_MS_ENCRYPTION_KEY_SHA256,
-          clientProvidedEncryptionKeySHA));
-      requestHeaders.add(new AbfsHttpHeader(X_MS_ENCRYPTION_ALGORITHM,
-          SERVER_SIDE_ENCRYPTION_ALGORITHM));
+  /**
+   * This method adds the following headers:
+   * <ol>
+   * <li>X_MS_ENCRYPTION_KEY</li>
+   * <li>X_MS_ENCRYPTION_KEY_SHA256</li>
+   * <li>X_MS_ENCRYPTION_ALGORITHM</li>
+   * </ol>
+   * The above headers have to be added for the following operations:
+   * <ol>
+   * <li>createPath</li>
+   * <li>append</li>
+   * <li>flush</li>
+   * <li>setPathProperties</li>
+   * <li>getPathStatus for fs.setXAttr and fs.getXAttr</li>
+   * <li>read</li>
+   * </ol>
+   */
+  private void addEncryptionKeyRequestHeaders(String path,
+      List<AbfsHttpHeader> requestHeaders, boolean isCreateFileRequest,
+      ContextEncryptionAdapter contextEncryptionAdapter, TracingContext tracingContext)
+      throws AzureBlobFileSystemException {
+    if (!getIsNamespaceEnabled(tracingContext)) {
+      return;
     }
+    String encodedKey, encodedKeySHA256;
+    switch (encryptionType) {
+    case GLOBAL_KEY:
+      encodedKey = clientProvidedEncryptionKey;
+      encodedKeySHA256 = clientProvidedEncryptionKeySHA;
+      break;
+
+    case ENCRYPTION_CONTEXT:
+      if (isCreateFileRequest) {
+        // get a new context for the create-file request
+        requestHeaders.add(new AbfsHttpHeader(X_MS_ENCRYPTION_CONTEXT,
+            contextEncryptionAdapter.getEncodedContext()));
+      }
+      // else use the cached encryption keys from the input/output streams
+      encodedKey = contextEncryptionAdapter.getEncodedKey();
+      encodedKeySHA256 = contextEncryptionAdapter.getEncodedKeySHA();
+      break;
+
+    default: return; // no client-provided encryption keys
+    }
+
+    requestHeaders.add(new AbfsHttpHeader(X_MS_ENCRYPTION_KEY, encodedKey));
+    requestHeaders.add(
+        new AbfsHttpHeader(X_MS_ENCRYPTION_KEY_SHA256, encodedKeySHA256));
+    requestHeaders.add(new AbfsHttpHeader(X_MS_ENCRYPTION_ALGORITHM,
+        SERVER_SIDE_ENCRYPTION_ALGORITHM));
   }
 
   AbfsUriQueryBuilder createDefaultUriQueryBuilder() {
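In effect, the headers a data-path request carries now depend on the client's encryption type. The sketch below restates the switch above as a self-contained header-name builder; the literal x-ms-encryption-* spellings are an assumption for readability (the production code uses the X_MS_* constants from HttpHeaderConfigurations):

```java
import java.util.ArrayList;
import java.util.List;

public class EncryptionHeaderDemo {
  enum EncryptionType { NONE, GLOBAL_KEY, ENCRYPTION_CONTEXT }

  static List<String> headerNames(EncryptionType type, boolean isCreateFileRequest) {
    List<String> names = new ArrayList<>();
    switch (type) {
      case GLOBAL_KEY:          // key + SHA come straight from configuration
        break;
      case ENCRYPTION_CONTEXT:  // key is derived per file by the provider
        if (isCreateFileRequest) {
          names.add("x-ms-encryption-context"); // fresh context, create only
        }
        break;
      default:
        return names;           // NONE: no encryption headers at all
    }
    names.add("x-ms-encryption-key");
    names.add("x-ms-encryption-key-sha256");
    names.add("x-ms-encryption-algorithm");
    return names;
  }

  public static void main(String[] args) {
    System.out.println(headerNames(EncryptionType.ENCRYPTION_CONTEXT, true));
    System.out.println(headerNames(EncryptionType.GLOBAL_KEY, false));
    System.out.println(headerNames(EncryptionType.NONE, false));
  }
}
```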
@@ -301,7 +344,7 @@ public AbfsRestOperation setFilesystemProperties(final String properties,
 
   public AbfsRestOperation listPath(final String relativePath, final boolean recursive, final int listMaxResults,
       final String continuation, TracingContext tracingContext)
-      throws AzureBlobFileSystemException {
+      throws IOException {
     final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
 
     final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
@@ -355,24 +398,60 @@ public AbfsRestOperation deleteFilesystem(TracingContext tracingContext) throws AzureBlobFileSystemException {
     return op;
   }
 
-  public AbfsRestOperation createPath(final String path, final boolean isFile, final boolean overwrite,
-      final String permission, final String umask,
-      final boolean isAppendBlob, final String eTag,
-      TracingContext tracingContext) throws AzureBlobFileSystemException {
+  /**
+   * Method for calling the createPath API to the backend. It can be called to:
+   * <ol>
+   * <li>create a new file</li>
+   * <li>overwrite a file</li>
+   * <li>create a new directory</li>
+   * </ol>
+   *
+   * @param path: path of the file / directory to be created / overwritten.
+   * @param isFile: defines if a file or a directory has to be created / overwritten.
+   * @param overwrite: defines if the file / directory is to be overwritten.
+   * @param permissions: contains permission and umask
+   * @param isAppendBlob: defines if the directory in the path is enabled for appendBlob
+   * @param eTag: required in case of overwrite of a file / directory. The path is
+   * overwritten only if the provided eTag is equal to the one present in the backend
+   * for the path.
+   * @param contextEncryptionAdapter: object that contains the encryptionContext and
+   * encryptionKey created from the developer-provided implementation of
+   * {@link org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider}
+   * @param tracingContext: object of {@link org.apache.hadoop.fs.azurebfs.utils.TracingContext}
+   * correlating to the current fs.create() request.
+   * @return object of {@link AbfsRestOperation} which contains all the information
+   * about the communication with the server. The information is in
+   * {@link AbfsRestOperation#getResult()}
+   * @throws AzureBlobFileSystemException rethrows the exception it receives from the
+   * {@link AbfsRestOperation#execute(TracingContext)} method call.
+   */
+  public AbfsRestOperation createPath(final String path,
+      final boolean isFile,
+      final boolean overwrite,
+      final Permissions permissions,
+      final boolean isAppendBlob,
+      final String eTag,
+      final ContextEncryptionAdapter contextEncryptionAdapter,
+      final TracingContext tracingContext)
+      throws AzureBlobFileSystemException {
     final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
     if (isFile) {
-      addCustomerProvidedKeyHeaders(requestHeaders);
+      addEncryptionKeyRequestHeaders(path, requestHeaders, true,
+          contextEncryptionAdapter, tracingContext);
     }
     if (!overwrite) {
       requestHeaders.add(new AbfsHttpHeader(IF_NONE_MATCH, AbfsHttpConstants.STAR));
     }
 
-    if (permission != null && !permission.isEmpty()) {
-      requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_PERMISSIONS, permission));
+    if (permissions.hasPermission()) {
+      requestHeaders.add(
+          new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_PERMISSIONS,
+              permissions.getPermission()));
     }
 
-    if (umask != null && !umask.isEmpty()) {
-      requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_UMASK, umask));
+    if (permissions.hasUmask()) {
+      requestHeaders.add(new AbfsHttpHeader(HttpHeaderConfigurations.X_MS_UMASK,
+          permissions.getUmask()));
     }
 
     if (eTag != null && !eTag.isEmpty()) {
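A hedged sketch of what a create-file call site looks like with the new signature. The wiring of `client`, `permissions`, and `tracingContext` lives in AzureBlobFileSystemStore, which is outside this hunk, so the surrounding method here is an assumption:

```java
static AbfsRestOperation createEncryptedFile(AbfsClient client, String path,
    Permissions permissions, TracingContext tracingContext) throws IOException {
  // Mint a fresh encryptionContext (and derived key) for the new file.
  ContextEncryptionAdapter adapter = new ContextProviderEncryptionAdapter(
      client.getEncryptionContextProvider(), path);

  return client.createPath(path,
      true,           // isFile
      false,          // overwrite
      permissions,    // permission + umask holder
      false,          // isAppendBlob
      null,           // eTag: not needed for a fresh create
      adapter,        // supplies the context/key/key-SHA headers
      tracingContext);
  // In the real store code the same adapter is then handed to the output
  // stream (AbfsOutputStreamContext#withEncryptionAdapter) so append/flush
  // reuse the cached key, and it is destroyed when the stream closes.
}
```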
@@ -491,7 +570,6 @@ public AbfsRestOperation breakLease(final String path,
     return op;
   }
 
-
   /**
    * Rename a file or directory.
    * If a source etag is passed in, the operation will attempt to recover
@@ -522,7 +600,7 @@ public AbfsClientRenameResult renamePath(
       String sourceEtag,
       boolean isMetadataIncompleteState,
       boolean isNamespaceEnabled)
-      throws AzureBlobFileSystemException {
+      throws IOException {
     final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
 
     final boolean hasEtag = !isEmpty(sourceEtag);
@@ -534,7 +612,7 @@ public AbfsClientRenameResult renamePath(
       // fetch the source etag to be used later in recovery
       try {
         final AbfsRestOperation srcStatusOp = getPathStatus(source,
-            false, tracingContext);
+            false, tracingContext, null);
         if (srcStatusOp.hasResult()) {
           final AbfsHttpOperation result = srcStatusOp.getResult();
           sourceEtag = extractEtagHeader(result);
@@ -598,7 +676,8 @@ public AbfsClientRenameResult renamePath(
         // Doing a HEAD call resolves the incomplete metadata state and
         // then we can retry the rename operation.
         AbfsRestOperation sourceStatusOp = getPathStatus(source, false,
-            tracingContext);
+            tracingContext, null);
+        isMetadataIncompleteState = true;
         // Extract the sourceEtag, using the status Op, and set it
         // for future rename recovery.
         AbfsHttpOperation sourceStatusResult = sourceStatusOp.getResult();
@@ -688,7 +767,8 @@ public boolean renameIdempotencyCheckOp(
       LOG.info("rename {} to {} failed, checking etag of destination",
           source, destination);
       try {
-        final AbfsRestOperation destStatusOp = getPathStatus(destination, false, tracingContext);
+        final AbfsRestOperation destStatusOp = getPathStatus(destination,
+            false, tracingContext, null);
         final AbfsHttpOperation result = destStatusOp.getResult();
 
         final boolean recovered = result.getStatusCode() == HttpURLConnection.HTTP_OK
@@ -714,10 +794,12 @@ boolean isSourceDestEtagEqual(String sourceEtag, AbfsHttpOperation result) {
   }
 
   public AbfsRestOperation append(final String path, final byte[] buffer,
-      AppendRequestParameters reqParams, final String cachedSasToken, TracingContext tracingContext)
+      AppendRequestParameters reqParams, final String cachedSasToken,
+      ContextEncryptionAdapter contextEncryptionAdapter, TracingContext tracingContext)
       throws AzureBlobFileSystemException {
     final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
-    addCustomerProvidedKeyHeaders(requestHeaders);
+    addEncryptionKeyRequestHeaders(path, requestHeaders, false,
+        contextEncryptionAdapter, tracingContext);
     if (reqParams.isExpectHeaderEnabled()) {
       requestHeaders.add(new AbfsHttpHeader(EXPECT, HUNDRED_CONTINUE));
     }
@@ -782,7 +864,7 @@ public AbfsRestOperation append(final String path, final byte[] buffer,
         reqParams.setExpectHeaderEnabled(false);
         reqParams.setRetryDueToExpect(true);
         return this.append(path, buffer, reqParams, cachedSasToken,
-            tracingContext);
+            contextEncryptionAdapter, tracingContext);
       }
       // If we have no HTTP response, throw the original exception.
       if (!op.hasResult()) {
@@ -825,10 +907,11 @@ private boolean checkUserError(int responseStatusCode) {
   // Hence, we pass/succeed the appendblob append call
   // in case we are doing a retry after checking the length of the file
   public boolean appendSuccessCheckOp(AbfsRestOperation op, final String path,
-      final long length, TracingContext tracingContext) throws AzureBlobFileSystemException {
+      final long length, TracingContext tracingContext)
+      throws AzureBlobFileSystemException {
     if ((op.isARetriedRequest())
         && (op.getResult().getStatusCode() == HttpURLConnection.HTTP_BAD_REQUEST)) {
-      final AbfsRestOperation destStatusOp = getPathStatus(path, false, tracingContext);
+      final AbfsRestOperation destStatusOp = getPathStatus(path, false, tracingContext, null);
       if (destStatusOp.getResult().getStatusCode() == HttpURLConnection.HTTP_OK) {
         String fileLength = destStatusOp.getResult().getResponseHeader(
             HttpHeaderConfigurations.CONTENT_LENGTH);
@@ -844,9 +927,11 @@ public boolean appendSuccessCheckOp(AbfsRestOperation op, final String path,
   public AbfsRestOperation flush(final String path, final long position,
       boolean retainUncommittedData, boolean isClose,
       final String cachedSasToken, final String leaseId,
-      TracingContext tracingContext) throws AzureBlobFileSystemException {
+      ContextEncryptionAdapter contextEncryptionAdapter, TracingContext tracingContext)
+      throws AzureBlobFileSystemException {
     final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
-    addCustomerProvidedKeyHeaders(requestHeaders);
+    addEncryptionKeyRequestHeaders(path, requestHeaders, false,
+        contextEncryptionAdapter, tracingContext);
     // JDK7 does not support PATCH, so to workaround the issue we will use
     // PUT and specify the real method in the X-Http-Method-Override header.
     requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE,
@@ -875,10 +960,11 @@ public AbfsRestOperation flush(final String path, final long position,
   }
 
   public AbfsRestOperation setPathProperties(final String path, final String properties,
-      TracingContext tracingContext)
+      final TracingContext tracingContext, final ContextEncryptionAdapter contextEncryptionAdapter)
       throws AzureBlobFileSystemException {
     final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
-    addCustomerProvidedKeyHeaders(requestHeaders);
+    addEncryptionKeyRequestHeaders(path, requestHeaders, false,
+        contextEncryptionAdapter, tracingContext);
     // JDK7 does not support PATCH, so to workaround the issue we will use
     // PUT and specify the real method in the X-Http-Method-Override header.
     requestHeaders.add(new AbfsHttpHeader(X_HTTP_METHOD_OVERRIDE,
@@ -900,8 +986,10 @@ public AbfsRestOperation setPathProperties(final String path, final String properties,
     return op;
   }
 
-  public AbfsRestOperation getPathStatus(final String path, final boolean includeProperties,
-      TracingContext tracingContext) throws AzureBlobFileSystemException {
+  public AbfsRestOperation getPathStatus(final String path,
+      final boolean includeProperties, final TracingContext tracingContext,
+      final ContextEncryptionAdapter contextEncryptionAdapter)
+      throws AzureBlobFileSystemException {
     final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
 
     final AbfsUriQueryBuilder abfsUriQueryBuilder = createDefaultUriQueryBuilder();
@@ -913,7 +1001,9 @@ public AbfsRestOperation getPathStatus(final String path, final boolean includeProperties,
       abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_ACTION, AbfsHttpConstants.GET_STATUS);
       operation = SASTokenProvider.GET_STATUS_OPERATION;
     } else {
-      addCustomerProvidedKeyHeaders(requestHeaders);
+      addEncryptionKeyRequestHeaders(path, requestHeaders, false,
+          contextEncryptionAdapter,
+          tracingContext);
     }
     abfsUriQueryBuilder.addQuery(HttpQueryParams.QUERY_PARAM_UPN, String.valueOf(abfsConfiguration.isUpnUsed()));
     appendSASTokenToQuery(path, operation, abfsUriQueryBuilder);
@@ -928,11 +1018,18 @@ public AbfsRestOperation getPathStatus(final String path, final boolean includeProperties,
     return op;
   }
 
-  public AbfsRestOperation read(final String path, final long position, final byte[] buffer, final int bufferOffset,
-      final int bufferLength, final String eTag, String cachedSasToken,
+  public AbfsRestOperation read(final String path,
+      final long position,
+      final byte[] buffer,
+      final int bufferOffset,
+      final int bufferLength,
+      final String eTag,
+      String cachedSasToken,
+      ContextEncryptionAdapter contextEncryptionAdapter,
       TracingContext tracingContext) throws AzureBlobFileSystemException {
     final List<AbfsHttpHeader> requestHeaders = createDefaultHeaders();
-    addCustomerProvidedKeyHeaders(requestHeaders);
+    addEncryptionKeyRequestHeaders(path, requestHeaders, false,
+        contextEncryptionAdapter, tracingContext);
     requestHeaders.add(new AbfsHttpHeader(RANGE,
         String.format("bytes=%d-%d", position, position + bufferLength - 1)));
     requestHeaders.add(new AbfsHttpHeader(IF_MATCH, eTag));
@@ -1295,10 +1392,23 @@ public synchronized String getAccessToken() throws IOException {
     }
   }
 
+  private synchronized Boolean getIsNamespaceEnabled(TracingContext tracingContext)
+      throws AzureBlobFileSystemException {
+    if (isNamespaceEnabled == null) {
+      setIsNamespaceEnabled(NamespaceUtil.isNamespaceEnabled(this,
+          tracingContext));
+    }
+    return isNamespaceEnabled;
+  }
+
   public AuthType getAuthType() {
     return authType;
   }
 
+  public EncryptionContextProvider getEncryptionContextProvider() {
+    return encryptionContextProvider;
+  }
+
   @VisibleForTesting
   String initializeUserAgent(final AbfsConfiguration abfsConfiguration,
       final String sslProviderName) {
@@ -1373,6 +1483,16 @@ public SASTokenProvider getSasTokenProvider() {
     return this.sasTokenProvider;
   }
 
+  @VisibleForTesting
+  void setEncryptionContextProvider(EncryptionContextProvider provider) {
+    encryptionContextProvider = provider;
+  }
+
+  @VisibleForTesting
+  void setIsNamespaceEnabled(final Boolean isNamespaceEnabled) {
+    this.isNamespaceEnabled = isNamespaceEnabled;
+  }
+
   /**
    * Getter for abfsCounters from AbfsClient.
    * @return AbfsCounters instance.
@@ -1381,6 +1501,10 @@ protected AbfsCounters getAbfsCounters() {
     return abfsCounters;
   }
 
+  public String getxMsVersion() {
+    return xMsVersion;
+  }
+
   /**
    * Getter for abfsConfiguration from AbfsClient.
    * @return AbfsConfiguration instance
@@ -40,6 +40,7 @@
 import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
+import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
 import org.apache.hadoop.fs.azurebfs.utils.CachedSASToken;
 import org.apache.hadoop.fs.azurebfs.utils.Listener;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
@@ -99,6 +100,7 @@ public class AbfsInputStream extends FSInputStream implements CanUnbuffer,
   // of valid bytes in buffer)
   private boolean closed = false;
   private TracingContext tracingContext;
+  private final ContextEncryptionAdapter contextEncryptionAdapter;
 
   // Optimisations modify the pointer fields.
   // For better resilience the following fields are used to save the
@@ -157,6 +159,7 @@ public AbfsInputStream(
     this.context = abfsInputStreamContext;
     readAheadBlockSize = abfsInputStreamContext.getReadAheadBlockSize();
     this.fsBackRef = abfsInputStreamContext.getFsBackRef();
+    contextEncryptionAdapter = abfsInputStreamContext.getEncryptionAdapter();
 
     // Propagate the config values to ReadBufferManager so that the first instance
     // to initialize can set the readAheadBlockSize
@@ -548,7 +551,8 @@ int readRemote(long position, byte[] b, int offset, int length, TracingContext tracingContext)
       }
       LOG.trace("Trigger client.read for path={} position={} offset={} length={}", path, position, offset, length);
       op = client.read(path, position, b, offset, length,
-          tolerateOobAppends ? "*" : eTag, cachedSasToken.get(), tracingContext);
+          tolerateOobAppends ? "*" : eTag, cachedSasToken.get(),
+          contextEncryptionAdapter, tracingContext);
       cachedSasToken.update(op.getSasToken());
       LOG.debug("issuing HTTP GET request params position = {} b.length = {} "
           + "offset = {} length = {}", position, b.length, offset, length);
@@ -701,8 +705,11 @@ public boolean seekToNewSource(long l) throws IOException {
   public synchronized void close() throws IOException {
     LOG.debug("Closing {}", this);
     closed = true;
-    buffer = null; // de-reference the buffer so it can be GC'ed sooner
     ReadBufferManager.getBufferManager().purgeBuffersForStream(this);
+    buffer = null; // de-reference the buffer so it can be GC'ed sooner
+    if (contextEncryptionAdapter != null) {
+      contextEncryptionAdapter.destroy();
+    }
   }
 
   /**
@@ -24,6 +24,8 @@
 import org.apache.hadoop.fs.impl.BackReference;
 import org.apache.hadoop.util.Preconditions;
 
+import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
+
 /**
  * Class to hold extra input stream configs.
  */
@@ -56,6 +58,8 @@ public class AbfsInputStreamContext extends AbfsStreamContext {
   /** A BackReference to the FS instance that created this OutputStream. */
   private BackReference fsBackRef;
 
+  private ContextEncryptionAdapter contextEncryptionAdapter = null;
+
   public AbfsInputStreamContext(final long sasTokenRenewPeriodForStreamsInSeconds) {
     super(sasTokenRenewPeriodForStreamsInSeconds);
   }
@@ -133,6 +137,12 @@ public AbfsInputStreamContext withAbfsBackRef(
     return this;
   }
 
+  public AbfsInputStreamContext withEncryptionAdapter(
+      ContextEncryptionAdapter contextEncryptionAdapter) {
+    this.contextEncryptionAdapter = contextEncryptionAdapter;
+    return this;
+  }
+
   public AbfsInputStreamContext build() {
     if (readBufferSize > readAheadBlockSize) {
       LOG.debug(
@@ -195,4 +205,8 @@ public boolean isBufferedPreadDisabled() {
   public BackReference getFsBackRef() {
     return fsBackRef;
   }
+
+  public ContextEncryptionAdapter getEncryptionAdapter() {
+    return contextEncryptionAdapter;
+  }
 }
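On the read path, the store is expected to turn the file's stored encryption context into an adapter and thread it through this builder; a sketch of that wiring, with the context lookup itself assumed:

```java
// `encryptionContextBytes` would come from the encryption-context property
// returned for the path (e.g. via getPathStatus); the Base64 decoding
// happens inside the adapter constructor.
ContextEncryptionAdapter adapter = new ContextProviderEncryptionAdapter(
    client.getEncryptionContextProvider(), path, encryptionContextBytes);

AbfsInputStreamContext inputStreamContext =
    new AbfsInputStreamContext(sasTokenRenewPeriodInSeconds)
        .withEncryptionAdapter(adapter)  // cached key reused on every read
        .build();
// AbfsInputStream#close() later calls adapter.destroy() to wipe the key bytes.
```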
@ -26,6 +26,7 @@
|
|||||||
import java.util.concurrent.Future;
|
import java.util.concurrent.Future;
|
||||||
import java.util.UUID;
|
import java.util.UUID;
|
||||||
|
|
||||||
|
import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
|
||||||
import org.apache.hadoop.classification.VisibleForTesting;
|
import org.apache.hadoop.classification.VisibleForTesting;
|
||||||
import org.apache.hadoop.fs.impl.BackReference;
|
import org.apache.hadoop.fs.impl.BackReference;
|
||||||
import org.apache.hadoop.util.Preconditions;
|
import org.apache.hadoop.util.Preconditions;
|
||||||
@ -95,6 +96,7 @@ public class AbfsOutputStream extends OutputStream implements Syncable,
|
|||||||
private final int maxRequestsThatCanBeQueued;
|
private final int maxRequestsThatCanBeQueued;
|
||||||
|
|
||||||
private ConcurrentLinkedDeque<WriteOperation> writeOperations;
|
private ConcurrentLinkedDeque<WriteOperation> writeOperations;
|
||||||
|
private final ContextEncryptionAdapter contextEncryptionAdapter;
|
||||||
|
|
||||||
// SAS tokens can be re-used until they expire
|
// SAS tokens can be re-used until they expire
|
||||||
private CachedSASToken cachedSasToken;
|
private CachedSASToken cachedSasToken;
|
||||||
@ -152,6 +154,7 @@ public AbfsOutputStream(AbfsOutputStreamContext abfsOutputStreamContext)
|
|||||||
this.writeOperations = new ConcurrentLinkedDeque<>();
|
this.writeOperations = new ConcurrentLinkedDeque<>();
|
||||||
this.outputStreamStatistics = abfsOutputStreamContext.getStreamStatistics();
|
this.outputStreamStatistics = abfsOutputStreamContext.getStreamStatistics();
|
||||||
this.fsBackRef = abfsOutputStreamContext.getFsBackRef();
|
this.fsBackRef = abfsOutputStreamContext.getFsBackRef();
|
||||||
|
this.contextEncryptionAdapter = abfsOutputStreamContext.getEncryptionAdapter();
|
||||||
|
|
||||||
if (this.isAppendBlob) {
|
if (this.isAppendBlob) {
|
||||||
this.maxConcurrentRequestCount = 1;
|
this.maxConcurrentRequestCount = 1;
|
||||||
@ -335,9 +338,9 @@ private void uploadBlockAsync(DataBlocks.DataBlock blockToUpload,
|
|||||||
*/
|
*/
|
||||||
AppendRequestParameters reqParams = new AppendRequestParameters(
|
AppendRequestParameters reqParams = new AppendRequestParameters(
|
||||||
offset, 0, bytesLength, mode, false, leaseId, isExpectHeaderEnabled);
|
offset, 0, bytesLength, mode, false, leaseId, isExpectHeaderEnabled);
|
||||||
AbfsRestOperation op =
|
AbfsRestOperation op = client.append(path,
|
||||||
client.append(path, blockUploadData.toByteArray(), reqParams,
|
blockUploadData.toByteArray(), reqParams, cachedSasToken.get(),
|
||||||
cachedSasToken.get(), new TracingContext(tracingContext));
|
contextEncryptionAdapter, new TracingContext(tracingContext));
|
||||||
cachedSasToken.update(op.getSasToken());
|
cachedSasToken.update(op.getSasToken());
|
||||||
perfInfo.registerResult(op.getResult());
|
perfInfo.registerResult(op.getResult());
|
||||||
perfInfo.registerSuccess(true);
|
perfInfo.registerSuccess(true);
|
||||||
@ -507,6 +510,9 @@ public synchronized void close() throws IOException {
|
|||||||
// See HADOOP-16785
|
// See HADOOP-16785
|
||||||
throw wrapException(path, e.getMessage(), e);
|
throw wrapException(path, e.getMessage(), e);
|
||||||
} finally {
|
} finally {
|
||||||
|
if (contextEncryptionAdapter != null) {
|
||||||
|
contextEncryptionAdapter.destroy();
|
||||||
|
}
|
||||||
if (hasLease()) {
|
if (hasLease()) {
|
||||||
lease.free();
|
lease.free();
|
||||||
lease = null;
|
lease = null;
|
||||||
@ -587,8 +593,9 @@ private void writeAppendBlobCurrentBufferToService() throws IOException {
|
|||||||
"writeCurrentBufferToService", "append")) {
|
"writeCurrentBufferToService", "append")) {
|
||||||
AppendRequestParameters reqParams = new AppendRequestParameters(offset, 0,
|
AppendRequestParameters reqParams = new AppendRequestParameters(offset, 0,
|
||||||
bytesLength, APPEND_MODE, true, leaseId, isExpectHeaderEnabled);
|
bytesLength, APPEND_MODE, true, leaseId, isExpectHeaderEnabled);
|
||||||
AbfsRestOperation op = client.append(path, uploadData.toByteArray(), reqParams,
|
AbfsRestOperation op = client.append(path, uploadData.toByteArray(),
|
||||||
cachedSasToken.get(), new TracingContext(tracingContext));
|
reqParams, cachedSasToken.get(), contextEncryptionAdapter,
|
||||||
|
new TracingContext(tracingContext));
|
||||||
cachedSasToken.update(op.getSasToken());
|
cachedSasToken.update(op.getSasToken());
|
||||||
outputStreamStatistics.uploadSuccessful(bytesLength);
|
outputStreamStatistics.uploadSuccessful(bytesLength);
|
||||||
|
|
||||||
@ -648,8 +655,9 @@ private synchronized void flushWrittenBytesToServiceInternal(final long offset,
     AbfsPerfTracker tracker = client.getAbfsPerfTracker();
     try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(tracker,
         "flushWrittenBytesToServiceInternal", "flush")) {
-      AbfsRestOperation op = client.flush(path, offset, retainUncommitedData, isClose,
-          cachedSasToken.get(), leaseId, new TracingContext(tracingContext));
+      AbfsRestOperation op = client.flush(path, offset, retainUncommitedData,
+          isClose, cachedSasToken.get(), leaseId, contextEncryptionAdapter,
+          new TracingContext(tracingContext));
       cachedSasToken.update(op.getSasToken());
       perfInfo.registerResult(op.getResult()).registerSuccess(true);
     } catch (AzureBlobFileSystemException ex) {
@ -21,6 +21,7 @@
 import java.util.concurrent.ExecutorService;

 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
 import org.apache.hadoop.fs.impl.BackReference;
 import org.apache.hadoop.fs.store.DataBlocks;
@ -50,6 +51,8 @@ public class AbfsOutputStreamContext extends AbfsStreamContext {

   private AbfsLease lease;

+  private ContextEncryptionAdapter contextEncryptionAdapter;
+
   private DataBlocks.BlockFactory blockFactory;

   private int blockOutputActiveBlocks;
@ -193,6 +196,12 @@ public AbfsOutputStreamContext withLease(final AbfsLease lease) {
     return this;
   }

+  public AbfsOutputStreamContext withEncryptionAdapter(
+      final ContextEncryptionAdapter contextEncryptionAdapter) {
+    this.contextEncryptionAdapter = contextEncryptionAdapter;
+    return this;
+  }
+
   public int getWriteBufferSize() {
     return writeBufferSize;
   }
@ -240,6 +249,10 @@ public String getLeaseId() {
     return this.lease.getLeaseID();
   }

+  public ContextEncryptionAdapter getEncryptionAdapter() {
+    return contextEncryptionAdapter;
+  }
+
   public DataBlocks.BlockFactory getBlockFactory() {
     return blockFactory;
   }
@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs.utils;
+
+/**
+ * Enum EncryptionType to represent the level of encryption applied.
+ * <ol>
+ * <li>GLOBAL_KEY: encrypt all files with the same client-provided key.</li>
+ * <li>ENCRYPTION_CONTEXT: uses client-provided implementation to generate keys.</li>
+ * <li>NONE: encryption handled entirely at server.</li>
+ * </ol>
+ */
+public enum EncryptionType {
+  GLOBAL_KEY,
+  ENCRYPTION_CONTEXT,
+  NONE
+}
@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs.utils;
+
+import java.net.HttpURLConnection;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AbfsRestOperationException;
+import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
+import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
+
+/**
+ * Utility class providing a method to check whether the account is
+ * namespace enabled or not.
+ */
+public final class NamespaceUtil {
+
+  public static final Logger LOG = LoggerFactory.getLogger(NamespaceUtil.class);
+
+  private NamespaceUtil() {
+
+  }
+
+  /**
+   * Return whether the account used by the provided abfsClient is namespace
+   * enabled or not.
+   * It would call {@link org.apache.hadoop.fs.azurebfs.services.AbfsClient#getAclStatus(String, TracingContext)}.
+   * <ol>
+   * <li>
+   * If the API call is successful, then the account is namespace enabled.
+   * </li>
+   * <li>
+   * If the server returns with {@link java.net.HttpURLConnection#HTTP_BAD_REQUEST}, the account is not namespace enabled.
+   * </li>
+   * <li>
+   * If the server call gets some other exception, then the method would throw the exception.
+   * </li>
+   * </ol>
+   * @param abfsClient client for which the namespace-enabled check is to be done.
+   * @param tracingContext object to correlate Store requests.
+   * @return whether the account corresponding to the given client is
+   * namespace-enabled or not.
+   * @throws AzureBlobFileSystemException throws back the exception the method receives
+   * from the {@link AbfsClient#getAclStatus(String, TracingContext)}. In case it gets
+   * {@link AbfsRestOperationException}, it checks if the exception statusCode is
+   * BAD_REQUEST or not. If not, then it will pass the exception to the calling method.
+   */
+  public static Boolean isNamespaceEnabled(final AbfsClient abfsClient,
+      final TracingContext tracingContext)
+      throws AzureBlobFileSystemException {
+    Boolean isNamespaceEnabled;
+    try {
+      LOG.debug("Get root ACL status");
+      abfsClient.getAclStatus(AbfsHttpConstants.ROOT_PATH, tracingContext);
+      isNamespaceEnabled = true;
+    } catch (AbfsRestOperationException ex) {
+      // Get ACL status is a HEAD request; its response doesn't contain an
+      // errorCode, so we can only rely on the status code to determine the
+      // account type.
+      if (HttpURLConnection.HTTP_BAD_REQUEST != ex.getStatusCode()) {
+        throw ex;
+      }
+      isNamespaceEnabled = false;
+    } catch (AzureBlobFileSystemException ex) {
+      throw ex;
+    }
+    return isNamespaceEnabled;
+  }
+}
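As a usage sketch of the probe above (the wrapper class and method names here are hypothetical, introduced only for illustration; the `NamespaceUtil.isNamespaceEnabled` call itself is the one defined in the new file):

```java
import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
import org.apache.hadoop.fs.azurebfs.utils.NamespaceUtil;
import org.apache.hadoop.fs.azurebfs.utils.TracingContext;

final class NamespaceProbe {
  private NamespaceProbe() {
  }

  // Returns true for HNS (namespace-enabled) accounts. NamespaceUtil maps an
  // HTTP 400 from the root getAclStatus probe to false and rethrows any other
  // failure to the caller.
  static boolean isHnsAccount(AbfsClient client, TracingContext tracingContext)
      throws AzureBlobFileSystemException {
    return NamespaceUtil.isNamespaceEnabled(client, tracingContext);
  }
}
```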
@ -908,6 +908,38 @@ specified SSL channel mode. Value should be of the enum
 DelegatingSSLSocketFactory.SSLChannelMode. The default value will be
 DelegatingSSLSocketFactory.SSLChannelMode.Default.

+### <a name="encryptionconfigoptions"></a> Encryption Options
+Only one of the following two options can be configured. If config values of
+both types are set, the ABFS driver will throw an exception. If using the
+global key type, ensure both pre-computed values are provided.
+
+#### <a name="globalcpkconfigoptions"></a> Customer-Provided Global Key
+A global encryption key can be configured by providing the following
+pre-computed values. The key will be applied to any new files created after
+the configuration is set, and will be required in requests to read or
+modify the contents of those files.
+
+`fs.azure.encryption.encoded.client-provided-key`: The Base64 encoded version
+of the 256-bit encryption key.
+
+`fs.azure.encryption.encoded.client-provided-key-sha`: The Base64 encoded
+version of the SHA256 hash of the 256-bit encryption key.
+
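As a minimal sketch of deriving the two pre-computed values (assuming a freshly generated 256-bit key; the class name, variable names, and the account name `myaccount.dfs.core.windows.net` are placeholders — the account-qualified key pattern follows the one used by the ABFS tests in this patch):

```java
import java.security.MessageDigest;
import java.security.SecureRandom;
import java.util.Base64;

import org.apache.hadoop.conf.Configuration;

public class GlobalKeyConfigExample {
  public static void main(String[] args) throws Exception {
    // Generate (or load) a 256-bit (32-byte) customer-provided key.
    byte[] key = new byte[32];
    new SecureRandom().nextBytes(key);

    // Pre-compute the two values the ABFS driver expects.
    String encodedKey = Base64.getEncoder().encodeToString(key);
    String encodedKeySha = Base64.getEncoder().encodeToString(
        MessageDigest.getInstance("SHA-256").digest(key));

    Configuration conf = new Configuration();
    String account = "myaccount.dfs.core.windows.net"; // placeholder
    conf.set("fs.azure.encryption.encoded.client-provided-key." + account,
        encodedKey);
    conf.set("fs.azure.encryption.encoded.client-provided-key-sha." + account,
        encodedKeySha);
  }
}
```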
+#### <a name="encryptioncontextconfigoptions"></a> Encryption Context Provider
+
+The ABFS driver supports an interface called `EncryptionContextProvider` that
+can be used as a plugin for clients to provide custom implementations for
+the encryption framework. This framework allows an `encryptionContext`
+and an `encryptionKey` to be generated by the EncryptionContextProvider when
+a file is created. The server keeps track of the encryptionContext for
+each file. To perform subsequent operations such as read on the encrypted file,
+the ABFS driver will fetch the corresponding encryption key from the
+EncryptionContextProvider implementation by providing the encryptionContext
+string retrieved from a GetFileStatus request to the server.
+
+`fs.azure.encryption.context.provider.type`: The canonical name of the class
+implementing EncryptionContextProvider.
+
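A minimal wiring sketch, assuming the account-qualified form of this config key (the pattern used by the ABFS tests in this patch); the provider class `com.example.MyEncryptionContextProvider`, the account name, and the container are hypothetical:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EncryptionContextProviderWiring {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String account = "myaccount.dfs.core.windows.net"; // placeholder
    // The class below is hypothetical; it must implement
    // org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider.
    conf.set("fs.azure.encryption.context.provider.type." + account,
        "com.example.MyEncryptionContextProvider");
    try (FileSystem fs = FileSystem.newInstance(
        new Path("abfs://container@" + account + "/").toUri(), conf)) {
      // Files created through fs now get per-file keys from the provider.
      fs.create(new Path("/encrypted-file")).close();
    }
  }
}
```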
 ### <a name="serverconfigoptions"></a> Server Options
 When the config `fs.azure.io.read.tolerate.concurrent.append` is made true, the
 If-Match header sent to the server for read calls will be set as * otherwise the
@ -40,6 +40,7 @@
 import org.apache.hadoop.fs.azurebfs.oauth2.AccessTokenProvider;
 import org.apache.hadoop.fs.azurebfs.security.AbfsDelegationTokenManager;
 import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
+import org.apache.hadoop.fs.azurebfs.services.AbfsClientUtils;
 import org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream;
 import org.apache.hadoop.fs.azurebfs.services.AuthType;
 import org.apache.hadoop.fs.azurebfs.services.ITestAbfsClient;
@ -211,6 +212,7 @@ public void setup() throws Exception {
       wasb = new NativeAzureFileSystem(azureNativeFileSystemStore);
       wasb.initialize(wasbUri, rawConfig);
     }
+    AbfsClientUtils.setIsNamespaceEnabled(abfs.getAbfsClient(), true);
   }

   @After
@ -0,0 +1,460 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azurebfs;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.HashSet;
+import java.util.Hashtable;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations;
+import org.apache.hadoop.fs.azurebfs.security.EncodingHelper;
+import org.apache.hadoop.fs.azurebfs.services.AbfsClientUtils;
+import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
+import org.assertj.core.api.Assertions;
+import org.junit.Assume;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
+import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters;
+import org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider;
+import org.apache.hadoop.fs.azurebfs.extensions.MockEncryptionContextProvider;
+import org.apache.hadoop.fs.azurebfs.security.ContextProviderEncryptionAdapter;
+import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
+import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation;
+import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;
+import org.apache.hadoop.fs.azurebfs.utils.EncryptionType;
+import org.apache.hadoop.fs.impl.OpenFileParameters;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.Lists;
+
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ENCRYPTION_CONTEXT_PROVIDER_TYPE;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY;
+import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY_SHA;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_ENCRYPTION_CONTEXT;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_ENCRYPTION_KEY_SHA256;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_REQUEST_SERVER_ENCRYPTED;
+import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_SERVER_ENCRYPTED;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.ENCRYPTION_KEY_LEN;
+import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT;
+import static org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode.APPEND_MODE;
+import static org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers.aclEntry;
+import static org.apache.hadoop.fs.azurebfs.utils.EncryptionType.ENCRYPTION_CONTEXT;
+import static org.apache.hadoop.fs.azurebfs.utils.EncryptionType.GLOBAL_KEY;
+import static org.apache.hadoop.fs.azurebfs.utils.EncryptionType.NONE;
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.ALL;
+
+@RunWith(Parameterized.class)
+public class ITestAbfsCustomEncryption extends AbstractAbfsIntegrationTest {
+
+  public static final String SERVER_FILE_CONTENT = "123";
+
+  private final byte[] cpk = new byte[ENCRYPTION_KEY_LEN];
+  private final String cpkSHAEncoded;
+
+  private List<AzureBlobFileSystem> fileSystemsOpenedInTest = new ArrayList<>();
+
+  // Encryption type used by filesystem while creating file
+  @Parameterized.Parameter
+  public EncryptionType fileEncryptionType;
+
+  // Encryption type used by filesystem to call different operations
+  @Parameterized.Parameter(1)
+  public EncryptionType requestEncryptionType;
+
+  @Parameterized.Parameter(2)
+  public FSOperationType operation;
+
+  @Parameterized.Parameter(3)
+  public boolean responseHeaderServerEnc;
+
+  @Parameterized.Parameter(4)
+  public boolean responseHeaderReqServerEnc;
+
+  @Parameterized.Parameter(5)
+  public boolean isExceptionCase;
+
+  /**
+   * Boolean value to indicate that the server response would have a header related
+   * to CPK and the test would need to assert its value.
+   */
+  @Parameterized.Parameter(6)
+  public boolean isCpkResponseHdrExpected;
+
+  /**
+   * Boolean value to indicate that the server response would have fields related
+   * to CPK and the test would need to assert their values.
+   */
+  @Parameterized.Parameter(7)
+  public Boolean isCpkResponseKeyExpected = false;
+
+  @Parameterized.Parameter(8)
+  public Boolean fileSystemListStatusResultToBeUsedForOpeningFile = false;
+
+  @Parameterized.Parameters(name = "{0} mode, {2}")
+  public static Iterable<Object[]> params() {
+    return Arrays.asList(new Object[][] {
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.READ, true, false, false, true, false, false},
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.READ, true, false, false, true, false, true},
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.WRITE, false, true, false, true, false, false},
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.APPEND, false, true, false, true, false, false},
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.SET_ACL, false, false, false, false, false, false},
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.GET_ATTR, true, false, false, true, false, false},
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.SET_ATTR, false, true, false, true, false, false},
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.LISTSTATUS, false, false, false, false, true, false},
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.RENAME, false, false, false, false, false, false},
+        {ENCRYPTION_CONTEXT, ENCRYPTION_CONTEXT, FSOperationType.DELETE, false, false, false, false, false, false},
+
+        {ENCRYPTION_CONTEXT, NONE, FSOperationType.WRITE, false, false, true, false, false, false},
+        {ENCRYPTION_CONTEXT, NONE, FSOperationType.GET_ATTR, true, false, true, false, false, false},
+        {ENCRYPTION_CONTEXT, NONE, FSOperationType.READ, false, false, true, false, false, false},
+        {ENCRYPTION_CONTEXT, NONE, FSOperationType.SET_ATTR, false, true, true, false, false, false},
+        {ENCRYPTION_CONTEXT, NONE, FSOperationType.RENAME, false, false, false, false, false, false},
+        {ENCRYPTION_CONTEXT, NONE, FSOperationType.LISTSTATUS, false, false, false, false, false, false},
+        {ENCRYPTION_CONTEXT, NONE, FSOperationType.DELETE, false, false, false, false, false, false},
+        {ENCRYPTION_CONTEXT, NONE, FSOperationType.SET_ACL, false, false, false, false, false, false},
+        {ENCRYPTION_CONTEXT, NONE, FSOperationType.SET_PERMISSION, false, false, false, false, false, false},
+
+        {GLOBAL_KEY, GLOBAL_KEY, FSOperationType.READ, true, false, false, true, false, false},
+        {GLOBAL_KEY, GLOBAL_KEY, FSOperationType.WRITE, false, true, false, true, false, false},
+        {GLOBAL_KEY, GLOBAL_KEY, FSOperationType.APPEND, false, true, false, true, false, false},
+        {GLOBAL_KEY, GLOBAL_KEY, FSOperationType.SET_ACL, false, false, false, false, false, false},
+        {GLOBAL_KEY, GLOBAL_KEY, FSOperationType.LISTSTATUS, false, false, false, false, false, false},
+        {GLOBAL_KEY, GLOBAL_KEY, FSOperationType.RENAME, false, false, false, false, false, false},
+        {GLOBAL_KEY, GLOBAL_KEY, FSOperationType.DELETE, false, false, false, false, false, false},
+        {GLOBAL_KEY, GLOBAL_KEY, FSOperationType.GET_ATTR, true, false, false, true, false, false},
+        {GLOBAL_KEY, GLOBAL_KEY, FSOperationType.SET_ATTR, false, true, false, true, false, false},
+
+        {GLOBAL_KEY, NONE, FSOperationType.READ, true, false, true, true, false, false},
+        {GLOBAL_KEY, NONE, FSOperationType.WRITE, false, true, true, true, false, false},
+        {GLOBAL_KEY, NONE, FSOperationType.SET_ATTR, false, false, true, true, false, false},
+        {GLOBAL_KEY, NONE, FSOperationType.SET_ACL, false, false, false, false, false, false},
+        {GLOBAL_KEY, NONE, FSOperationType.RENAME, false, false, false, false, false, false},
+        {GLOBAL_KEY, NONE, FSOperationType.LISTSTATUS, false, false, false, false, false, false},
+        {GLOBAL_KEY, NONE, FSOperationType.DELETE, false, false, false, false, false, false},
+        {GLOBAL_KEY, NONE, FSOperationType.SET_PERMISSION, false, false, false, false, false, false},
+    });
+  }
+
+  public ITestAbfsCustomEncryption() throws Exception {
+    Assume.assumeTrue("Account should be HNS enabled for CPK",
+        getConfiguration().getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT,
+            false));
+    new Random().nextBytes(cpk);
+    cpkSHAEncoded = EncodingHelper.getBase64EncodedString(
+        EncodingHelper.getSHA256Hash(cpk));
+  }
+
+  @Test
+  public void testCustomEncryptionCombinations() throws Exception {
+    AzureBlobFileSystem fs = getOrCreateFS();
+    Path testPath = path("/testFile");
+    String relativePath = fs.getAbfsStore().getRelativePath(testPath);
+    MockEncryptionContextProvider ecp =
+        (MockEncryptionContextProvider) createEncryptedFile(testPath);
+    AbfsRestOperation op = callOperation(fs, new Path(relativePath), ecp);
+    if (op == null) {
+      return;
+    }
+    AbfsHttpOperation httpOp = op.getResult();
+    if (isCpkResponseHdrExpected) {
+      if (requestEncryptionType == ENCRYPTION_CONTEXT) {
+        String encryptionContext = ecp.getEncryptionContextForTest(relativePath);
+        String expectedKeySHA = EncodingHelper.getBase64EncodedString(
+            EncodingHelper.getSHA256Hash(
+                ecp.getEncryptionKeyForTest(encryptionContext)));
+        Assertions.assertThat(httpOp.getResponseHeader(X_MS_ENCRYPTION_KEY_SHA256))
+            .isEqualTo(expectedKeySHA);
+      } else {  // GLOBAL_KEY
+        Assertions.assertThat(httpOp.getResponseHeader(X_MS_ENCRYPTION_KEY_SHA256))
+            .isEqualTo(cpkSHAEncoded);
+      }
+    } else {
+      if (isCpkResponseKeyExpected) {
+        if (requestEncryptionType == ENCRYPTION_CONTEXT) {
+          String encryptionContext = ecp.getEncryptionContextForTest(relativePath);
+          String expectedKeySHA = EncodingHelper.getBase64EncodedString(
+              EncodingHelper.getSHA256Hash(
+                  ecp.getEncryptionKeyForTest(encryptionContext)));
+          Assertions.assertThat(httpOp.getListResultSchema().paths().get(0)
+              .getCustomerProvidedKeySha256()).isEqualTo(expectedKeySHA);
+        }
+      } else {
+        Assertions.assertThat(
+            httpOp.getResponseHeader(X_MS_ENCRYPTION_KEY_SHA256))
+            .isEqualTo(null);
+      }
+    }
+    Assertions.assertThat(httpOp.getResponseHeader(X_MS_SERVER_ENCRYPTED))
+        .isEqualTo(responseHeaderServerEnc ? "true" : null);
+    Assertions.assertThat(httpOp.getResponseHeader(X_MS_REQUEST_SERVER_ENCRYPTED))
+        .isEqualTo(responseHeaderReqServerEnc ? "true" : null);
+  }
+
+  /**
+   * Executes a given operation at the AbfsClient level and returns the
+   * AbfsRestOperation instance to verify response headers. Asserts an
+   * exception for combinations that should not succeed.
+   * @param fs AzureBlobFileSystem instance
+   * @param testPath path of file
+   * @param ecp EncryptionContextProvider instance to support AbfsClient methods
+   * @return Rest op or null depending on whether the request is allowed
+   * @throws Exception error
+   */
+  private AbfsRestOperation callOperation(AzureBlobFileSystem fs,
+      Path testPath, EncryptionContextProvider ecp)
+      throws Exception {
+    AbfsClient client = fs.getAbfsClient();
+    AbfsClientUtils.setEncryptionContextProvider(client, ecp);
+    if (isExceptionCase) {
+      LambdaTestUtils.intercept(IOException.class, () -> {
+        switch (operation) {
+          case WRITE: try (FSDataOutputStream out = fs.append(testPath)) {
+              out.write("bytes".getBytes());
+            }
+            break;
+          case READ: try (FSDataInputStream in = fs.open(testPath)) {
+              in.read(new byte[5]);
+            }
+            break;
+          case SET_ATTR: fs.setXAttr(testPath, "attribute", "value".getBytes());
+            break;
+          case GET_ATTR: fs.getXAttr(testPath, "attribute");
+            break;
+          default: throw new NoSuchFieldException();
+        }
+      });
+      return null;
+    } else {
+      ContextProviderEncryptionAdapter encryptionAdapter = null;
+      if (fileEncryptionType == ENCRYPTION_CONTEXT) {
+        encryptionAdapter = new ContextProviderEncryptionAdapter(ecp,
+            fs.getAbfsStore().getRelativePath(testPath),
+            Base64.getEncoder().encode(
+                ((MockEncryptionContextProvider) ecp).getEncryptionContextForTest(testPath.toString())
+                    .getBytes(StandardCharsets.UTF_8)));
+      }
+      String path = testPath.toString();
+      switch (operation) {
+        case READ:
+          if (!fileSystemListStatusResultToBeUsedForOpeningFile
+              || fileEncryptionType != ENCRYPTION_CONTEXT) {
+            TracingContext tracingContext = getTestTracingContext(fs, true);
+            AbfsHttpOperation statusOp = client.getPathStatus(path, false,
+                tracingContext, null).getResult();
+            return client.read(path, 0, new byte[5], 0, 5,
+                statusOp.getResponseHeader(HttpHeaderConfigurations.ETAG),
+                null, encryptionAdapter, tracingContext);
+          } else {
+            /*
+             * The tested scenario in this block is:
+             * 1. Create a file.
+             * 2. Fetch the list of VersionedFileStatus objects via the listStatus API of the AzureBlobFileSystem.
+             * 3. Use the context value in the VersionedFileStatus object for making the read API call to the backend.
+             * 4. Assert for no exception and get the response.
+             */
+            FileStatus status = fs.listStatus(testPath)[0];
+            Assertions.assertThat(status)
+                .isInstanceOf(AzureBlobFileSystemStore.VersionedFileStatus.class);
+
+            Assertions.assertThat(
+                ((AzureBlobFileSystemStore.VersionedFileStatus) status).getEncryptionContext())
+                .isNotNull();
+
+            try (FSDataInputStream in = fs.openFileWithOptions(testPath,
+                new OpenFileParameters().withMandatoryKeys(new HashSet<>())
+                    .withStatus(fs.listStatus(testPath)[0])).get()) {
+              byte[] readBuffer = new byte[3];
+              Assertions.assertThat(in.read(readBuffer)).isGreaterThan(0);
+              Assertions.assertThat(readBuffer).isEqualTo(SERVER_FILE_CONTENT.getBytes());
+              return null;
+            }
+          }
+        case WRITE:
+          return client.flush(path, 3, false, false, null,
+              null, encryptionAdapter, getTestTracingContext(fs, false));
+        case APPEND:
+          return client.append(path, "val".getBytes(),
+              new AppendRequestParameters(3, 0, 3, APPEND_MODE, false, null, true),
+              null, encryptionAdapter, getTestTracingContext(fs, false));
+        case SET_ACL:
+          return client.setAcl(path, AclEntry.aclSpecToString(
+              Lists.newArrayList(aclEntry(ACCESS, USER, ALL))),
+              getTestTracingContext(fs, false));
+        case LISTSTATUS:
+          return client.listPath(path, false, 5, null,
+              getTestTracingContext(fs, true));
+        case RENAME:
+          TracingContext tc = getTestTracingContext(fs, true);
+          return client.renamePath(path, new Path(path + "_2").toString(),
+              null, tc, null, false, fs.getIsNamespaceEnabled(tc)).getOp();
+        case DELETE:
+          return client.deletePath(path, false, null,
+              getTestTracingContext(fs, false));
+        case GET_ATTR:
+          return client.getPathStatus(path, true,
+              getTestTracingContext(fs, false),
+              createEncryptionAdapterFromServerStoreContext(path,
+                  getTestTracingContext(fs, false), client));
+        case SET_ATTR:
+          Hashtable<String, String> properties = new Hashtable<>();
+          properties.put("key", "{ value: valueTest }");
+          return client.setPathProperties(path, fs.getAbfsStore()
+              .convertXmsPropertiesToCommaSeparatedString(properties),
+              getTestTracingContext(fs, false),
+              createEncryptionAdapterFromServerStoreContext(path,
+                  getTestTracingContext(fs, false), client));
+        case SET_PERMISSION:
+          return client.setPermission(path, FsPermission.getDefault().toString(),
+              getTestTracingContext(fs, false));
+        default: throw new NoSuchFieldException();
+      }
+    }
+  }
+
+  private ContextProviderEncryptionAdapter createEncryptionAdapterFromServerStoreContext(final String path,
+      final TracingContext tracingContext,
+      final AbfsClient client) throws IOException {
+    if (client.getEncryptionType() != ENCRYPTION_CONTEXT) {
+      return null;
+    }
+    final String responseHeaderEncryptionContext = client.getPathStatus(path,
+        false, tracingContext, null).getResult()
+        .getResponseHeader(X_MS_ENCRYPTION_CONTEXT);
+    if (responseHeaderEncryptionContext == null) {
+      throw new PathIOException(path,
+          "EncryptionContext not present in GetPathStatus response");
+    }
+    byte[] encryptionContext = responseHeaderEncryptionContext.getBytes(
+        StandardCharsets.UTF_8);
+
+    return new ContextProviderEncryptionAdapter(client.getEncryptionContextProvider(),
+        new Path(path).toUri().getPath(), encryptionContext);
+  }
+
+  private AzureBlobFileSystem getECProviderEnabledFS() throws Exception {
+    Configuration configuration = getRawConfiguration();
+    configuration.set(FS_AZURE_ENCRYPTION_CONTEXT_PROVIDER_TYPE + "."
+        + getAccountName(), MockEncryptionContextProvider.class.getCanonicalName());
+    configuration.unset(FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY + "."
+        + getAccountName());
+    configuration.unset(FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY_SHA + "."
+        + getAccountName());
+    AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.newInstance(configuration);
+    fileSystemsOpenedInTest.add(fs);
+    return fs;
+  }
+
+  private AzureBlobFileSystem getCPKEnabledFS() throws IOException {
+    Configuration conf = getRawConfiguration();
+    String cpkEncoded = EncodingHelper.getBase64EncodedString(cpk);
+    String cpkEncodedSHA = EncodingHelper.getBase64EncodedString(
+        EncodingHelper.getSHA256Hash(cpk));
+    conf.set(FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY + "."
+        + getAccountName(), cpkEncoded);
+    conf.set(FS_AZURE_ENCRYPTION_ENCODED_CLIENT_PROVIDED_KEY_SHA + "."
+        + getAccountName(), cpkEncodedSHA);
+    conf.unset(FS_AZURE_ENCRYPTION_CONTEXT_PROVIDER_TYPE);
+    AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.newInstance(conf);
+    fileSystemsOpenedInTest.add(fs);
+    return fs;
+  }
+
+  private AzureBlobFileSystem getOrCreateFS() throws Exception {
+    if (getFileSystem().getAbfsClient().getEncryptionType() == requestEncryptionType) {
+      return getFileSystem();
+    }
+    if (requestEncryptionType == ENCRYPTION_CONTEXT) {
+      return getECProviderEnabledFS();
+    } else if (requestEncryptionType == GLOBAL_KEY) {
+      return getCPKEnabledFS();
+    } else {
+      Configuration conf = getRawConfiguration();
+      conf.unset(FS_AZURE_ENCRYPTION_CONTEXT_PROVIDER_TYPE);
+      AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.newInstance(conf);
+      fileSystemsOpenedInTest.add(fs);
+      return fs;
+    }
+  }
+
+  /**
+   * Creates a file on the server with values for the following keys:
+   * <ol>
+   * <li>x-ms-encryption-key: for ENCRYPTION_CONTEXT, GLOBAL</li>
+   * <li>x-ms-encryption-key-sha256: for ENCRYPTION_CONTEXT, GLOBAL</li>
+   * <li>x-ms-encryption-context: for ENCRYPTION_CONTEXT</li>
+   * </ol>
+   * In the case of ENCRYPTION_CONTEXT, returns the encryptionProvider object
+   * that was used to create the x-ms-encryption-context value used for
+   * creating the file.
+   */
+  private EncryptionContextProvider createEncryptedFile(Path testPath) throws Exception {
+    AzureBlobFileSystem fs;
+    if (getFileSystem().getAbfsClient().getEncryptionType() == fileEncryptionType) {
+      fs = getFileSystem();
+    } else {
+      fs = fileEncryptionType == ENCRYPTION_CONTEXT
+          ? getECProviderEnabledFS()
+          : getCPKEnabledFS();
+    }
+    String relativePath = fs.getAbfsStore().getRelativePath(testPath);
+    try (FSDataOutputStream out = fs.create(new Path(relativePath))) {
+      out.write(SERVER_FILE_CONTENT.getBytes());
+    }
+    // verify the file is encrypted by calling getPathStatus (with properties)
+    // without encryption headers in the request
+    if (fileEncryptionType != EncryptionType.NONE) {
+      final AbfsClient abfsClient = fs.getAbfsClient();
+      abfsClient.setEncryptionType(EncryptionType.NONE);
+      LambdaTestUtils.intercept(IOException.class, () ->
+          abfsClient.getPathStatus(relativePath,
+              true,
+              getTestTracingContext(fs, false),
+              createEncryptionAdapterFromServerStoreContext(relativePath,
+                  getTestTracingContext(fs, false), abfsClient)));
+      fs.getAbfsClient().setEncryptionType(fileEncryptionType);
+    }
+    return fs.getAbfsClient().getEncryptionContextProvider();
+  }
+
+  @Override
+  public void teardown() throws Exception {
+    super.teardown();
+    for (AzureBlobFileSystem azureBlobFileSystem : fileSystemsOpenedInTest) {
+      azureBlobFileSystem.close();
+    }
+  }
+}
@ -280,7 +280,7 @@ public void testWithNullStreamStatistics() throws IOException {
     // AbfsRestOperation Instance required for eTag.
     AbfsRestOperation abfsRestOperation = fs.getAbfsClient()
         .getPathStatus(nullStatFilePath.toUri().getPath(), false,
-            getTestTracingContext(fs, false));
+            getTestTracingContext(fs, false), null);

     // AbfsInputStream with no StreamStatistics.
     in = new AbfsInputStream(fs.getAbfsClient(), null,
@ -35,6 +35,8 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
+import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
+import org.apache.hadoop.fs.azurebfs.services.AbfsClientUtils;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
@ -57,6 +59,7 @@

 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.nullable;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@ -278,6 +281,7 @@ public void testCreateFileOverwrite(boolean enableConditionalCreateOverwrite)
     final AzureBlobFileSystem fs =
         (AzureBlobFileSystem) FileSystem.newInstance(currentFs.getUri(),
             config);
+    AbfsClientUtils.setIsNamespaceEnabled(fs.getAbfsClient(), true);

     long totalConnectionMadeBeforeTest = fs.getInstrumentationMap()
         .get(CONNECTIONS_MADE.getStatName());
@ -421,16 +425,16 @@ public void testNegativeScenariosForCreateOverwriteDisabled()
         serverErrorResponseEx) // Scn5: create overwrite=false fails with Http500
         .when(mockClient)
         .createPath(any(String.class), eq(true), eq(false),
-            isNamespaceEnabled ? any(String.class) : eq(null),
-            isNamespaceEnabled ? any(String.class) : eq(null),
-            any(boolean.class), eq(null), any(TracingContext.class));
+            any(AzureBlobFileSystemStore.Permissions.class), any(boolean.class), eq(null), any(),
+            any(TracingContext.class));

     doThrow(fileNotFoundResponseEx) // Scn1: GFS fails with Http404
         .doThrow(serverErrorResponseEx) // Scn2: GFS fails with Http500
         .doReturn(successOp) // Scn3: create overwrite=true fails with Http412
         .doReturn(successOp) // Scn4: create overwrite=true fails with Http500
         .when(mockClient)
-        .getPathStatus(any(String.class), eq(false), any(TracingContext.class));
+        .getPathStatus(any(String.class), eq(false), any(TracingContext.class), nullable(
+            ContextEncryptionAdapter.class));

     // mock for overwrite=true
     doThrow(
@ -439,9 +443,8 @@ public void testNegativeScenariosForCreateOverwriteDisabled()
         serverErrorResponseEx) // Scn4: create overwrite=true fails with Http500
         .when(mockClient)
         .createPath(any(String.class), eq(true), eq(true),
-            isNamespaceEnabled ? any(String.class) : eq(null),
-            isNamespaceEnabled ? any(String.class) : eq(null),
-            any(boolean.class), eq(null), any(TracingContext.class));
+            any(AzureBlobFileSystemStore.Permissions.class), any(boolean.class), eq(null), any(),
+            any(TracingContext.class));

     // Scn1: GFS fails with Http404
     // Sequence of events expected:
@ -508,7 +508,7 @@ public void testAlwaysReadBufferSizeConfig(boolean alwaysReadBufferSizeConfigVal
         1 * MEGABYTE, config);
     String eTag = fs.getAbfsClient()
         .getPathStatus(testFile.toUri().getPath(), false,
-            getTestTracingContext(fs, false))
+            getTestTracingContext(fs, false), null)
         .getResult()
         .getResponseHeader(ETAG);

@ -1,976 +0,0 @@
|
|||||||
/**
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
|
||||||
* or more contributor license agreements. See the NOTICE file
|
|
||||||
* distributed with this work for additional information
|
|
||||||
* regarding copyright ownership. The ASF licenses this file
|
|
||||||
* to you under the Apache License, Version 2.0 ("License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
* <p>
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
* <p>
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.apache.hadoop.fs.azurebfs;
|
|
||||||
|
|
||||||
import java.io.FileNotFoundException;
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.nio.CharBuffer;
|
|
||||||
import java.nio.charset.CharacterCodingException;
|
|
||||||
import java.nio.charset.Charset;
|
|
||||||
import java.nio.charset.CharsetEncoder;
|
|
||||||
import java.nio.charset.StandardCharsets;
|
|
||||||
import java.security.MessageDigest;
|
|
||||||
import java.security.NoSuchAlgorithmException;
|
|
||||||
import java.util.EnumSet;
|
|
||||||
import java.util.Hashtable;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.Optional;
|
|
||||||
import java.util.Random;
|
|
||||||
import java.util.UUID;
|
|
||||||
|
|
||||||
import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
|
|
||||||
import org.apache.hadoop.fs.contract.ContractTestUtils;
|
|
||||||
import org.apache.hadoop.util.Preconditions;
|
|
||||||
import org.apache.hadoop.util.Lists;
|
|
||||||
import org.assertj.core.api.Assertions;
|
|
||||||
import org.junit.Assume;
|
|
||||||
import org.junit.Test;
|
|
||||||
import org.slf4j.Logger;
|
|
||||||
import org.slf4j.LoggerFactory;
|
|
||||||
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
|
||||||
import org.apache.hadoop.fs.FSDataInputStream;
|
|
||||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
|
||||||
import org.apache.hadoop.fs.FileStatus;
|
|
||||||
import org.apache.hadoop.fs.FileSystem;
|
|
||||||
import org.apache.hadoop.fs.Path;
|
|
||||||
import org.apache.hadoop.fs.XAttrSetFlag;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.constants.AbfsHttpConstants;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.contracts.services.AppendRequestParameters.Mode;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.services.AbfsAclHelper;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.services.AbfsHttpHeader;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.services.AbfsHttpOperation;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.services.AbfsRestOperation;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.services.AuthType;
|
|
||||||
import org.apache.hadoop.fs.azurebfs.utils.Base64;
|
|
||||||
import org.apache.hadoop.fs.permission.AclEntry;
|
|
||||||
import org.apache.hadoop.fs.permission.FsAction;
|
|
||||||
import org.apache.hadoop.fs.permission.FsPermission;
|
|
||||||
import org.apache.hadoop.test.LambdaTestUtils;
|
|
||||||
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.ONE_MB;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_ENCRYPTION_ALGORITHM;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_ENCRYPTION_KEY;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_ENCRYPTION_KEY_SHA256;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_REQUEST_SERVER_ENCRYPTED;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations.X_MS_SERVER_ENCRYPTED;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ABFS_ACCOUNT_NAME;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ACCOUNT_KEY;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_CPK_ENABLED;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT_KEY;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.TEST_CONFIGURATION_FILE_NAME;
|
|
||||||
import static org.apache.hadoop.fs.azurebfs.utils.AclTestHelpers.aclEntry;
|
|
||||||
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
|
|
||||||
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
|
|
||||||
import static org.apache.hadoop.fs.permission.FsAction.ALL;
|
|
||||||
|
|
||||||
public class ITestCustomerProvidedKey extends AbstractAbfsIntegrationTest {
|
|
||||||
private static final Logger LOG = LoggerFactory
|
|
||||||
.getLogger(ITestCustomerProvidedKey.class);
|
|
||||||
|
|
||||||
private static final String XMS_PROPERTIES_ENCODING = "ISO-8859-1";
|
|
||||||
private static final int INT_512 = 512;
|
|
||||||
private static final int INT_50 = 50;
|
|
||||||
private static final int ENCRYPTION_KEY_LEN = 32;
|
|
||||||
private static final int FILE_SIZE = 10 * ONE_MB;
|
|
||||||
private static final int FILE_SIZE_FOR_COPY_BETWEEN_ACCOUNTS = 24 * ONE_MB;
|
|
||||||
|
|
||||||
private boolean isNamespaceEnabled;
|
|
||||||
|
|
||||||
public ITestCustomerProvidedKey() throws Exception {
|
|
||||||
boolean isCPKTestsEnabled = getConfiguration()
|
|
||||||
.getBoolean(FS_AZURE_TEST_CPK_ENABLED, false);
|
|
||||||
Assume.assumeTrue(isCPKTestsEnabled);
|
|
||||||
isNamespaceEnabled = getConfiguration()
|
|
||||||
.getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false);
|
|
||||||
}
|
|
||||||
|
|
||||||
private String getFileName() throws IOException {
|
|
||||||
return path("/" + methodName.getMethodName()).toUri().getPath();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testReadWithCPK() throws Exception {
|
|
||||||
final AzureBlobFileSystem fs = getAbfs(true);
|
|
||||||
String fileName = getFileName();
|
|
||||||
createFileAndGetContent(fs, fileName, FILE_SIZE);
|
|
||||||
|
|
||||||
AbfsClient abfsClient = fs.getAbfsClient();
|
|
||||||
int length = FILE_SIZE;
|
|
||||||
byte[] buffer = new byte[length];
|
|
||||||
TracingContext tracingContext = getTestTracingContext(fs, false);
|
|
||||||
final AbfsRestOperation op = abfsClient.getPathStatus(fileName, false,
|
|
||||||
tracingContext);
|
|
||||||
final String eTag = op.getResult()
|
|
||||||
.getResponseHeader(HttpHeaderConfigurations.ETAG);
|
|
||||||
AbfsRestOperation abfsRestOperation = abfsClient
|
|
||||||
.read(fileName, 0, buffer, 0, length, eTag, null, tracingContext);
|
|
||||||
assertCPKHeaders(abfsRestOperation, true);
|
|
||||||
assertResponseHeader(abfsRestOperation, true, X_MS_ENCRYPTION_KEY_SHA256,
|
|
||||||
getCPKSha(fs));
|
|
||||||
assertResponseHeader(abfsRestOperation, true, X_MS_SERVER_ENCRYPTED,
|
|
||||||
"true");
|
|
||||||
assertResponseHeader(abfsRestOperation, false,
|
|
||||||
X_MS_REQUEST_SERVER_ENCRYPTED, "");
|
|
||||||
|
|
||||||
// Trying to read with different CPK headers
|
|
||||||
Configuration conf = fs.getConf();
|
|
||||||
String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME);
|
|
||||||
conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName,
|
|
||||||
"different-1234567890123456789012");
|
|
||||||
try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf);
|
|
||||||
FSDataInputStream iStream = fs2.open(new Path(fileName))) {
|
|
||||||
int len = 8 * ONE_MB;
|
|
||||||
byte[] b = new byte[len];
|
|
||||||
LambdaTestUtils.intercept(IOException.class, () -> {
|
|
||||||
iStream.read(b, 0, len);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trying to read with no CPK headers
|
|
||||||
conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName);
|
|
||||||
try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem
|
|
||||||
.get(conf); FSDataInputStream iStream = fs3.open(new Path(fileName))) {
|
|
||||||
int len = 8 * ONE_MB;
|
|
||||||
byte[] b = new byte[len];
|
|
||||||
LambdaTestUtils.intercept(IOException.class, () -> {
|
|
||||||
iStream.read(b, 0, len);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
|
||||||
public void testReadWithoutCPK() throws Exception {
|
|
||||||
final AzureBlobFileSystem fs = getAbfs(false);
|
|
||||||
String fileName = getFileName();
|
|
||||||
createFileAndGetContent(fs, fileName, FILE_SIZE);
|
|
||||||
|
|
||||||
AbfsClient abfsClient = fs.getAbfsClient();
|
|
||||||
int length = INT_512;
|
|
||||||
byte[] buffer = new byte[length * 4];
|
|
||||||
TracingContext tracingContext = getTestTracingContext(fs, false);
|
|
||||||
final AbfsRestOperation op = abfsClient
|
|
||||||
.getPathStatus(fileName, false, tracingContext);
|
|
||||||
final String eTag = op.getResult()
|
|
||||||
.getResponseHeader(HttpHeaderConfigurations.ETAG);
|
|
||||||
AbfsRestOperation abfsRestOperation = abfsClient
|
|
||||||
.read(fileName, 0, buffer, 0, length, eTag, null, tracingContext);
|
|
||||||
assertCPKHeaders(abfsRestOperation, false);
|
|
||||||
assertResponseHeader(abfsRestOperation, false, X_MS_ENCRYPTION_KEY_SHA256,
|
|
||||||
getCPKSha(fs));
|
|
||||||
assertResponseHeader(abfsRestOperation, true, X_MS_SERVER_ENCRYPTED,
|
|
||||||
"true");
|
|
||||||
assertResponseHeader(abfsRestOperation, false,
|
|
||||||
X_MS_REQUEST_SERVER_ENCRYPTED, "");
|
|
||||||
|
|
||||||
// Trying to read with CPK headers
|
|
||||||
Configuration conf = fs.getConf();
|
|
||||||
String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME);
|
|
||||||
conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName,
|
|
||||||
"12345678901234567890123456789012");
|
|
||||||
|
|
||||||
try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf);
|
|
||||||
AbfsClient abfsClient2 = fs2.getAbfsClient()) {
|
|
||||||
LambdaTestUtils.intercept(IOException.class, () -> {
|
|
||||||
abfsClient2.read(fileName, 0, buffer, 0, length, eTag, null,
|
|
||||||
getTestTracingContext(fs, false));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
  @Test
  public void testAppendWithCPK() throws Exception {
    final AzureBlobFileSystem fs = getAbfs(true);
    final String fileName = getFileName();
    createFileAndGetContent(fs, fileName, FILE_SIZE);

    // Trying to append with correct CPK headers
    AppendRequestParameters appendRequestParameters =
        new AppendRequestParameters(
            0, 0, 5, Mode.APPEND_MODE, false, null, true);
    byte[] buffer = getRandomBytesArray(5);
    AbfsClient abfsClient = fs.getAbfsClient();
    AbfsRestOperation abfsRestOperation = abfsClient
        .append(fileName, buffer, appendRequestParameters, null, getTestTracingContext(fs, false));
    assertCPKHeaders(abfsRestOperation, true);
    assertResponseHeader(abfsRestOperation, true, X_MS_ENCRYPTION_KEY_SHA256,
        getCPKSha(fs));
    assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, "");
    assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED,
        "true");

    // Trying to append with different CPK headers
    Configuration conf = fs.getConf();
    String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME);
    conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName,
        "different-1234567890123456789012");
    try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf);
        AbfsClient abfsClient2 = fs2.getAbfsClient()) {
      LambdaTestUtils.intercept(IOException.class, () -> {
        abfsClient2.append(fileName, buffer, appendRequestParameters, null,
            getTestTracingContext(fs, false));
      });
    }

    // Trying to append with no CPK headers
    conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName);
    try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem
        .get(conf); AbfsClient abfsClient3 = fs3.getAbfsClient()) {
      LambdaTestUtils.intercept(IOException.class, () -> {
        abfsClient3.append(fileName, buffer, appendRequestParameters, null,
            getTestTracingContext(fs, false));
      });
    }
  }
  @Test
  public void testAppendWithoutCPK() throws Exception {
    final AzureBlobFileSystem fs = getAbfs(false);
    final String fileName = getFileName();
    createFileAndGetContent(fs, fileName, FILE_SIZE);

    // Trying to append without CPK headers
    AppendRequestParameters appendRequestParameters =
        new AppendRequestParameters(
            0, 0, 5, Mode.APPEND_MODE, false, null, true);
    byte[] buffer = getRandomBytesArray(5);
    AbfsClient abfsClient = fs.getAbfsClient();
    AbfsRestOperation abfsRestOperation = abfsClient
        .append(fileName, buffer, appendRequestParameters, null,
            getTestTracingContext(fs, false));
    assertCPKHeaders(abfsRestOperation, false);
    assertResponseHeader(abfsRestOperation, false, X_MS_ENCRYPTION_KEY_SHA256,
        "");
    assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, "");
    assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED,
        "true");

    // Trying to append with CPK headers
    Configuration conf = fs.getConf();
    String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME);
    conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName,
        "12345678901234567890123456789012");
    try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf);
        AbfsClient abfsClient2 = fs2.getAbfsClient()) {
      LambdaTestUtils.intercept(IOException.class, () -> {
        abfsClient2.append(fileName, buffer, appendRequestParameters, null,
            getTestTracingContext(fs, false));
      });
    }
  }
  @Test
  public void testSetGetXAttr() throws Exception {
    final AzureBlobFileSystem fs = getAbfs(true);
    final String fileName = getFileName();
    createFileAndGetContent(fs, fileName, FILE_SIZE);

    String valSent = "testValue";
    String attrName = "testXAttr";

    // set, get and verify
    fs.setXAttr(new Path(fileName), attrName,
        valSent.getBytes(StandardCharsets.UTF_8),
        EnumSet.of(XAttrSetFlag.CREATE));
    byte[] valBytes = fs.getXAttr(new Path(fileName), attrName);
    String valReceived = new String(valBytes);
    assertEquals(valSent, valReceived);

    // set new value, get and verify
    valSent = "new value";
    fs.setXAttr(new Path(fileName), attrName,
        valSent.getBytes(StandardCharsets.UTF_8),
        EnumSet.of(XAttrSetFlag.REPLACE));
    valBytes = fs.getXAttr(new Path(fileName), attrName);
    valReceived = new String(valBytes);
    assertEquals(valSent, valReceived);

    // Read without CPK header
    LambdaTestUtils.intercept(IOException.class, () -> {
      getAbfs(false).getXAttr(new Path(fileName), attrName);
    });

    // Wrong CPK
    LambdaTestUtils.intercept(IOException.class, () -> {
      getSameFSWithWrongCPK(fs).getXAttr(new Path(fileName), attrName);
    });
  }
  @Test
  public void testCopyBetweenAccounts() throws Exception {
    String accountName = getRawConfiguration()
        .get(FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT);
    String accountKey = getRawConfiguration()
        .get(FS_AZURE_TEST_CPK_ENABLED_SECONDARY_ACCOUNT_KEY);
    Assume.assumeTrue(accountName != null && !accountName.isEmpty());
    Assume.assumeTrue(accountKey != null && !accountKey.isEmpty());
    String fileSystemName = "cpkfs";

    // Create fs1 and a file with CPK
    AzureBlobFileSystem fs1 = getAbfs(true);
    int fileSize = FILE_SIZE_FOR_COPY_BETWEEN_ACCOUNTS;
    byte[] fileContent = getRandomBytesArray(fileSize);
    Path testFilePath = createFileWithContent(fs1,
        String.format("fs1-file%s.txt", UUID.randomUUID()), fileContent);

    // Create fs2 with a different CPK
    Configuration conf = new Configuration();
    conf.addResource(TEST_CONFIGURATION_FILE_NAME);
    conf.setBoolean(AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION, true);
    conf.unset(FS_AZURE_ABFS_ACCOUNT_NAME);
    conf.set(FS_AZURE_ABFS_ACCOUNT_NAME, accountName);
    conf.set(FS_AZURE_ACCOUNT_KEY + "." + accountName, accountKey);
    conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName,
        "123456789012345678901234567890ab");
    conf.set("fs.defaultFS", "abfs://" + fileSystemName + "@" + accountName);
    AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf);

    // Read from fs1 and write to fs2; fs1 and fs2 use different CPKs
    Path fs2DestFilePath = new Path(
        String.format("fs2-dest-file%s.txt", UUID.randomUUID()));
    FSDataOutputStream ops = fs2.create(fs2DestFilePath);
    try (FSDataInputStream iStream = fs1.open(testFilePath)) {
      long totalBytesRead = 0;
      do {
        int length = 8 * ONE_MB;
        byte[] buffer = new byte[length];
        int bytesRead = iStream.read(buffer, 0, length);
        totalBytesRead += bytesRead;
        // write only the bytes actually read, not the whole buffer
        ops.write(buffer, 0, bytesRead);
      } while (totalBytesRead < fileContent.length);
      ops.close();
    }

    // Trying to read fs2DestFilePath with different CPK headers
    conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName);
    conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName,
        "different-1234567890123456789012");
    try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem
        .get(conf); FSDataInputStream iStream = fs3.open(fs2DestFilePath)) {
      int length = 8 * ONE_MB;
      byte[] buffer = new byte[length];
      LambdaTestUtils.intercept(IOException.class, () -> {
        iStream.read(buffer, 0, length);
      });
    }

    // Trying to read fs2DestFilePath with no CPK headers
    conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName);
    try (AzureBlobFileSystem fs4 = (AzureBlobFileSystem) FileSystem
        .get(conf); FSDataInputStream iStream = fs4.open(fs2DestFilePath)) {
      int length = 8 * ONE_MB;
      byte[] buffer = new byte[length];
      LambdaTestUtils.intercept(IOException.class, () -> {
        iStream.read(buffer, 0, length);
      });
    }

    // Read fs2DestFilePath and verify its content against the random bytes
    // originally written to the source file on fs1
    try (FSDataInputStream iStream = fs2.open(fs2DestFilePath)) {
      long totalBytesRead = 0;
      int pos = 0;
      do {
        int length = 8 * ONE_MB;
        byte[] buffer = new byte[length];
        int bytesRead = iStream.read(buffer, 0, length);
        totalBytesRead += bytesRead;
        for (int i = 0; i < bytesRead; i++) {
          assertEquals(fileContent[pos + i], buffer[i]);
        }
        pos = pos + bytesRead;
      } while (totalBytesRead < fileContent.length);
    }
  }
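The chunked loop above is spelled out so the test controls the buffer size of each read; when that control is not needed, the same copy can be done with Hadoop's stock stream helper. A sketch reusing the variables above (illustration only, not part of this commit):

// Equivalent one-call copy in 8 MB chunks; the final 'true' closes both streams.
org.apache.hadoop.io.IOUtils.copyBytes(
    fs1.open(testFilePath), fs2.create(fs2DestFilePath), 8 * ONE_MB, true);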
  @Test
  public void testListPathWithCPK() throws Exception {
    testListPath(true);
  }

  @Test
  public void testListPathWithoutCPK() throws Exception {
    testListPath(false);
  }

  private void testListPath(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final Path testPath = path("/" + methodName.getMethodName());
    String testDirName = testPath.toUri().getPath();
    fs.mkdirs(testPath);
    createFileAndGetContent(fs, testDirName + "/aaa", FILE_SIZE);
    createFileAndGetContent(fs, testDirName + "/bbb", FILE_SIZE);
    AbfsClient abfsClient = fs.getAbfsClient();
    AbfsRestOperation abfsRestOperation = abfsClient
        .listPath(testDirName, false, INT_50, null,
            getTestTracingContext(fs, false));
    assertListstatus(fs, abfsRestOperation, testPath);

    // Trying with different CPK headers
    Configuration conf = fs.getConf();
    String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME);
    conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName,
        "different-1234567890123456789012");
    AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf);
    AbfsClient abfsClient2 = fs2.getAbfsClient();
    TracingContext tracingContext = getTestTracingContext(fs, false);
    abfsRestOperation = abfsClient2.listPath(testDirName, false, INT_50,
        null, tracingContext);
    assertListstatus(fs, abfsRestOperation, testPath);

    if (isWithCPK) {
      // Trying with no CPK headers
      conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName);
      AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem.get(conf);
      AbfsClient abfsClient3 = fs3.getAbfsClient();
      abfsRestOperation = abfsClient3
          .listPath(testDirName, false, INT_50, null, tracingContext);
      assertListstatus(fs, abfsRestOperation, testPath);
    }
  }

  private void assertListstatus(AzureBlobFileSystem fs,
      AbfsRestOperation abfsRestOperation, Path testPath) throws IOException {
    assertCPKHeaders(abfsRestOperation, false);
    assertNoCPKResponseHeadersPresent(abfsRestOperation);

    FileStatus[] listStatuses = fs.listStatus(testPath);
    Assertions.assertThat(listStatuses.length)
        .describedAs("listStatuses should have 2 entries").isEqualTo(2);

    listStatuses = getSameFSWithWrongCPK(fs).listStatus(testPath);
    Assertions.assertThat(listStatuses.length)
        .describedAs("listStatuses should have 2 entries").isEqualTo(2);
  }
  @Test
  public void testCreatePathWithCPK() throws Exception {
    testCreatePath(true);
  }

  @Test
  public void testCreatePathWithoutCPK() throws Exception {
    testCreatePath(false);
  }

  private void testCreatePath(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    createFileAndGetContent(fs, testFileName, FILE_SIZE);

    AbfsClient abfsClient = fs.getAbfsClient();
    FsPermission permission = new FsPermission(FsAction.EXECUTE,
        FsAction.EXECUTE, FsAction.EXECUTE);
    FsPermission umask = new FsPermission(FsAction.NONE, FsAction.NONE,
        FsAction.NONE);
    TracingContext tracingContext = getTestTracingContext(fs, false);
    boolean isNamespaceEnabled = fs.getIsNamespaceEnabled(tracingContext);
    AbfsRestOperation abfsRestOperation = abfsClient
        .createPath(testFileName, true, true,
            isNamespaceEnabled ? getOctalNotation(permission) : null,
            isNamespaceEnabled ? getOctalNotation(umask) : null, false, null,
            tracingContext);
    assertCPKHeaders(abfsRestOperation, isWithCPK);
    assertResponseHeader(abfsRestOperation, isWithCPK,
        X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs));
    assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, "");
    assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED,
        "true");

    FileStatus[] listStatuses = fs.listStatus(new Path(testFileName));
    Assertions.assertThat(listStatuses.length)
        .describedAs("listStatuses should have 1 entry").isEqualTo(1);

    listStatuses = getSameFSWithWrongCPK(fs).listStatus(new Path(testFileName));
    Assertions.assertThat(listStatuses.length)
        .describedAs("listStatuses should have 1 entry").isEqualTo(1);
  }
  @Test
  public void testRenamePathWithCPK() throws Exception {
    testRenamePath(true);
  }

  @Test
  public void testRenamePathWithoutCPK() throws Exception {
    testRenamePath(false);
  }

  private void testRenamePath(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    createFileAndGetContent(fs, testFileName, FILE_SIZE);

    FileStatus fileStatusBeforeRename = fs
        .getFileStatus(new Path(testFileName));

    String newName = "/newName";
    AbfsClient abfsClient = fs.getAbfsClient();
    boolean isNamespaceEnabled =
        fs.getIsNamespaceEnabled(getTestTracingContext(fs, false));
    AbfsRestOperation abfsRestOperation = abfsClient
        .renamePath(testFileName, newName, null,
            getTestTracingContext(fs, false), null, false, isNamespaceEnabled)
        .getOp();
    assertCPKHeaders(abfsRestOperation, false);
    assertNoCPKResponseHeadersPresent(abfsRestOperation);

    LambdaTestUtils.intercept(FileNotFoundException.class,
        (() -> fs.getFileStatus(new Path(testFileName))));

    FileStatus fileStatusAfterRename = fs.getFileStatus(new Path(newName));
    Assertions.assertThat(fileStatusAfterRename.getLen())
        .describedAs("File size has to be same before and after rename")
        .isEqualTo(fileStatusBeforeRename.getLen());
  }
  @Test
  public void testFlushWithCPK() throws Exception {
    testFlush(true);
  }

  @Test
  public void testFlushWithoutCPK() throws Exception {
    testFlush(false);
  }

  private void testFlush(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    fs.create(new Path(testFileName)).close();
    AbfsClient abfsClient = fs.getAbfsClient();
    String expectedCPKSha = getCPKSha(fs);

    byte[] fileContent = getRandomBytesArray(FILE_SIZE);
    Path testFilePath = new Path(testFileName + "1");
    try (FSDataOutputStream oStream = fs.create(testFilePath)) {
      oStream.write(fileContent);
    }

    // Trying to flush with different CPK headers
    Configuration conf = fs.getConf();
    String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME);
    conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName,
        "different-1234567890123456789012");
    try (AzureBlobFileSystem fs2 = (AzureBlobFileSystem) FileSystem.newInstance(conf);
        AbfsClient abfsClient2 = fs2.getAbfsClient()) {
      LambdaTestUtils.intercept(IOException.class, () -> {
        abfsClient2.flush(testFileName, 0, false, false, null, null,
            getTestTracingContext(fs, false));
      });
    }

    // Trying to flush with no CPK headers
    if (isWithCPK) {
      conf.unset(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName);
      try (AzureBlobFileSystem fs3 = (AzureBlobFileSystem) FileSystem
          .get(conf); AbfsClient abfsClient3 = fs3.getAbfsClient()) {
        LambdaTestUtils.intercept(IOException.class, () -> {
          abfsClient3.flush(testFileName, 0, false, false, null, null,
              getTestTracingContext(fs, false));
        });
      }
    }

    // With correct CPK
    AbfsRestOperation abfsRestOperation = abfsClient
        .flush(testFileName, 0, false, false, null, null,
            getTestTracingContext(fs, false));
    assertCPKHeaders(abfsRestOperation, isWithCPK);
    assertResponseHeader(abfsRestOperation, isWithCPK,
        X_MS_ENCRYPTION_KEY_SHA256, expectedCPKSha);
    assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, "");
    assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED,
        isWithCPK + "");
  }
  @Test
  public void testSetPathPropertiesWithCPK() throws Exception {
    testSetPathProperties(true);
  }

  @Test
  public void testSetPathPropertiesWithoutCPK() throws Exception {
    testSetPathProperties(false);
  }

  private void testSetPathProperties(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    createFileAndGetContent(fs, testFileName, FILE_SIZE);

    AbfsClient abfsClient = fs.getAbfsClient();
    final Hashtable<String, String> properties = new Hashtable<>();
    properties.put("key", "val");
    AbfsRestOperation abfsRestOperation = abfsClient
        .setPathProperties(testFileName,
            convertXmsPropertiesToCommaSeparatedString(properties),
            getTestTracingContext(fs, false));
    assertCPKHeaders(abfsRestOperation, isWithCPK);
    assertResponseHeader(abfsRestOperation, isWithCPK,
        X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs));
    assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, "");
    assertResponseHeader(abfsRestOperation, true, X_MS_REQUEST_SERVER_ENCRYPTED,
        "true");
  }
  @Test
  public void testGetPathStatusFileWithCPK() throws Exception {
    testGetPathStatusFile(true);
  }

  @Test
  public void testGetPathStatusFileWithoutCPK() throws Exception {
    testGetPathStatusFile(false);
  }

  private void testGetPathStatusFile(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    createFileAndGetContent(fs, testFileName, FILE_SIZE);

    AbfsClient abfsClient = fs.getAbfsClient();
    TracingContext tracingContext = getTestTracingContext(fs, false);
    AbfsRestOperation abfsRestOperation = abfsClient
        .getPathStatus(testFileName, false, tracingContext);
    assertCPKHeaders(abfsRestOperation, false);
    assertResponseHeader(abfsRestOperation, isWithCPK,
        X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs));
    assertResponseHeader(abfsRestOperation, true, X_MS_SERVER_ENCRYPTED,
        "true");
    assertResponseHeader(abfsRestOperation, false,
        X_MS_REQUEST_SERVER_ENCRYPTED, "");

    abfsRestOperation = abfsClient.getPathStatus(testFileName, true, tracingContext);
    assertCPKHeaders(abfsRestOperation, isWithCPK);
    assertResponseHeader(abfsRestOperation, isWithCPK,
        X_MS_ENCRYPTION_KEY_SHA256, getCPKSha(fs));
    assertResponseHeader(abfsRestOperation, true, X_MS_SERVER_ENCRYPTED,
        "true");
    assertResponseHeader(abfsRestOperation, false,
        X_MS_REQUEST_SERVER_ENCRYPTED, "");
  }
  @Test
  public void testDeletePathWithCPK() throws Exception {
    testDeletePath(true);
  }

  @Test
  public void testDeletePathWithoutCPK() throws Exception {
    testDeletePath(false);
  }

  private void testDeletePath(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    createFileAndGetContent(fs, testFileName, FILE_SIZE);

    FileStatus[] listStatuses = fs.listStatus(new Path(testFileName));
    Assertions.assertThat(listStatuses.length)
        .describedAs("listStatuses should have 1 entry").isEqualTo(1);

    AbfsClient abfsClient = fs.getAbfsClient();
    AbfsRestOperation abfsRestOperation = abfsClient
        .deletePath(testFileName, false, null,
            getTestTracingContext(fs, false));
    assertCPKHeaders(abfsRestOperation, false);
    assertNoCPKResponseHeadersPresent(abfsRestOperation);

    Assertions.assertThatThrownBy(() -> fs.listStatus(new Path(testFileName)))
        .isInstanceOf(FileNotFoundException.class);
  }
  @Test
  public void testSetPermissionWithCPK() throws Exception {
    testSetPermission(true);
  }

  @Test
  public void testSetPermissionWithoutCPK() throws Exception {
    testSetPermission(false);
  }

  private void testSetPermission(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    Assume.assumeTrue(fs.getIsNamespaceEnabled(getTestTracingContext(fs, false)));
    createFileAndGetContent(fs, testFileName, FILE_SIZE);
    AbfsClient abfsClient = fs.getAbfsClient();
    FsPermission permission = new FsPermission(FsAction.EXECUTE,
        FsAction.EXECUTE, FsAction.EXECUTE);
    AbfsRestOperation abfsRestOperation = abfsClient
        .setPermission(testFileName, permission.toString(),
            getTestTracingContext(fs, false));
    assertCPKHeaders(abfsRestOperation, false);
    assertNoCPKResponseHeadersPresent(abfsRestOperation);
  }
  @Test
  public void testSetAclWithCPK() throws Exception {
    testSetAcl(true);
  }

  @Test
  public void testSetAclWithoutCPK() throws Exception {
    testSetAcl(false);
  }

  private void testSetAcl(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    TracingContext tracingContext = getTestTracingContext(fs, false);
    Assume.assumeTrue(fs.getIsNamespaceEnabled(tracingContext));
    createFileAndGetContent(fs, testFileName, FILE_SIZE);
    AbfsClient abfsClient = fs.getAbfsClient();

    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, ALL));
    final Map<String, String> aclEntries = AbfsAclHelper
        .deserializeAclSpec(AclEntry.aclSpecToString(aclSpec));

    AbfsRestOperation abfsRestOperation = abfsClient
        .setAcl(testFileName, AbfsAclHelper.serializeAclSpec(aclEntries),
            tracingContext);
    assertCPKHeaders(abfsRestOperation, false);
    assertNoCPKResponseHeadersPresent(abfsRestOperation);
  }
  @Test
  public void testGetAclWithCPK() throws Exception {
    testGetAcl(true);
  }

  @Test
  public void testGetAclWithoutCPK() throws Exception {
    testGetAcl(false);
  }

  private void testGetAcl(final boolean isWithCPK) throws Exception {
    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    TracingContext tracingContext = getTestTracingContext(fs, false);
    Assume.assumeTrue(fs.getIsNamespaceEnabled(tracingContext));
    createFileAndGetContent(fs, testFileName, FILE_SIZE);
    AbfsClient abfsClient = fs.getAbfsClient();
    AbfsRestOperation abfsRestOperation =
        abfsClient.getAclStatus(testFileName, tracingContext);
    assertCPKHeaders(abfsRestOperation, false);
    assertNoCPKResponseHeadersPresent(abfsRestOperation);
  }
  @Test
  public void testCheckAccessWithCPK() throws Exception {
    testCheckAccess(true);
  }

  @Test
  public void testCheckAccessWithoutCPK() throws Exception {
    testCheckAccess(false);
  }

  private void testCheckAccess(final boolean isWithCPK) throws Exception {
    boolean isHNSEnabled = getConfiguration()
        .getBoolean(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT, false);
    Assume.assumeTrue(FS_AZURE_TEST_NAMESPACE_ENABLED_ACCOUNT + " is false",
        isHNSEnabled);
    Assume.assumeTrue("AuthType has to be OAuth",
        getAuthType() == AuthType.OAuth);

    final AzureBlobFileSystem fs = getAbfs(isWithCPK);
    final String testFileName = getFileName();
    fs.create(new Path(testFileName)).close();
    AbfsClient abfsClient = fs.getAbfsClient();
    AbfsRestOperation abfsRestOperation = abfsClient
        .checkAccess(testFileName, "rwx", getTestTracingContext(fs, false));
    assertCPKHeaders(abfsRestOperation, false);
    assertNoCPKResponseHeadersPresent(abfsRestOperation);
  }
  private byte[] createFileAndGetContent(AzureBlobFileSystem fs,
      String fileName, int fileSize) throws IOException {
    byte[] fileContent = getRandomBytesArray(fileSize);
    Path testFilePath = createFileWithContent(fs, fileName, fileContent);
    ContractTestUtils.verifyFileContents(fs, testFilePath, fileContent);
    return fileContent;
  }
  private void assertCPKHeaders(AbfsRestOperation abfsRestOperation,
      boolean isCPKHeaderExpected) {
    assertHeader(abfsRestOperation, X_MS_ENCRYPTION_KEY, isCPKHeaderExpected);
    assertHeader(abfsRestOperation, X_MS_ENCRYPTION_KEY_SHA256,
        isCPKHeaderExpected);
    assertHeader(abfsRestOperation, X_MS_ENCRYPTION_ALGORITHM,
        isCPKHeaderExpected);
  }

  private void assertNoCPKResponseHeadersPresent(
      AbfsRestOperation abfsRestOperation) {
    assertResponseHeader(abfsRestOperation, false, X_MS_SERVER_ENCRYPTED, "");
    assertResponseHeader(abfsRestOperation, false,
        X_MS_REQUEST_SERVER_ENCRYPTED, "");
    assertResponseHeader(abfsRestOperation, false, X_MS_ENCRYPTION_KEY_SHA256,
        "");
  }

  private void assertResponseHeader(AbfsRestOperation abfsRestOperation,
      boolean isHeaderExpected, String headerName, String expectedValue) {
    final AbfsHttpOperation result = abfsRestOperation.getResult();
    final String value = result.getResponseHeader(headerName);
    if (isHeaderExpected) {
      Assertions.assertThat(value).isEqualTo(expectedValue);
    } else {
      Assertions.assertThat(value).isNull();
    }
  }

  private void assertHeader(AbfsRestOperation abfsRestOperation,
      String headerName, boolean isCPKHeaderExpected) {
    assertTrue(abfsRestOperation != null);
    Optional<AbfsHttpHeader> header = abfsRestOperation.getRequestHeaders()
        .stream().filter(abfsHttpHeader -> abfsHttpHeader.getName()
            .equalsIgnoreCase(headerName)).findFirst();
    String desc;
    if (isCPKHeaderExpected) {
      desc = "CPK header " + headerName + " is expected, but absent.";
    } else {
      desc = "CPK header " + headerName + " is not expected, but present.";
    }
    Assertions.assertThat(header.isPresent()).describedAs(desc)
        .isEqualTo(isCPKHeaderExpected);
  }
  private byte[] getSHA256Hash(String key) throws IOException {
    try {
      final MessageDigest digester = MessageDigest.getInstance("SHA-256");
      return digester.digest(key.getBytes(StandardCharsets.UTF_8));
    } catch (NoSuchAlgorithmException e) {
      throw new IOException(e);
    }
  }

  private String getCPKSha(final AzureBlobFileSystem abfs) throws IOException {
    Configuration conf = abfs.getConf();
    String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME);
    String encryptionKey = conf
        .get(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName);
    if (encryptionKey == null || encryptionKey.isEmpty()) {
      return "";
    }
    return getBase64EncodedString(getSHA256Hash(encryptionKey));
  }

  private String getBase64EncodedString(byte[] bytes) {
    return java.util.Base64.getEncoder().encodeToString(bytes);
  }
  private Path createFileWithContent(FileSystem fs, String fileName,
      byte[] fileContent) throws IOException {
    Path testFilePath = new Path(fileName);
    try (FSDataOutputStream oStream = fs.create(testFilePath)) {
      oStream.write(fileContent);
      oStream.flush();
    }
    return testFilePath;
  }
  private String convertXmsPropertiesToCommaSeparatedString(
      final Hashtable<String, String> properties)
      throws CharacterCodingException {
    StringBuilder commaSeparatedProperties = new StringBuilder();
    final CharsetEncoder encoder = Charset.forName(XMS_PROPERTIES_ENCODING)
        .newEncoder();
    for (Map.Entry<String, String> propertyEntry : properties.entrySet()) {
      String key = propertyEntry.getKey();
      String value = propertyEntry.getValue();
      boolean canEncodeValue = encoder.canEncode(value);
      if (!canEncodeValue) {
        throw new CharacterCodingException();
      }
      String encodedPropertyValue = Base64
          .encode(encoder.encode(CharBuffer.wrap(value)).array());
      commaSeparatedProperties.append(key).append(AbfsHttpConstants.EQUAL)
          .append(encodedPropertyValue);
      commaSeparatedProperties.append(AbfsHttpConstants.COMMA);
    }
    if (commaSeparatedProperties.length() != 0) {
      commaSeparatedProperties
          .deleteCharAt(commaSeparatedProperties.length() - 1);
    }
    return commaSeparatedProperties.toString();
  }
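A worked example of the serialization above, assuming XMS_PROPERTIES_ENCODING names a Latin-1 style charset: the table {"key" -> "val"} serializes to "key=dmFs", since base64 of the encoded value "val" is "dmFs"; {"a" -> "1", "b" -> "2"} serializes to "a=MQ==,b=Mg==", with the trailing comma trimmed by the final deleteCharAt.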
  private String getOctalNotation(FsPermission fsPermission) {
    Preconditions.checkNotNull(fsPermission, "fsPermission");
    return String
        .format(AbfsHttpConstants.PERMISSION_FORMAT, fsPermission.toOctal());
  }

  private byte[] getRandomBytesArray(int length) {
    final byte[] b = new byte[length];
    new Random().nextBytes(b);
    return b;
  }
  private AzureBlobFileSystem getAbfs(boolean withCPK) throws IOException {
    return getAbfs(withCPK, "12345678901234567890123456789012");
  }

  private AzureBlobFileSystem getAbfs(boolean withCPK, String cpk)
      throws IOException {
    Configuration conf = getRawConfiguration();
    if (withCPK) {
      conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + getAccountName(),
          cpk);
    } else {
      conf.unset(
          FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + getAccountName());
    }
    return (AzureBlobFileSystem) FileSystem.newInstance(conf);
  }
  private AzureBlobFileSystem getSameFSWithWrongCPK(
      final AzureBlobFileSystem fs) throws IOException {
    AbfsConfiguration abfsConf = fs.getAbfsStore().getAbfsConfiguration();
    Configuration conf = abfsConf.getRawConfiguration();
    String accountName = conf.get(FS_AZURE_ABFS_ACCOUNT_NAME);
    String cpk = conf
        .get(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName);
    if (cpk == null || cpk.isEmpty()) {
      cpk = "01234567890123456789012345678912";
    }
    cpk = "different-" + cpk;
    String differentCpk = cpk.substring(0, ENCRYPTION_KEY_LEN - 1);
    conf.set(FS_AZURE_CLIENT_PROVIDED_ENCRYPTION_KEY + "." + accountName,
        differentCpk);
    conf.set("fs.defaultFS",
        "abfs://" + getFileSystemName() + "@" + accountName);
    AzureBlobFileSystem sameFSWithDifferentCPK =
        (AzureBlobFileSystem) FileSystem.newInstance(conf);
    return sameFSWithDifferentCPK;
  }

}
@@ -100,17 +100,14 @@ public void checkCorrelationConfigValidation(String clientCorrelationId,
         TracingHeaderFormat.ALL_ID_FORMAT, null);
     boolean isNamespaceEnabled = fs.getIsNamespaceEnabled(tracingContext);
     String path = getRelativePath(new Path("/testDir"));
-    String permission = isNamespaceEnabled
-        ? getOctalNotation(FsPermission.getDirDefault())
-        : null;
-    String umask = isNamespaceEnabled
-        ? getOctalNotation(FsPermission.getUMask(fs.getConf()))
-        : null;
+    AzureBlobFileSystemStore.Permissions permissions
+        = new AzureBlobFileSystemStore.Permissions(isNamespaceEnabled,
+        FsPermission.getDefault(), FsPermission.getUMask(fs.getConf()));

     //request should not fail for invalid clientCorrelationID
     AbfsRestOperation op = fs.getAbfsClient()
-        .createPath(path, false, true, permission, umask, false, null,
+        .createPath(path, false, true, permissions, false, null, null,
             tracingContext);

     int statusCode = op.getResult().getStatusCode();
     Assertions.assertThat(statusCode).describedAs("Request should not fail")
@@ -64,6 +64,7 @@ public final class TestConfigurationKeys {
   public static final String TEST_CONFIGURATION_FILE_NAME = "azure-test.xml";
   public static final String TEST_CONTAINER_PREFIX = "abfs-testcontainer-";
   public static final int TEST_TIMEOUT = 15 * 60 * 1000;
+  public static final int ENCRYPTION_KEY_LEN = 32;

   private TestConfigurationKeys() {}
 }
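The new ENCRYPTION_KEY_LEN constant is 32 because client-provided keys are AES-256 keys, and AES-256 uses a 256-bit, i.e. 32-byte, key; the tests above size their key literals and random key buffers from it.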
@@ -0,0 +1,71 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.azurebfs.extensions;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Random;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azurebfs.security.ABFSKey;

import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.ENCRYPTION_KEY_LEN;

public class MockEncryptionContextProvider implements EncryptionContextProvider {

  private HashMap<String, String> pathToContextMap = new HashMap<>();

  private HashMap<String, byte[]> contextToKeyByteMap = new HashMap<>();

  @Override
  public void initialize(Configuration configuration, String accountName,
      String fileSystem) throws IOException {
  }

  @Override
  public ABFSKey getEncryptionContext(String path)
      throws IOException {
    String newContext = UUID.randomUUID().toString();
    pathToContextMap.put(path, newContext);
    byte[] newKey = new byte[ENCRYPTION_KEY_LEN];
    new Random().nextBytes(newKey);
    ABFSKey key = new ABFSKey(newKey);
    contextToKeyByteMap.put(newContext, key.getEncoded());
    return new ABFSKey(newContext.getBytes(StandardCharsets.UTF_8));
  }

  @Override
  public ABFSKey getEncryptionKey(String path, ABFSKey encryptionContext) throws IOException {
    String encryptionContextString =
        new String(encryptionContext.getEncoded(), StandardCharsets.UTF_8);
    if (!encryptionContextString.equals(pathToContextMap.get(path))) {
      throw new IOException("encryption context does not match path");
    }
    return new ABFSKey(contextToKeyByteMap.get(encryptionContextString));
  }

  public byte[] getEncryptionKeyForTest(String encryptionContext) {
    return contextToKeyByteMap.get(encryptionContext);
  }

  public String getEncryptionContextForTest(String path) {
    return pathToContextMap.get(path);
  }
}
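To exercise this mock in a test, the provider class has to be registered through the account-specific encryption context provider setting; a sketch of the wiring (the config key string below is an assumption for illustration, not taken from this diff):

// Hypothetical wiring: register the mock provider for the test account.
// The exact config key name is an assumption here.
Configuration conf = getRawConfiguration();
conf.set("fs.azure.encryption.context.provider.type" + "." + getAccountName(),
    MockEncryptionContextProvider.class.getCanonicalName());
AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.newInstance(conf);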
@@ -0,0 +1,34 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.azurebfs.services;

import org.apache.hadoop.fs.azurebfs.extensions.EncryptionContextProvider;

public final class AbfsClientUtils {
  private AbfsClientUtils() {

  }

  public static void setIsNamespaceEnabled(final AbfsClient abfsClient, final Boolean isNamespaceEnabled) {
    abfsClient.setIsNamespaceEnabled(isNamespaceEnabled);
  }

  public static void setEncryptionContextProvider(final AbfsClient abfsClient, final EncryptionContextProvider provider) {
    abfsClient.setEncryptionContextProvider(provider);
  }
}
@@ -135,7 +135,7 @@ private String getUserAgentString(AbfsConfiguration config,
       boolean includeSSLProvider) throws IOException {
     AbfsClientContext abfsClientContext = new AbfsClientContextBuilder().build();
     AbfsClient client = new AbfsClient(new URL("https://azure.com"), null,
-        config, (AccessTokenProvider) null, abfsClientContext);
+        config, (AccessTokenProvider) null, null, abfsClientContext);
     String sslProviderName = null;
     if (includeSSLProvider) {
       sslProviderName = DelegatingSSLSocketFactory.getDefaultFactory()
@@ -341,6 +341,7 @@ public static AbfsClient createTestClientFromCurrentContext(
         (currentAuthType == AuthType.OAuth
             ? abfsConfig.getTokenProvider()
             : null),
+        null,
         abfsClientContext);

     return testClient;
@@ -384,6 +385,10 @@ public static AbfsClient getMockAbfsClient(AbfsClient baseAbfsClientInstance,
     client = ITestAbfsClient.setAbfsClientField(client, "baseUrl",
         baseAbfsClientInstance.getBaseUrl());

+    // override xMsVersion
+    client = ITestAbfsClient.setAbfsClientField(client, "xMsVersion",
+        baseAbfsClientInstance.getxMsVersion());
+
     // override auth provider
     if (currentAuthType == AuthType.SharedKey) {
       client = ITestAbfsClient.setAbfsClientField(client, "sharedKeyCredentials",
@@ -608,7 +613,7 @@ public void testExpectHundredContinue() throws Exception {
         .isTrue();

     intercept(AzureBlobFileSystemException.class,
-        () -> testClient.append(finalTestPath, buffer, appendRequestParameters, null, tracingContext));
+        () -> testClient.append(finalTestPath, buffer, appendRequestParameters, null, null, tracingContext));

     // Verify that the request was not exponentially retried because of user error.
     Assertions.assertThat(tracingContext.getRetryCount())
@@ -39,9 +39,9 @@
 import org.apache.hadoop.fs.azurebfs.AbstractAbfsIntegrationTest;
 import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem;
 import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystemStore;
-import org.apache.hadoop.fs.azurebfs.contracts.exceptions.AzureBlobFileSystemException;
 import org.apache.hadoop.fs.azurebfs.contracts.exceptions.TimeoutException;
 import org.apache.hadoop.fs.azurebfs.contracts.services.ReadBufferStatus;
+import org.apache.hadoop.fs.azurebfs.security.ContextEncryptionAdapter;
 import org.apache.hadoop.fs.azurebfs.utils.TestCachedSASToken;
 import org.apache.hadoop.fs.azurebfs.utils.TracingContext;
 import org.apache.hadoop.fs.impl.OpenFileParameters;
@@ -49,6 +49,7 @@
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.nullable;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
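The nullable import added here matters for the hunks that follow: in Mockito 2 and later, the typed matcher any(SomeClass.class) does not match a null argument, while nullable(SomeClass.class) and the untyped any() both do, and the encryption-adapter parameter threaded through read() and getPathStatus() may legitimately be null. A minimal standalone illustration (not from this patch):

import java.util.List;
import static org.mockito.ArgumentMatchers.nullable;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

List<String> list = mock(List.class);
list.add(null);
// any(String.class) would NOT match the null argument; nullable(String.class) does.
verify(list).add(nullable(String.class));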
@@ -168,14 +169,14 @@ private void queueReadAheads(AbfsInputStream inputStream) {
         inputStream.getTracingContext());
   }

-  private void verifyReadCallCount(AbfsClient client, int count) throws
-      AzureBlobFileSystemException, InterruptedException {
+  private void verifyReadCallCount(AbfsClient client, int count)
+      throws IOException, InterruptedException {
     // ReadAhead threads are triggered asynchronously.
     // Wait a second before verifying the number of total calls.
     Thread.sleep(1000);
     verify(client, times(count)).read(any(String.class), any(Long.class),
         any(byte[].class), any(Integer.class), any(Integer.class),
-        any(String.class), any(String.class), any(TracingContext.class));
+        any(String.class), any(String.class), any(), any(TracingContext.class));
   }

   private void checkEvictedStatus(AbfsInputStream inputStream, int position, boolean expectedToThrowException)
@@ -241,7 +242,8 @@ private void checkGetPathStatusCalls(Path testFile, FileStatus fileStatus,
         .ofNullable(new OpenFileParameters().withStatus(fileStatus)), null, tracingContext);
     verify(mockClient, times(0).description((String.format(
         "FileStatus [from %s result] provided, GetFileStatus should not be invoked",
-        source)))).getPathStatus(anyString(), anyBoolean(), any(TracingContext.class));
+        source)))).getPathStatus(anyString(), anyBoolean(), any(TracingContext.class), any(
+        ContextEncryptionAdapter.class));

     // verify GetPathStatus invoked when FileStatus not provided
     abfsStore.openFileForRead(testFile,
@@ -249,7 +251,8 @@ private void checkGetPathStatusCalls(Path testFile, FileStatus fileStatus,
         tracingContext);
     verify(mockClient, times(1).description(
         "GetPathStatus should be invoked when FileStatus not provided"))
-        .getPathStatus(anyString(), anyBoolean(), any(TracingContext.class));
+        .getPathStatus(anyString(), anyBoolean(), any(TracingContext.class), nullable(
+        ContextEncryptionAdapter.class));

     Mockito.reset(mockClient); //clears invocation count for next test case
   }
@@ -330,7 +333,7 @@ public void testFailedReadAhead() throws Exception {
         .when(client)
         .read(any(String.class), any(Long.class), any(byte[].class),
             any(Integer.class), any(Integer.class), any(String.class),
-            any(String.class), any(TracingContext.class));
+            any(String.class), any(), any(TracingContext.class));

     AbfsInputStream inputStream = getAbfsInputStream(client, "testFailedReadAhead.txt");
@@ -364,7 +367,7 @@ public void testFailedReadAheadEviction() throws Exception {
         .when(client)
         .read(any(String.class), any(Long.class), any(byte[].class),
             any(Integer.class), any(Integer.class), any(String.class),
-            any(String.class), any(TracingContext.class));
+            any(String.class), any(), any(TracingContext.class));

     AbfsInputStream inputStream = getAbfsInputStream(client, "testFailedReadAheadEviction.txt");
@@ -409,7 +412,7 @@ public void testOlderReadAheadFailure() throws Exception {
         .when(client)
         .read(any(String.class), any(Long.class), any(byte[].class),
             any(Integer.class), any(Integer.class), any(String.class),
-            any(String.class), any(TracingContext.class));
+            any(String.class), any(), any(TracingContext.class));

     AbfsInputStream inputStream = getAbfsInputStream(client, "testOlderReadAheadFailure.txt");
@@ -463,7 +466,7 @@ public void testSuccessfulReadAhead() throws Exception {
         .when(client)
         .read(any(String.class), any(Long.class), any(byte[].class),
             any(Integer.class), any(Integer.class), any(String.class),
-            any(String.class), any(TracingContext.class));
+            any(String.class), any(), any(TracingContext.class));

     AbfsInputStream inputStream = getAbfsInputStream(client, "testSuccessfulReadAhead.txt");
     int beforeReadCompletedListSize = ReadBufferManager.getBufferManager().getCompletedReadListSize();
@@ -518,7 +521,8 @@ public void testStreamPurgeDuringReadAheadCallExecuting() throws Exception {
         .when(client)
         .read(any(String.class), any(Long.class), any(byte[].class),
             any(Integer.class), any(Integer.class), any(String.class),
-            any(String.class), any(TracingContext.class));
+            any(String.class), nullable(ContextEncryptionAdapter.class),
+            any(TracingContext.class));

     final ReadBufferManager readBufferManager
         = ReadBufferManager.getBufferManager();
@@ -584,7 +588,7 @@ public void testReadAheadManagerForFailedReadAhead() throws Exception {
         .when(client)
         .read(any(String.class), any(Long.class), any(byte[].class),
             any(Integer.class), any(Integer.class), any(String.class),
-            any(String.class), any(TracingContext.class));
+            any(String.class), any(), any(TracingContext.class));

     AbfsInputStream inputStream = getAbfsInputStream(client, "testReadAheadManagerForFailedReadAhead.txt");
@@ -637,7 +641,7 @@ public void testReadAheadManagerForOlderReadAheadFailure() throws Exception {
         .when(client)
         .read(any(String.class), any(Long.class), any(byte[].class),
             any(Integer.class), any(Integer.class), any(String.class),
-            any(String.class), any(TracingContext.class));
+            any(String.class), any(), any(TracingContext.class));

     AbfsInputStream inputStream = getAbfsInputStream(client, "testReadAheadManagerForOlderReadAheadFailure.txt");
@@ -691,7 +695,7 @@ public void testReadAheadManagerForSuccessfulReadAhead() throws Exception {
         .when(client)
         .read(any(String.class), any(Long.class), any(byte[].class),
             any(Integer.class), any(Integer.class), any(String.class),
-            any(String.class), any(TracingContext.class));
+            any(String.class), any(), any(TracingContext.class));

     AbfsInputStream inputStream = getAbfsInputStream(client, "testSuccessfulReadAhead.txt");
@@ -120,10 +120,11 @@ public void verifyShortWriteRequest() throws Exception {
     AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf);
     when(client.getAbfsPerfTracker()).thenReturn(tracker);
     when(client.append(anyString(), any(byte[].class),
-        any(AppendRequestParameters.class), any(), any(TracingContext.class)))
+        any(AppendRequestParameters.class), any(),
+        any(), any(TracingContext.class)))
         .thenReturn(op);
     when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(),
-        isNull(), any(TracingContext.class))).thenReturn(op);
+        isNull(), any(), any(TracingContext.class))).thenReturn(op);
 
     AbfsOutputStream out = new AbfsOutputStream(
         populateAbfsOutputStreamContext(
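The same widening applies to the write-path stubs: append and flush each gain one matcher immediately before the TracingContext. A hedged slot-by-slot sketch of the two stubs (the parameter meanings in the comments are assumptions based on the encryption-context feature, not names taken from this diff):

    // Hypothetical stubs annotating each argument slot; comments assumed.
    when(client.append(anyString(),            // path
        any(byte[].class),                     // data
        any(AppendRequestParameters.class),    // append request parameters
        any(),                                 // cached SAS token
        any(),                                 // new slot: encryption adapter (assumed)
        any(TracingContext.class)))
        .thenReturn(op);
    when(client.flush(anyString(),             // path
        anyLong(),                             // position
        anyBoolean(),                          // retain uncommitted data
        anyBoolean(),                          // close flag
        any(),                                 // cached SAS token
        isNull(),                              // lease id (assumed meaning)
        any(),                                 // new slot: encryption adapter (assumed)
        any(TracingContext.class)))
        .thenReturn(op);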
@@ -157,13 +158,13 @@ public void verifyShortWriteRequest() throws Exception {
         WRITE_SIZE, 0, 2 * WRITE_SIZE, APPEND_MODE, false, null, true);
 
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(),
+        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(),
         any(TracingContext.class));
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(), any(TracingContext.class));
     // confirm there were only 2 invocations in all
     verify(client, times(2)).append(
-        eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), any(), any(), any(), any(TracingContext.class));
   }
 
   /**
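Note the structure of these checks: the two refEq verifications pin the exact request parameters of each append, while the final times(2) verification relaxes every matcher so that any append call counts, capping the total number of invocations at two. A compact sketch of that aggregate check with the widened arity (arguments as in the diff above):

    // Fully-relaxed matchers: counts every append to PATH regardless of args.
    verify(client, times(2)).append(
        eq(PATH), any(byte[].class), any(), any(), any(),
        any(TracingContext.class));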
@@ -184,8 +185,8 @@ public void verifyWriteRequest() throws Exception {
         TracingHeaderFormat.ALL_ID_FORMAT, null);
 
     when(client.getAbfsPerfTracker()).thenReturn(tracker);
-    when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any(), any(TracingContext.class))).thenReturn(op);
-    when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull(), any(TracingContext.class))).thenReturn(op);
+    when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any(), any(), any(TracingContext.class))).thenReturn(op);
+    when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull(), any(), any(TracingContext.class))).thenReturn(op);
 
     AbfsOutputStream out = new AbfsOutputStream(
         populateAbfsOutputStreamContext(
@@ -211,16 +212,13 @@ public void verifyWriteRequest() throws Exception {
     AppendRequestParameters secondReqParameters = new AppendRequestParameters(
         BUFFER_SIZE, 0, 5*WRITE_SIZE-BUFFER_SIZE, APPEND_MODE, false, null, true);
 
-    verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(),
-        any(TracingContext.class));
-    verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(),
-        any(TracingContext.class));
+    verify(client, times(1)).append(eq(PATH), any(byte[].class),
+        refEq(firstReqParameters), any(), any(), any(TracingContext.class));
+    verify(client, times(1)).append(eq(PATH), any(byte[].class),
+        refEq(secondReqParameters), any(), any(), any(TracingContext.class));
     // confirm there were only 2 invocations in all
-    verify(client, times(2)).append(
-        eq(PATH), any(byte[].class), any(), any(),
-        any(TracingContext.class));
+    verify(client, times(2)).append(eq(PATH), any(byte[].class), any(), any(),
+        any(), any(TracingContext.class));
 
     ArgumentCaptor<String> acFlushPath = ArgumentCaptor.forClass(String.class);
     ArgumentCaptor<Long> acFlushPosition = ArgumentCaptor.forClass(Long.class);
@@ -231,7 +229,8 @@ public void verifyWriteRequest() throws Exception {
     ArgumentCaptor<String> acFlushSASToken = ArgumentCaptor.forClass(String.class);
 
     verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(),
-        acFlushSASToken.capture(), isNull(), acTracingContext.capture());
+        acFlushSASToken.capture(), isNull(), isNull(),
+        acTracingContext.capture());
     assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues());
     assertThat(Arrays.asList(Long.valueOf(5*WRITE_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues());
     assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues());
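The flush verification now expects two consecutive nulls after the captured SAS token: these tests configure neither a lease nor an encryption context, so both the lease-id slot and the new slot are asserted to be null (the slot meanings are an assumption; the diff only fixes the positions). A sketch of the pattern:

    // Captor-based flush check; the two isNull() slots are annotated on assumption.
    verify(client, times(1)).flush(
        acFlushPath.capture(),
        acFlushPosition.capture(),
        acFlushRetainUnCommittedData.capture(),
        acFlushClose.capture(),
        acFlushSASToken.capture(),
        isNull(),          // lease id (assumed): no lease in these tests
        isNull(),          // encryption adapter (assumed): none configured
        acTracingContext.capture());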
@@ -257,8 +256,8 @@ public void verifyWriteRequestOfBufferSizeAndClose() throws Exception {
         FSOperationType.WRITE, abfsConf.getTracingHeaderFormat(), null);
 
     when(client.getAbfsPerfTracker()).thenReturn(tracker);
-    when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any(), any(TracingContext.class))).thenReturn(op);
-    when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull(), any(TracingContext.class))).thenReturn(op);
+    when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any(), any(), any(TracingContext.class))).thenReturn(op);
+    when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull(), any(), any(TracingContext.class))).thenReturn(op);
     when(op.getSasToken()).thenReturn("testToken");
     when(op.getResult()).thenReturn(httpOp);
 
@@ -287,12 +286,12 @@ public void verifyWriteRequestOfBufferSizeAndClose() throws Exception {
         BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null, true);
 
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(), any(TracingContext.class));
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(), any(TracingContext.class));
     // confirm there were only 2 invocations in all
     verify(client, times(2)).append(
-        eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), any(), any(), any(), any(TracingContext.class));
 
     ArgumentCaptor<String> acFlushPath = ArgumentCaptor.forClass(String.class);
     ArgumentCaptor<Long> acFlushPosition = ArgumentCaptor.forClass(Long.class);
@@ -303,7 +302,8 @@ public void verifyWriteRequestOfBufferSizeAndClose() throws Exception {
     ArgumentCaptor<String> acFlushSASToken = ArgumentCaptor.forClass(String.class);
 
     verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(),
-        acFlushSASToken.capture(), isNull(), acTracingContext.capture());
+        acFlushSASToken.capture(), isNull(), isNull(),
+        acTracingContext.capture());
     assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues());
     assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues());
     assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues());
@@ -327,10 +327,10 @@ public void verifyWriteRequestOfBufferSize() throws Exception {
 
     when(client.getAbfsPerfTracker()).thenReturn(tracker);
     when(client.append(anyString(), any(byte[].class),
-        any(AppendRequestParameters.class), any(), any(TracingContext.class)))
+        any(AppendRequestParameters.class), any(), any(), any(TracingContext.class)))
         .thenReturn(op);
     when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(),
-        any(), isNull(), any(TracingContext.class))).thenReturn(op);
+        any(), isNull(), any(), any(TracingContext.class))).thenReturn(op);
     when(op.getSasToken()).thenReturn("testToken");
     when(op.getResult()).thenReturn(httpOp);
 
@@ -361,12 +361,12 @@ public void verifyWriteRequestOfBufferSize() throws Exception {
         BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null, true);
 
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(), any(TracingContext.class));
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(), any(TracingContext.class));
     // confirm there were only 2 invocations in all
     verify(client, times(2)).append(
-        eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), any(), any(), any(), any(TracingContext.class));
   }
 
   /**
@@ -385,10 +385,10 @@ public void verifyWriteRequestOfBufferSizeWithAppendBlob() throws Exception {
 
     when(client.getAbfsPerfTracker()).thenReturn(tracker);
     when(client.append(anyString(), any(byte[].class),
-        any(AppendRequestParameters.class), any(), any(TracingContext.class)))
+        any(AppendRequestParameters.class), any(), any(), any(TracingContext.class)))
         .thenReturn(op);
     when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(),
-        isNull(), any(TracingContext.class))).thenReturn(op);
+        isNull(), any(), any(TracingContext.class))).thenReturn(op);
 
     AbfsOutputStream out = new AbfsOutputStream(
         populateAbfsOutputStreamContext(
@@ -417,12 +417,12 @@ public void verifyWriteRequestOfBufferSizeWithAppendBlob() throws Exception {
         BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, true, null, true);
 
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(), any(TracingContext.class));
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(), any(TracingContext.class));
     // confirm there were only 2 invocations in all
     verify(client, times(2)).append(
-        eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), any(), any(), any(), any(TracingContext.class));
   }
 
   /**
@@ -445,10 +445,10 @@ public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception {
 
     when(client.getAbfsPerfTracker()).thenReturn(tracker);
     when(client.append(anyString(), any(byte[].class),
-        any(AppendRequestParameters.class), any(), any(TracingContext.class)))
+        any(AppendRequestParameters.class), any(), any(), any(TracingContext.class)))
         .thenReturn(op);
     when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(),
-        isNull(), any(TracingContext.class))).thenReturn(op);
+        isNull(), any(), any(TracingContext.class))).thenReturn(op);
 
     AbfsOutputStream out = new AbfsOutputStream(
         populateAbfsOutputStreamContext(
@@ -477,12 +477,12 @@ public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception {
         BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null, true);
 
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(), any(TracingContext.class));
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(), any(TracingContext.class));
     // confirm there were only 2 invocations in all
     verify(client, times(2)).append(
-        eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), any(), any(), any(), any(TracingContext.class));
 
     ArgumentCaptor<String> acFlushPath = ArgumentCaptor.forClass(String.class);
     ArgumentCaptor<Long> acFlushPosition = ArgumentCaptor.forClass(Long.class);
@@ -493,7 +493,7 @@ public void verifyWriteRequestOfBufferSizeAndHFlush() throws Exception {
     ArgumentCaptor<String> acFlushSASToken = ArgumentCaptor.forClass(String.class);
 
     verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(),
-        acFlushSASToken.capture(), isNull(), acTracingContext.capture());
+        acFlushSASToken.capture(), isNull(), isNull(), acTracingContext.capture());
     assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues());
     assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues());
     assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues());
@@ -515,10 +515,10 @@ public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception {
     AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf);
     when(client.getAbfsPerfTracker()).thenReturn(tracker);
     when(client.append(anyString(), any(byte[].class),
-        any(AppendRequestParameters.class), any(), any(TracingContext.class)))
+        any(AppendRequestParameters.class), any(), any(), any(TracingContext.class)))
         .thenReturn(op);
     when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(),
-        isNull(), any(TracingContext.class))).thenReturn(op);
+        isNull(), any(), any(TracingContext.class))).thenReturn(op);
 
     AbfsOutputStream out = new AbfsOutputStream(
         populateAbfsOutputStreamContext(
@@ -549,12 +549,12 @@ public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception {
         BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null, true);
 
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(firstReqParameters), any(), any(), any(TracingContext.class));
     verify(client, times(1)).append(
-        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), refEq(secondReqParameters), any(), any(), any(TracingContext.class));
     // confirm there were only 2 invocations in all
     verify(client, times(2)).append(
-        eq(PATH), any(byte[].class), any(), any(), any(TracingContext.class));
+        eq(PATH), any(byte[].class), any(), any(), any(), any(TracingContext.class));
   }
 
   /**