HADOOP-15795. Make HTTPS the default protocol for ABFS.

Contributed by Da Zhou.
This commit is contained in:
Steve Loughran 2018-10-03 12:52:53 +01:00
parent 095c269620
commit 7051bd78b1
No known key found for this signature in database
GPG Key ID: D22CF846DBB162A0
11 changed files with 185 additions and 67 deletions

View File

@@ -162,6 +162,10 @@ public class AbfsConfiguration{
       DefaultValue = "")
   private String abfsExternalAuthorizationClass;

+  @BooleanConfigurationValidatorAnnotation(ConfigurationKey = FS_AZURE_ALWAYS_USE_HTTPS,
+      DefaultValue = DEFAULT_ENABLE_HTTPS)
+  private boolean alwaysUseHttps;
+
   private Map<String, String> storageAccountKeys;

   public AbfsConfiguration(final Configuration rawConfig, String accountName)
@@ -433,6 +437,10 @@ public AbfsDelegationTokenManager getDelegationTokenManager() throws IOException
     return new AbfsDelegationTokenManager(getRawConfiguration());
   }

+  public boolean isHttpsAlwaysUsed() {
+    return this.alwaysUseHttps;
+  }
+
   public AccessTokenProvider getTokenProvider() throws TokenAccessProviderException {
     AuthType authType = getEnum(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey);
     if (authType == AuthType.OAuth) {

View File

@@ -105,7 +105,7 @@ public void initialize(URI uri, Configuration configuration)
     this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
     this.userGroupInformation = UserGroupInformation.getCurrentUser();
     this.user = userGroupInformation.getUserName();
-    this.abfsStore = new AzureBlobFileSystemStore(uri, this.isSecure(), configuration, userGroupInformation);
+    this.abfsStore = new AzureBlobFileSystemStore(uri, this.isSecureScheme(), configuration, userGroupInformation);
     final AbfsConfiguration abfsConfiguration = abfsStore.getAbfsConfiguration();
     this.setWorkingDirectory(this.getHomeDirectory());
@@ -154,7 +154,7 @@ public String toString() {
     return sb.toString();
   }

-  public boolean isSecure() {
+  public boolean isSecureScheme() {
     return false;
   }

View File

@@ -115,7 +115,7 @@ public class AzureBlobFileSystemStore {
   private boolean isNamespaceEnabledSet;
   private boolean isNamespaceEnabled;

-  public AzureBlobFileSystemStore(URI uri, boolean isSecure, Configuration configuration, UserGroupInformation userGroupInformation)
+  public AzureBlobFileSystemStore(URI uri, boolean isSecureScheme, Configuration configuration, UserGroupInformation userGroupInformation)
           throws AzureBlobFileSystemException, IOException {
     this.uri = uri;
@@ -142,13 +142,11 @@ public AzureBlobFileSystemStore(URI uri, boolean isSecure, Configuration configu
     this.azureAtomicRenameDirSet = new HashSet<>(Arrays.asList(
         abfsConfiguration.getAzureAtomicRenameDirs().split(AbfsHttpConstants.COMMA)));

-    if (AuthType.OAuth == abfsConfiguration.getEnum(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey)
-        && !FileSystemUriSchemes.ABFS_SECURE_SCHEME.equals(uri.getScheme())) {
-      throw new IllegalArgumentException(
-            String.format("Incorrect URI %s, URI scheme must be abfss when authenticating using Oauth.", uri));
-    }
+    boolean usingOauth = (AuthType.OAuth == abfsConfiguration.getEnum(
+        FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.SharedKey));
+    boolean useHttps = (usingOauth || abfsConfiguration.isHttpsAlwaysUsed()) ? true : isSecureScheme;

-    initializeClient(uri, fileSystemName, accountName, isSecure);
+    initializeClient(uri, fileSystemName, accountName, useHttps);
   }

   private String[] authorityParts(URI uri) throws InvalidUriAuthorityException, InvalidUriException {

View File

@@ -28,7 +28,7 @@
 @InterfaceStability.Evolving
 public class SecureAzureBlobFileSystem extends AzureBlobFileSystem {
   @Override
-  public boolean isSecure() {
+  public boolean isSecureScheme() {
     return true;
   }

View File

@@ -48,6 +48,7 @@ public final class ConfigurationKeys {
   public static final String AZURE_CREATE_REMOTE_FILESYSTEM_DURING_INITIALIZATION = "fs.azure.createRemoteFileSystemDuringInitialization";
   public static final String AZURE_SKIP_USER_GROUP_METADATA_DURING_INITIALIZATION = "fs.azure.skipUserGroupMetadataDuringInitialization";
   public static final String FS_AZURE_ENABLE_AUTOTHROTTLING = "fs.azure.enable.autothrottling";
+  public static final String FS_AZURE_ALWAYS_USE_HTTPS = "fs.azure.always.use.https";
   public static final String FS_AZURE_ATOMIC_RENAME_KEY = "fs.azure.atomic.rename.key";
   public static final String FS_AZURE_READ_AHEAD_QUEUE_DEPTH = "fs.azure.readaheadqueue.depth";
   public static final String FS_AZURE_ENABLE_FLUSH = "fs.azure.enable.flush";

View File

@@ -63,5 +63,7 @@ public final class FileSystemConfigurations {
       = SSLSocketFactoryEx.SSLChannelMode.Default;
   public static final boolean DEFAULT_ENABLE_DELEGATION_TOKEN = false;
+  public static final boolean DEFAULT_ENABLE_HTTPS = true;

   private FileSystemConfigurations() {}
 }

View File

@@ -70,7 +70,7 @@ private FileStatus validateStatus(final AzureBlobFileSystem fs, final Path name,
     String errorInStatus = "error in " + fileStatus + " from " + fs;
     // When running with Oauth, the owner and group info retrieved from server will be digit ids.
-    if (this.getAuthType() != AuthType.OAuth && !fs.isSecure()) {
+    if (this.getAuthType() != AuthType.OAuth && !fs.isSecureScheme()) {
       assertEquals(errorInStatus + ": owner",
           fs.getOwnerUser(), fileStatus.getOwner());
       assertEquals(errorInStatus + ": group",

View File

@ -0,0 +1,101 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azurebfs;
import java.lang.reflect.Field;
import java.net.URL;
import java.util.Arrays;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
import org.apache.hadoop.fs.azurebfs.services.AuthType;
import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ALWAYS_USE_HTTPS;
/**
* Parameterized test of ABFS CLIENT URL scheme verification.
*/
@RunWith(Parameterized.class)
public class ITestClientUrlScheme extends AbstractAbfsIntegrationTest{

  /** Filesystem URI scheme under test: true -> abfss://, false -> abfs://. */
  @Parameterized.Parameter
  public boolean useSecureScheme;

  /** Value to set for the {@code fs.azure.always.use.https} configuration key. */
  @Parameterized.Parameter(1)
  public boolean alwaysUseHttps;

  @Parameterized.Parameters
  public static Iterable<Object[]> params() {
    // All four combinations of (useSecureScheme, alwaysUseHttps).
    return Arrays.asList(
        new Object[][]{
            {false, false},
            {false, true},
            {true, true},
            {true, false}
    });
  }

  public ITestClientUrlScheme() throws Exception {
    super();
    // authentication like OAUTH must use HTTPS, which would mask the
    // scheme/flag combinations this test exercises
    Assume.assumeTrue("ITestClientUrlScheme is skipped because auth type is not SharedKey",
        getAuthType() == AuthType.SharedKey);
  }

  /**
   * Verifies the protocol chosen by the AbfsClient for each combination of
   * URI scheme and the always-use-https flag: plain HTTP is used only for
   * abfs:// with the flag off; every other combination must use HTTPS.
   */
  @Test
  public void testClientUrlScheme() throws Exception {
    // swap in the scheme under test, keeping the rest of the test URL
    String[] urlWithoutScheme = this.getTestUrl().split(":");
    String fsUrl;
    if (useSecureScheme) {
      fsUrl = FileSystemUriSchemes.ABFS_SECURE_SCHEME + ":" + urlWithoutScheme[1];
    } else {
      fsUrl = FileSystemUriSchemes.ABFS_SCHEME + ":" + urlWithoutScheme[1];
    }

    Configuration config = getRawConfiguration();
    // fsUrl is already a String; the original redundant toString() is dropped
    config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsUrl);
    config.setBoolean(FS_AZURE_ALWAYS_USE_HTTPS, alwaysUseHttps);

    AbfsClient client = this.getFileSystem(config).getAbfsClient();

    // baseUrl is private to AbfsClient; read it reflectively to inspect
    // which protocol was actually selected
    Field baseUrlField = AbfsClient.class.getDeclaredField("baseUrl");
    baseUrlField.setAccessible(true);
    String url = ((URL) baseUrlField.get(client)).toString();

    // HTTP is enabled only when "abfs://XXX" is used and FS_AZURE_ALWAYS_USE_HTTPS
    // is set as false, otherwise HTTPS should be used.
    if (!useSecureScheme && !alwaysUseHttps) {
      Assert.assertTrue(url.startsWith(FileSystemUriSchemes.HTTP_SCHEME));
    } else {
      Assert.assertTrue(url.startsWith(FileSystemUriSchemes.HTTPS_SCHEME));
    }
  }
}

View File

@ -0,0 +1,63 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azurebfs;
import java.lang.reflect.Field;
import java.net.URL;
import org.junit.Assume;
import org.junit.Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
import org.apache.hadoop.fs.azurebfs.services.AbfsClient;
import org.apache.hadoop.fs.azurebfs.services.AuthType;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
/**
* Test Oauth fail fast when uri scheme is incorrect.
*/
public class ITestOauthOverAbfsScheme extends AbstractAbfsIntegrationTest {

  public ITestOauthOverAbfsScheme() throws Exception {
    Assume.assumeTrue("ITestOauthOverAbfsScheme is skipped because auth type is not OAuth",
        getAuthType() == AuthType.OAuth);
  }

  /**
   * Verifies that when OAuth auth is configured and the filesystem URI uses
   * the insecure abfs:// scheme, the client is nevertheless created with an
   * HTTPS base URL.
   */
  @Test
  public void testOauthOverSchemeAbfs() throws Exception {
    // rewrite the test URL to use the plain abfs:// scheme
    String[] urlWithoutScheme = this.getTestUrl().split(":");
    String fsUrl = FileSystemUriSchemes.ABFS_SCHEME + ":" + urlWithoutScheme[1];

    Configuration config = getRawConfiguration();
    // fsUrl is already a String; the original redundant toString() is dropped
    config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsUrl);

    AbfsClient client = this.getFileSystem(config).getAbfsClient();

    // baseUrl is private to AbfsClient; read it reflectively
    Field baseUrlField = AbfsClient.class.getDeclaredField("baseUrl");
    baseUrlField.setAccessible(true);
    String url = ((URL) baseUrlField.get(client)).toString();

    // This check is the point of the test, so it must FAIL, not be skipped,
    // when it does not hold. The original used Assume.assumeTrue here, which
    // silently skips the test on a violation; assert instead.
    if (!url.startsWith(FileSystemUriSchemes.HTTPS_SCHEME)) {
      throw new AssertionError(
          "OAuth authentication over scheme abfs must use HTTPS, but base URL was: " + url);
    }
  }
}

View File

@@ -56,7 +56,7 @@ public Path getTestPath() {
   public String toString() {
     final StringBuilder sb = new StringBuilder(
         "AbfsFileSystemContract{");
-    sb.append("isSecure=").append(isSecure);
+    sb.append("isSecureScheme=").append(isSecure);
     sb.append(super.toString());
     sb.append('}');
     return sb.toString();

View File

@ -1,55 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.azurebfs.services;
import java.net.URI;
import org.junit.Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.azurebfs.constants.FileSystemUriSchemes;
import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME;
import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.FS_AZURE_ABFS_ACCOUNT_NAME;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
/**
 * Test Oauth fail fast when uri scheme is incorrect.
 * Configures OAuth auth against a fake account with a default URI using the
 * plain abfs:// scheme, and expects filesystem creation to fail fast with
 * an IllegalArgumentException whose message contains "Incorrect URI".
 */
public class TestOauthFailOverHttp {

  @Test
  public void testOauthFailWithSchemeAbfs() throws Exception {
    Configuration conf = new Configuration();
    // fake account: no network access happens, init should fail before any call
    final String account = "fakeaccount.dfs.core.windows.net";
    conf.set(FS_AZURE_ABFS_ACCOUNT_NAME, account);
    conf.setEnum(FS_AZURE_ACCOUNT_AUTH_TYPE_PROPERTY_NAME, AuthType.OAuth);
    // build abfs://fakecontainer@fakeaccount... (insecure scheme on purpose)
    URI defaultUri = new URI(FileSystemUriSchemes.ABFS_SCHEME,
        "fakecontainer@" + account,
        null,
        null,
        null);
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultUri.toString());
    // IllegalArgumentException is expected
    // when authenticating using Oauth and scheme is not abfss
    intercept(IllegalArgumentException.class, "Incorrect URI",
        () -> FileSystem.get(conf));
  }
}