From f40e10b349ea3f67ad2a15911af5a12bcee38294 Mon Sep 17 00:00:00 2001
From: Surendra Singh Lilhore
Date: Tue, 15 Jan 2019 16:40:39 +0530
Subject: [PATCH] HDFS-14129. RBF: Create new policy provider for router.
 Contributed by Ranith Sardar.

---
 .../hadoop-common/src/main/conf/hadoop-policy.xml          | 10 ++++++++++
 .../org/apache/hadoop/fs/CommonConfigurationKeys.java      |  2 ++
 .../org/apache/hadoop/hdfs/protocol/HdfsConstants.java     |  5 +++++
 .../hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java      |  6 +++---
 .../server/federation/router/RouterAdminServer.java        | 10 ++++------
 .../hdfs/server/federation/router/RouterRpcServer.java     |  4 ++--
 .../hadoop/fs/contract/router/RouterHDFSContract.java      |  4 ++++
 7 files changed, 30 insertions(+), 11 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
index bd7c11124f..e1640f9754 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
@@ -109,6 +109,16 @@
     active and stand-by states of namenode.</description>
   </property>
 
+  <property>
+    <name>security.router.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RouterAdmin Protocol. The ACL is a comma-separated
+    list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.
+    </description>
+  </property>
+
   <property>
     <name>security.zkfc.protocol.acl</name>
     <value>*</value>
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 384e5d1e5f..2e6b132d74 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -218,6 +218,8 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
       SECURITY_CLIENT_PROTOCOL_ACL = "security.client.protocol.acl";
   public static final String SECURITY_CLIENT_DATANODE_PROTOCOL_ACL =
       "security.client.datanode.protocol.acl";
+  public static final String SECURITY_ROUTER_ADMIN_PROTOCOL_ACL =
+      "security.router.admin.protocol.acl";
   public static final String SECURITY_DATANODE_PROTOCOL_ACL =
       "security.datanode.protocol.acl";
   public static final String
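For context on how the new SECURITY_ROUTER_ADMIN_PROTOCOL_ACL key behaves at runtime: Hadoop's service-level authorization parses the value as a comma-separated user list and a comma-separated group list separated by a blank. A minimal illustration of those semantics follows; the class and method names here (RouterAclCheck, checkRouterAdminAccess) are hypothetical and not part of this patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public final class RouterAclCheck {
  // Hypothetical helper showing the semantics of the new ACL key: "*" admits
  // everyone; "alice,bob users,wheel" admits users alice and bob plus members
  // of the users and wheel groups.
  static boolean checkRouterAdminAccess(Configuration conf) throws IOException {
    AccessControlList acl = new AccessControlList(conf.get(
        CommonConfigurationKeys.SECURITY_ROUTER_ADMIN_PROTOCOL_ACL, "*"));
    return acl.isUserAllowed(UserGroupInformation.getCurrentUser());
  }
}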
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 6de186a1bc..c449a2e0d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -92,6 +92,11 @@ public final class HdfsConstants {
    */
   public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
       "org.apache.hadoop.hdfs.protocol.ClientProtocol";
+  /**
+   * Router admin Protocol Names.
+   */
+  public static final String ROUTER_ADMIN_PROTOCOL_NAME =
+      "org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocol";
 
   // Timeouts for communicating with DataNode for streaming writes/reads
   public static final int READ_TIMEOUT = 60 * 1000;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java
index 96fa794183..d308616ba6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java
@@ -19,10 +19,10 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.TokenInfo;
@@ -35,9 +35,9 @@
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY)
 @TokenInfo(DelegationTokenSelector.class)
-@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME,
+@ProtocolInfo(protocolName = HdfsConstants.ROUTER_ADMIN_PROTOCOL_NAME,
     protocolVersion = 1)
 public interface RouterAdminProtocolPB extends
     RouterAdminProtocolService.BlockingInterface {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 027dd11144..e2d944c4d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -29,16 +29,16 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
+import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocol;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
-import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreCache;
@@ -66,7 +66,6 @@
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.ipc.GenericRefreshProtocol;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC.Server;
@@ -89,8 +88,7 @@
  * router. It is created, started, and stopped by {@link Router}.
  */
 public class RouterAdminServer extends AbstractService
-    implements MountTableManager, RouterStateManager, NameserviceManager,
-    GenericRefreshProtocol {
+    implements RouterAdminProtocol {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(RouterAdminServer.class);
@@ -159,7 +157,7 @@ public RouterAdminServer(Configuration conf, Router router)
 
     // Set service-level authorization security policy
     if (conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false)) {
-      this.adminServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      this.adminServer.refreshServiceAcl(conf, new RouterPolicyProvider());
     }
 
     // The RPC-server port can be ephemeral... ensure we have the correct info
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index ad5980b8d3..0d4f94c5a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -62,7 +62,6 @@
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -103,6 +102,7 @@
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -275,7 +275,7 @@ public RouterRpcServer(Configuration configuration, Router router,
     this.serviceAuthEnabled = conf.getBoolean(
         HADOOP_SECURITY_AUTHORIZATION, false);
     if (this.serviceAuthEnabled) {
-      rpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      rpcServer.refreshServiceAcl(conf, new RouterPolicyProvider());
     }
 
     // We don't want the server to log the full stack trace for some exceptions
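Both refreshServiceAcl() changes above swap HDFSPolicyProvider for a new RouterPolicyProvider, but that class is not among the seven files in this hunk list, so its shape has to be inferred. A minimal sketch, assuming it extends HDFSPolicyProvider and registers the new ACL key against the router admin protocol:

package org.apache.hadoop.hdfs.protocolPB;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.security.authorize.Service;

/** Hypothetical sketch; the real class ships elsewhere in this change set. */
public class RouterPolicyProvider extends HDFSPolicyProvider {

  // Router-specific service-level ACL entry, keyed by the
  // security.router.admin.protocol.acl property added in this patch.
  private static final Service[] RBF_SERVICES = new Service[] {
      new Service(CommonConfigurationKeys.SECURITY_ROUTER_ADMIN_PROTOCOL_ACL,
          RouterAdminProtocol.class)};

  @Override
  public Service[] getServices() {
    // Expose the inherited HDFS services plus the router admin service, so
    // refreshServiceAcl() can resolve an ACL for every protocol the router
    // serves.
    List<Service> services = new ArrayList<>(Arrays.asList(super.getServices()));
    services.addAll(Arrays.asList(RBF_SERVICES));
    return services.toArray(new Service[services.size()]);
  }
}

With a provider like this in place, setting security.router.admin.protocol.acl in hadoop-policy.xml controls who may invoke the router admin protocol once hadoop.security.authorization is enabled.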
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java
index 510cb95ee1..46339a388b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/RouterHDFSContract.java
@@ -90,6 +90,10 @@ public static MiniDFSCluster getCluster() {
     return cluster.getCluster();
   }
 
+  public static MiniRouterDFSCluster getRouterCluster() {
+    return cluster;
+  }
+
   public static FileSystem getFileSystem() throws IOException {
     //assumes cluster is not null
     Assert.assertNotNull("cluster not created", cluster);
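Likewise, the RouterAdminProtocol interface that RouterAdminServer now implements is referenced but not defined in this hunk list. Assuming it simply aggregates the four interfaces the server previously implemented directly (the package names for RouterStateManager and NameserviceManager are inferred from context), a sketch:

package org.apache.hadoop.hdfs.protocolPB;

import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
import org.apache.hadoop.ipc.GenericRefreshProtocol;

/**
 * Hypothetical sketch: one umbrella protocol for the router admin server,
 * combining the interfaces RouterAdminServer previously listed directly.
 */
public interface RouterAdminProtocol extends MountTableManager,
    RouterStateManager, NameserviceManager, GenericRefreshProtocol {
}

Collapsing the four interfaces into one named protocol gives the policy provider and the @ProtocolInfo annotation a single protocol class to key the security.router.admin.protocol.acl entry on.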