HDFS-14129. RBF: Create new policy provider for router. Contributed by Ranith Sardar.

Authored by Surendra Singh Lilhore on 2019-01-15 16:40:39 +05:30; committed by Brahma Reddy Battula.
parent 221f24cbdc
commit f40e10b349
7 changed files with 30 additions and 11 deletions


@@ -109,6 +109,16 @@
     active and stand-by states of namenode.</description>
   </property>
 
+  <property>
+    <name>security.router.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RouterAdmin Protocol. The ACL is a comma-separated
+    list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.
+    </description>
+  </property>
+
   <property>
     <name>security.zkfc.protocol.acl</name>
     <value>*</value>
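
Note: as an illustration of the "user list, blank, group list" syntax described above, the snippet below shows how Hadoop's AccessControlList (the class that backs the *.protocol.acl keys) evaluates such a value. The principals are examples only, not part of this change.

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class AclSyntaxExample {
  public static void main(String[] args) {
    // "alice,bob" is the user list, "users,wheel" the group list; a single
    // blank separates the two, exactly as the description above states.
    AccessControlList acl = new AccessControlList("alice,bob users,wheel");

    UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
    System.out.println(acl.isUserAllowed(alice)); // true: alice is a listed user

    // The special value "*" allows everyone.
    System.out.println(new AccessControlList("*").isUserAllowed(alice)); // true
  }
}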


@@ -218,6 +218,8 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
       SECURITY_CLIENT_PROTOCOL_ACL = "security.client.protocol.acl";
   public static final String SECURITY_CLIENT_DATANODE_PROTOCOL_ACL =
       "security.client.datanode.protocol.acl";
+  public static final String SECURITY_ROUTER_ADMIN_PROTOCOL_ACL =
+      "security.router.admin.protocol.acl";
   public static final String
       SECURITY_DATANODE_PROTOCOL_ACL = "security.datanode.protocol.acl";
   public static final String


@@ -92,6 +92,11 @@ public final class HdfsConstants {
    */
   public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
       "org.apache.hadoop.hdfs.protocol.ClientProtocol";
+  /**
+   * Router admin Protocol Names.
+   */
+  public static final String ROUTER_ADMIN_PROTOCOL_NAME =
+      "org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocol";
 
   // Timeouts for communicating with DataNode for streaming writes/reads
   public static final int READ_TIMEOUT = 60 * 1000;


@@ -19,10 +19,10 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.TokenInfo;

@@ -35,9 +35,9 @@
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = RBFConfigKeys.DFS_ROUTER_KERBEROS_PRINCIPAL_KEY)
 @TokenInfo(DelegationTokenSelector.class)
-@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME,
+@ProtocolInfo(protocolName = HdfsConstants.ROUTER_ADMIN_PROTOCOL_NAME,
     protocolVersion = 1)
 public interface RouterAdminProtocolPB extends
     RouterAdminProtocolService.BlockingInterface {
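
Note: HdfsConstants.ROUTER_ADMIN_PROTOCOL_NAME names a plain (non-PB) RouterAdminProtocol interface that this change adds as a new file, which this excerpt does not show. A plausible sketch, inferred from the interfaces that RouterAdminServer stops listing individually in the next file, would be:

package org.apache.hadoop.hdfs.protocolPB;

import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
import org.apache.hadoop.ipc.GenericRefreshProtocol;

/**
 * Sketch (reconstructed, not from this excerpt): one umbrella interface for
 * the router admin surface, so that service-level authorization can key
 * every admin operation off a single protocol class.
 */
public interface RouterAdminProtocol extends MountTableManager,
    RouterStateManager, NameserviceManager, GenericRefreshProtocol {
}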


@@ -29,16 +29,16 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
+import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocol;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
-import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.store.DisabledNameserviceStore;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreCache;

@@ -66,7 +66,6 @@
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.ipc.GenericRefreshProtocol;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC.Server;

@@ -89,8 +88,7 @@
  * router. It is created, started, and stopped by {@link Router}.
  */
 public class RouterAdminServer extends AbstractService
-    implements MountTableManager, RouterStateManager, NameserviceManager,
-    GenericRefreshProtocol {
+    implements RouterAdminProtocol {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(RouterAdminServer.class);

@@ -159,7 +157,7 @@ public RouterAdminServer(Configuration conf, Router router)
     // Set service-level authorization security policy
     if (conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false)) {
-      this.adminServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      this.adminServer.refreshServiceAcl(conf, new RouterPolicyProvider());
     }
 
     // The RPC-server port can be ephemeral... ensure we have the correct info
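
Note: the RouterPolicyProvider that both router servers now pass to refreshServiceAcl is also a new file, not shown in this excerpt. A plausible sketch, assuming it extends HDFSPolicyProvider and appends a single Service entry keyed by the new ACL property:

package org.apache.hadoop.hdfs.protocolPB;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.security.authorize.Service;

/**
 * Sketch (reconstructed, not from this excerpt): reuse the NameNode
 * policies and add one entry mapping security.router.admin.protocol.acl
 * to the router admin protocol.
 */
public class RouterPolicyProvider extends HDFSPolicyProvider {

  private static final Service[] RBF_SERVICES = new Service[] {
      new Service(CommonConfigurationKeys.SECURITY_ROUTER_ADMIN_PROTOCOL_ACL,
          RouterAdminProtocol.class)};

  private final Service[] services;

  public RouterPolicyProvider() {
    // Start from the inherited HDFS policies, then append the RBF entry.
    List<Service> list = new ArrayList<>(Arrays.asList(super.getServices()));
    list.addAll(Arrays.asList(RBF_SERVICES));
    services = list.toArray(new Service[list.size()]);
  }

  @Override
  public Service[] getServices() {
    return Arrays.copyOf(services, services.length);
  }
}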


@@ -62,7 +62,6 @@
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

@@ -103,6 +102,7 @@
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.RouterPolicyProvider;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;

@@ -275,7 +275,7 @@ public RouterRpcServer(Configuration configuration, Router router,
     this.serviceAuthEnabled = conf.getBoolean(
         HADOOP_SECURITY_AUTHORIZATION, false);
     if (this.serviceAuthEnabled) {
-      rpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
+      rpcServer.refreshServiceAcl(conf, new RouterPolicyProvider());
     }
 
     // We don't want the server to log the full stack trace for some exceptions
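
Note: for either refreshServiceAcl call above to run at all, service-level authorization must be enabled. A minimal illustrative configuration follows; the keys are the real ones used in this change, while the principal names are made up:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class RouterAclConfExample {
  public static Configuration routerAclConf() {
    Configuration conf = new Configuration();
    // hadoop.security.authorization gates the refreshServiceAcl calls above.
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    // Allow user "routeradmin" and members of group "routergroup" (examples)
    // to use the router admin protocol.
    conf.set(CommonConfigurationKeys.SECURITY_ROUTER_ADMIN_PROTOCOL_ACL,
        "routeradmin routergroup");
    return conf;
  }
}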


@@ -90,6 +90,10 @@ public static MiniDFSCluster getCluster() {
     return cluster.getCluster();
   }
 
+  public static MiniRouterDFSCluster getRouterCluster() {
+    return cluster;
+  }
+
   public static FileSystem getFileSystem() throws IOException {
     //assumes cluster is not null
     Assert.assertNotNull("cluster not created", cluster);
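
Note: a hypothetical use of the new accessor inside this test class; the test method and assertion scenario are assumed, not part of the patch:

@Test
public void testRouterClusterAccessor() {
  // The new static accessor exposes the whole router cluster, not just the
  // underlying MiniDFSCluster, so router-specific checks become possible.
  MiniRouterDFSCluster routerCluster = getRouterCluster();
  Assert.assertNotNull("router cluster not created", routerCluster);
}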