diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 9dfdd2fe48..06614db5c4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -204,6 +204,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_TRACING =
       "security.trace.protocol.acl";
+  public static final String
+      HADOOP_SECURITY_SERVICE_AUTHORIZATION_DATANODE_LIFELINE =
+      "security.datanode.lifeline.protocol.acl";
   public static final String
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
index 5e534302e6..8c20553938 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeLifelineProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -76,7 +77,10 @@ public class HDFSPolicyProvider extends PolicyProvider {
         GenericRefreshProtocol.class),
     new Service(
         CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_TRACING,
-        TraceAdminProtocol.class)
+        TraceAdminProtocol.class),
+    new Service(
+        CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DATANODE_LIFELINE,
+        DatanodeLifelineProtocol.class)
   };
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
index 6c0783a593..2a14ad4a57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
@@ -21,6 +21,7 @@
 import java.net.InetSocketAddress;
 import java.net.URL;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -45,9 +46,12 @@
 import org.apache.hadoop.ipc.RPC.Server;
 import org.apache.hadoop.net.NetUtils;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
-class JournalNodeRpcServer implements QJournalProtocol {
+@InterfaceAudience.Private
+@VisibleForTesting
+public class JournalNodeRpcServer implements QJournalProtocol {
   private static final int HANDLER_COUNT = 5;
   private final JournalNode jn;
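The hunks above wire a new ACL configuration key, security.datanode.lifeline.protocol.acl, to DatanodeLifelineProtocol, so service-level authorization can gate lifeline messages separately from regular heartbeats. A minimal sketch of how the registered mappings can be enumerated, assuming the getServiceKey() accessor on org.apache.hadoop.security.authorize.Service (getProtocol() is the accessor the new test below relies on):

    // Sketch (not part of the patch): list every protocol-to-ACL-key mapping
    // registered in HDFSPolicyProvider, including the new lifeline entry.
    import org.apache.hadoop.hdfs.HDFSPolicyProvider;
    import org.apache.hadoop.security.authorize.Service;

    public class PrintHdfsPolicyMappings {
      public static void main(String[] args) {
        for (Service service : new HDFSPolicyProvider().getServices()) {
          // Prints lines such as "security.datanode.lifeline.protocol.acl ->
          // org.apache.hadoop.hdfs.server.protocol.DatanodeLifelineProtocol".
          System.out.println(service.getServiceKey() + " -> "
              + service.getProtocol().getName());
        }
      }
    }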
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index a63edeaaf9..6b52949868 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -44,6 +44,7 @@
 import com.google.common.collect.Lists;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
@@ -208,7 +209,9 @@
  * This class is responsible for handling all of the RPC calls to the NameNode.
  * It is created, started, and stopped by {@link NameNode}.
  */
-class NameNodeRpcServer implements NamenodeProtocols {
+@InterfaceAudience.Private
+@VisibleForTesting
+public class NameNodeRpcServer implements NamenodeProtocols {
   private static final Logger LOG = NameNode.LOG;
   private static final Logger stateChangeLog = NameNode.stateChangeLog;
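Widening NameNodeRpcServer and JournalNodeRpcServer from package-private to public (while annotating them @InterfaceAudience.Private and @VisibleForTesting) exists only so the new test can reference the classes from another package. A minimal sketch of the reflective scan involved; note that Class.getInterfaces() stops at the directly implemented aggregate interface (NamenodeProtocols), which is why the test instead uses commons-lang ClassUtils.getAllInterfaces to walk the full hierarchy down to ClientProtocol, DatanodeProtocol, and the rest:

    // Sketch (not part of the patch): print the interfaces NameNodeRpcServer
    // directly implements; only NamenodeProtocols appears at this level.
    import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;

    public class PrintNameNodeRpcInterfaces {
      public static void main(String[] args) {
        for (Class<?> iface : NameNodeRpcServer.class.getInterfaces()) {
          System.out.println(iface.getName());
        }
      }
    }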
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
new file mode 100644
index 0000000000..95aa89f543
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.*;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+import org.apache.commons.lang.ClassUtils;
+import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
+import org.apache.hadoop.hdfs.qjournal.server.JournalNodeRpcServer;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.security.authorize.Service;
+
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test suite covering HDFSPolicyProvider.  We expect that it contains a
+ * security policy definition for every RPC protocol used in HDFS.  The test
+ * suite works by scanning an RPC server's class to find the protocol
+ * interfaces it implements, and then comparing that to the protocol
+ * interfaces covered in HDFSPolicyProvider.  This is a parameterized test
+ * repeated for multiple HDFS RPC server classes.
+ */
+@RunWith(Parameterized.class)
+public class TestHDFSPolicyProvider {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestHDFSPolicyProvider.class);
+
+  private static List<Class<?>> policyProviderProtocols;
+
+  private static final Comparator<Class<?>> CLASS_NAME_COMPARATOR =
+      new Comparator<Class<?>>() {
+        @Override
+        public int compare(Class<?> lhs, Class<?> rhs) {
+          return lhs.getName().compareTo(rhs.getName());
+        }
+      };
+
+  @Rule
+  public TestName testName = new TestName();
+
+  private final Class<?> rpcServerClass;
+
+  @BeforeClass
+  public static void initialize() {
+    Service[] services = new HDFSPolicyProvider().getServices();
+    policyProviderProtocols = new ArrayList<>(services.length);
+    for (Service service : services) {
+      policyProviderProtocols.add(service.getProtocol());
+    }
+    Collections.sort(policyProviderProtocols, CLASS_NAME_COMPARATOR);
+  }
+
+  public TestHDFSPolicyProvider(Class<?> rpcServerClass) {
+    this.rpcServerClass = rpcServerClass;
+  }
+
+  @Parameters(name = "protocolsForServer-{0}")
+  public static List<Class<?>[]> data() {
+    return Arrays.asList(new Class<?>[][]{
+        {NameNodeRpcServer.class},
+        {DataNode.class},
+        {JournalNodeRpcServer.class}
+    });
+  }
+
+  @Test
+  public void testPolicyProviderForServer() {
+    List<?> ifaces = ClassUtils.getAllInterfaces(rpcServerClass);
+    List<Class<?>> serverProtocols = new ArrayList<>(ifaces.size());
+    for (Object obj : ifaces) {
+      Class<?> iface = (Class<?>) obj;
+      // ReconfigurationProtocol is not covered in HDFSPolicyProvider
+      // currently, so we have a special case to skip it.  This needs
+      // follow-up investigation.
+      if (iface.getSimpleName().endsWith("Protocol") &&
+          iface != ReconfigurationProtocol.class) {
+        serverProtocols.add(iface);
+      }
+    }
+    Collections.sort(serverProtocols, CLASS_NAME_COMPARATOR);
+    LOG.info("Running test {} for RPC server {}.  Found server protocols {} "
+        + "and policy provider protocols {}.", testName.getMethodName(),
+        rpcServerClass.getName(), serverProtocols, policyProviderProtocols);
+    assertFalse("Expected to find at least one protocol in server.",
+        serverProtocols.isEmpty());
+    assertTrue(
+        String.format("Expected all protocols for server %s to be defined in "
+            + "%s.  Server contains protocols %s.  Policy provider contains "
+            + "protocols %s.", rpcServerClass.getName(),
+            HDFSPolicyProvider.class.getName(), serverProtocols,
+            policyProviderProtocols),
+        policyProviderProtocols.containsAll(serverProtocols));
+  }
+}
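The parameterized runner executes testPolicyProviderForServer once per row of the data() matrix, failing whenever a server exposes a *Protocol interface that HDFSPolicyProvider does not cover. If another RPC server class is added to HDFS later, covering it is a one-row change; SomeNewRpcServer below is a hypothetical placeholder, not a class in this patch:

    // Hypothetical extension of data() inside TestHDFSPolicyProvider:
    // one additional row per additional RPC server to cover.
    @Parameters(name = "protocolsForServer-{0}")
    public static List<Class<?>[]> data() {
      return Arrays.asList(new Class<?>[][]{
          {NameNodeRpcServer.class},
          {DataNode.class},
          {JournalNodeRpcServer.class},
          {SomeNewRpcServer.class}  // hypothetical additional RPC server
      });
    }

From the hadoop-hdfs-project/hadoop-hdfs module, the suite can be run on its own with mvn test -Dtest=TestHDFSPolicyProvider.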