HDFS-16686. GetJournalEditServlet fails to authorize valid Kerberos request (#4724) (#4794)

Steve Vaughan 2022-09-13 13:50:23 -04:00 committed by GitHub
parent 2532eca013
commit 357c83db94
4 changed files with 275 additions and 165 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java

@@ -27,11 +27,11 @@
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import org.apache.commons.text.StringEscapeUtils;
+import org.apache.hadoop.hdfs.server.namenode.DfsServlet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -64,7 +64,7 @@
  * </ul>
  */
 @InterfaceAudience.Private
-public class GetJournalEditServlet extends HttpServlet {
+public class GetJournalEditServlet extends DfsServlet {

   private static final long serialVersionUID = -4635891628211723009L;
   private static final Logger LOG =
@@ -77,17 +77,11 @@ public class GetJournalEditServlet extends HttpServlet {
   protected boolean isValidRequestor(HttpServletRequest request, Configuration conf)
       throws IOException {
-    String remotePrincipal = request.getUserPrincipal().getName();
-    String remoteShortName = request.getRemoteUser();
-    if (remotePrincipal == null) { // This really shouldn't happen...
-      LOG.warn("Received null remoteUser while authorizing access to " +
-          "GetJournalEditServlet");
-      return false;
-    }
+    UserGroupInformation ugi = getUGI(request, conf);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Validating request made by " + remotePrincipal +
-          " / " + remoteShortName + ". This user is: " +
+      LOG.debug("Validating request made by " + ugi.getUserName() +
+          " / " + ugi.getShortUserName() + ". This user is: " +
           UserGroupInformation.getLoginUser());
     }
@@ -115,9 +109,9 @@ protected boolean isValidRequestor(HttpServletRequest request, Configuration conf)
     for (String v : validRequestors) {
       if (LOG.isDebugEnabled())
        LOG.debug("isValidRequestor is comparing to valid requestor: " + v);
-      if (v != null && v.equals(remotePrincipal)) {
+      if (v != null && v.equals(ugi.getUserName())) {
         if (LOG.isDebugEnabled())
-          LOG.debug("isValidRequestor is allowing: " + remotePrincipal);
+          LOG.debug("isValidRequestor is allowing: " + ugi.getUserName());
         return true;
       }
     }
@@ -125,16 +119,16 @@ protected boolean isValidRequestor(HttpServletRequest request, Configuration conf)
     // Additionally, we compare the short name of the requestor to this JN's
     // username, because we want to allow requests from other JNs during
    // recovery, but we can't enumerate the full list of JNs.
-    if (remoteShortName.equals(
+    if (ugi.getShortUserName().equals(
         UserGroupInformation.getLoginUser().getShortUserName())) {
       if (LOG.isDebugEnabled())
         LOG.debug("isValidRequestor is allowing other JN principal: " +
-            remotePrincipal);
+            ugi.getUserName());
       return true;
     }
     if (LOG.isDebugEnabled())
-      LOG.debug("isValidRequestor is rejecting: " + remotePrincipal);
+      LOG.debug("isValidRequestor is rejecting: " + ugi.getUserName());
     return false;
   }
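
The heart of the change is above: identity resolution moves from container-provided values (request.getUserPrincipal(), request.getRemoteUser()), which failed to authorize valid Kerberos requests, to the UserGroupInformation returned by the inherited getUGI(request, conf). For context, a rough sketch of that helper as provided by DfsServlet (the authoritative version lives in org.apache.hadoop.hdfs.server.namenode.DfsServlet and delegates to JspHelper):

// Sketch of the inherited DfsServlet helper, not part of this diff:
// JspHelper.getUGI resolves the caller from the authentication layer
// (Kerberos/SPNEGO principals, delegation tokens, or the user.name query
// parameter that the new unit test below exercises).
protected UserGroupInformation getUGI(HttpServletRequest request, Configuration conf)
    throws IOException {
  return JspHelper.getUGI(getServletContext(), request, conf);
}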

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java

@@ -33,6 +33,8 @@
 import javax.management.ReflectionException;
 import javax.management.openmbean.CompositeDataSupport;
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -80,16 +82,38 @@ public static void runCmd(DFSAdmin dfsadmin, boolean success,
     }
   }

+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  /**
+   * Create a default HDFS configuration which has test-specific data directories. This is
+   * intended to protect against interactions between test runs that might corrupt results. Each
+   * test run's data is automatically cleaned up by JUnit.
+   *
+   * @return a default configuration with test-specific data directories
+   */
+  public Configuration getHdfsConfiguration() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    // Override the file system locations with test-specific temporary folders
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        folder.newFolder("dfs/name").toString());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+        folder.newFolder("dfs/namesecondary").toString());
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        folder.newFolder("dfs/data").toString());
+    return conf;
+  }
+
   /**
    * Test DFSAdmin Upgrade Command.
    */
   @Test
   public void testDFSAdminRollingUpgradeCommands() throws Exception {
     // start a cluster
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
       cluster.waitActive();

       final Path foo = new Path("/foo");
@@ -149,8 +173,6 @@ public void testDFSAdminRollingUpgradeCommands() throws Exception {
       Assert.assertTrue(dfs.exists(bar));
       Assert.assertTrue(dfs.exists(baz));
     }
-    } finally {
-      if(cluster != null) cluster.shutdown();
     }
   }
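
The TemporaryFolder rule introduced in this file is a stock JUnit 4 facility; a minimal, self-contained sketch of its lifecycle (the class and directory names below are illustrative, not part of the commit):

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

public class TemporaryFolderLifecycleExample {
  @Rule
  public TemporaryFolder folder = new TemporaryFolder();

  @Test
  public void createsIsolatedDirectory() throws IOException {
    // JUnit creates a fresh root folder before each test and deletes it
    // afterwards, so no state can leak between test runs.
    File data = folder.newFolder("dfs", "data");
    assertTrue(data.isDirectory());
  }
}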
@@ -172,8 +194,8 @@ public void testRollingUpgradeWithQJM() throws Exception {
     LOG.info("nn1Dir=" + nn1Dir);
     LOG.info("nn2Dir=" + nn2Dir);

-    final Configuration conf = new HdfsConfiguration();
-    final MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build()) {
       mjc.waitActive();
       setConf(conf, nn1Dir, mjc);
@@ -283,6 +305,7 @@ public void testRollingUpgradeWithQJM() throws Exception {
       if (cluster2 != null) cluster2.shutdown();
     }
   }
+  }

   private static CompositeDataSupport getBean()
       throws MalformedObjectNameException, MBeanException,
@@ -309,10 +332,8 @@ private static void checkMxBean() throws Exception {
   @Test
   public void testRollback() throws Exception {
     // start a cluster
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
       cluster.waitActive();

       final Path foo = new Path("/foo");
@@ -352,8 +373,6 @@ public void testRollback() throws Exception {
       startRollingUpgrade(foo, bar, file, data, cluster);
       rollbackRollingUpgrade(foo, bar, file, data, cluster);
-    } finally {
-      if(cluster != null) cluster.shutdown();
     }
   }
@@ -407,10 +426,8 @@ private static void rollbackRollingUpgrade(Path foo, Path bar,
   @Test
   public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
     // start a cluster
-    final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
       cluster.waitActive();
       final DFSAdmin dfsadmin = new DFSAdmin(conf);
       DataNode dn = cluster.getDataNodes().get(0);
@@ -431,8 +448,6 @@ public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
       // ping should fail.
       assertEquals(-1, dfsadmin.run(args1));
-    } finally {
-      if (cluster != null) cluster.shutdown();
     }
   }
@@ -462,7 +477,7 @@ private void testFinalize(int nnCount) throws Exception {
   private void testFinalize(int nnCount, boolean skipImageDeltaCheck)
       throws Exception {
-    final Configuration conf = new HdfsConfiguration();
+    final Configuration conf = getHdfsConfiguration();
     MiniQJMHACluster cluster = null;
     final Path foo = new Path("/foo");
     final Path bar = new Path("/bar");
@@ -528,10 +543,8 @@ public void testQueryWithMultipleNN() throws Exception {
   }

   private void testQuery(int nnCount) throws Exception {
-    final Configuration conf = new Configuration();
-    MiniQJMHACluster cluster = null;
-    try {
-      cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build()) {
       MiniDFSCluster dfsCluster = cluster.getDfsCluster();
       dfsCluster.waitActive();
@@ -561,19 +574,13 @@ private void testQuery(int nnCount) throws Exception {
       // The NN should have a copy of the fsimage in case of rollbacks.
       Assert.assertTrue(dfsCluster.getNamesystem(0).getFSImage()
           .hasRollbackFSImage());
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
     }
   }

   @Test (timeout = 300000)
   public void testQueryAfterRestart() throws IOException, InterruptedException {
-    final Configuration conf = new Configuration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    final Configuration conf = getHdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
       cluster.waitActive();

       DistributedFileSystem dfs = cluster.getFileSystem();
@@ -587,10 +594,6 @@ public void testQueryAfterRestart() throws IOException, InterruptedException {
       cluster.restartNameNodes();
       dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
     }
   }
@@ -606,7 +609,7 @@ public void testCheckpointWithMultipleNN() throws IOException, InterruptedException {
   @Test(timeout = 60000)
   public void testRollBackImage() throws Exception {
-    final Configuration conf = new Configuration();
+    final Configuration conf = getHdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 2);
@@ -651,15 +654,14 @@ public void duringUploadInProgess()
   }

   public void testCheckpoint(int nnCount) throws IOException, InterruptedException {
-    final Configuration conf = new Configuration();
+    final Configuration conf = getHdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
-    MiniQJMHACluster cluster = null;
     final Path foo = new Path("/foo");
-    try {
-      cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build();
+    try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount)
+        .build()) {
       MiniDFSCluster dfsCluster = cluster.getDfsCluster();
       dfsCluster.waitActive();
@@ -681,17 +683,14 @@ public void testCheckpoint(int nnCount) throws IOException, InterruptedException {
         verifyNNCheckpoint(dfsCluster, txid, i);
       }

-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
     }
   }

   /**
    * Verify that the namenode at the given index has an FSImage with a TxId up to txid-1
    */
-  private void verifyNNCheckpoint(MiniDFSCluster dfsCluster, long txid, int nnIndex) throws InterruptedException {
+  private void verifyNNCheckpoint(MiniDFSCluster dfsCluster, long txid, int nnIndex)
+      throws InterruptedException {
     int retries = 0;
     while (++retries < 5) {
       NNStorage storage = dfsCluster.getNamesystem(nnIndex).getFSImage()
@@ -732,7 +731,7 @@ public void testCheckpointWithSNN() throws Exception {
     SecondaryNameNode snn = null;
     try {
-      Configuration conf = new HdfsConfiguration();
+      Configuration conf = getHdfsConfiguration();
       cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java

@@ -35,7 +35,7 @@
 import java.util.List;
 import java.util.Random;

-public class MiniQJMHACluster {
+public class MiniQJMHACluster implements AutoCloseable {
   private MiniDFSCluster cluster;
   private MiniJournalCluster journalCluster;
   private final Configuration conf;
@@ -189,4 +189,15 @@ public void shutdown() throws IOException {
     cluster.shutdown();
     journalCluster.shutdown();
   }
+
+  @Override
+  public void close() {
+    try {
+      shutdown();
+    } catch (IOException shutdownFailure) {
+      LOG.warn("Exception while closing journal cluster", shutdownFailure);
+    }
+  }
 }
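
Implementing AutoCloseable is what allows the tests above to swap their finally { cluster.shutdown(); } blocks for try-with-resources. A minimal usage sketch (mirroring the builder calls used elsewhere in this commit):

// Illustrative caller, not part of this diff: close() runs even when the
// body throws, and a failed shutdown() is logged instead of masking the
// original test failure.
final Configuration conf = new HdfsConfiguration();
try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build()) {
  cluster.getDfsCluster().waitActive();
  // ... exercise the cluster ...
}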

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java

@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import javax.servlet.ServletConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestGetJournalEditServlet {
+
+  private final static Configuration CONF = new HdfsConfiguration();
+
+  private final static GetJournalEditServlet SERVLET = new GetJournalEditServlet();
+
+  @BeforeClass
+  public static void setUp() throws ServletException {
+    // Configure Hadoop
+    CONF.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
+    CONF.set(DFSConfigKeys.HADOOP_SECURITY_AUTH_TO_LOCAL,
+        "RULE:[2:$1/$2@$0]([nsdj]n/.*@REALM\\.TLD)s/.*/hdfs/\nDEFAULT");
+    CONF.set(DFSConfigKeys.DFS_NAMESERVICES, "ns");
+    CONF.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "nn/_HOST@REALM.TLD");
+
+    // Configure Kerberos UGI
+    UserGroupInformation.setConfiguration(CONF);
+    UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(
+        "jn/somehost@REALM.TLD"));
+
+    // Initialize the servlet
+    ServletConfig config = mock(ServletConfig.class);
+    SERVLET.init(config);
+  }
+
+  /**
+   * Unauthenticated user should be rejected.
+   *
+   * @throws IOException for unexpected validation failures
+   */
+  @Test
+  public void testWithoutUser() throws IOException {
+    // Test: Make a request without specifying a user
+    HttpServletRequest request = mock(HttpServletRequest.class);
+
+    boolean isValid = SERVLET.isValidRequestor(request, CONF);
+
+    // Verify: The request is invalid
+    assertThat(isValid).isFalse();
+  }
+
+  /**
+   * Namenode requests should be authorized, since it will match the configured namenode.
+   *
+   * @throws IOException for unexpected validation failures
+   */
+  @Test
+  public void testRequestNameNode() throws IOException, ServletException {
+    // Test: Make a request from a namenode
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    when(request.getParameter(UserParam.NAME)).thenReturn("nn/localhost@REALM.TLD");
+
+    boolean isValid = SERVLET.isValidRequestor(request, CONF);
+
+    assertThat(isValid).isTrue();
+  }
+
+  /**
+   * There is a fallback using the short name, which is used by journalnodes.
+   *
+   * @throws IOException for unexpected validation failures
+   */
+  @Test
+  public void testRequestShortName() throws IOException {
+    // Test: Make a request from a journalnode
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    when(request.getParameter(UserParam.NAME)).thenReturn("jn/localhost@REALM.TLD");
+
+    boolean isValid = SERVLET.isValidRequestor(request, CONF);
+
+    assertThat(isValid).isTrue();
+  }
+}
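
A note on the Kerberos plumbing this test depends on: the auth_to_local rule RULE:[2:$1/$2@$0]([nsdj]n/.*@REALM\.TLD)s/.*/hdfs/ maps any two-component namenode, secondary-namenode, datanode, or journalnode service principal (nn, sn, dn, jn) in REALM.TLD to the short name hdfs, so the journalnode request resolves to the same short name as the login user jn/somehost@REALM.TLD and passes the short-name fallback in isValidRequestor. A sketch for checking such a mapping in isolation (HadoopKerberosName is the existing hadoop-common API; the rule and principals are taken from the test above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.HadoopKerberosName;

public class AuthToLocalCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:$1/$2@$0]([nsdj]n/.*@REALM\\.TLD)s/.*/hdfs/\nDEFAULT");
    HadoopKerberosName.setConfiguration(conf);
    // Both service principals collapse to the short name "hdfs".
    System.out.println(new HadoopKerberosName("nn/localhost@REALM.TLD").getShortName());
    System.out.println(new HadoopKerberosName("jn/somehost@REALM.TLD").getShortName());
  }
}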