From a3f3a6c5a32cd8bf9838ed96b6306dfa3518a721 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Tue, 6 Nov 2012 22:05:34 +0000 Subject: [PATCH 1/4] HDFS-4155. libhdfs implementation of hsync API. Contributed by Liang Xie. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406372 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../src/main/native/libhdfs/hdfs.c | 26 +++++++++++++++++++ .../src/main/native/libhdfs/hdfs.h | 11 ++++++++ .../native/libhdfs/test_libhdfs_threaded.c | 1 + 4 files changed, 40 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 6dbd6665d7..948f7defe0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -368,6 +368,8 @@ Release 2.0.3-alpha - Unreleased HDFS-4059. Add number of stale DataNodes to metrics. (Jing Zhao via suresh) + HDFS-4155. libhdfs implementation of hsync API (Liang Xie via todd) + IMPROVEMENTS HDFS-3925. Prettify PipelineAck#toString() for printing to a log diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c index a180dd24c7..ba980a7a53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c @@ -1388,6 +1388,32 @@ int hdfsHFlush(hdfsFS fs, hdfsFile f) return 0; } +int hdfsHSync(hdfsFS fs, hdfsFile f) +{ + //Get the JNIEnv* corresponding to current thread + JNIEnv* env = getJNIEnv(); + if (env == NULL) { + errno = EINTERNAL; + return -1; + } + + //Sanity check + if (!f || f->type != OUTPUT) { + errno = EBADF; + return -1; + } + + jobject jOutputStream = f->file; + jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream, + HADOOP_OSTRM, "hsync", "()V"); + if (jthr) { + errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hdfsHSync: FSDataOutputStream#hsync"); + return -1; + } + return 0; +} + int hdfsAvailable(hdfsFS fs, hdfsFile f) { // JAVA EQUIVALENT diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h index fa71c8384c..7973e0a5e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h @@ -393,6 +393,17 @@ extern "C" { int hdfsHFlush(hdfsFS fs, hdfsFile file); + /** + * hdfsHSync - Similar to posix fsync, Flush out the data in client's + * user buffer. all the way to the disk device (but the disk may have + * it in its cache). + * @param fs configured filesystem handle + * @param file file handle + * @return 0 on success, -1 on error and sets errno + */ + int hdfsHSync(hdfsFS fs, hdfsFile file); + + /** * hdfsAvailable - Number of bytes that can be read from this * input stream without blocking. 
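
For reference, a minimal caller of the new API could look like the sketch below. It is only an illustration, not part of the patch: hdfsConnect("default", 0) picks up the filesystem named in the client's loaded configuration, the path is hypothetical, error handling is reduced to early returns, and O_WRONLY is used as libhdfs's create-or-overwrite open mode.

#include <fcntl.h>    /* O_WRONLY */
#include <stdio.h>
#include "hdfs.h"

/* Sketch: write a buffer, then hsync it so the bytes are pushed all the
 * way to the datanode disks before we report success. */
int write_durably(const char *path, const char *buf, tSize len)
{
    hdfsFS fs = hdfsConnect("default", 0);   /* use the configured filesystem */
    if (!fs) return -1;

    hdfsFile f = hdfsOpenFile(fs, path, O_WRONLY, 0, 0, 0);
    if (!f) { hdfsDisconnect(fs); return -1; }

    if (hdfsWrite(fs, f, buf, len) != len ||
        hdfsHSync(fs, f) != 0) {             /* new call added by this patch */
        fprintf(stderr, "write/hsync of %s failed\n", path);
        hdfsCloseFile(fs, f);
        hdfsDisconnect(fs);
        return -1;
    }

    hdfsCloseFile(fs, f);
    hdfsDisconnect(fs);
    return 0;
}

On success the data has been flushed through to the disks of the datanodes in the pipeline, which is the stronger guarantee hdfsHSync adds over hdfsHFlush.
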
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c index d9cb0d9648..c56c89300f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c @@ -150,6 +150,7 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs) return EIO; } EXPECT_ZERO(hdfsFlush(fs, file)); + EXPECT_ZERO(hdfsHSync(fs, file)); EXPECT_ZERO(hdfsCloseFile(fs, file)); /* Let's re-open the file for reading */ From 251230a12601295849f51083a32d9f996fa353de Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Tue, 6 Nov 2012 22:23:51 +0000 Subject: [PATCH 2/4] HADOOP-9004. Reverting the commit r1406202 to address patch issue git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406379 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 - .../security/SecurityUtilTestHelper.java | 15 -- .../security/TestUGIWithExternalKdc.java | 74 ---------- .../datanode/SecureDataNodeStarter.java | 37 ++--- .../apache/hadoop/hdfs/MiniDFSCluster.java | 44 +----- .../datanode/TestStartSecureDataNode.java | 117 ---------------- .../TestSecureNameNodeWithExternalKdc.java | 129 ------------------ 7 files changed, 21 insertions(+), 398 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 669c9e64d8..e349cb4526 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -129,9 +129,6 @@ Trunk (Unreleased) HADOOP-8776. Provide an option in test-patch that can enable/disable compiling native code. (Chris Nauroth via suresh) - HADOOP-9004. Allow security unit tests to use external KDC. (Stephen Chu - via suresh) - BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java index 8a9ad05c34..7c5f5e1e14 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java @@ -27,19 +27,4 @@ public class SecurityUtilTestHelper { public static void setTokenServiceUseIp(boolean flag) { SecurityUtil.setTokenServiceUseIp(flag); } - - /** - * Return true if externalKdc=true and the location of the krb5.conf - * file has been specified, and false otherwise. 
- */ - public static boolean isExternalKdcRunning() { - String externalKdc = System.getProperty("externalKdc"); - String krb5Conf = System.getProperty("java.security.krb5.conf"); - if(externalKdc == null || !externalKdc.equals("true") || - krb5Conf == null) { - return false; - } - return true; - } - } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java deleted file mode 100644 index 2f55b11fd1..0000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.security; - -import java.io.IOException; - -import junit.framework.Assert; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; - -/** - * Tests kerberos keytab login using a user-specified external KDC - * - * To run, users must specify the following system properties: - * externalKdc=true - * java.security.krb5.conf - * user.principal - * user.keytab - */ -public class TestUGIWithExternalKdc { - - @Before - public void testExternalKdcRunning() { - Assume.assumeTrue(isExternalKdcRunning()); - } - - @Test - public void testLogin() throws IOException { - String userPrincipal = System.getProperty("user.principal"); - String userKeyTab = System.getProperty("user.keytab"); - Assert.assertNotNull("User principal was not specified", userPrincipal); - Assert.assertNotNull("User keytab was not specified", userKeyTab); - - Configuration conf = new Configuration(); - conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, - "kerberos"); - UserGroupInformation.setConfiguration(conf); - - UserGroupInformation ugi = UserGroupInformation - .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab); - - Assert.assertEquals(AuthenticationMethod.KERBEROS, - ugi.getAuthenticationMethod()); - - try { - UserGroupInformation - .loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM", userKeyTab); - Assert.fail("Login should have failed"); - } catch (Exception ex) { - ex.printStackTrace(); - } - } - -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index c5e9c9ca85..bcfcd9f76f 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -38,8 +38,6 @@ import javax.net.ssl.SSLServerSocketFactory; -import com.google.common.annotations.VisibleForTesting; - /** * Utility class to start a datanode in a secure cluster, first obtaining * privileged resources before main startup and handing them to the datanode. @@ -75,25 +73,6 @@ public void init(DaemonContext context) throws Exception { // Stash command-line arguments for regular datanode args = context.getArguments(); - sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf); - resources = getSecureResources(sslFactory, conf); - } - - @Override - public void start() throws Exception { - System.err.println("Starting regular datanode initialization"); - DataNode.secureMain(args, resources); - } - - @Override public void destroy() { - sslFactory.destroy(); - } - - @Override public void stop() throws Exception { /* Nothing to do */ } - - @VisibleForTesting - public static SecureResources getSecureResources(final SSLFactory sslFactory, - Configuration conf) throws Exception { // Obtain secure port for data streaming to datanode InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf); int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, @@ -106,12 +85,13 @@ public static SecureResources getSecureResources(final SSLFactory sslFactory, // Check that we got the port we need if (ss.getLocalPort() != streamingAddr.getPort()) { throw new RuntimeException("Unable to bind on specified streaming port in secure " + - "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort()); + "context. 
Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort()); } // Obtain secure listener for web server Connector listener; if (HttpConfig.isSecure()) { + sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf); try { sslFactory.init(); } catch (GeneralSecurityException ex) { @@ -146,7 +126,18 @@ protected SSLServerSocketFactory createFactory() throws Exception { } System.err.println("Opened streaming server at " + streamingAddr); System.err.println("Opened info server at " + infoSocAddr); - return new SecureResources(ss, listener); + resources = new SecureResources(ss, listener); } + @Override + public void start() throws Exception { + System.err.println("Starting regular datanode initialization"); + DataNode.secureMain(args, resources); + } + + @Override public void destroy() { + sslFactory.destroy(); + } + + @Override public void stop() throws Exception { /* Nothing to do */ } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 851b52541f..0c238581ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -81,8 +81,6 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.DataStorage; -import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter; -import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -97,7 +95,6 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; @@ -148,7 +145,6 @@ public static class Builder { private boolean setupHostsFile = false; private MiniDFSNNTopology nnTopology = null; private boolean checkExitOnShutdown = true; - private boolean checkDataNodeAddrConfig = false; private boolean checkDataNodeHostConfig = false; public Builder(Configuration conf) { @@ -267,14 +263,6 @@ public Builder checkExitOnShutdown(boolean val) { return this; } - /** - * Default: false - */ - public Builder checkDataNodeAddrConfig(boolean val) { - this.checkDataNodeAddrConfig = val; - return this; - } - /** * Default: false */ @@ -348,7 +336,6 @@ private MiniDFSCluster(Builder builder) throws IOException { builder.setupHostsFile, builder.nnTopology, builder.checkExitOnShutdown, - builder.checkDataNodeAddrConfig, builder.checkDataNodeHostConfig); } @@ -356,14 +343,11 @@ public class DataNodeProperties { DataNode datanode; Configuration conf; String[] dnArgs; - SecureResources secureResources; - DataNodeProperties(DataNode node, Configuration conf, String[] args, - SecureResources secureResources) { + DataNodeProperties(DataNode node, Configuration conf, String[] args) { this.datanode = node; this.conf = conf; this.dnArgs = args; - this.secureResources = secureResources; } } @@ -589,7 +573,7 @@ public MiniDFSCluster(int nameNodePort, manageNameDfsDirs, true, 
manageDataDfsDirs, manageDataDfsDirs, operation, racks, hosts, simulatedCapacities, null, true, false, - MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false); + MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false); } private void initMiniDFSCluster( @@ -600,7 +584,6 @@ private void initMiniDFSCluster( String[] hosts, long[] simulatedCapacities, String clusterId, boolean waitSafeMode, boolean setupHostsFile, MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown, - boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig) throws IOException { ExitUtil.disableSystemExit(); @@ -664,7 +647,7 @@ private void initMiniDFSCluster( // Start the DataNodes startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks, - hosts, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig); + hosts, simulatedCapacities, setupHostsFile, false, checkDataNodeHostConfig); waitClusterUp(); //make sure ProxyUsers uses the latest conf ProxyUsers.refreshSuperUserGroupsConfiguration(conf); @@ -1178,18 +1161,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, if (hosts != null) { NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost"); } - - SecureResources secureResources = null; - if (UserGroupInformation.isSecurityEnabled()) { - SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf); - try { - secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf); - } catch (Exception ex) { - ex.printStackTrace(); - } - } - DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf, - secureResources); + DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf); if(dn == null) throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY)); @@ -1204,7 +1176,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, racks[i-curDatanodesNum]); } dn.runDatanodeDaemon(); - dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources)); + dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs)); } curDatanodesNum += numDataNodes; this.numDataNodes += numDataNodes; @@ -1635,16 +1607,14 @@ public synchronized boolean restartDataNode(DataNodeProperties dnprop, boolean keepPort) throws IOException { Configuration conf = dnprop.conf; String[] args = dnprop.dnArgs; - SecureResources secureResources = dnprop.secureResources; Configuration newconf = new HdfsConfiguration(conf); // save cloned config if (keepPort) { InetSocketAddress addr = dnprop.datanode.getXferAddress(); conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort()); } - dataNodes.add(new DataNodeProperties( - DataNode.createDataNode(args, conf, secureResources), - newconf, args, secureResources)); + dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf), + newconf, args)); numDataNodes++; return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java deleted file mode 100644 index d1b2d668de..0000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdfs.server.namenode; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.security.PrivilegedExceptionAction; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; - -/** - * This test starts a 1 NameNode 1 DataNode MiniDFSCluster with - * kerberos authentication enabled using user-specified KDC, - * principals, and keytabs. - * - * A secure DataNode has to be started by root, so this test needs to - * be run by root. - * - * To run, users must specify the following system properties: - * externalKdc=true - * java.security.krb5.conf - * dfs.namenode.kerberos.principal - * dfs.namenode.kerberos.internal.spnego.principal - * dfs.namenode.keytab.file - * dfs.datanode.kerberos.principal - * dfs.datanode.keytab.file - */ -public class TestStartSecureDataNode { - final static private int NUM_OF_DATANODES = 1; - - @Before - public void testExternalKdcRunning() { - // Tests are skipped if external KDC is not running. 
- Assume.assumeTrue(isExternalKdcRunning()); - } - - @Test - public void testSecureNameNode() throws IOException, InterruptedException { - MiniDFSCluster cluster = null; - try { - String nnPrincipal = - System.getProperty("dfs.namenode.kerberos.principal"); - String nnSpnegoPrincipal = - System.getProperty("dfs.namenode.kerberos.internal.spnego.principal"); - String nnKeyTab = System.getProperty("dfs.namenode.keytab.file"); - assertNotNull("NameNode principal was not specified", nnPrincipal); - assertNotNull("NameNode SPNEGO principal was not specified", - nnSpnegoPrincipal); - assertNotNull("NameNode keytab was not specified", nnKeyTab); - - String dnPrincipal = System.getProperty("dfs.datanode.kerberos.principal"); - String dnKeyTab = System.getProperty("dfs.datanode.keytab.file"); - assertNotNull("DataNode principal was not specified", dnPrincipal); - assertNotNull("DataNode keytab was not specified", dnKeyTab); - - Configuration conf = new HdfsConfiguration(); - conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, - "kerberos"); - conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, nnPrincipal); - conf.set(DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, - nnSpnegoPrincipal); - conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab); - conf.set(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, dnPrincipal); - conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, dnKeyTab); - // Secure DataNode requires using ports lower than 1024. - conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:1004"); - conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:1006"); - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "700"); - - cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(NUM_OF_DATANODES) - .checkDataNodeAddrConfig(true) - .build(); - cluster.waitActive(); - assertTrue(cluster.isDataNodeUp()); - - } catch (Exception ex) { - ex.printStackTrace(); - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java deleted file mode 100644 index e98e112c63..0000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package org.apache.hadoop.hdfs.server.namenode; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.security.PrivilegedExceptionAction; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; - -/** - * This test brings up a MiniDFSCluster with 1 NameNode and 0 - * DataNodes with kerberos authentication enabled using user-specified - * KDC, principals, and keytabs. - * - * To run, users must specify the following system properties: - * externalKdc=true - * java.security.krb5.conf - * dfs.namenode.kerberos.principal - * dfs.namenode.kerberos.internal.spnego.principal - * dfs.namenode.keytab.file - * user.principal (do not specify superuser!) - * user.keytab - */ -public class TestSecureNameNodeWithExternalKdc { - final static private int NUM_OF_DATANODES = 0; - - @Before - public void testExternalKdcRunning() { - // Tests are skipped if external KDC is not running. - Assume.assumeTrue(isExternalKdcRunning()); - } - - @Test - public void testSecureNameNode() throws IOException, InterruptedException { - MiniDFSCluster cluster = null; - try { - String nnPrincipal = - System.getProperty("dfs.namenode.kerberos.principal"); - String nnSpnegoPrincipal = - System.getProperty("dfs.namenode.kerberos.internal.spnego.principal"); - String nnKeyTab = System.getProperty("dfs.namenode.keytab.file"); - assertNotNull("NameNode principal was not specified", nnPrincipal); - assertNotNull("NameNode SPNEGO principal was not specified", - nnSpnegoPrincipal); - assertNotNull("NameNode keytab was not specified", nnKeyTab); - - Configuration conf = new HdfsConfiguration(); - conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, - "kerberos"); - conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, nnPrincipal); - conf.set(DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, - nnSpnegoPrincipal); - conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab); - - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES) - .build(); - final MiniDFSCluster clusterRef = cluster; - cluster.waitActive(); - FileSystem fsForCurrentUser = cluster.getFileSystem(); - fsForCurrentUser.mkdirs(new Path("/tmp")); - fsForCurrentUser.setPermission(new Path("/tmp"), new FsPermission( - (short) 511)); - - // The user specified should not be a superuser - String userPrincipal = System.getProperty("user.principal"); - String userKeyTab = System.getProperty("user.keytab"); - assertNotNull("User principal was not specified", userPrincipal); - assertNotNull("User keytab was not specified", userKeyTab); - - UserGroupInformation ugi = UserGroupInformation - .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab); - FileSystem fs = ugi.doAs(new PrivilegedExceptionAction() { - @Override - public FileSystem run() throws Exception 
{ - return clusterRef.getFileSystem(); - } - }); - try { - Path p = new Path("/users"); - fs.mkdirs(p); - fail("User must not be allowed to write in /"); - } catch (IOException expected) { - } - - Path p = new Path("/tmp/alpha"); - fs.mkdirs(p); - assertNotNull(fs.listStatus(p)); - assertEquals(AuthenticationMethod.KERBEROS, - ugi.getAuthenticationMethod()); - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } -} From 1e7010cf38115604d6fa3aa5728362c86644e66a Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Tue, 6 Nov 2012 22:34:29 +0000 Subject: [PATCH 3/4] HDFS-3979. For hsync, datanode should wait for the local sync to complete before sending ack. Contributed by Lars Hofhansl git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406382 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hdfs/server/datanode/BlockReceiver.java | 37 ++++++++++++------- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 948f7defe0..b2766bd3b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -558,6 +558,9 @@ Release 2.0.3-alpha - Unreleased HDFS-1331. dfs -test should work like /bin/test (Andy Isaacson via daryn) + HDFS-3979. For hsync, datanode should wait for the local sync to complete + before sending ack. (Lars Hofhansl via szetszwo) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 0f1ccb9435..6995fd2d4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -319,9 +319,6 @@ public void close() throws IOException { * @throws IOException */ void flushOrSync(boolean isSync) throws IOException { - if (isSync && (out != null || checksumOut != null)) { - datanode.metrics.incrFsyncCount(); - } long flushTotalNanos = 0; if (checksumOut != null) { long flushStartNanos = System.nanoTime(); @@ -347,6 +344,9 @@ void flushOrSync(boolean isSync) throws IOException { } if (checksumOut != null || out != null) { datanode.metrics.addFlushNanos(flushTotalNanos); + if (isSync) { + datanode.metrics.incrFsyncCount(); + } } } @@ -438,8 +438,10 @@ private int receivePacket() throws IOException { int len = header.getDataLen(); boolean syncBlock = header.getSyncBlock(); - // make sure the block gets sync'ed upon close - this.syncOnClose |= syncBlock && lastPacketInBlock; + // avoid double sync'ing on close + if (syncBlock && lastPacketInBlock) { + this.syncOnClose = false; + } // update received bytes long firstByteInBlock = offsetInBlock; @@ -448,11 +450,11 @@ private int receivePacket() throws IOException { replicaInfo.setNumBytes(offsetInBlock); } - // put in queue for pending acks - if (responder != null) { - ((PacketResponder)responder.getRunnable()).enqueue(seqno, - lastPacketInBlock, offsetInBlock); - } + // put in queue for pending acks, unless sync was requested + if (responder != null && !syncBlock) { + ((PacketResponder) responder.getRunnable()).enqueue(seqno, + lastPacketInBlock, offsetInBlock); + } //First write the packet to the mirror: if (mirrorOut != null && 
!mirrorError) { @@ -471,8 +473,8 @@ private int receivePacket() throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Receiving an empty packet or the end of the block " + block); } - // flush unless close() would flush anyway - if (syncBlock && !lastPacketInBlock) { + // sync block if requested + if (syncBlock) { flushOrSync(true); } } else { @@ -563,8 +565,8 @@ private int receivePacket() throws IOException { checksumBuf.arrayOffset() + checksumBuf.position(), checksumLen); } - /// flush entire packet, sync unless close() will sync - flushOrSync(syncBlock && !lastPacketInBlock); + /// flush entire packet, sync if requested + flushOrSync(syncBlock); replicaInfo.setLastChecksumAndDataLen( offsetInBlock, lastChunkChecksum @@ -580,6 +582,13 @@ private int receivePacket() throws IOException { } } + // if sync was requested, put in queue for pending acks here + // (after the fsync finished) + if (responder != null && syncBlock) { + ((PacketResponder) responder.getRunnable()).enqueue(seqno, + lastPacketInBlock, offsetInBlock); + } + if (throttler != null) { // throttle I/O throttler.throttle(len); } From c013142a12692df90f3b3bc5878918f2c9f8c55e Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Wed, 7 Nov 2012 00:48:17 +0000 Subject: [PATCH 4/4] HADOOP-9004. Allow security unit tests to use external KDC. Contributed by Stephen Chu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406413 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../security/SecurityUtilTestHelper.java | 15 ++ .../security/TestUGIWithExternalKdc.java | 74 ++++++++++ .../datanode/SecureDataNodeStarter.java | 37 +++-- .../apache/hadoop/hdfs/MiniDFSCluster.java | 44 +++++- .../datanode/TestStartSecureDataNode.java | 117 ++++++++++++++++ .../TestSecureNameNodeWithExternalKdc.java | 129 ++++++++++++++++++ 7 files changed, 398 insertions(+), 21 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e349cb4526..669c9e64d8 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -129,6 +129,9 @@ Trunk (Unreleased) HADOOP-8776. Provide an option in test-patch that can enable/disable compiling native code. (Chris Nauroth via suresh) + HADOOP-9004. Allow security unit tests to use external KDC. (Stephen Chu + via suresh) + BUG FIXES HADOOP-8177. MBeans shouldn't try to register when it fails to create MBeanName. 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java index 7c5f5e1e14..8a9ad05c34 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java @@ -27,4 +27,19 @@ public class SecurityUtilTestHelper { public static void setTokenServiceUseIp(boolean flag) { SecurityUtil.setTokenServiceUseIp(flag); } + + /** + * Return true if externalKdc=true and the location of the krb5.conf + * file has been specified, and false otherwise. + */ + public static boolean isExternalKdcRunning() { + String externalKdc = System.getProperty("externalKdc"); + String krb5Conf = System.getProperty("java.security.krb5.conf"); + if(externalKdc == null || !externalKdc.equals("true") || + krb5Conf == null) { + return false; + } + return true; + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java new file mode 100644 index 0000000000..2f55b11fd1 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package org.apache.hadoop.security; + +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; + +/** + * Tests kerberos keytab login using a user-specified external KDC + * + * To run, users must specify the following system properties: + * externalKdc=true + * java.security.krb5.conf + * user.principal + * user.keytab + */ +public class TestUGIWithExternalKdc { + + @Before + public void testExternalKdcRunning() { + Assume.assumeTrue(isExternalKdcRunning()); + } + + @Test + public void testLogin() throws IOException { + String userPrincipal = System.getProperty("user.principal"); + String userKeyTab = System.getProperty("user.keytab"); + Assert.assertNotNull("User principal was not specified", userPrincipal); + Assert.assertNotNull("User keytab was not specified", userKeyTab); + + Configuration conf = new Configuration(); + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + UserGroupInformation.setConfiguration(conf); + + UserGroupInformation ugi = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab); + + Assert.assertEquals(AuthenticationMethod.KERBEROS, + ugi.getAuthenticationMethod()); + + try { + UserGroupInformation + .loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM", userKeyTab); + Assert.fail("Login should have failed"); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index bcfcd9f76f..c5e9c9ca85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -38,6 +38,8 @@ import javax.net.ssl.SSLServerSocketFactory; +import com.google.common.annotations.VisibleForTesting; + /** * Utility class to start a datanode in a secure cluster, first obtaining * privileged resources before main startup and handing them to the datanode. 
@@ -73,6 +75,25 @@ public void init(DaemonContext context) throws Exception { // Stash command-line arguments for regular datanode args = context.getArguments(); + sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf); + resources = getSecureResources(sslFactory, conf); + } + + @Override + public void start() throws Exception { + System.err.println("Starting regular datanode initialization"); + DataNode.secureMain(args, resources); + } + + @Override public void destroy() { + sslFactory.destroy(); + } + + @Override public void stop() throws Exception { /* Nothing to do */ } + + @VisibleForTesting + public static SecureResources getSecureResources(final SSLFactory sslFactory, + Configuration conf) throws Exception { // Obtain secure port for data streaming to datanode InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf); int socketWriteTimeout = conf.getInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, @@ -85,13 +106,12 @@ public void init(DaemonContext context) throws Exception { // Check that we got the port we need if (ss.getLocalPort() != streamingAddr.getPort()) { throw new RuntimeException("Unable to bind on specified streaming port in secure " + - "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort()); + "context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort()); } // Obtain secure listener for web server Connector listener; if (HttpConfig.isSecure()) { - sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf); try { sslFactory.init(); } catch (GeneralSecurityException ex) { @@ -126,18 +146,7 @@ protected SSLServerSocketFactory createFactory() throws Exception { } System.err.println("Opened streaming server at " + streamingAddr); System.err.println("Opened info server at " + infoSocAddr); - resources = new SecureResources(ss, listener); + return new SecureResources(ss, listener); } - @Override - public void start() throws Exception { - System.err.println("Starting regular datanode initialization"); - DataNode.secureMain(args, resources); - } - - @Override public void destroy() { - sslFactory.destroy(); - } - - @Override public void stop() throws Exception { /* Nothing to do */ } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 0c238581ba..851b52541f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -81,6 +81,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.DataStorage; +import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter; +import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -95,6 +97,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; @@ -145,6 +148,7 @@ public static class 
Builder { private boolean setupHostsFile = false; private MiniDFSNNTopology nnTopology = null; private boolean checkExitOnShutdown = true; + private boolean checkDataNodeAddrConfig = false; private boolean checkDataNodeHostConfig = false; public Builder(Configuration conf) { @@ -263,6 +267,14 @@ public Builder checkExitOnShutdown(boolean val) { return this; } + /** + * Default: false + */ + public Builder checkDataNodeAddrConfig(boolean val) { + this.checkDataNodeAddrConfig = val; + return this; + } + /** * Default: false */ @@ -336,6 +348,7 @@ private MiniDFSCluster(Builder builder) throws IOException { builder.setupHostsFile, builder.nnTopology, builder.checkExitOnShutdown, + builder.checkDataNodeAddrConfig, builder.checkDataNodeHostConfig); } @@ -343,11 +356,14 @@ public class DataNodeProperties { DataNode datanode; Configuration conf; String[] dnArgs; + SecureResources secureResources; - DataNodeProperties(DataNode node, Configuration conf, String[] args) { + DataNodeProperties(DataNode node, Configuration conf, String[] args, + SecureResources secureResources) { this.datanode = node; this.conf = conf; this.dnArgs = args; + this.secureResources = secureResources; } } @@ -573,7 +589,7 @@ public MiniDFSCluster(int nameNodePort, manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs, operation, racks, hosts, simulatedCapacities, null, true, false, - MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false); + MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false); } private void initMiniDFSCluster( @@ -584,6 +600,7 @@ private void initMiniDFSCluster( String[] hosts, long[] simulatedCapacities, String clusterId, boolean waitSafeMode, boolean setupHostsFile, MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown, + boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig) throws IOException { ExitUtil.disableSystemExit(); @@ -647,7 +664,7 @@ private void initMiniDFSCluster( // Start the DataNodes startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks, - hosts, simulatedCapacities, setupHostsFile, false, checkDataNodeHostConfig); + hosts, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig); waitClusterUp(); //make sure ProxyUsers uses the latest conf ProxyUsers.refreshSuperUserGroupsConfiguration(conf); @@ -1161,7 +1178,18 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, if (hosts != null) { NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost"); } - DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf); + + SecureResources secureResources = null; + if (UserGroupInformation.isSecurityEnabled()) { + SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, dnConf); + try { + secureResources = SecureDataNodeStarter.getSecureResources(sslFactory, dnConf); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf, + secureResources); if(dn == null) throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY)); @@ -1176,7 +1204,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, racks[i-curDatanodesNum]); } dn.runDatanodeDaemon(); - dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs)); + dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources)); } curDatanodesNum += numDataNodes; this.numDataNodes += numDataNodes; @@ -1607,14 +1635,16 @@ public synchronized boolean restartDataNode(DataNodeProperties dnprop, 
boolean keepPort) throws IOException { Configuration conf = dnprop.conf; String[] args = dnprop.dnArgs; + SecureResources secureResources = dnprop.secureResources; Configuration newconf = new HdfsConfiguration(conf); // save cloned config if (keepPort) { InetSocketAddress addr = dnprop.datanode.getXferAddress(); conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort()); } - dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf), - newconf, args)); + dataNodes.add(new DataNodeProperties( + DataNode.createDataNode(args, conf, secureResources), + newconf, args, secureResources)); numDataNodes++; return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java new file mode 100644 index 0000000000..ba5587276c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStartSecureDataNode.java @@ -0,0 +1,117 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdfs.server.datanode; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; + +/** + * This test starts a 1 NameNode 1 DataNode MiniDFSCluster with + * kerberos authentication enabled using user-specified KDC, + * principals, and keytabs. + * + * A secure DataNode has to be started by root, so this test needs to + * be run by root. 
+ * + * To run, users must specify the following system properties: + * externalKdc=true + * java.security.krb5.conf + * dfs.namenode.kerberos.principal + * dfs.namenode.kerberos.internal.spnego.principal + * dfs.namenode.keytab.file + * dfs.datanode.kerberos.principal + * dfs.datanode.keytab.file + */ +public class TestStartSecureDataNode { + final static private int NUM_OF_DATANODES = 1; + + @Before + public void testExternalKdcRunning() { + // Tests are skipped if external KDC is not running. + Assume.assumeTrue(isExternalKdcRunning()); + } + + @Test + public void testSecureNameNode() throws IOException, InterruptedException { + MiniDFSCluster cluster = null; + try { + String nnPrincipal = + System.getProperty("dfs.namenode.kerberos.principal"); + String nnSpnegoPrincipal = + System.getProperty("dfs.namenode.kerberos.internal.spnego.principal"); + String nnKeyTab = System.getProperty("dfs.namenode.keytab.file"); + assertNotNull("NameNode principal was not specified", nnPrincipal); + assertNotNull("NameNode SPNEGO principal was not specified", + nnSpnegoPrincipal); + assertNotNull("NameNode keytab was not specified", nnKeyTab); + + String dnPrincipal = System.getProperty("dfs.datanode.kerberos.principal"); + String dnKeyTab = System.getProperty("dfs.datanode.keytab.file"); + assertNotNull("DataNode principal was not specified", dnPrincipal); + assertNotNull("DataNode keytab was not specified", dnKeyTab); + + Configuration conf = new HdfsConfiguration(); + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, nnPrincipal); + conf.set(DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, + nnSpnegoPrincipal); + conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab); + conf.set(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, dnPrincipal); + conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, dnKeyTab); + // Secure DataNode requires using ports lower than 1024. + conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:1004"); + conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:1006"); + conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "700"); + + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(NUM_OF_DATANODES) + .checkDataNodeAddrConfig(true) + .build(); + cluster.waitActive(); + assertTrue(cluster.isDataNodeUp()); + + } catch (Exception ex) { + ex.printStackTrace(); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java new file mode 100644 index 0000000000..e98e112c63 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNodeWithExternalKdc.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdfs.server.namenode; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning; +import org.junit.Assume; +import org.junit.Before; +import org.junit.Test; + +/** + * This test brings up a MiniDFSCluster with 1 NameNode and 0 + * DataNodes with kerberos authentication enabled using user-specified + * KDC, principals, and keytabs. + * + * To run, users must specify the following system properties: + * externalKdc=true + * java.security.krb5.conf + * dfs.namenode.kerberos.principal + * dfs.namenode.kerberos.internal.spnego.principal + * dfs.namenode.keytab.file + * user.principal (do not specify superuser!) + * user.keytab + */ +public class TestSecureNameNodeWithExternalKdc { + final static private int NUM_OF_DATANODES = 0; + + @Before + public void testExternalKdcRunning() { + // Tests are skipped if external KDC is not running. 
+ Assume.assumeTrue(isExternalKdcRunning()); + } + + @Test + public void testSecureNameNode() throws IOException, InterruptedException { + MiniDFSCluster cluster = null; + try { + String nnPrincipal = + System.getProperty("dfs.namenode.kerberos.principal"); + String nnSpnegoPrincipal = + System.getProperty("dfs.namenode.kerberos.internal.spnego.principal"); + String nnKeyTab = System.getProperty("dfs.namenode.keytab.file"); + assertNotNull("NameNode principal was not specified", nnPrincipal); + assertNotNull("NameNode SPNEGO principal was not specified", + nnSpnegoPrincipal); + assertNotNull("NameNode keytab was not specified", nnKeyTab); + + Configuration conf = new HdfsConfiguration(); + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, nnPrincipal); + conf.set(DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, + nnSpnegoPrincipal); + conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab); + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES) + .build(); + final MiniDFSCluster clusterRef = cluster; + cluster.waitActive(); + FileSystem fsForCurrentUser = cluster.getFileSystem(); + fsForCurrentUser.mkdirs(new Path("/tmp")); + fsForCurrentUser.setPermission(new Path("/tmp"), new FsPermission( + (short) 511)); + + // The user specified should not be a superuser + String userPrincipal = System.getProperty("user.principal"); + String userKeyTab = System.getProperty("user.keytab"); + assertNotNull("User principal was not specified", userPrincipal); + assertNotNull("User keytab was not specified", userKeyTab); + + UserGroupInformation ugi = UserGroupInformation + .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab); + FileSystem fs = ugi.doAs(new PrivilegedExceptionAction() { + @Override + public FileSystem run() throws Exception { + return clusterRef.getFileSystem(); + } + }); + try { + Path p = new Path("/users"); + fs.mkdirs(p); + fail("User must not be allowed to write in /"); + } catch (IOException expected) { + } + + Path p = new Path("/tmp/alpha"); + fs.mkdirs(p); + assertNotNull(fs.listStatus(p)); + assertEquals(AuthenticationMethod.KERBEROS, + ugi.getAuthenticationMethod()); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } +}