diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a02ab0fb27..ee76afad94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -244,6 +244,8 @@ Branch-2 ( Unreleased changes ) HDFS-3516. Check content-type in WebHdfsFileSystem. (szetszwo) + HDFS-3535. Audit logging should log denied accesses. (Andy Isaacson via eli) + OPTIMIZATIONS HDFS-2982. Startup performance suffers when there are many edit log diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index f965ed04d2..9531ee2adf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -240,8 +240,15 @@ protected StringBuilder initialValue() { private static final void logAuditEvent(UserGroupInformation ugi, InetAddress addr, String cmd, String src, String dst, HdfsFileStatus stat) { + logAuditEvent(true, ugi, addr, cmd, src, dst, stat); + } + + private static final void logAuditEvent(boolean succeeded, + UserGroupInformation ugi, InetAddress addr, String cmd, String src, + String dst, HdfsFileStatus stat) { final StringBuilder sb = auditBuffer.get(); sb.setLength(0); + sb.append("allowed=").append(succeeded).append("\t"); sb.append("ugi=").append(ugi).append("\t"); sb.append("ip=").append(addr).append("\t"); sb.append("cmd=").append(cmd).append("\t"); @@ -1018,6 +1025,21 @@ private boolean isAccessTimeSupported() { void setPermission(String src, FsPermission permission) throws AccessControlException, FileNotFoundException, SafeModeException, UnresolvedLinkException, IOException { + try { + setPermissionInt(src, permission); + } catch (AccessControlException e) { + if 
(auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "setPermission", src, null, null); + } + throw e; + } + } + + private void setPermissionInt(String src, FsPermission permission) + throws AccessControlException, FileNotFoundException, SafeModeException, + UnresolvedLinkException, IOException { HdfsFileStatus resultingStat = null; writeLock(); try { @@ -1049,6 +1071,21 @@ void setPermission(String src, FsPermission permission) void setOwner(String src, String username, String group) throws AccessControlException, FileNotFoundException, SafeModeException, UnresolvedLinkException, IOException { + try { + setOwnerInt(src, username, group); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "setOwner", src, null, null); + } + throw e; + } + } + + private void setOwnerInt(String src, String username, String group) + throws AccessControlException, FileNotFoundException, SafeModeException, + UnresolvedLinkException, IOException { HdfsFileStatus resultingStat = null; writeLock(); try { @@ -1106,6 +1143,22 @@ LocatedBlocks getBlockLocations(String clientMachine, String src, LocatedBlocks getBlockLocations(String src, long offset, long length, boolean doAccessTime, boolean needBlockToken, boolean checkSafeMode) throws FileNotFoundException, UnresolvedLinkException, IOException { + try { + return getBlockLocationsInt(src, offset, length, doAccessTime, + needBlockToken, checkSafeMode); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "open", src, null, null); + } + throw e; + } + } + + private LocatedBlocks getBlockLocationsInt(String src, long offset, long length, + boolean doAccessTime, boolean needBlockToken, 
boolean checkSafeMode) + throws FileNotFoundException, UnresolvedLinkException, IOException { if (isPermissionEnabled) { checkPathAccess(src, FsAction.READ); } @@ -1202,6 +1255,20 @@ private LocatedBlocks getBlockLocationsUpdateTimes(String src, */ void concat(String target, String [] srcs) throws IOException, UnresolvedLinkException { + try { + concatInt(target, srcs); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "concat", Arrays.toString(srcs), target, null); + } + throw e; + } + } + + private void concatInt(String target, String [] srcs) + throws IOException, UnresolvedLinkException { if(FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug("concat " + Arrays.toString(srcs) + " to " + target); @@ -1354,6 +1421,20 @@ private void concatInternal(String target, String [] srcs) * written to the edits log but is not flushed. */ void setTimes(String src, long mtime, long atime) + throws IOException, UnresolvedLinkException { + try { + setTimesInt(src, mtime, atime); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "setTimes", src, null, null); + } + throw e; + } + } + + private void setTimesInt(String src, long mtime, long atime) + throws IOException, UnresolvedLinkException { if (!isAccessTimeSupported() && atime != -1) { throw new IOException("Access time for hdfs is not configured. 
" + @@ -1390,6 +1471,21 @@ void setTimes(String src, long mtime, long atime) void createSymlink(String target, String link, PermissionStatus dirPerms, boolean createParent) throws IOException, UnresolvedLinkException { + try { + createSymlinkInt(target, link, dirPerms, createParent); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "createSymlink", link, target, null); + } + throw e; + } + } + + private void createSymlinkInt(String target, String link, + PermissionStatus dirPerms, boolean createParent) + throws IOException, UnresolvedLinkException { HdfsFileStatus resultingStat = null; writeLock(); try { @@ -1457,8 +1553,22 @@ private void createSymlinkInternal(String target, String link, * @return true if successful; * false if file does not exist or is a directory */ - boolean setReplication(final String src, final short replication - ) throws IOException { + boolean setReplication(final String src, final short replication) + throws IOException { + try { + return setReplicationInt(src, replication); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "setReplication", src, null, null); + } + throw e; + } + } + + private boolean setReplicationInt(final String src, final short replication) + throws IOException { blockManager.verifyReplication(src, replication, null); final boolean isFile; @@ -1491,7 +1601,7 @@ boolean setReplication(final String src, final short replication } return isFile; } - + long getPreferredBlockSize(String filename) throws IOException, UnresolvedLinkException { readLock(); @@ -1537,6 +1647,24 @@ void startFile(String src, PermissionStatus permissions, String holder, short replication, long blockSize) throws AccessControlException, SafeModeException, 
FileAlreadyExistsException, UnresolvedLinkException, FileNotFoundException, ParentNotDirectoryException, IOException { + try { + startFileInt(src, permissions, holder, clientMachine, flag, createParent, + replication, blockSize); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "create", src, null, null); + } + throw e; + } + } + + private void startFileInt(String src, PermissionStatus permissions, String holder, + String clientMachine, EnumSet flag, boolean createParent, + short replication, long blockSize) throws AccessControlException, + SafeModeException, FileAlreadyExistsException, UnresolvedLinkException, + FileNotFoundException, ParentNotDirectoryException, IOException { writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -1840,6 +1968,22 @@ LocatedBlock appendFile(String src, String holder, String clientMachine) throws AccessControlException, SafeModeException, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, IOException { + try { + return appendFileInt(src, holder, clientMachine); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "append", src, null, null); + } + throw e; + } + } + + private LocatedBlock appendFileInt(String src, String holder, String clientMachine) + throws AccessControlException, SafeModeException, + FileAlreadyExistsException, FileNotFoundException, + ParentNotDirectoryException, IOException { if (!supportAppends) { throw new UnsupportedOperationException( "Append is not enabled on this NameNode. 
Use the " + @@ -2326,6 +2470,20 @@ boolean checkFileProgress(INodeFile v, boolean checkall) { */ @Deprecated boolean renameTo(String src, String dst) + throws IOException, UnresolvedLinkException { + try { + return renameToInt(src, dst); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "rename", src, dst, null); + } + throw e; + } + } + + private boolean renameToInt(String src, String dst) throws IOException, UnresolvedLinkException { boolean status = false; HdfsFileStatus resultingStat = null; @@ -2437,20 +2595,35 @@ private void renameToInternal(String src, String dst, * @see ClientProtocol#delete(String, boolean) for detailed descriptoin and * description of exceptions */ - boolean delete(String src, boolean recursive) - throws AccessControlException, SafeModeException, - UnresolvedLinkException, IOException { - if (NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src); - } - boolean status = deleteInternal(src, recursive, true); - if (status && auditLog.isInfoEnabled() && isExternalInvocation()) { - logAuditEvent(UserGroupInformation.getCurrentUser(), + boolean delete(String src, boolean recursive) + throws AccessControlException, SafeModeException, + UnresolvedLinkException, IOException { + try { + return deleteInt(src, recursive); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), Server.getRemoteIp(), "delete", src, null, null); } - return status; + throw e; } + } + + private boolean deleteInt(String src, boolean recursive) + throws AccessControlException, SafeModeException, + UnresolvedLinkException, IOException { + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src); + } + boolean status = 
deleteInternal(src, recursive, true); + if (status && auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "delete", src, null, null); + } + return status; + } /** * Remove a file/directory from the namespace. @@ -2606,6 +2779,20 @@ HdfsFileStatus getFileInfo(String src, boolean resolveLink) */ boolean mkdirs(String src, PermissionStatus permissions, boolean createParent) throws IOException, UnresolvedLinkException { + try { + return mkdirsInt(src, permissions, createParent); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "mkdirs", src, null, null); + } + throw e; + } + } + + private boolean mkdirsInt(String src, PermissionStatus permissions, + boolean createParent) throws IOException, UnresolvedLinkException { boolean status = false; if(NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src); @@ -3057,6 +3244,21 @@ void renewLease(String holder) throws IOException { */ DirectoryListing getListing(String src, byte[] startAfter, boolean needLocation) + throws AccessControlException, UnresolvedLinkException, IOException { + try { + return getListingInt(src, startAfter, needLocation); + } catch (AccessControlException e) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(false, UserGroupInformation.getCurrentUser(), + Server.getRemoteIp(), + "listStatus", src, null, null); + } + throw e; + } + } + + private DirectoryListing getListingInt(String src, byte[] startAfter, + boolean needLocation) throws AccessControlException, UnresolvedLinkException, IOException { DirectoryListing dl; readLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java new file mode 100644 index 0000000000..694d84f4d1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.namenode; + +import static org.junit.Assert.*; +import org.junit.Before; +import org.junit.After; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStream; +import java.util.regex.Pattern; + +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; +import org.apache.log4j.RollingFileAppender; +import org.junit.Test; + +/** + * A JUnit test that audit logs are generated + */ +public class TestAuditLogs { + static final String auditLogFile = System.getProperty("test.build.dir", + "build/test") + "/audit.log"; + + // Pattern for: + // allowed=(true|false) ugi=name ip=/address cmd={cmd} src={path} dst=null perm=null + static final Pattern auditPattern = Pattern.compile( + "allowed=.*?\\s" + + "ugi=.*?\\s" + + "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + + "cmd=.*?\\ssrc=.*?\\sdst=null\\s" + + "perm=.*?"); + static final Pattern successPattern = Pattern.compile( + ".*allowed=true.*"); + static final String username = "bob"; + static final String[] groups = { "group1" }; + static final String fileName = "/srcdat"; + + DFSTestUtil util; + MiniDFSCluster cluster; + FileSystem fs; + String fnames[]; + Configuration conf; + UserGroupInformation userGroupInfo; + + @Before + public void setupCluster() throws Exception { + conf = new HdfsConfiguration(); + final long 
precision = 1L; + conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision); + conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L); + util = new DFSTestUtil("TestAuditAllowed", 20, 3, 8*1024); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); + fs = cluster.getFileSystem(); + util.createFiles(fs, fileName); + + fnames = util.getFileNames(fileName); + util.waitReplication(fs, fileName, (short)3); + userGroupInfo = UserGroupInformation.createUserForTesting(username, groups); + } + + @After + public void teardownCluster() throws Exception { + util.cleanup(fs, "/srcdat"); + fs.close(); + cluster.shutdown(); + } + + /** test that allowed operation puts proper entry in audit log */ + @Test + public void testAuditAllowed() throws Exception { + final Path file = new Path(fnames[0]); + FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf); + + setupAuditLogs(); + InputStream istream = userfs.open(file); + int val = istream.read(); + istream.close(); + verifyAuditLogs(true); + assertTrue("failed to read from file", val > 0); + } + + /** test that denied operation puts proper entry in audit log */ + @Test + public void testAuditDenied() throws Exception { + final Path file = new Path(fnames[0]); + FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf); + + fs.setPermission(file, new FsPermission((short)0600)); + fs.setOwner(file, "root", null); + + setupAuditLogs(); + + try { + userfs.open(file); + fail("open must not succeed"); + } catch(AccessControlException e) { + System.out.println("got access denied, as expected."); + } + verifyAuditLogs(false); + } + + /** Sets up log4j logger for auditlogs */ + private void setupAuditLogs() throws IOException { + File file = new File(auditLogFile); + if (file.exists()) { + file.delete(); + } + Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + logger.setLevel(Level.INFO); + PatternLayout layout = new PatternLayout("%m%n"); + 
RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile); + logger.addAppender(appender); + } + + private void verifyAuditLogs(boolean expectSuccess) throws IOException { + // Turn off the logs + Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); + logger.setLevel(Level.OFF); + + // Ensure audit log has only one entry + BufferedReader reader = new BufferedReader(new FileReader(auditLogFile)); + String line = reader.readLine(); + assertNotNull(line); + assertTrue("Expected audit event not found in audit log", + auditPattern.matcher(line).matches()); + assertTrue("Expected success=" + expectSuccess, + successPattern.matcher(line).matches() == expectSuccess); + assertNull("Unexpected event in audit log", reader.readLine()); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 9b59802b1d..531dc879d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -76,8 +76,9 @@ public class TestFsck { "build/test") + "/audit.log"; // Pattern for: - // ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null + // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null static final Pattern fsckPattern = Pattern.compile( + "allowed=.*?\\s" + "ugi=.*?\\s" + "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" + "cmd=fsck\\ssrc=\\/\\sdst=null\\s" +