diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ced13ce358..5fbbd5513c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -329,6 +329,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6210. Support GETACLSTATUS operation in WebImageViewer.
     (Akira Ajisaka via wheat9)
 
+    HDFS-6269. NameNode Audit Log should differentiate between webHDFS open and
+    HDFS open. (Eric Payne via jeagles)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 16b356468a..b2ec8f9cdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7760,6 +7760,8 @@ public void logAuditEvent(boolean succeeded, String userName,
         }
         sb.append(trackingId);
       }
+      sb.append("\t").append("proto=");
+      sb.append(NamenodeWebHdfsMethods.isWebHdfsInvocation() ? "webhdfs" : "rpc");
       logAuditMessage(sb.toString());
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 94950caa5b..98297ca91c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -91,6 +91,9 @@ public TestAuditLogs(boolean useAsyncLog) {
       "perm=.*?");
   static final Pattern successPattern = Pattern.compile(
       ".*allowed=true.*");
+  static final Pattern webOpenPattern = Pattern.compile(
+      ".*cmd=open.*proto=webhdfs.*");
+
   static final String username = "bob";
   static final String[] groups = { "group1" };
   static final String fileName = "/srcdat";
@@ -240,6 +243,22 @@ public void testAuditWebHdfsDenied() throws Exception {
     verifyAuditLogsRepeat(false, 2);
   }
 
+  /** test that open via webhdfs puts a proper entry in the audit log */
+  @Test
+  public void testAuditWebHdfsOpen() throws Exception {
+    final Path file = new Path(fnames[0]);
+
+    fs.setPermission(file, new FsPermission((short)0644));
+    fs.setOwner(file, "root", null);
+
+    setupAuditLogs();
+
+    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME);
+    webfs.open(file);
+
+    verifyAuditLogsCheckPattern(true, 3, webOpenPattern);
+  }
+
   /** Sets up log4j logger for auditlogs */
   private void setupAuditLogs() throws IOException {
     Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
@@ -303,4 +322,38 @@ private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe)
       reader.close();
     }
   }
+
+  // Ensure audit log has exactly N entries, one of which matches the pattern
+  private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Pattern pattern)
+      throws IOException {
+    // Turn off the logs
+    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    logger.setLevel(Level.OFF);
+
+    // Close the appenders and force all logs to be flushed
+    Enumeration appenders = logger.getAllAppenders();
+    while (appenders.hasMoreElements()) {
+      Appender appender = (Appender)appenders.nextElement();
+      appender.close();
+    }
+
+    BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
+    String line = null;
+    boolean ret = true;
+    boolean patternMatches = false;
+
+    try {
+      for (int i = 0; i < ndupe; i++) {
+        line = reader.readLine();
+        assertNotNull(line);
+        patternMatches |= pattern.matcher(line).matches();
+        ret &= successPattern.matcher(line).matches();
+      }
+      assertNull("Unexpected event in audit log", reader.readLine());
+      assertTrue("Expected audit event not found in audit log", patternMatches);
+      assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
+    } finally {
+      reader.close();
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 5a0c3559da..b1ecc52b4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -99,13 +99,13 @@ public class TestFsck {
       "ugi=.*?\\s" +
       "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
       "cmd=fsck\\ssrc=\\/\\sdst=null\\s" +
-      "perm=null");
+      "perm=null\\s" + "proto=.*");
   static final Pattern getfileinfoPattern = Pattern.compile(
       "allowed=.*?\\s" +
       "ugi=.*?\\s" +
       "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
       "cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" +
-      "perm=null");
+      "perm=null\\s" + "proto=.*");
   static final Pattern numCorruptBlocksPattern = Pattern.compile(
       ".*Corrupt blocks:\t\t([0123456789]*).*");
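
For reference, a minimal standalone sketch (not part of the patch) of the audit-line shape this change produces, checked against the same regex TestAuditLogs adds as webOpenPattern. The class name AuditProtoDemo and the sample field values (user, IP, path) are invented for illustration.

import java.util.regex.Pattern;

public class AuditProtoDemo {
  public static void main(String[] args) {
    // Mimics a tab-separated NameNode audit entry after this patch, which
    // appends "proto=rpc" or "proto=webhdfs" as the final field.
    String rpcLine = "allowed=true\tugi=bob (auth:SIMPLE)\tip=/127.0.0.1\t"
        + "cmd=open\tsrc=/srcdat\tdst=null\tperm=null\tproto=rpc";
    String webLine = rpcLine.replace("proto=rpc", "proto=webhdfs");

    // Same pattern the test uses to pick out a webhdfs open.
    Pattern webOpenPattern = Pattern.compile(".*cmd=open.*proto=webhdfs.*");

    System.out.println(webOpenPattern.matcher(rpcLine).matches());  // false
    System.out.println(webOpenPattern.matcher(webLine).matches());  // true
  }
}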