diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
index a1828a3cec..21127f8216 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -31,3 +31,40 @@ LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+
+#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
+LOG4J2.PROPERTIES_monitorInterval=30
+LOG4J2.PROPERTIES_filter=read,write
+LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.read.marker=READ
+LOG4J2.PROPERTIES_filter.read.onMatch=DENY
+LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.write.marker=WRITE
+LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_appenders=console, rolling
+LOG4J2.PROPERTIES_appender.console.type=Console
+LOG4J2.PROPERTIES_appender.console.name=STDOUT
+LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
+LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
+LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
+LOG4J2.PROPERTIES_loggers=audit
+LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
+LOG4J2.PROPERTIES_logger.audit.name=OMAudit
+LOG4J2.PROPERTIES_logger.audit.level=INFO
+LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
+LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
+LOG4J2.PROPERTIES_rootLogger.level=INFO
+LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
+LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 15366fb836..9645c026fc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -184,4 +184,36 @@ private OzoneConsts() {
public static final String CHUNKS_PATH = "chunksPath";
public static final String CONTAINER_DB_TYPE = "containerDBType";
public static final String CHECKSUM = "checksum";
+
+ // For OM Audit usage
+ public static final String VOLUME = "volume";
+ public static final String BUCKET = "bucket";
+ public static final String KEY = "key";
+ public static final String QUOTA = "quota";
+ public static final String QUOTA_IN_BYTES = "quotaInBytes";
+ public static final String CLIENT_ID = "clientID";
+ public static final String OWNER = "owner";
+ public static final String ADMIN = "admin";
+ public static final String USERNAME = "username";
+ public static final String PREV_KEY = "prevKey";
+ public static final String START_KEY = "startKey";
+ public static final String MAX_KEYS = "maxKeys";
+ public static final String PREFIX = "prefix";
+ public static final String KEY_PREFIX = "keyPrefix";
+ public static final String ACLS = "acls";
+ public static final String USER_ACL = "userAcl";
+ public static final String ADD_ACLS = "addAcls";
+ public static final String REMOVE_ACLS = "removeAcls";
+ public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets";
+ public static final String TO_KEY_NAME = "toKeyName";
+ public static final String STORAGE_TYPE = "storageType";
+ public static final String IS_VERSION_ENABLED = "isVersionEnabled";
+ public static final String CREATION_TIME = "creationTime";
+ public static final String DATA_SIZE = "dataSize";
+ public static final String REPLICATION_TYPE = "replicationType";
+ public static final String REPLICATION_FACTOR = "replicationFactor";
+ public static final String KEY_LOCATION_INFO = "keyLocationInfo";
+
+
+
}
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 75ceeb7f2e..6a304333a9 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -97,6 +97,8 @@ function ozonecmd_case
om)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME=org.apache.hadoop.ozone.om.OzoneManager
+ HDFS_OM_OPTS="${HDFS_OM_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/om-audit-log4j2.properties"
+ HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_OM_OPTS}"
;;
oz)
HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell
diff --git a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties b/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
new file mode 100644
index 0000000000..7d097a081a
--- /dev/null
+++ b/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
@@ -0,0 +1,86 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+name=PropertiesConfig
+
+# Checks for config change periodically and reloads
+monitorInterval=30
+
+filter=read,write
+# filter.read.onMatch=DENY avoids logging all READ events
+# filter.read.onMatch=ACCEPT permits logging all READ events
+# The above two settings ignore the log levels in configuration
+# filter.read.onMatch=NEUTRAL permits logging of only those READ events
+# which are attempted at log level equal or greater than log level specified
+# in the configuration
+filter.read.type=MarkerFilter
+filter.read.marker=READ
+filter.read.onMatch=DENY
+filter.read.onMismatch=NEUTRAL
+
+# filter.write.onMatch=DENY avoids logging all WRITE events
+# filter.write.onMatch=ACCEPT permits logging all WRITE events
+# The above two settings ignore the log levels in configuration
+# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events
+# which are attempted at log level equal or greater than log level specified
+# in the configuration
+filter.write.type=MarkerFilter
+filter.write.marker=WRITE
+filter.write.onMatch=NEUTRAL
+filter.write.onMismatch=NEUTRAL
+
+# Log Levels are organized from most specific to least:
+# OFF (most specific, no logging)
+# FATAL (most specific, little data)
+# ERROR
+# WARN
+# INFO
+# DEBUG
+# TRACE (least specific, a lot of data)
+# ALL (least specific, all data)
+
+appenders=console, rolling
+appender.console.type=Console
+appender.console.name=STDOUT
+appender.console.layout.type=PatternLayout
+appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+
+#Rolling File Appender with size & time thresholds.
+#Rolling is triggered when either threshold is breached.
+#The rolled over file is compressed by default
+#Time interval is specified in seconds 86400s=1 day
+appender.rolling.type=RollingFile
+appender.rolling.name=RollingFile
+appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+appender.rolling.layout.type=PatternLayout
+appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+appender.rolling.policies.type=Policies
+appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval=86400
+appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size=64MB
+
+loggers=audit
+logger.audit.type=AsyncLogger
+logger.audit.name=OMAudit
+logger.audit.level=INFO
+logger.audit.appenderRefs=rolling
+logger.audit.appenderRef.file.ref=RollingFile
+
+rootLogger.level=INFO
+rootLogger.appenderRefs=stdout
+rootLogger.appenderRef.stdout.ref=STDOUT
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
index d780ea2c93..6488f5e368 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
@@ -18,24 +18,33 @@
package org.apache.hadoop.ozone.audit;
/**
- * Enum to define OM Action types for Audit.
+ * Enum to define Audit Action types for OzoneManager.
*/
public enum OMAction implements AuditAction {
+ // WRITE Actions
+ ALLOCATE_BLOCK("ALLOCATE_BLOCK"),
+ ALLOCATE_KEY("ALLOCATE_KEY"),
+ COMMIT_KEY("COMMIT_KEY"),
CREATE_VOLUME("CREATE_VOLUME"),
CREATE_BUCKET("CREATE_BUCKET"),
CREATE_KEY("CREATE_KEY"),
- READ_VOLUME("READ_VOLUME"),
- READ_BUCKET("READ_BUCKET"),
- READ_KEY("READ_BUCKET"),
- UPDATE_VOLUME("UPDATE_VOLUME"),
- UPDATE_BUCKET("UPDATE_BUCKET"),
- UPDATE_KEY("UPDATE_KEY"),
DELETE_VOLUME("DELETE_VOLUME"),
DELETE_BUCKET("DELETE_BUCKET"),
DELETE_KEY("DELETE_KEY"),
+ RENAME_KEY("RENAME_KEY"),
SET_OWNER("SET_OWNER"),
- SET_QUOTA("SET_QUOTA");
+ SET_QUOTA("SET_QUOTA"),
+ UPDATE_VOLUME("UPDATE_VOLUME"),
+ UPDATE_BUCKET("UPDATE_BUCKET"),
+ UPDATE_KEY("UPDATE_KEY"),
+ // READ Actions
+ LIST_BUCKETS("LIST_BUCKETS"),
+ LIST_VOLUMES("LIST_VOLUMES"),
+ LIST_KEYS("LIST_KEYS"),
+ READ_VOLUME("READ_VOLUME"),
+ READ_BUCKET("READ_BUCKET"),
+ READ_KEY("READ_KEY");
private String action;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
index 6aabfef6b4..1bd258e742 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -17,13 +17,17 @@
*/
package org.apache.hadoop.ozone.om.helpers;
+import java.util.LinkedHashMap;
import java.util.List;
+import java.util.Map;
import java.util.stream.Collectors;
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.Auditable;
import org.apache.hadoop.ozone.protocol.proto
.OzoneManagerProtocolProtos.BucketArgs;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@ -31,7 +35,7 @@
/**
* A class that encapsulates Bucket Arguments.
*/
-public final class OmBucketArgs {
+public final class OmBucketArgs implements Auditable {
/**
* Name of the volume in which the bucket belongs to.
*/
@@ -135,6 +139,25 @@ public static Builder newBuilder() {
return new Builder();
}
+ @Override
+ public Map toAuditMap() {
+ Map auditMap = new LinkedHashMap<>();
+ auditMap.put(OzoneConsts.VOLUME, this.volumeName);
+ auditMap.put(OzoneConsts.BUCKET, this.bucketName);
+ if(this.addAcls != null){
+ auditMap.put(OzoneConsts.ADD_ACLS, this.addAcls.toString());
+ }
+ if(this.removeAcls != null){
+ auditMap.put(OzoneConsts.REMOVE_ACLS, this.removeAcls.toString());
+ }
+ auditMap.put(OzoneConsts.IS_VERSION_ENABLED,
+ String.valueOf(this.isVersionEnabled));
+ if(this.storageType != null){
+ auditMap.put(OzoneConsts.STORAGE_TYPE, this.storageType.name());
+ }
+ return auditMap;
+ }
+
/**
* Builder for OmBucketArgs.
*/
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index bf5abddc43..0a136a7578 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -21,18 +21,22 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.Auditable;
import org.apache.hadoop.ozone.protocol.proto
.OzoneManagerProtocolProtos.BucketInfo;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
+import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
+import java.util.Map;
import java.util.stream.Collectors;
/**
* A class that encapsulates Bucket Info.
*/
-public final class OmBucketInfo {
+public final class OmBucketInfo implements Auditable {
/**
* Name of the volume in which the bucket belongs to.
*/
@@ -137,6 +141,21 @@ public static Builder newBuilder() {
return new Builder();
}
+ @Override
+ public Map toAuditMap() {
+ Map auditMap = new LinkedHashMap<>();
+ auditMap.put(OzoneConsts.VOLUME, this.volumeName);
+ auditMap.put(OzoneConsts.BUCKET, this.bucketName);
+ auditMap.put(OzoneConsts.ACLS,
+ (this.acls != null) ? this.acls.toString() : null);
+ auditMap.put(OzoneConsts.IS_VERSION_ENABLED,
+ String.valueOf(this.isVersionEnabled));
+ auditMap.put(OzoneConsts.STORAGE_TYPE,
+ (this.storageType != null) ? this.storageType.name() : null);
+ auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime));
+ return auditMap;
+ }
+
/**
* Builder for OmBucketInfo.
*/
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
index aab35c57a4..d8d41d5d53 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
@@ -18,14 +18,18 @@
package org.apache.hadoop.ozone.om.helpers;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.Auditable;
+import java.util.LinkedHashMap;
import java.util.List;
+import java.util.Map;
/**
* Args for key. Client use this to specify key's attributes on key creation
* (putKey()).
*/
-public final class OmKeyArgs {
+public final class OmKeyArgs implements Auditable {
private final String volumeName;
private final String bucketName;
private final String keyName;
@@ -82,6 +86,22 @@ public List getLocationInfoList() {
return locationInfoList;
}
+ @Override
+ public Map toAuditMap() {
+ Map auditMap = new LinkedHashMap<>();
+ auditMap.put(OzoneConsts.VOLUME, this.volumeName);
+ auditMap.put(OzoneConsts.BUCKET, this.bucketName);
+ auditMap.put(OzoneConsts.KEY, this.keyName);
+ auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(this.dataSize));
+ auditMap.put(OzoneConsts.REPLICATION_TYPE,
+ (this.type != null) ? this.type.name() : null);
+ auditMap.put(OzoneConsts.REPLICATION_FACTOR,
+ (this.factor != null) ? this.factor.name() : null);
+ auditMap.put(OzoneConsts.KEY_LOCATION_INFO,
+ (this.locationInfoList != null) ? locationInfoList.toString() : null);
+ return auditMap;
+ }
+
/**
* Builder class of OmKeyArgs.
*/
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
index c8b59b682d..27e25f9d28 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.ozone.om.helpers;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.Auditable;
import org.apache.hadoop.ozone.protocol.proto
.OzoneManagerProtocolProtos.OzoneAclInfo;
import org.apache.hadoop.ozone.protocol.proto
@@ -26,6 +28,7 @@
import java.io.IOException;
import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -35,7 +38,7 @@
/**
* A class that encapsulates the OmVolumeArgs Args.
*/
-public final class OmVolumeArgs {
+public final class OmVolumeArgs implements Auditable{
private final String adminName;
private final String ownerName;
private final String volume;
@@ -122,6 +125,17 @@ public static Builder newBuilder() {
return new Builder();
}
+ @Override
+ public Map toAuditMap() {
+ Map auditMap = new LinkedHashMap<>();
+ auditMap.put(OzoneConsts.ADMIN, this.adminName);
+ auditMap.put(OzoneConsts.OWNER, this.ownerName);
+ auditMap.put(OzoneConsts.VOLUME, this.volume);
+ auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime));
+ auditMap.put(OzoneConsts.QUOTA_IN_BYTES, String.valueOf(this.quotaInBytes));
+ return auditMap;
+ }
+
/**
* Builder for OmVolumeArgs.
*/
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 71fa921cc5..887ddd04e7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -27,6 +27,14 @@
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditAction;
+import org.apache.hadoop.ozone.audit.AuditEventStatus;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditLoggerType;
+import org.apache.hadoop.ozone.audit.AuditMessage;
+import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.common.Storage.StorageState;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
@@ -69,6 +77,8 @@
import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
import static org.apache.hadoop.hdds.server.ServerUtils
.updateRPCListenAddress;
+
+import org.apache.logging.log4j.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -78,6 +88,7 @@
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -104,6 +115,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
private static final Logger LOG =
LoggerFactory.getLogger(OzoneManager.class);
+ private static final AuditLogger AUDIT =
+ new AuditLogger(AuditLoggerType.OMLOGGER);
+
private static final String USAGE =
"Usage: \n ozone om [genericOptions] " + "[ "
+ StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone om [ "
@@ -335,8 +349,8 @@ private static void printUsage(PrintStream out) {
* @throws IOException in case OM instance creation fails.
*/
- public static OzoneManager createOm(String[] argv,
- OzoneConfiguration conf) throws IOException {
+ public static OzoneManager createOm(String[] argv, OzoneConfiguration conf)
+ throws IOException {
if (!isHddsEnabled(conf)) {
System.err.println("OM cannot be started in secure mode or when " +
OZONE_ENABLED + " is set to false");
@@ -486,8 +500,13 @@ public void createVolume(OmVolumeArgs args) throws IOException {
try {
metrics.incNumVolumeCreates();
volumeManager.createVolume(args);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.CREATE_VOLUME,
+ args.toAuditMap()));
} catch (Exception ex) {
metrics.incNumVolumeCreateFails();
+ AUDIT.logWriteFailure(Level.ERROR,
+ buildAuditMessageForFailure(OMAction.CREATE_VOLUME,
+ args.toAuditMap()), ex);
throw ex;
}
}
@@ -501,11 +520,17 @@ public void createVolume(OmVolumeArgs args) throws IOException {
*/
@Override
public void setOwner(String volume, String owner) throws IOException {
+ Map auditMap = buildAuditMap(volume);
+ auditMap.put(OzoneConsts.OWNER, owner);
try {
metrics.incNumVolumeUpdates();
volumeManager.setOwner(volume, owner);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.SET_OWNER,
+ auditMap));
} catch (Exception ex) {
metrics.incNumVolumeUpdateFails();
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.SET_OWNER,
+ auditMap), ex);
throw ex;
}
}
@@ -519,15 +544,22 @@ public void setOwner(String volume, String owner) throws IOException {
*/
@Override
public void setQuota(String volume, long quota) throws IOException {
+ Map auditMap = buildAuditMap(volume);
+ auditMap.put(OzoneConsts.QUOTA, String.valueOf(quota));
try {
metrics.incNumVolumeUpdates();
volumeManager.setQuota(volume, quota);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.SET_QUOTA,
+ auditMap));
} catch (Exception ex) {
metrics.incNumVolumeUpdateFails();
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.SET_QUOTA,
+ auditMap), ex);
throw ex;
}
}
+ //TODO: Should define new OMAction type? How to log OzoneAclInfo ?
/**
* Checks if the specified user can access this volume.
*
@@ -540,12 +572,23 @@ public void setQuota(String volume, long quota) throws IOException {
@Override
public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
throws IOException {
+ boolean auditSuccess = true;
+ Map auditMap = buildAuditMap(volume);
+ auditMap.put(OzoneConsts.USER_ACL, userAcl.getName());
try {
metrics.incNumVolumeCheckAccesses();
return volumeManager.checkVolumeAccess(volume, userAcl);
} catch (Exception ex) {
metrics.incNumVolumeCheckAccessFails();
+ auditSuccess = false;
+ AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_VOLUME,
+ auditMap), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_VOLUME,
+ auditMap));
+ }
}
}
@@ -558,12 +601,22 @@ public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
*/
@Override
public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
+ boolean auditSuccess = true;
+ Map auditMap = buildAuditMap(volume);
try {
metrics.incNumVolumeInfos();
return volumeManager.getVolumeInfo(volume);
} catch (Exception ex) {
metrics.incNumVolumeInfoFails();
+ auditSuccess = false;
+ AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_VOLUME,
+ auditMap), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_VOLUME,
+ auditMap));
+ }
}
}
@@ -578,8 +631,12 @@ public void deleteVolume(String volume) throws IOException {
try {
metrics.incNumVolumeDeletes();
volumeManager.deleteVolume(volume);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_VOLUME,
+ buildAuditMap(volume)));
} catch (Exception ex) {
metrics.incNumVolumeDeleteFails();
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_VOLUME,
+ buildAuditMap(volume)), ex);
throw ex;
}
}
@@ -597,13 +654,27 @@ public void deleteVolume(String volume) throws IOException {
*/
@Override
public List listVolumeByUser(String userName, String prefix,
- String prevKey, int maxKeys) throws IOException {
+ String prevKey, int maxKeys) throws IOException {
+ boolean auditSuccess = true;
+ Map auditMap = new LinkedHashMap<>();
+ auditMap.put(OzoneConsts.PREV_KEY, prevKey);
+ auditMap.put(OzoneConsts.PREFIX, prefix);
+ auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys));
+ auditMap.put(OzoneConsts.USERNAME, userName);
try {
metrics.incNumVolumeLists();
return volumeManager.listVolumes(userName, prefix, prevKey, maxKeys);
} catch (Exception ex) {
metrics.incNumVolumeListFails();
+ auditSuccess = false;
+ AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_VOLUMES,
+ auditMap), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_VOLUMES,
+ auditMap));
+ }
}
}
@@ -620,12 +691,26 @@ public List listVolumeByUser(String userName, String prefix,
@Override
public List listAllVolumes(String prefix, String prevKey, int
maxKeys) throws IOException {
+ boolean auditSuccess = true;
+ Map auditMap = new LinkedHashMap<>();
+ auditMap.put(OzoneConsts.PREV_KEY, prevKey);
+ auditMap.put(OzoneConsts.PREFIX, prefix);
+ auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys));
+ auditMap.put(OzoneConsts.USERNAME, null);
try {
metrics.incNumVolumeLists();
return volumeManager.listVolumes(null, prefix, prevKey, maxKeys);
} catch (Exception ex) {
metrics.incNumVolumeListFails();
+ auditSuccess = false;
+ AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_VOLUMES,
+ auditMap), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_VOLUMES,
+ auditMap));
+ }
}
}
@@ -640,8 +725,12 @@ public void createBucket(OmBucketInfo bucketInfo) throws IOException {
try {
metrics.incNumBucketCreates();
bucketManager.createBucket(bucketInfo);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.CREATE_BUCKET,
+ bucketInfo.toAuditMap()));
} catch (Exception ex) {
metrics.incNumBucketCreateFails();
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.CREATE_BUCKET,
+ bucketInfo.toAuditMap()), ex);
throw ex;
}
}
@@ -650,16 +739,30 @@ public void createBucket(OmBucketInfo bucketInfo) throws IOException {
* {@inheritDoc}
*/
@Override
- public List listBuckets(String volumeName,
- String startKey, String prefix, int maxNumOfBuckets)
+ public List listBuckets(String volumeName, String startKey,
+ String prefix, int maxNumOfBuckets)
throws IOException {
+ boolean auditSuccess = true;
+ Map auditMap = buildAuditMap(volumeName);
+ auditMap.put(OzoneConsts.START_KEY, startKey);
+ auditMap.put(OzoneConsts.PREFIX, prefix);
+ auditMap.put(OzoneConsts.MAX_NUM_OF_BUCKETS,
+ String.valueOf(maxNumOfBuckets));
try {
metrics.incNumBucketLists();
return bucketManager.listBuckets(volumeName,
startKey, prefix, maxNumOfBuckets);
} catch (IOException ex) {
metrics.incNumBucketListFails();
+ auditSuccess = false;
+ AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_BUCKETS,
+ auditMap), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_BUCKETS,
+ auditMap));
+ }
}
}
@@ -674,12 +777,23 @@ public List listBuckets(String volumeName,
@Override
public OmBucketInfo getBucketInfo(String volume, String bucket)
throws IOException {
+ boolean auditSuccess = true;
+ Map auditMap = buildAuditMap(volume);
+ auditMap.put(OzoneConsts.BUCKET, bucket);
try {
metrics.incNumBucketInfos();
return bucketManager.getBucketInfo(volume, bucket);
} catch (Exception ex) {
metrics.incNumBucketInfoFails();
+ auditSuccess = false;
+ AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_BUCKET,
+ auditMap), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_BUCKET,
+ auditMap));
+ }
}
}
@@ -692,23 +806,38 @@ public OmBucketInfo getBucketInfo(String volume, String bucket)
*/
@Override
public OpenKeySession openKey(OmKeyArgs args) throws IOException {
+ boolean auditSuccess = true;
try {
metrics.incNumKeyAllocates();
return keyManager.openKey(args);
} catch (Exception ex) {
metrics.incNumKeyAllocateFails();
+ auditSuccess = false;
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.ALLOCATE_KEY,
+ args.toAuditMap()), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.ALLOCATE_KEY,
+ args.toAuditMap()));
+ }
}
}
@Override
public void commitKey(OmKeyArgs args, int clientID)
throws IOException {
+ Map auditMap = args.toAuditMap();
+ auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID));
try {
metrics.incNumKeyCommits();
keyManager.commitKey(args, clientID);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.COMMIT_KEY,
+ auditMap));
} catch (Exception ex) {
metrics.incNumKeyCommitFails();
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.COMMIT_KEY,
+ auditMap), ex);
throw ex;
}
}
@@ -716,12 +845,24 @@ public void commitKey(OmKeyArgs args, int clientID)
@Override
public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
throws IOException {
+ boolean auditSuccess = true;
+ Map auditMap = args.toAuditMap();
+ auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID));
try {
metrics.incNumBlockAllocateCalls();
return keyManager.allocateBlock(args, clientID);
} catch (Exception ex) {
metrics.incNumBlockAllocateCallFails();
+ auditSuccess = false;
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.ALLOCATE_BLOCK,
+ auditMap), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+ OMAction.ALLOCATE_BLOCK,
+ auditMap));
+ }
}
}
@@ -734,22 +875,37 @@ public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
*/
@Override
public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
+ boolean auditSuccess = true;
try {
metrics.incNumKeyLookups();
return keyManager.lookupKey(args);
} catch (Exception ex) {
metrics.incNumKeyLookupFails();
+ auditSuccess = false;
+ AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_KEY,
+ args.toAuditMap()), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_KEY,
+ args.toAuditMap()));
+ }
}
}
@Override
public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
+ Map auditMap = args.toAuditMap();
+ auditMap.put(OzoneConsts.TO_KEY_NAME, toKeyName);
try {
metrics.incNumKeyRenames();
keyManager.renameKey(args, toKeyName);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.RENAME_KEY,
+ auditMap));
} catch (IOException e) {
metrics.incNumKeyRenameFails();
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.RENAME_KEY,
+ auditMap), e);
throw e;
}
}
@@ -765,22 +921,40 @@ public void deleteKey(OmKeyArgs args) throws IOException {
try {
metrics.incNumKeyDeletes();
keyManager.deleteKey(args);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_KEY,
+ args.toAuditMap()));
} catch (Exception ex) {
metrics.incNumKeyDeleteFails();
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_KEY,
+ args.toAuditMap()), ex);
throw ex;
}
}
@Override
public List listKeys(String volumeName, String bucketName,
- String startKey, String keyPrefix, int maxKeys) throws IOException {
+ String startKey, String keyPrefix, int maxKeys) throws IOException {
+ boolean auditSuccess = true;
+ Map auditMap = buildAuditMap(volumeName);
+ auditMap.put(OzoneConsts.BUCKET, bucketName);
+ auditMap.put(OzoneConsts.START_KEY, startKey);
+ auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys));
+ auditMap.put(OzoneConsts.KEY_PREFIX, keyPrefix);
try {
metrics.incNumKeyLists();
return keyManager.listKeys(volumeName, bucketName,
startKey, keyPrefix, maxKeys);
} catch (IOException ex) {
metrics.incNumKeyListFails();
+ auditSuccess = false;
+ AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_KEYS,
+ auditMap), ex);
throw ex;
+ } finally {
+ if(auditSuccess){
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_KEYS,
+ auditMap));
+ }
}
}
@@ -795,8 +969,12 @@ public void setBucketProperty(OmBucketArgs args)
try {
metrics.incNumBucketUpdates();
bucketManager.setBucketProperty(args);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.UPDATE_BUCKET,
+ args.toAuditMap()));
} catch (Exception ex) {
metrics.incNumBucketUpdateFails();
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.UPDATE_BUCKET,
+ args.toAuditMap()), ex);
throw ex;
}
}
@@ -809,11 +987,17 @@ public void setBucketProperty(OmBucketArgs args)
* @throws IOException
*/
public void deleteBucket(String volume, String bucket) throws IOException {
+ Map auditMap = buildAuditMap(volume);
+ auditMap.put(OzoneConsts.BUCKET, bucket);
try {
metrics.incNumBucketDeletes();
bucketManager.deleteBucket(volume, bucket);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_BUCKET,
+ auditMap));
} catch (Exception ex) {
metrics.incNumBucketDeleteFails();
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_BUCKET,
+ auditMap), ex);
throw ex;
}
}
@@ -908,4 +1092,26 @@ public List getServiceList() throws IOException {
// metrics.incNumGetServiceListFails()
return services;
}
+
+ private Map buildAuditMap(String volume){
+ Map auditMap = new LinkedHashMap<>();
+ auditMap.put(OzoneConsts.VOLUME, volume);
+ return auditMap;
+ }
+
+ // TODO: Temporary method until AuditMessage is simplified
+ private AuditMessage buildAuditMessageForSuccess(AuditAction op,
+ Map auditMap) {
+ return new AuditMessage(Server.getRemoteUser().getUserName(),
+ Server.getRemoteIp().getHostAddress(), op.toString(), auditMap,
+ AuditEventStatus.SUCCESS.toString());
+ }
+
+ // TODO: Temporary method until AuditMessage is simplified
+ private AuditMessage buildAuditMessageForFailure(AuditAction op,
+ Map auditMap) {
+ return new AuditMessage(Server.getRemoteUser().getUserName(),
+ Server.getRemoteIp().getHostAddress(), op.toString(), auditMap,
+ AuditEventStatus.FAILURE.toString());
+ }
}