diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/OzoneExceptionMapper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/OzoneExceptionMapper.java
index d16a64f13d..5b27210e59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/OzoneExceptionMapper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/exceptions/OzoneExceptionMapper.java
@@ -22,14 +22,22 @@
 import javax.ws.rs.core.Response;
 import javax.ws.rs.ext.ExceptionMapper;
 
+import org.apache.log4j.MDC;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Class the represents various errors returned by the
  * Object Layer.
  */
 public class OzoneExceptionMapper implements ExceptionMapper<OzoneException> {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OzoneExceptionMapper.class);
 
   @Override
   public Response toResponse(OzoneException exception) {
+    LOG.info("Returning exception. ex: {}", exception.toJsonString());
+    MDC.clear();
     return Response.status((int)exception.getHttpCode())
         .entity(exception.toJsonString()).build();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
index baa61425d1..da09353255 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.utils.OzoneConsts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.slf4j.MDC;
 
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Request;
@@ -36,6 +37,7 @@
 
 import static java.net.HttpURLConnection.HTTP_CREATED;
 import static java.net.HttpURLConnection.HTTP_OK;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_FUNCTION;
 
 
 /**
@@ -59,6 +61,7 @@ public class BucketHandler implements Bucket {
   public Response createBucket(String volume, String bucket, Request req,
                                UriInfo info, HttpHeaders headers)
       throws OzoneException {
+    MDC.put(OZONE_FUNCTION, "createBucket");
     return new BucketProcessTemplate() {
       @Override
       public Response doProcess(BucketArgs args)
@@ -94,6 +97,7 @@ public Response doProcess(BucketArgs args)
   public Response updateBucket(String volume, String bucket, Request req,
                                UriInfo info, HttpHeaders headers)
       throws OzoneException {
+    MDC.put(OZONE_FUNCTION, "updateBucket");
     return new BucketProcessTemplate() {
       @Override
       public Response doProcess(BucketArgs args)
@@ -136,6 +140,7 @@ public Response doProcess(BucketArgs args)
   public Response deleteBucket(String volume, String bucket, Request req,
                                UriInfo info, HttpHeaders headers)
       throws OzoneException {
+    MDC.put(OZONE_FUNCTION, "deleteBucket");
     return new BucketProcessTemplate() {
       @Override
       public Response doProcess(BucketArgs args)
@@ -169,6 +174,7 @@ public Response listBucket(String volume, String bucket, final String info,
                              final String startPage, Request req,
                              UriInfo uriInfo, HttpHeaders headers)
       throws OzoneException {
+    MDC.put(OZONE_FUNCTION, "listBucket");
     return new BucketProcessTemplate() {
       @Override
       public Response doProcess(BucketArgs args)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
index 7046b8fe7d..2639e23e09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
@@ -30,6 +30,11 @@
 import org.apache.hadoop.ozone.web.utils.OzoneConsts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+
+
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Request;
 import javax.ws.rs.core.Response;
@@ -42,6 +47,11 @@
 import java.util.List;
 
 import static java.net.HttpURLConnection.HTTP_OK;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_COMPONENT;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_RESOURCE;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_REQUEST;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_USER;
+
 
 
 /**
@@ -49,6 +59,8 @@
  * Bucket handling code.
  */
 public abstract class BucketProcessTemplate {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BucketProcessTemplate.class);
 
   /**
    * This function serves as the common error handling function
@@ -70,22 +82,30 @@ public Response handleCall(String volume, String bucket, Request request,
     // TODO : Add logging
     String reqID = OzoneUtils.getRequestID();
     String hostName = OzoneUtils.getHostName();
+    MDC.put(OZONE_COMPONENT, "ozone");
+    MDC.put(OZONE_REQUEST, reqID);
+    UserArgs userArgs = null;
     try {
+      userArgs = new UserArgs(reqID, hostName, request, uriInfo, headers);
+
       OzoneUtils.validate(request, headers, reqID, bucket, hostName);
       OzoneUtils.verifyBucketName(bucket);
 
       UserAuth auth = UserHandlerBuilder.getAuthHandler();
-      UserArgs userArgs =
-          new UserArgs(reqID, hostName, request, uriInfo, headers);
       userArgs.setUserName(auth.getUser(userArgs));
+      MDC.put(OZONE_USER, userArgs.getUserName());
 
       BucketArgs args = new BucketArgs(volume, bucket, userArgs);
-      return doProcess(args);
-    } catch (IllegalArgumentException argExp) {
-      OzoneException ex = ErrorTable
-          .newError(ErrorTable.INVALID_BUCKET_NAME, reqID, bucket, hostName);
-      ex.setMessage(argExp.getMessage());
-      throw ex;
+      MDC.put(OZONE_RESOURCE, args.getResourceName());
+      Response response = doProcess(args);
+      LOG.info("Success");
+      MDC.clear();
+      return response;
+
+    } catch (IllegalArgumentException argEx) {
+      LOG.debug("Invalid bucket. ex:{}", argEx);
+      throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, userArgs,
+          argEx);
     } catch (IOException fsExp) {
       handleIOException(bucket, reqID, hostName, fsExp);
     }
@@ -133,6 +153,7 @@ void getAclsFromHeaders(BucketArgs args, boolean parseRemoveACL)
    */
   void handleIOException(String bucket, String reqID, String hostName,
                          IOException fsExp) throws OzoneException {
+    LOG.debug("IOException: {}", fsExp);
 
     if (fsExp instanceof FileAlreadyExistsException) {
       throw ErrorTable
@@ -224,6 +245,7 @@ OzoneConsts.Versioning getVersioning(BucketArgs args) throws OzoneException {
     try {
       return OzoneConsts.Versioning.valueOf(version);
     } catch (IllegalArgumentException ex) {
+      LOG.debug("Malformed Version. version: {}", version);
       throw ErrorTable.newError(ErrorTable.MALFORMED_BUCKET_VERSION, args, ex);
     }
   }
@@ -239,10 +261,11 @@ OzoneConsts.Versioning getVersioning(BucketArgs args) throws OzoneException {
    * @throws OzoneException
    */
   StorageType getStorageType(BucketArgs args) throws OzoneException {
-
+    List<String> storageClassString = null;
     try {
-      List<String> storageClassString =
+      storageClassString =
           args.getHeaders().getRequestHeader(Header.OZONE_STORAGE_TYPE);
+
       if (storageClassString == null) {
         return null;
       }
@@ -254,6 +277,10 @@ StorageType getStorageType(BucketArgs args) throws OzoneException {
       }
       return StorageType.valueOf(storageClassString.get(0).toUpperCase());
     } catch (IllegalArgumentException ex) {
+      if(storageClassString != null) {
+        LOG.debug("Malformed storage type. Type: {}",
+            storageClassString.get(0).toUpperCase());
+      }
       throw ErrorTable.newError(ErrorTable.MALFORMED_STORAGE_TYPE, args, ex);
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
index 7607434f18..88e9052736 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
@@ -25,6 +25,9 @@
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.interfaces.UserAuth;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
 
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Request;
@@ -41,11 +44,17 @@
 import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_REQUEST;
 import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.SERVER_ERROR;
 import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.newError;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_COMPONENT;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_REQUEST;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_RESOURCE;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_USER;
 
 /**
  * This class abstracts way the repetitive tasks in Key handling code.
  */
 public abstract class KeyProcessTemplate {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(KeyProcessTemplate.class);
 
   /**
    * This function serves as the common error handling function for all Key
@@ -63,31 +72,37 @@ public Response handleCall(String volume, String bucket, String key,
     String reqID = OzoneUtils.getRequestID();
     String hostName = OzoneUtils.getHostName();
+    MDC.put(OZONE_COMPONENT, "ozone");
+    MDC.put(OZONE_REQUEST, reqID);
     UserArgs userArgs = null;
     try {
+      userArgs = new UserArgs(reqID, hostName, request, info, headers);
       OzoneUtils.validate(request, headers, reqID, bucket, hostName);
       OzoneUtils.verifyBucketName(bucket);
 
       UserAuth auth = UserHandlerBuilder.getAuthHandler();
-      userArgs = new UserArgs(reqID, hostName, request, info, headers);
       userArgs.setUserName(auth.getUser(userArgs));
+      MDC.put(OZONE_USER, userArgs.getUserName());
 
       KeyArgs args = new KeyArgs(volume, bucket, key, userArgs);
-      return doProcess(args, is, request, headers, info);
+      MDC.put(OZONE_RESOURCE, args.getResourceName());
+      Response response = doProcess(args, is, request, headers, info);
+      LOG.info("Success");
+      MDC.clear();
+      return response;
+
     } catch (IllegalArgumentException argExp) {
-      OzoneException ex =
-          newError(INVALID_BUCKET_NAME, reqID, bucket, hostName);
-      ex.setMessage(argExp.getMessage());
-      throw ex;
+      LOG.debug("Invalid bucket in key call. ex:{}", argExp);
+      throw newError(INVALID_BUCKET_NAME, userArgs, argExp);
     } catch (IOException fsExp) {
       // TODO : Handle errors from the FileSystem , let us map to server error
       // for now.
+      LOG.debug("IOException. ex : {}", fsExp);
       throw ErrorTable.newError(ErrorTable.SERVER_ERROR, userArgs, fsExp);
     } catch (NoSuchAlgorithmException algoEx) {
-      OzoneException ex =
-          ErrorTable.newError(SERVER_ERROR, reqID, key, hostName);
-      ex.setMessage(algoEx.getMessage());
-      throw ex;
+      LOG.debug("NoSuchAlgorithmException. Probably indicates an unusual java " +
+          "installation. ex : {}", algoEx);
+      throw ErrorTable.newError(SERVER_ERROR, userArgs, algoEx);
     }
   }
@@ -131,10 +146,11 @@ public void checkFileHashMatch(KeyArgs args, String computedString,
 
     if (!contentString.equals(computedString)) {
       fs.deleteKey(args);
-      OzoneException ex = ErrorTable.newError(BAD_DIGEST, args.getRequestID(),
-          args.getKeyName(), args.getHostName());
-      ex.setMessage(String.format("MD5 Digest mismatch. Expected %s Found " +
-          "%s", contentString, computedString));
+      OzoneException ex = ErrorTable.newError(BAD_DIGEST, args);
+      String msg = String.format("MD5 Digest mismatch. Expected %s Found " +
+          "%s", contentString, computedString);
+      ex.setMessage(msg);
+      LOG.debug(msg);
       throw ex;
     }
   }
@@ -158,10 +174,11 @@ public void checkFileLengthMatch(KeyArgs args, StorageHandler fs,
       throws IOException, OzoneException {
     if (bytesRead != contentLen) {
       fs.deleteKey(args);
-      OzoneException ex = ErrorTable.newError(INCOMPLETE_BODY,
-          args.getRequestID(), args.getKeyName(), args.getHostName());
-      ex.setMessage(String.format("Body length mismatch. Expected length : %d" +
-          " Found %d", contentLen, bytesRead));
+      OzoneException ex = ErrorTable.newError(INCOMPLETE_BODY, args);
+      String msg = String.format("Body length mismatch. Expected length : %d" +
+          " Found %d", contentLen, bytesRead);
+      ex.setMessage(msg);
+      LOG.debug(msg);
       throw ex;
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java
index 09a021ba87..2ce39ba520 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java
@@ -26,6 +26,9 @@
 import org.apache.hadoop.ozone.web.interfaces.UserAuth;
 import org.apache.hadoop.ozone.web.interfaces.Volume;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
 
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Request;
@@ -35,6 +38,7 @@
 
 import static java.net.HttpURLConnection.HTTP_CREATED;
 import static java.net.HttpURLConnection.HTTP_OK;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_FUNCTION;
 
 /**
  * VolumeHandler handles volume specific HTTP calls.
@@ -50,6 +54,8 @@
  */
 @InterfaceAudience.Private
 public class VolumeHandler implements Volume {
+  private static final Logger LOG = LoggerFactory.getLogger(VolumeHandler
+      .class);
   /**
    * Creates a volume.
    *
@@ -67,6 +73,7 @@ public class VolumeHandler implements Volume {
   public Response createVolume(String volume, final String quota, Request req,
                                UriInfo uriInfo, HttpHeaders headers)
       throws OzoneException {
+    MDC.put(OZONE_FUNCTION, "createVolume");
     return new VolumeProcessTemplate() {
       @Override
       public Response doProcess(VolumeArgs args)
@@ -119,6 +126,7 @@ public Response doProcess(VolumeArgs args)
   public Response updateVolume(String volume, final String quota, Request req,
                                UriInfo uriInfo, HttpHeaders headers)
       throws OzoneException {
+    MDC.put(OZONE_FUNCTION, "updateVolume");
     return new VolumeProcessTemplate() {
       @Override
       public Response doProcess(VolumeArgs args)
@@ -171,6 +179,8 @@ public Response doProcess(VolumeArgs args)
   @Override
   public Response deleteVolume(String volume, Request req, UriInfo uriInfo,
                                HttpHeaders headers) throws OzoneException {
+    MDC.put(OZONE_FUNCTION, "deleteVolume");
+
     return new VolumeProcessTemplate() {
       @Override
       public Response doProcess(VolumeArgs args)
@@ -202,6 +212,7 @@ public Response doProcess(VolumeArgs args)
   public Response getVolumeInfo(String volume, final String info, Request req,
                                 final UriInfo uriInfo, HttpHeaders headers)
       throws OzoneException {
+    MDC.put(OZONE_FUNCTION, "getVolumeInfo");
     return new VolumeProcessTemplate() {
       @Override
       public Response doProcess(VolumeArgs args)
@@ -215,6 +226,7 @@ public Response doProcess(VolumeArgs args)
         case Header.OZONE_LIST_QUERY_SERVICE:
           return getVolumesByUser(args); // Return list of volumes
         default:
+          LOG.debug("Unrecognized query param : {} ", info);
           OzoneException ozoneException =
               ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
           ozoneException.setMessage("Unrecognized query param : " + info);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
index d357da782c..7ca5d475c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
@@ -27,6 +27,9 @@
 import org.apache.hadoop.ozone.web.response.ListVolumes;
 import org.apache.hadoop.ozone.web.response.VolumeInfo;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
 
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Request;
@@ -38,6 +41,11 @@
 import java.nio.file.NoSuchFileException;
 
 import static java.net.HttpURLConnection.HTTP_OK;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_COMPONENT;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_RESOURCE;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_REQUEST;
+import static org.apache.hadoop.ozone.web.utils.OzoneConsts.OZONE_USER;
+
 
 /**
  * This class abstracts way the repetitive tasks in
@@ -45,6 +53,8 @@
  */
 @InterfaceAudience.Private
 public abstract class VolumeProcessTemplate {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(VolumeProcessTemplate.class);
 
 
   /**
@@ -64,24 +74,30 @@ public Response handleCall(String volume, Request request, UriInfo info,
                              HttpHeaders headers) throws OzoneException {
     String reqID = OzoneUtils.getRequestID();
     String hostName = OzoneUtils.getHostName();
+    MDC.put(OZONE_COMPONENT, "ozone");
+    MDC.put(OZONE_REQUEST, reqID);
+    UserArgs userArgs = null;
     try {
-
+      userArgs = new UserArgs(reqID, hostName, request, info, headers);
       OzoneUtils.validate(request, headers, reqID, volume, hostName);
 
       // we use the same logic for both bucket and volume names
       OzoneUtils.verifyBucketName(volume);
       UserAuth auth = UserHandlerBuilder.getAuthHandler();
-      UserArgs userArgs = new UserArgs(reqID, hostName, request, info, headers);
       userArgs.setUserName(auth.getUser(userArgs));
+      MDC.put(OZONE_USER, userArgs.getUserName());
       VolumeArgs args = new VolumeArgs(volume, userArgs);
 
-      return doProcess(args);
+      MDC.put(OZONE_RESOURCE, args.getResourceName());
+      Response response = doProcess(args);
+      LOG.info("Success");
+      MDC.clear();
+      return response;
+
     } catch (IllegalArgumentException ex) {
-      OzoneException exp = ErrorTable
-          .newError(ErrorTable.INVALID_VOLUME_NAME, reqID, volume, hostName);
-      exp.setMessage(ex.getMessage());
-      throw exp;
+      LOG.debug("illegal argument. {}", ex);
+      throw ErrorTable.newError(ErrorTable.INVALID_VOLUME_NAME, userArgs, ex);
    } catch (IOException ex) {
       handleIOException(volume, reqID, hostName, ex);
     }
@@ -142,6 +158,7 @@ private void handleIOException(String volume, String reqID, String hostName,
         exp.setMessage(fsExp.getMessage());
       }
     }
+    LOG.debug("IOException: {}", exp);
     throw exp;
   }
@@ -158,6 +175,7 @@ void setQuotaArgs(VolumeArgs args, String quota) throws OzoneException {
     try {
       args.setQuota(quota);
     } catch (IllegalArgumentException ex) {
+      LOG.debug("Malformed Quota: {}", ex);
       throw ErrorTable.newError(ErrorTable.MALFORMED_QUOTA, args, ex);
     }
   }
@@ -227,7 +245,9 @@ Response getVolumesByUser(VolumeArgs args) throws OzoneException {
           args.getRequest(), args.getUri(), args.getHeaders());
       return getVolumesByUser(user);
     } catch (IOException ex) {
-      OzoneException exp = ErrorTable.newError(ErrorTable.SERVER_ERROR, args);
+      LOG.debug("unable to get the volume list for the user. Ex: {}", ex);
+      OzoneException exp = ErrorTable.newError(ErrorTable.SERVER_ERROR,
+          args, ex);
       exp.setMessage("unable to get the volume list for the user");
       throw exp;
     }
@@ -242,20 +262,19 @@ Response getVolumesByUser(VolumeArgs args) throws OzoneException {
    * @throws OzoneException
    */
   Response getBucketsInVolume(VolumeArgs args) throws OzoneException {
-    String requestID = OzoneUtils.getRequestID();
-    String hostName = OzoneUtils.getHostName();
     try {
-      UserAuth auth = UserHandlerBuilder.getAuthHandler();
-      // TODO : Check for ACLS access.
+      // UserAuth auth = UserHandlerBuilder.getAuthHandler();
+      // TODO : Check ACLS.
       StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
       ListBuckets bucketList = fs.listBuckets(args);
       return OzoneUtils.getResponse(args, HTTP_OK, bucketList.toJsonString());
     } catch (IOException ex) {
+      LOG.debug("unable to get the bucket list for the specified volume." +
+          " Ex: {}", ex);
       OzoneException exp =
-          ErrorTable.newError(ErrorTable.SERVER_ERROR, requestID, "", hostName);
+          ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex);
       exp.setMessage("unable to get the bucket list for the specified volume.");
       throw exp;
-
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java
index fb0a7a6460..80f02d6fbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneConsts.java
@@ -47,6 +47,13 @@ public final class OzoneConsts {
       "EEE, dd MMM yyyy HH:mm:ss zzz";
   public static final String OZONE_TIME_ZONE = "GMT";
 
+  public static final String OZONE_COMPONENT = "component";
+  public static final String OZONE_FUNCTION = "function";
+  public static final String OZONE_RESOURCE = "resource";
+  public static final String OZONE_USER = "user";
+  public static final String OZONE_REQUEST = "request";
+
+
   /**
    * Supports Bucket Versioning.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
index 7378846711..4d6967abe9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
@@ -47,3 +47,27 @@ log4j.appender.DNMETRICSRFA.layout=org.apache.log4j.PatternLayout
 log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
 log4j.appender.DNMETRICSRFA.MaxBackupIndex=1
 log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
+
+#
+# Add a logger for ozone that is separate from the Datanode.
+#
+log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
+
+# Do not log into datanode logs. Remove this line to have single log.
+log4j.additivity.org.apache.hadoop.ozone=false
+
+# For development purposes, log both to console and log file.
+log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
+log4j.appender.OZONE.Threshold=info
+log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
+log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+  %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
+
+# Real ozone logger that writes to ozone.log
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
+log4j.appender.FILE.Threshold=debug
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+  (%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
+  %m%n
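
Editor's note on the pattern above (not part of the patch): the handlers stamp a set of per-request MDC keys so that the %X{component}, %X{function}, %X{resource}, %X{user} and %X{request} conversions in log4j.properties resolve to real values. The sketch below is a minimal, self-contained illustration of that flow, assuming an SLF4J setup whose MDC bridges to the configured log4j layout (e.g. the slf4j-log4j12 binding); the class name and all literal values are hypothetical, chosen only to mirror the constants the patch adds to OzoneConsts.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.MDC;

    public final class MdcPatternSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(MdcPatternSketch.class);

      public static void main(String[] args) {
        // Keys mirror OzoneConsts.OZONE_COMPONENT, OZONE_FUNCTION, etc.
        MDC.put("component", "ozone");
        MDC.put("function", "createBucket");   // set at each REST entry point
        MDC.put("request", "req-0001");        // hypothetical request id
        try {
          MDC.put("user", "hdfs");                    // once auth resolves
          MDC.put("resource", "/volume-a/bucket-b");  // once args are built
          LOG.info("Success");  // PatternLayout appends the MDC values
        } finally {
          // MDC is thread-local. The patch clears it on the success path and
          // in OzoneExceptionMapper on the error path; a finally block is the
          // conventional way to guarantee per-request cleanup.
          MDC.clear();
        }
      }
    }

Because MDC state is thread-local, clearing it at the end of every request matters on servlet-style worker threads that are reused: a stale user or request id would otherwise leak into the next request's log lines.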