HDFS-4486. Add log category for long-running DFSClient notices. Contributed by Zhe Zhang.

Colin Patrick McCabe, 2014-08-27 13:39:40 -07:00
commit 225569ece2 (parent d805cc27a9)
8 changed files with 53 additions and 20 deletions

File: OpensslCipher.java

@@ -32,6 +32,7 @@
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 /**
  * OpenSSL cipher using JNI.
@@ -82,6 +83,7 @@ static int get(String padding) throws NoSuchPaddingException {
     String loadingFailure = null;
     try {
       if (!NativeCodeLoader.buildSupportsOpenssl()) {
+        PerformanceAdvisory.LOG.debug("Build does not support openssl");
         loadingFailure = "build does not support openssl.";
       } else {
         initIDs();

File: OpensslSecureRandom.java

@@ -25,6 +25,7 @@
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 /**
  * OpenSSL secure random using JNI.
@@ -67,6 +68,8 @@ public static boolean isNativeCodeLoaded() {
   public OpensslSecureRandom() {
     if (!nativeEnabled) {
+      PerformanceAdvisory.LOG.debug("Build does not support openssl, " +
+          "falling back to Java SecureRandom.");
       fallback = new java.security.SecureRandom();
     }
   }
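
The hunk header above also shows that this class exposes isNativeCodeLoaded(), so callers can learn up front which path the constructor will take. A minimal sketch, assuming the hadoop-common classes are on the classpath:

  import org.apache.hadoop.crypto.random.OpensslSecureRandom;

  // Sketch: probe for the native OpenSSL RNG before constructing one.
  // When this returns false, the constructor shown above falls back to
  // java.security.SecureRandom and now notes it via PerformanceAdvisory.
  java.util.Random rng = OpensslSecureRandom.isNativeCodeLoaded()
      ? new OpensslSecureRandom()          // native OpenSSL-backed
      : new java.security.SecureRandom();  // pure-Java fallback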

File: NativeIO.java

@@ -37,6 +37,7 @@
 import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -196,7 +197,7 @@ public boolean verifyCanMlock() {
       // This can happen if the user has an older version of libhadoop.so
       // installed - in this case we can continue without native IO
       // after warning
-      LOG.error("Unable to initialize NativeIO libraries", t);
+      PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
     }
   }
 }
@@ -574,7 +575,7 @@ public static boolean access(String path, AccessRight desiredAccess)
       // This can happen if the user has an older version of libhadoop.so
       // installed - in this case we can continue without native IO
      // after warning
-      LOG.error("Unable to initialize NativeIO libraries", t);
+      PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
     }
   }
 }
@@ -593,7 +594,7 @@ public static boolean access(String path, AccessRight desiredAccess)
       // This can happen if the user has an older version of libhadoop.so
       // installed - in this case we can continue without native IO
       // after warning
-      LOG.error("Unable to initialize NativeIO libraries", t);
+      PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
     }
   }
 }

File: JniBasedUnixGroupsMappingWithFallback.java

@@ -24,6 +24,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 public class JniBasedUnixGroupsMappingWithFallback implements
     GroupMappingServiceProvider {
@@ -37,7 +38,7 @@ public JniBasedUnixGroupsMappingWithFallback() {
     if (NativeCodeLoader.isNativeCodeLoaded()) {
       this.impl = new JniBasedUnixGroupsMapping();
     } else {
-      LOG.debug("Falling back to shell based");
+      PerformanceAdvisory.LOG.debug("Falling back to shell based");
       this.impl = new ShellBasedUnixGroupsMapping();
     }
     if (LOG.isDebugEnabled()){

File: PerformanceAdvisory.java (new file)

@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class PerformanceAdvisory {
+  public static final Log LOG = LogFactory.getLog(PerformanceAdvisory.class);
+}
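
Because every such notice now goes to the single logger category org.apache.hadoop.util.PerformanceAdvisory at DEBUG level, operators can surface all of them at once without enabling debug output globally. A minimal sketch, assuming a stock log4j.properties as Hadoop shipped with at the time:

  # Show performance advisories while leaving the root logger at INFO.
  log4j.logger.org.apache.hadoop.util.PerformanceAdvisory=DEBUG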

File: CHANGES.txt (hadoop-hdfs)

@@ -514,6 +514,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6899. Allow changing MiniDFSCluster volumes per DN and capacity
     per volume. (Arpit Agarwal)
 
+    HDFS-4486. Add log category for long-running DFSClient notices (Zhe Zhang
+    via Colin Patrick McCabe)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

File: BlockReaderFactory.java

@@ -54,6 +54,7 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.PerformanceAdvisory;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -343,10 +344,9 @@ private BlockReader getLegacyBlockReaderLocal() throws IOException {
       return null;
     }
     if (clientContext.getDisableLegacyBlockReaderLocal()) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": can't construct BlockReaderLocalLegacy because " +
-            "disableLegacyBlockReaderLocal is set.");
-      }
+      PerformanceAdvisory.LOG.debug(this + ": can't construct " +
+          "BlockReaderLocalLegacy because " +
+          "disableLegacyBlockReaderLocal is set.");
       return null;
     }
     IOException ioe = null;
@@ -385,10 +385,8 @@ private BlockReader getBlockReaderLocal() throws InvalidToken {
       getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForShortCircuit()) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": " + pathInfo + " is not " +
-            "usable for short circuit; giving up on BlockReaderLocal.");
-      }
+      PerformanceAdvisory.LOG.debug(this + ": " + pathInfo + " is not " +
+          "usable for short circuit; giving up on BlockReaderLocal.");
       return null;
     }
     ShortCircuitCache cache = clientContext.getShortCircuitCache();
@@ -404,8 +402,9 @@ private BlockReader getBlockReaderLocal() throws InvalidToken {
     }
     if (info.getReplica() == null) {
       if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": failed to get ShortCircuitReplica. " +
-            "Cannot construct BlockReaderLocal via " + pathInfo.getPath());
+        PerformanceAdvisory.LOG.debug(this + ": failed to get " +
+            "ShortCircuitReplica. Cannot construct " +
+            "BlockReaderLocal via " + pathInfo.getPath());
       }
       return null;
     }
@@ -580,11 +579,9 @@ private BlockReader getRemoteBlockReaderFromDomain() throws IOException {
       getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForDataTransfer()) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": not trying to create a remote block reader " +
-            "because the UNIX domain socket at " + pathInfo +
-            " is not usable.");
-      }
+      PerformanceAdvisory.LOG.debug(this + ": not trying to create a " +
+          "remote block reader because the UNIX domain socket at " +
+          pathInfo + " is not usable.");
       return null;
     }
     if (LOG.isTraceEnabled()) {
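
One detail worth noting in these hunks: the removed LOG.trace() calls sat inside if (LOG.isTraceEnabled()) guards, while most of the new PerformanceAdvisory calls build their message strings unconditionally (the @@ -404 hunk keeps the old guard, so that advisory still depends on the DFSClient logger's trace level). With commons-logging, a guarded form avoids the concatenation cost when the category is disabled; a sketch only, mirroring the structure of the removed code:

  if (PerformanceAdvisory.LOG.isDebugEnabled()) {
    PerformanceAdvisory.LOG.debug(this + ": " + pathInfo + " is not " +
        "usable for short circuit; giving up on BlockReaderLocal.");
  }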

File: DomainSocketFactory.java

@@ -33,6 +33,7 @@
 import com.google.common.base.Preconditions;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 public class DomainSocketFactory {
   private static final Log LOG = LogFactory.getLog(DomainSocketFactory.class);
@@ -105,7 +106,8 @@ public DomainSocketFactory(Conf conf) {
     }
 
     if (feature == null) {
-      LOG.debug("Both short-circuit local reads and UNIX domain socket are disabled.");
+      PerformanceAdvisory.LOG.debug(
+          "Both short-circuit local reads and UNIX domain socket are disabled.");
     } else {
       if (conf.getDomainSocketPath().isEmpty()) {
         throw new HadoopIllegalArgumentException(feature + " is enabled but "