HDFS-6123. Do not log stack trace for ReplicaAlreadyExistsException and SocketTimeoutException.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1579396 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2014-03-19 21:04:21 +00:00
parent 234edcadd5
commit cfb468332e
3 changed files with 26 additions and 9 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -421,6 +421,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-6068. Disallow snapshot names that are also invalid directory names.
     (sathish via szetszwo)
 
+    HDFS-6123. Do not log stack trace for ReplicaAlreadyExistsException and
+    SocketTimeoutException.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -560,7 +560,11 @@ private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out,
          * part of a block and then decides not to read the rest (but leaves
          * the socket open).
          */
-        LOG.info("exception: ", e);
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Failed to send data:", e);
+        } else {
+          LOG.info("Failed to send data: " + e);
+        }
       } else {
         /* Exception while writing to the client. Connection closure from
          * the other end is mostly the case and we do not care much about
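
The change above replaces commons-logging's two-argument info(Object, Throwable) overload, which prints the full stack trace, with plain string concatenation: "..." + e only appends e.toString(), so a slow or disconnected reader now produces a one-line INFO entry, while the full trace stays available at TRACE. A minimal standalone sketch of the same pattern (the class and method names here are illustrative, not part of the patch):

    import java.net.SocketTimeoutException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class QuietSendFailureLogging {
      private static final Log LOG =
          LogFactory.getLog(QuietSendFailureLogging.class);

      // Mirrors the BlockSender change: the stack trace is logged only
      // when TRACE is enabled.
      static void logSendFailure(SocketTimeoutException e) {
        if (LOG.isTraceEnabled()) {
          // Two-argument overload: message plus full stack trace.
          LOG.trace("Failed to send data:", e);
        } else {
          // Concatenation calls e.toString(): exception class and
          // message only, no stack trace.
          LOG.info("Failed to send data: " + e);
        }
      }
    }

Since a client reading part of a block and leaving the socket open is routine, the one-line entry is enough in normal operation.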

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -18,12 +18,12 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR;
+import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_ACCESS_TOKEN;
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_INVALID;
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_UNSUPPORTED;
-import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_ACCESS_TOKEN;
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
+import static org.apache.hadoop.util.Time.now;
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
@@ -43,7 +43,6 @@
 import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
-import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.ShortCircuitShm.SlotId;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -51,9 +50,9 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor.InvalidMagicNumberException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
@@ -231,10 +230,21 @@ public void run() {
       } while ((peer != null) &&
           (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
     } catch (Throwable t) {
-      LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
-          ((op == null) ? "unknown" : op.name()) + " operation " +
-          " src: " + remoteAddress +
-          " dest: " + localAddress, t);
+      String s = datanode.getDisplayName() + ":DataXceiver error processing "
+          + ((op == null) ? "unknown" : op.name()) + " operation "
+          + " src: " + remoteAddress + " dst: " + localAddress;
+      if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
+        // For WRITE_BLOCK, it is okay if the replica already exists since
+        // client and replication may write the same block to the same datanode
+        // at the same time.
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(s, t);
+        } else {
+          LOG.info(s + "; " + t);
+        }
+      } else {
+        LOG.error(s, t);
+      }
     } finally {
       if (LOG.isDebugEnabled()) {
         LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
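
Building the message once into a local and then branching on the operation and exception type keeps the demotion in one place: the expected WRITE_BLOCK race logs a single line at INFO (or the full trace at TRACE), while everything else still logs at ERROR with the stack trace. A hedged sketch of that decision as a standalone helper (the class and method names are hypothetical, not part of the patch):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
    import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;

    public class XceiverErrorLoggingSketch {
      private static final Log LOG =
          LogFactory.getLog(XceiverErrorLoggingSketch.class);

      // Hypothetical helper: log a DataXceiver failure, demoting the
      // expected WRITE_BLOCK replica race below ERROR.
      static void logXceiverError(Op op, Throwable t, String s) {
        if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
          // The client pipeline and replication may race to create the
          // same replica on the same datanode; that is not an error.
          if (LOG.isTraceEnabled()) {
            LOG.trace(s, t);
          } else {
            LOG.info(s + "; " + t);
          }
        } else {
          // Unexpected failures keep ERROR severity and the stack trace.
          LOG.error(s, t);
        }
      }
    }

The instanceof check is deliberately narrow: only the one exception type on the one operation where the race is known to be benign is demoted, so genuine write failures keep their full diagnostics.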