HDFS-73. DFSOutputStream does not close all the sockets. Contributed by Uma Maheswara Rao G

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1157232 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2011-08-12 19:57:15 +00:00
parent 7d612e9325
commit 504b801ca0
2 changed files with 24 additions and 5 deletions

View File

@ -964,6 +964,9 @@ Trunk (unreleased changes)
 HDFS-2240. Fix a deadlock in LeaseRenewer by enforcing lock acquisition
 ordering. (szetszwo)
+HDFS-73. DFSOutputStream does not close all the sockets.
+(Uma Maheswara Rao G via eli)
 BREAKDOWN OF HDFS-1073 SUBTASKS
 HDFS-1521. Persist transaction ID on disk between NN restarts.

View File

@ -36,7 +36,6 @@
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@ -63,7 +62,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@ -606,6 +604,7 @@ private void closeStream() {
 try {
   blockStream.close();
 } catch (IOException e) {
+  setLastException(e);
 } finally {
   blockStream = null;
 }
@ -614,10 +613,20 @@ private void closeStream() {
 try {
   blockReplyStream.close();
 } catch (IOException e) {
+  setLastException(e);
 } finally {
   blockReplyStream = null;
 }
 }
+if (null != s) {
+  try {
+    s.close();
+  } catch (IOException e) {
+    setLastException(e);
+  } finally {
+    s = null;
+  }
+}
 }
 //
@ -1003,16 +1012,20 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
 persistBlocks.set(true);
 boolean result = false;
+DataOutputStream out = null;
 try {
+  assert null == s : "Previous socket unclosed";
   s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
   long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
   //
   // Xmit header info to datanode
   //
-  DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
+  out = new DataOutputStream(new BufferedOutputStream(
     NetUtils.getOutputStream(s, writeTimeout),
     FSConstants.SMALL_BUFFER_SIZE));
+  assert null == blockReplyStream : "Previous blockReplyStream unclosed";
   blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
   // send the request
@ -1038,7 +1051,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
 + firstBadLink);
 }
 }
+assert null == blockStream : "Previous blockStream unclosed";
 blockStream = out;
 result = true; // success
@ -1059,12 +1072,15 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
 }
 hasError = true;
 setLastException(ie);
-blockReplyStream = null;
 result = false; // error
 } finally {
 if (!result) {
   IOUtils.closeSocket(s);
   s = null;
+  IOUtils.closeStream(out);
+  out = null;
+  IOUtils.closeStream(blockReplyStream);
+  blockReplyStream = null;
 }
 }
 return result;