diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 4923a5010a..6629a83b36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3127,6 +3127,7 @@ public Peer newConnectedPeer(InetSocketAddress addr,
       peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
           blockToken, datanodeId);
       peer.setReadTimeout(socketTimeout);
+      peer.setWriteTimeout(socketTimeout);
       success = true;
       return peer;
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index f9da472dcf..0b77210a76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -1132,10 +1132,9 @@ public void testListFiles() throws IOException {
       cluster.shutdown();
     }
   }
-
-
+
   @Test(timeout=10000)
-  public void testDFSClientPeerTimeout() throws IOException {
+  public void testDFSClientPeerReadTimeout() throws IOException {
     final int timeout = 1000;
     final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
@@ -1152,11 +1151,11 @@ public void testDFSClientPeerTimeout() throws IOException {
     long start = Time.now();
     try {
       peer.getInputStream().read();
-      Assert.fail("should timeout");
+      Assert.fail("read should timeout");
     } catch (SocketTimeoutException ste) {
       long delta = Time.now() - start;
-      Assert.assertTrue("timedout too soon", delta >= timeout*0.9);
-      Assert.assertTrue("timedout too late", delta <= timeout*1.1);
+      Assert.assertTrue("read timedout too soon", delta >= timeout*0.9);
+      Assert.assertTrue("read timedout too late", delta <= timeout*1.1);
     } catch (Throwable t) {
       Assert.fail("wrong exception:"+t);
     }
@@ -1178,4 +1177,36 @@ public void testGetServerDefaults() throws IOException {
       cluster.shutdown();
     }
   }
+
+  @Test(timeout=10000)
+  public void testDFSClientPeerWriteTimeout() throws IOException {
+    final int timeout = 1000;
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+
+    // only need cluster to create a dfs client to get a peer
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Write 1 MB to a dummy socket to ensure the write times out
+      ServerSocket socket = new ServerSocket(0);
+      Peer peer = dfs.getClient().newConnectedPeer(
+          (InetSocketAddress) socket.getLocalSocketAddress(), null, null);
+      long start = Time.now();
+      try {
+        byte[] buf = new byte[1024 * 1024];
+        peer.getOutputStream().write(buf);
+        Assert.fail("write should timeout");
+      } catch (SocketTimeoutException ste) {
+        long delta = Time.now() - start;
+        Assert.assertTrue("write timedout too soon", delta >= timeout * 0.9);
+        Assert.assertTrue("write timedout too late", delta <= timeout * 1.1);
+      } catch (Throwable t) {
+        Assert.fail("wrong exception:" + t);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
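
Note (not part of the patch): peer.setWriteTimeout() matters because a plain blocking OutputStream.write() never times out on its own; once the remote side stops reading and the TCP send buffer fills, the writer hangs indefinitely. Hadoop's Peer implementations bound each blocked write with a selector-based wait (the SocketOutputStream / SocketIOWithTimeout machinery in hadoop-common, to the best of my understanding). The sketch below is a self-contained, roughly equivalent illustration of that mechanism using only java.nio; the class name WriteTimeoutSketch, the helper writeFully, and the 1000 ms / 32 MB values are illustrative choices, not code from the patch.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

public class WriteTimeoutSketch {

  /**
   * Write the whole buffer, but never block longer than timeoutMs while
   * waiting for the channel to become writable (a per-wait timeout, not an
   * overall deadline, which mirrors how the DFSClient socket timeout behaves).
   */
  static void writeFully(SocketChannel ch, ByteBuffer buf, long timeoutMs)
      throws IOException {
    try (Selector selector = Selector.open()) {
      ch.configureBlocking(false);
      ch.register(selector, SelectionKey.OP_WRITE);
      while (buf.hasRemaining()) {
        if (selector.select(timeoutMs) == 0) {
          // Send buffer stayed full for the whole wait: give up.
          throw new SocketTimeoutException(timeoutMs + " ms write timeout");
        }
        selector.selectedKeys().clear();
        ch.write(buf);  // partial writes are fine; the loop continues
      }
    }
  }

  public static void main(String[] args) throws IOException {
    // A listener that accepts the connection but never reads from it,
    // like the dummy ServerSocket in testDFSClientPeerWriteTimeout.
    try (ServerSocket server = new ServerSocket(0);
         SocketChannel ch = SocketChannel.open(
             new InetSocketAddress("127.0.0.1", server.getLocalPort()))) {
      server.accept();  // accepted socket is deliberately never read
      // Large enough to overflow the local send and receive buffers.
      ByteBuffer data = ByteBuffer.allocate(32 * 1024 * 1024);
      try {
        writeFully(ch, data, 1000);
        System.out.println("unexpected: write completed");
      } catch (SocketTimeoutException expected) {
        System.out.println("write timed out as expected: " + expected.getMessage());
      }
    }
  }
}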