diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1329442605..2c45017eb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -621,6 +621,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-7025. HDFS Credential Provider related Unit Test Failure.
     (Xiaoyu Yao via cnauroth)
 
+    HDFS-7005. DFS input streams do not timeout.
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index e4215f0602..27abfb897e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3015,6 +3015,7 @@ public Peer newConnectedPeer(InetSocketAddress addr,
           dfsClientConf.socketTimeout);
       peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
           blockToken, datanodeId);
+      peer.setReadTimeout(dfsClientConf.socketTimeout);
       success = true;
       return peer;
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index b71cc32fb8..da81d2fdd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -31,6 +31,9 @@
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -60,6 +63,7 @@
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -69,6 +73,7 @@
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.mockito.Mockito;
@@ -961,4 +966,49 @@ public void testListFiles() throws IOException {
       cluster.shutdown();
     }
   }
+
+
+  @Test(timeout=10000)
+  public void testDFSClientPeerTimeout() throws IOException {
+    final int timeout = 1000;
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+
+    // only need cluster to create a dfs client to get a peer
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    ServerSocket socket = null;
+    Peer peer = null;
+    try {
+      cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // use a dummy server socket that is never read from, so the
+      // connect succeeds (backlog) but the subsequent read must time out
+      socket = new ServerSocket(0);
+      peer = dfs.getClient().newConnectedPeer(
+        (InetSocketAddress) socket.getLocalSocketAddress(), null, null);
+      // monotonic clock: wall-clock adjustments must not skew the timing
+      long start = Time.monotonicNow();
+      try {
+        peer.getInputStream().read();
+        Assert.fail("should timeout");
+      } catch (SocketTimeoutException ste) {
+        long delta = Time.monotonicNow() - start;
+        Assert.assertTrue("timedout too soon", delta >= timeout*0.9);
+        Assert.assertTrue("timedout too late", delta <= timeout*1.1);
+      } catch (Throwable t) {
+        Assert.fail("wrong exception:"+t);
+      }
+    } finally {
+      // always release the dummy socket and peer so fds do not leak
+      try {
+        if (peer != null) {
+          peer.close();
+        }
+        if (socket != null) {
+          socket.close();
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
 }