HDFS-7005. DFS input streams do not timeout. Contributed by Daryn Sharp.

Kihwal Lee 2014-09-08 14:41:44 -05:00
parent df8c84cba8
commit 6a84f88c11
3 changed files with 40 additions and 0 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -621,6 +621,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-7025. HDFS Credential Provider related Unit Test Failure.
     (Xiaoyu Yao via cnauroth)
+    HDFS-7005. DFS input streams do not timeout.
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
     HDFS-6387. HDFS CLI admin tool for creating & deleting an

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -3015,6 +3015,7 @@ public Peer newConnectedPeer(InetSocketAddress addr,
           dfsClientConf.socketTimeout);
       peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
           blockToken, datanodeId);
+      peer.setReadTimeout(dfsClientConf.socketTimeout);
       success = true;
       return peer;
     } finally {
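
The fix is the single peer.setReadTimeout(...) call above: without it, a peer built on this path had no read timeout, so a read from an unresponsive DataNode could block forever. As a minimal standalone sketch of the same mechanism (plain java.net code, not Hadoop's Peer API; the class and variable names are illustrative), a socket-level read timeout turns an indefinitely blocking read into a SocketTimeoutException:

import java.io.IOException;
import java.io.InputStream;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

// Illustrative sketch only: shows the socket-level behavior that
// Peer#setReadTimeout relies on. A server that accepts a connection
// but never writes stands in for an unresponsive DataNode.
public class ReadTimeoutDemo {
  public static void main(String[] args) throws IOException {
    try (ServerSocket silentServer = new ServerSocket(0);
         Socket client = new Socket()) {
      client.connect(new InetSocketAddress("127.0.0.1",
          silentServer.getLocalPort()), 1000);
      client.setSoTimeout(1000); // analogous to peer.setReadTimeout(1000)
      InputStream in = client.getInputStream();
      try {
        in.read(); // would block forever without the timeout above
        System.out.println("unexpected: server sent data");
      } catch (SocketTimeoutException expected) {
        System.out.println("read timed out as expected: " + expected);
      }
    }
  }
}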

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -31,6 +31,9 @@
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -60,6 +63,7 @@
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -69,6 +73,7 @@
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.mockito.Mockito;
@@ -961,4 +966,36 @@ public void testListFiles() throws IOException {
       cluster.shutdown();
     }
   }
+
+  @Test(timeout=10000)
+  public void testDFSClientPeerTimeout() throws IOException {
+    final int timeout = 1000;
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+
+    // only need the cluster to create a dfs client in order to get a peer
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // use a dummy socket to ensure the read times out
+      ServerSocket socket = new ServerSocket(0);
+      Peer peer = dfs.getClient().newConnectedPeer(
+          (InetSocketAddress) socket.getLocalSocketAddress(), null, null);
+      long start = Time.now();
+      try {
+        peer.getInputStream().read();
+        Assert.fail("should timeout");
+      } catch (SocketTimeoutException ste) {
+        long delta = Time.now() - start;
+        Assert.assertTrue("timed out too soon", delta >= timeout * 0.9);
+        Assert.assertTrue("timed out too late", delta <= timeout * 1.1);
+      } catch (Throwable t) {
+        Assert.fail("wrong exception: " + t);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
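
The test works by pointing the client at a ServerSocket that accepts the connection but never responds, so the read can only return via the new timeout; the 0.9x/1.1x bounds confirm the read failed because of the configured timeout rather than some unrelated error. The timeout itself comes from the standard client configuration key exercised above. A hedged sketch (the helper class and method names here are made up for illustration; the DFSConfigKeys constant is the one used in the test) of tuning it in a client application:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Illustrative helper, not part of this commit: lowers the client socket
// timeout so reads from an unresponsive DataNode fail fast with a
// SocketTimeoutException instead of hanging indefinitely.
public class ClientTimeouts {
  public static Configuration withDfsReadTimeout(int millis) {
    Configuration conf = new Configuration();
    // DFS_CLIENT_SOCKET_TIMEOUT_KEY is "dfs.client.socket.timeout"
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, millis);
    return conf;
  }
}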