diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 30188849bb..0d751bbc41 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -907,6 +907,18 @@
  <description>Socket connection timeout in milliseconds.</description>
+<property>
+  <name>fs.s3a.socket.send.buffer</name>
+  <value>8192</value>
+  <description>Socket send buffer hint to the Amazon connector. Represented in bytes.</description>
+</property>
+
+<property>
+  <name>fs.s3a.socket.recv.buffer</name>
+  <value>8192</value>
+  <description>Socket receive buffer hint to the Amazon connector. Represented in bytes.</description>
+</property>
+
  <name>fs.s3a.paging.maximum</name>
  <value>5000</value>
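
Beyond the defaults published in core-default.xml above, the two keys can be raised per client before the filesystem is created. A minimal sketch, assuming a placeholder bucket URI and illustrative 64 KB values (neither appears in this patch) and that valid AWS credentials are already configured:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class S3ABufferHintOverride {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Raise both hints from the 8192-byte default; 64 KB is illustrative only.
        conf.setInt("fs.s3a.socket.send.buffer", 64 * 1024);
        conf.setInt("fs.s3a.socket.recv.buffer", 64 * 1024);
        // "s3a://example-bucket/" is a placeholder URI, not part of the patch.
        FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
        System.out.println("Opened " + fs.getUri() + " with larger socket buffer hints");
      }
    }
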
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 3f6f3474c7..612b64873b 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -82,6 +82,14 @@ private Constants() {
public static final String SOCKET_TIMEOUT = "fs.s3a.connection.timeout";
public static final int DEFAULT_SOCKET_TIMEOUT = 200000;
+ // socket send buffer to be used in Amazon client
+ public static final String SOCKET_SEND_BUFFER = "fs.s3a.socket.send.buffer";
+ public static final int DEFAULT_SOCKET_SEND_BUFFER = 8 * 1024;
+
+ // socket receive buffer to be used in Amazon client
+ public static final String SOCKET_RECV_BUFFER = "fs.s3a.socket.recv.buffer";
+ public static final int DEFAULT_SOCKET_RECV_BUFFER = 8 * 1024;
+
// number of records to get while paging through a directory listing
public static final String MAX_PAGING_KEYS = "fs.s3a.paging.maximum";
public static final int DEFAULT_MAX_PAGING_KEYS = 5000;
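
The 8 * 1024 defaults above are meant to stay in step with the 8192 values added to core-default.xml. A quick consistency check is sketched below; the test class is hypothetical and not part of this patch:

    import static org.apache.hadoop.fs.s3a.Constants.*;
    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.conf.Configuration;
    import org.junit.Test;

    public class TestS3ASocketBufferDefaults {
      @Test
      public void testDefaultsMatchCoreDefault() {
        // new Configuration() loads core-default.xml from the classpath;
        // -1 would only be returned if the keys were missing there.
        Configuration conf = new Configuration();
        assertEquals(DEFAULT_SOCKET_SEND_BUFFER, conf.getInt(SOCKET_SEND_BUFFER, -1));
        assertEquals(DEFAULT_SOCKET_RECV_BUFFER, conf.getInt(SOCKET_RECV_BUFFER, -1));
      }
    }
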
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 513b57c889..10595e2ef7 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -163,6 +163,11 @@ public void initialize(URI name, Configuration conf) throws IOException {
DEFAULT_ESTABLISH_TIMEOUT, 0));
awsConf.setSocketTimeout(intOption(conf, SOCKET_TIMEOUT,
DEFAULT_SOCKET_TIMEOUT, 0));
+ int sockSendBuffer = intOption(conf, SOCKET_SEND_BUFFER,
+ DEFAULT_SOCKET_SEND_BUFFER, 2048);
+ int sockRecvBuffer = intOption(conf, SOCKET_RECV_BUFFER,
+ DEFAULT_SOCKET_RECV_BUFFER, 2048);
+ awsConf.setSocketBufferSizeHints(sockSendBuffer, sockRecvBuffer);
String signerOverride = conf.getTrimmed(SIGNING_ALGORITHM, "");
if (!signerOverride.isEmpty()) {
LOG.debug("Signer override = {}", signerOverride);
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 2b64754e66..01a2bae1a2 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -503,6 +503,18 @@ this capability.
    which each use a thread from the threadpool.</description>
+<property>
+  <name>fs.s3a.socket.send.buffer</name>
+  <value>8192</value>
+  <description>Socket send buffer hint to the Amazon connector. Represented in bytes.</description>
+</property>
+
+<property>
+  <name>fs.s3a.socket.recv.buffer</name>
+  <value>8192</value>
+  <description>Socket receive buffer hint to the Amazon connector. Represented in bytes.</description>
+</property>
+
  <name>fs.s3a.threads.keepalivetime</name>
  <value>60</value>
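
When tuning these values it can help to confirm what a running client actually picked up. A short sketch, reusing the placeholder bucket URI from the earlier example:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class PrintS3ABufferHints {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"),
            new Configuration());
        Configuration effective = fs.getConf();
        // 8192 is only the fallback if the key is absent everywhere.
        System.out.println("send=" + effective.getInt("fs.s3a.socket.send.buffer", 8192)
            + " recv=" + effective.getInt("fs.s3a.socket.recv.buffer", 8192));
      }
    }
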
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3AInputStreamPerformance.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3AInputStreamPerformance.java
index 752e37433e..bddd8e26c8 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3AInputStreamPerformance.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/TestS3AInputStreamPerformance.java
@@ -44,6 +44,7 @@
import java.io.IOException;
import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
+import static org.apache.hadoop.fs.s3a.Constants.*;
/**
* Look at the performance of S3a operations.
@@ -71,6 +72,8 @@ public class TestS3AInputStreamPerformance extends S3AScaleTestBase {
@Before
public void openFS() throws IOException {
Configuration conf = getConf();
+ conf.setInt(SOCKET_SEND_BUFFER, 16 * 1024);
+ conf.setInt(SOCKET_RECV_BUFFER, 16 * 1024);
String testFile = conf.getTrimmed(KEY_CSVTEST_FILE, DEFAULT_CSVTEST_FILE);
if (testFile.isEmpty()) {
assumptionMessage = "Empty test property: " + KEY_CSVTEST_FILE;