HADOOP-15541. [s3a] Shouldn't try to drain stream before aborting
connection in case of timeout.
Sean Mackrory 2018-07-05 13:52:00 -06:00
parent 705e2c1f7c
commit d503f65b66

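Background for this change, as a minimal sketch: the S3A input stream wraps the AWS SDK's S3ObjectInputStream, which can either be drained (read to the end of the requested range and then closed, so the pooled HTTP connection can be reused) or aborted (the connection is discarded outright). Draining a stream whose socket has just timed out issues further reads against that same stalled socket, so it tends to hang again; aborting avoids that. The class and helper names below, and the `remaining` bookkeeping, are illustrative assumptions, not code from this patch.

// Illustrative only: what "drain" versus "abort" means for the wrapped
// AWS SDK stream. Names here are invented for the example.
import java.io.IOException;
import com.amazonaws.services.s3.model.S3ObjectInputStream;

final class DrainVsAbort {

  /**
   * Drain: read whatever is left of the requested range, then close,
   * so the underlying pooled HTTP connection can be reused. This keeps
   * reading from the same socket that just failed, which is why it is
   * the wrong move after a timeout.
   */
  static void drain(S3ObjectInputStream in, long remaining) throws IOException {
    byte[] buffer = new byte[8192];
    while (remaining > 0) {
      int read = in.read(buffer, 0, (int) Math.min(buffer.length, remaining));
      if (read < 0) {
        break;
      }
      remaining -= read;
    }
    in.close();
  }

  /**
   * Abort: discard the HTTP connection outright; nothing more is read
   * from the socket, at the cost of not returning it to the pool.
   */
  static void abort(S3ObjectInputStream in) {
    in.abort();
  }
}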

@@ -36,6 +36,7 @@
 import java.io.EOFException;
 import java.io.IOException;
+import java.net.SocketTimeoutException;
 import static org.apache.commons.lang3.StringUtils.isNotEmpty;
@@ -155,11 +156,11 @@ private void setInputPolicy(S3AInputPolicy inputPolicy) {
    * @throws IOException on any failure to open the object
    */
   @Retries.OnceTranslated
-  private synchronized void reopen(String reason, long targetPos, long length)
-      throws IOException {
+  private synchronized void reopen(String reason, long targetPos, long length,
+      boolean forceAbort) throws IOException {
     if (wrappedStream != null) {
-      closeStream("reopen(" + reason + ")", contentRangeFinish, false);
+      closeStream("reopen(" + reason + ")", contentRangeFinish, forceAbort);
     }
     contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos,
@@ -324,7 +325,7 @@ private void lazySeek(long targetPos, long len) throws IOException {
           //re-open at specific location if needed
           if (wrappedStream == null) {
-            reopen("read from new offset", targetPos, len);
+            reopen("read from new offset", targetPos, len, false);
           }
         });
   }
@@ -367,8 +368,11 @@ public synchronized int read() throws IOException {
             b = wrappedStream.read();
           } catch (EOFException e) {
             return -1;
+          } catch (SocketTimeoutException e) {
+            onReadFailure(e, 1, true);
+            b = wrappedStream.read();
           } catch (IOException e) {
-            onReadFailure(e, 1);
+            onReadFailure(e, 1, false);
             b = wrappedStream.read();
           }
           return b;
@@ -393,12 +397,13 @@ public synchronized int read() throws IOException {
    * @throws IOException any exception thrown on the re-open attempt.
    */
   @Retries.OnceTranslated
-  private void onReadFailure(IOException ioe, int length) throws IOException {
+  private void onReadFailure(IOException ioe, int length, boolean forceAbort)
+      throws IOException {
     LOG.info("Got exception while trying to read from stream {}" +
         " trying to recover: " + ioe, uri);
     streamStatistics.readException();
-    reopen("failure recovery", pos, length);
+    reopen("failure recovery", pos, length, forceAbort);
   }
   /**
@@ -446,8 +451,11 @@ public synchronized int read(byte[] buf, int off, int len)
           } catch (EOFException e) {
             // the base implementation swallows EOFs.
             return -1;
+          } catch (SocketTimeoutException e) {
+            onReadFailure(e, len, true);
+            bytes = wrappedStream.read(buf, off, len);
           } catch (IOException e) {
-            onReadFailure(e, len);
+            onReadFailure(e, len, false);
             bytes= wrappedStream.read(buf, off, len);
           }
           return bytes;