HADOOP-18546. ABFS. disable purging list of in progress reads in abfs stream close() (#5176)

This addresses HADOOP-18521, "ABFS ReadBufferManager buffer sharing
across concurrent HTTP requests", by no longer trying to cancel
in-progress reads.
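
A minimal illustration of the underlying hazard, using plain Java
threads rather than the actual ReadBufferManager internals (all names
in this sketch are invented for illustration):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

// Hypothetical sketch of the HADOOP-18521 hazard: a close() that recycles
// a buffer still being filled by an in-progress read lets a second consumer
// observe the first stream's bytes appearing in "its" buffer.
public class BufferRecycleHazard {
  public static void main(String[] args) throws Exception {
    BlockingQueue<byte[]> freeList = new ArrayBlockingQueue<>(1);
    byte[] buffer = new byte[4];

    // Worker thread: the in-progress read, still writing after a slow call.
    Thread inProgressRead = new Thread(() -> {
      try {
        Thread.sleep(100);   // mock network latency
        buffer[0] = 42;      // late write into the shared buffer
      } catch (InterruptedException ignored) {
      }
    });
    inProgressRead.start();

    // Buggy close(): returns the in-progress buffer to the free list anyway.
    freeList.put(buffer);

    // A second stream picks up the "free" buffer...
    byte[] reused = freeList.take();
    inProgressRead.join();
    // ...and now sees data it never requested.
    System.out.println("reused[0] = " + reused[0] + " (expected 0)");
  }
}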

It supersedes HADOOP-18528, which disables prefetching entirely.
If that patch is applied *after* this one, prefetching
will be disabled.

As well as changing the default value in the code,
core-default.xml is updated to set
fs.azure.enable.readahead = true

As a result, if Configuration.get("fs.azure.enable.readahead")
returns a non-null value, then it can be inferred that it was set
either in core-default.xml (the fix is present) or in core-site.xml
(someone asked for it).
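
A minimal sketch of that inference, assuming only hadoop-common on the
classpath (new Configuration() loads core-default.xml and then
core-site.xml):

import org.apache.hadoop.conf.Configuration;

public class ReadaheadFixProbe {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    String value = conf.get("fs.azure.enable.readahead");
    if (value == null) {
      // Absent from both files: the patched core-default.xml is not on
      // the classpath, so the fix is not present.
      System.out.println("fs.azure.enable.readahead unset; fix not present");
    } else {
      // Set by the patched core-default.xml or explicitly in core-site.xml.
      System.out.println("fs.azure.enable.readahead = " + value);
    }
  }
}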

Contributed by Pranav Saxena.
Committed 2022-12-07 by Pranav Saxena (via GitHub).
Commit c67c2b7569, parent 2e88096266.
6 changed files with 78 additions and 10 deletions

File: core-default.xml

@@ -2168,9 +2168,8 @@ The switch to turn S3A auditing on or off.
 <property>
   <name>fs.azure.enable.readahead</name>
-  <value>false</value>
-  <description>Disable readahead/prefetching in AbfsInputStream.
-    See HADOOP-18521</description>
+  <value>true</value>
+  <description>Enabled readahead/prefetching in AbfsInputStream.</description>
 </property>

 <property>
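
Deployments that still want prefetching off can override the new
default in core-site.xml or in code; a sketch using the standard
Configuration/FileSystem API (the container URI is a placeholder, and
hadoop-azure plus auth settings are assumed to be configured):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DisableReadahead {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to setting fs.azure.enable.readahead=false in core-site.xml.
    conf.setBoolean("fs.azure.enable.readahead", false);
    // Placeholder account/container; any abfs:// URI behaves the same.
    FileSystem fs = FileSystem.get(
        new URI("abfs://container@account.dfs.core.windows.net/"), conf);
    System.out.println("Opened " + fs.getUri() + " with readahead disabled");
  }
}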

File: FileSystemConfigurations.java

@@ -109,7 +109,7 @@ public final class FileSystemConfigurations {
   public static final boolean DEFAULT_ABFS_LATENCY_TRACK = false;
   public static final long DEFAULT_SAS_TOKEN_RENEW_PERIOD_FOR_STREAMS_IN_SECONDS = 120;
-  public static final boolean DEFAULT_ENABLE_READAHEAD = false;
+  public static final boolean DEFAULT_ENABLE_READAHEAD = true;
   public static final String DEFAULT_FS_AZURE_USER_AGENT_PREFIX = EMPTY_STRING;
   public static final String DEFAULT_VALUE_UNKNOWN = "UNKNOWN";

File: AbfsInputStreamContext.java

@@ -35,7 +35,7 @@ public class AbfsInputStreamContext extends AbfsStreamContext {
   private boolean tolerateOobAppends;
-  private boolean isReadAheadEnabled = false;
+  private boolean isReadAheadEnabled = true;
   private boolean alwaysReadBufferSize;

File: ReadBufferManager.java

@@ -544,7 +544,6 @@ public synchronized void purgeBuffersForStream(AbfsInputStream stream) {
     LOGGER.debug("Purging stale buffers for AbfsInputStream {} ", stream);
     readAheadQueue.removeIf(readBuffer -> readBuffer.getStream() == stream);
     purgeList(stream, completedReadList);
-    purgeList(stream, inProgressList);
   }

   /**
@@ -642,4 +641,9 @@ void testMimicFullUseAndAddFailedBuffer(ReadBuffer buf) {
     freeList.clear();
     completedReadList.add(buf);
   }
+
+  @VisibleForTesting
+  int getNumBuffers() {
+    return NUM_BUFFERS;
+  }
 }

File: ITestAbfsFileSystemContractSeek.java

@@ -34,7 +34,6 @@
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_READ_AHEAD_RANGE;
 import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.AZURE_READ_BUFFER_SIZE;
-import static org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys.FS_AZURE_ENABLE_READAHEAD;
 import static org.apache.hadoop.fs.azurebfs.constants.FileSystemConfigurations.MIN_BUFFER_SIZE;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@@ -69,7 +68,6 @@ protected Configuration createConfiguration() {
   protected AbstractFSContract createContract(final Configuration conf) {
     conf.setInt(AZURE_READ_AHEAD_RANGE, MIN_BUFFER_SIZE);
     conf.setInt(AZURE_READ_BUFFER_SIZE, MIN_BUFFER_SIZE);
-    conf.setBoolean(FS_AZURE_ENABLE_READAHEAD, true);
     return new AbfsFileSystemContract(conf, isSecure);
   }

File: TestAbfsInputStream.java

@@ -82,6 +82,12 @@ public class TestAbfsInputStream extends
       REDUCED_READ_BUFFER_AGE_THRESHOLD * 10;  // 30 sec
   private static final int ALWAYS_READ_BUFFER_SIZE_TEST_FILE_SIZE = 16 * ONE_MB;

+  @Override
+  public void teardown() throws Exception {
+    super.teardown();
+    ReadBufferManager.getBufferManager().testResetReadBufferManager();
+  }
+
   private AbfsRestOperation getMockRestOp() {
     AbfsRestOperation op = mock(AbfsRestOperation.class);
     AbfsHttpOperation httpOp = mock(AbfsHttpOperation.class);
@@ -106,7 +112,6 @@ private AbfsClient getMockAbfsClient() {
   private AbfsInputStream getAbfsInputStream(AbfsClient mockAbfsClient,
       String fileName) throws IOException {
     AbfsInputStreamContext inputStreamContext = new AbfsInputStreamContext(-1);
-    inputStreamContext.isReadAheadEnabled(true);
     // Create AbfsInputStream with the client instance
     AbfsInputStream inputStream = new AbfsInputStream(
         mockAbfsClient,
@@ -132,7 +137,6 @@ public AbfsInputStream getAbfsInputStream(AbfsClient abfsClient,
       boolean alwaysReadBufferSize,
       int readAheadBlockSize) throws IOException {
     AbfsInputStreamContext inputStreamContext = new AbfsInputStreamContext(-1);
-    inputStreamContext.isReadAheadEnabled(true);
     // Create AbfsInputStream with the client instance
     AbfsInputStream inputStream = new AbfsInputStream(
         abfsClient,
@@ -495,6 +499,69 @@ public void testSuccessfulReadAhead() throws Exception {
     checkEvictedStatus(inputStream, 0, true);
   }

+  /**
+   * This test expects that the inProgressList is not purged by the
+   * inputStream close.
+   */
+  @Test
+  public void testStreamPurgeDuringReadAheadCallExecuting() throws Exception {
+    AbfsClient client = getMockAbfsClient();
+    AbfsRestOperation successOp = getMockRestOp();
+    final Long serverCommunicationMockLatency = 3_000L;
+    final Long readBufferTransferToInProgressProbableTime = 1_000L;
+    final Integer readBufferQueuedCount = 3;
+
+    Mockito.doAnswer(invocationOnMock -> {
+      // Sleeping thread to mock the network latency from client to backend.
+      Thread.sleep(serverCommunicationMockLatency);
+      return successOp;
+    })
+        .when(client)
+        .read(any(String.class), any(Long.class), any(byte[].class),
+            any(Integer.class), any(Integer.class), any(String.class),
+            any(String.class), any(TracingContext.class));
+
+    final ReadBufferManager readBufferManager
+        = ReadBufferManager.getBufferManager();
+
+    final int readBufferTotal = readBufferManager.getNumBuffers();
+    final int expectedFreeListBufferCount = readBufferTotal
+        - readBufferQueuedCount;
+
+    try (AbfsInputStream inputStream = getAbfsInputStream(client,
+        "testSuccessfulReadAhead.txt")) {
+      // As this is a try-with-resources block, the close() method of the
+      // created abfsInputStream object shall be called at the end of the
+      // block.
+      queueReadAheads(inputStream);
+
+      // Sleeping to give ReadBufferWorker time to pick the readBuffers for
+      // processing.
+      Thread.sleep(readBufferTransferToInProgressProbableTime);
+
+      Assertions.assertThat(readBufferManager.getInProgressCopiedList())
+          .describedAs(String.format("InProgressList should have %d elements",
+              readBufferQueuedCount))
+          .hasSize(readBufferQueuedCount);
+      Assertions.assertThat(readBufferManager.getFreeListCopy())
+          .describedAs(String.format("FreeList should have %d elements",
+              expectedFreeListBufferCount))
+          .hasSize(expectedFreeListBufferCount);
+      Assertions.assertThat(readBufferManager.getCompletedReadListCopy())
+          .describedAs("CompletedList should have 0 elements")
+          .hasSize(0);
+    }
+
+    Assertions.assertThat(readBufferManager.getInProgressCopiedList())
+        .describedAs(String.format("InProgressList should have %d elements",
+            readBufferQueuedCount))
+        .hasSize(readBufferQueuedCount);
+    Assertions.assertThat(readBufferManager.getFreeListCopy())
+        .describedAs(String.format("FreeList should have %d elements",
+            expectedFreeListBufferCount))
+        .hasSize(expectedFreeListBufferCount);
+    Assertions.assertThat(readBufferManager.getCompletedReadListCopy())
+        .describedAs("CompletedList should have 0 elements")
+        .hasSize(0);
+  }
+
   /**
    * This test expects ReadAheadManager to throw exception if the read ahead
    * thread had failed within the last thresholdAgeMilliseconds.