HADOOP-13614. Purge some superfluous/obsolete S3 FS tests that are slowing test runs down. Contributed by Steve Loughran.

Author: Chris Nauroth, 2016-10-26 08:27:26 -07:00
Commit: 9cad3e2350 (parent e90af4a89b)
26 changed files with 256 additions and 327 deletions


@ -834,6 +834,7 @@ public static void verifyReceivedData(FileSystem fs, Path path,
long totalBytesRead = 0;
int nextExpectedNumber = 0;
NanoTimer timer = new NanoTimer();
try (InputStream inputStream = fs.open(path)) {
while (true) {
final int bytesRead = inputStream.read(testBuffer);
@ -862,6 +863,8 @@ public static void verifyReceivedData(FileSystem fs, Path path,
" bytes but only received " + totalBytesRead);
}
}
timer.end("Time to read %d bytes", expectedSize);
bandwidth(timer, expectedSize);
}
/**
@ -925,9 +928,12 @@ public static void createAndVerifyFile(FileSystem fs, Path parent, final long fi
final Path objectPath = new Path(parent, objectName);
// Write test file in a specific pattern
NanoTimer timer = new NanoTimer();
assertEquals(fileSize,
generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
assertPathExists(fs, "not created successful", objectPath);
timer.end("Time to write %d bytes", fileSize);
bandwidth(timer, fileSize);
// Now read the same file back and verify its content
try {


@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information


@ -181,9 +181,6 @@
</includes>
<excludes>
<exclude>**/ITestJets3tNativeS3FileSystemContract.java</exclude>
<exclude>**/ITestS3ABlockingThreadPool.java</exclude>
<exclude>**/ITestS3AFileSystemContract.java</exclude>
<exclude>**/ITestS3AMiniYarnCluster.java</exclude>
<exclude>**/ITest*Root*.java</exclude>
<exclude>**/ITestS3AFileContextStatistics.java</exclude>
<include>**/ITestS3AHuge*.java</include>
@ -211,10 +208,6 @@
<!-- parallel execution. -->
<includes>
<include>**/ITestJets3tNativeS3FileSystemContract.java</include>
<include>**/ITestS3ABlockingThreadPool.java</include>
<include>**/ITestS3AFastOutputStream.java</include>
<include>**/ITestS3AFileSystemContract.java</include>
<include>**/ITestS3AMiniYarnCluster.java</include>
<include>**/ITest*Root*.java</include>
<include>**/ITestS3AFileContextStatistics.java</include>
<include>**/ITestS3AHuge*.java</include>


@ -19,6 +19,7 @@
package org.apache.hadoop.fs.contract.s3a;
import static org.apache.hadoop.fs.s3a.Constants.*;
import static org.apache.hadoop.fs.s3a.S3ATestConstants.SCALE_TEST_TIMEOUT_MILLIS;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
@ -32,6 +33,11 @@ public class ITestS3AContractDistCp extends AbstractContractDistCpTest {
private static final long MULTIPART_SETTING = MULTIPART_MIN_SIZE;
@Override
protected int getTestTimeoutMillis() {
return SCALE_TEST_TIMEOUT_MILLIS;
}
@Override
protected Configuration createConfiguration() {
Configuration newConf = super.createConfiguration();


@ -21,6 +21,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractBondedFSContract;
import org.apache.hadoop.fs.s3a.S3ATestUtils;
/**
* The contract of S3A: only enabled if the test bucket is provided.
@ -29,7 +30,6 @@ public class S3AContract extends AbstractBondedFSContract {
public static final String CONTRACT_XML = "contract/s3a.xml";
public S3AContract(Configuration conf) {
super(conf);
//insert the base features
@ -43,8 +43,6 @@ public String getScheme() {
@Override
public Path getTestPath() {
String testUniqueForkId = System.getProperty("test.unique.fork.id");
return testUniqueForkId == null ? super.getTestPath() :
new Path("/" + testUniqueForkId, "test");
return S3ATestUtils.createTestPath(super.getTestPath());
}
}


@ -26,8 +26,8 @@
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.apache.hadoop.io.IOUtils;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
@ -40,6 +40,9 @@
public abstract class AbstractS3ATestBase extends AbstractFSContractTestBase
implements S3ATestConstants {
protected static final Logger LOG =
LoggerFactory.getLogger(AbstractS3ATestBase.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
@ -52,14 +55,16 @@ public void teardown() throws Exception {
IOUtils.closeStream(getFileSystem());
}
@Rule
public TestName methodName = new TestName();
@Before
public void nameThread() {
Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
}
@Override
protected int getTestTimeoutMillis() {
return S3A_TEST_TIMEOUT;
}
protected Configuration getConfiguration() {
return getContract().getConf();
}
@ -73,6 +78,17 @@ public S3AFileSystem getFileSystem() {
return (S3AFileSystem) super.getFileSystem();
}
/**
* Describe a test in the logs.
* @param text text to print
* @param args arguments to format in the printing
*/
protected void describe(String text, Object... args) {
LOG.info("\n\n{}: {}\n",
methodName.getMethodName(),
String.format(text, args));
}
/**
* Write a file, read it back, validate the dataset. Overwrites the file
* if it is present


@ -1,82 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
/**
* Demonstrate that the threadpool blocks additional client requests if
* its queue is full (rather than throwing an exception) by initiating an
* upload consisting of 4 parts with 2 threads and 1 spot in the queue. The
* 4th part should not trigger an exception as it would with a
* non-blocking threadpool.
*/
public class ITestS3ABlockingThreadPool {
private Configuration conf;
private S3AFileSystem fs;
@Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000);
protected Path getTestPath() {
return new Path("/tests3a");
}
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(Constants.MIN_MULTIPART_THRESHOLD, 5 * 1024 * 1024);
conf.setLong(Constants.MULTIPART_SIZE, 5 * 1024 * 1024);
conf.setInt(Constants.MAX_THREADS, 2);
conf.setInt(Constants.MAX_TOTAL_TASKS, 1);
}
@After
public void tearDown() throws Exception {
if (fs != null) {
fs.delete(getTestPath(), true);
}
}
@Test
public void testRegularMultiPartUpload() throws Exception {
fs = S3ATestUtils.createTestFileSystem(conf);
ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 16 * 1024 *
1024);
}
@Test
public void testFastMultiPartUpload() throws Exception {
conf.setBoolean(Constants.FAST_UPLOAD, true);
conf.set(Constants.FAST_UPLOAD_BUFFER,
Constants.FAST_UPLOAD_BYTEBUFFER);
fs = S3ATestUtils.createTestFileSystem(conf);
ContractTestUtils.createAndVerifyFile(fs, getTestPath(), 16 * 1024 *
1024);
}
}
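
The deleted test's javadoc above describes the key behaviour: with a bounded thread pool, an upload that needs more parts than there are worker threads and queue slots should block the caller rather than fail. As a rough sketch of that idea only (plain JDK classes, not the S3A thread pool implementation), a semaphore in front of a two-thread executor gives the same effect:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;

/** Sketch only: allows 2 running tasks plus 1 queued; further submits block. */
public final class BlockingSubmitSketch {
  private final ExecutorService pool = Executors.newFixedThreadPool(2);
  private final Semaphore permits = new Semaphore(2 + 1);

  public Future<?> submit(Runnable task) throws InterruptedException {
    permits.acquire();               // a 4th concurrent submit blocks here
    return pool.submit(() -> {
      try {
        task.run();
      } finally {
        permits.release();           // wake one blocked caller, if any
      }
    });
  }
}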


@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -18,16 +18,11 @@
package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -38,19 +33,11 @@
/**
* S3A tests for configuring block size.
*/
public class ITestS3ABlocksize extends AbstractFSContractTestBase {
public class ITestS3ABlocksize extends AbstractS3ATestBase {
private static final Logger LOG =
LoggerFactory.getLogger(ITestS3ABlocksize.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
@Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000);
@Test
@SuppressWarnings("deprecation")
public void testBlockSize() throws Exception {


@ -68,34 +68,37 @@ public class ITestS3AConfiguration {
private static final Logger LOG =
LoggerFactory.getLogger(ITestS3AConfiguration.class);
private static final String TEST_ENDPOINT = "test.fs.s3a.endpoint";
@Rule
public Timeout testTimeout = new Timeout(30 * 60 * 1000);
public Timeout testTimeout = new Timeout(
S3ATestConstants.S3A_TEST_TIMEOUT
);
@Rule
public final TemporaryFolder tempDir = new TemporaryFolder();
/**
* Test if custom endpoint is picked up.
* <p/>
* The test expects TEST_ENDPOINT to be defined in the Configuration
* <p>
* The test expects {@link S3ATestConstants#CONFIGURATION_TEST_ENDPOINT}
* to be defined in the Configuration
* describing the endpoint of the bucket to which TEST_FS_S3A_NAME points
* (f.i. "s3-eu-west-1.amazonaws.com" if the bucket is located in Ireland).
* (i.e. "s3-eu-west-1.amazonaws.com" if the bucket is located in Ireland).
* Evidently, the bucket has to be hosted in the region denoted by the
* endpoint for the test to succeed.
* <p/>
* <p>
* More info and the list of endpoint identifiers:
* http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
* @see <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">endpoint list</a>.
*
* @throws Exception
*/
@Test
public void testEndpoint() throws Exception {
conf = new Configuration();
String endpoint = conf.getTrimmed(TEST_ENDPOINT, "");
String endpoint = conf.getTrimmed(
S3ATestConstants.CONFIGURATION_TEST_ENDPOINT, "");
if (endpoint.isEmpty()) {
LOG.warn("Custom endpoint test skipped as " + TEST_ENDPOINT + "config " +
LOG.warn("Custom endpoint test skipped as " +
S3ATestConstants.CONFIGURATION_TEST_ENDPOINT + "config " +
"setting was not detected");
} else {
conf.set(Constants.ENDPOINT, endpoint);

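As a minimal illustration of how that endpoint property is supplied (the endpoint value below is only an example and must match the region hosting the test bucket; this snippet is not part of the patch):

import org.apache.hadoop.conf.Configuration;

// "test.fs.s3a.endpoint" is S3ATestConstants.CONFIGURATION_TEST_ENDPOINT
Configuration conf = new Configuration();
conf.set("test.fs.s3a.endpoint", "s3-eu-west-1.amazonaws.com");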

@ -22,7 +22,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import java.io.IOException;
@ -48,15 +47,9 @@ protected Configuration createConfiguration() {
}
private static final int[] SIZES = {
0, 1, 2, 3, 4, 5, 254, 255, 256, 257, 2 ^ 10 - 3, 2 ^ 11 - 2, 2 ^ 12 - 1
0, 1, 2, 3, 4, 5, 254, 255, 256, 257, 2 ^ 12 - 1
};
@Override
public void teardown() throws Exception {
super.teardown();
IOUtils.closeStream(getFileSystem());
}
@Test
public void testEncryption() throws Throwable {
for (int size: SIZES) {


@ -20,7 +20,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;
import java.io.IOException;
@ -43,12 +42,6 @@ protected Configuration createConfiguration() {
return conf;
}
@Override
public void teardown() throws Exception {
super.teardown();
IOUtils.closeStream(getFileSystem());
}
@Test
public void testEncrypt0() throws Throwable {
writeThenReadFileToFailure(0);


@ -18,13 +18,9 @@
package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.Test;
@ -41,15 +37,10 @@
* Test S3A Failure translation, including a functional test
* generating errors during stream IO.
*/
public class ITestS3AFailureHandling extends AbstractFSContractTestBase {
public class ITestS3AFailureHandling extends AbstractS3ATestBase {
private static final Logger LOG =
LoggerFactory.getLogger(ITestS3AFailureHandling.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
@Test
public void testReadFileChanged() throws Throwable {
describe("overwrite a file with a shorter one during a read, seek");


@ -18,13 +18,9 @@
package org.apache.hadoop.fs.s3a;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -43,7 +39,7 @@
* Use metrics to assert about the cost of file status queries.
* {@link S3AFileSystem#getFileStatus(Path)}.
*/
public class ITestS3AFileOperationCost extends AbstractFSContractTestBase {
public class ITestS3AFileOperationCost extends AbstractS3ATestBase {
private MetricDiff metadataRequests;
private MetricDiff listRequests;
@ -51,16 +47,6 @@ public class ITestS3AFileOperationCost extends AbstractFSContractTestBase {
private static final Logger LOG =
LoggerFactory.getLogger(ITestS3AFileOperationCost.class);
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
@Override
public S3AFileSystem getFileSystem() {
return (S3AFileSystem) super.getFileSystem();
}
@Override
public void setup() throws Exception {
super.setup();
@ -246,7 +232,8 @@ public void testFakeDirectoryDeletion() throws Throwable {
int destDirDepth = directoriesInPath(destDir);
directoriesCreated.assertDiffEquals(state, 1);
/* TODO: uncomment once HADOOP-13222 is in
/* TODO: uncomment once HADOOP-13222 "s3a.mkdirs() to delete empty fake parent directories"
is in
deleteRequests.assertDiffEquals(state,1);
directoriesDeleted.assertDiffEquals(state,0);
fakeDirectoriesDeleted.assertDiffEquals(state,destDirDepth);


@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -18,6 +18,9 @@
package org.apache.hadoop.fs.s3a;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@ -38,18 +41,44 @@ public class ITestS3AFileSystemContract extends FileSystemContractBaseTest {
protected static final Logger LOG =
LoggerFactory.getLogger(ITestS3AFileSystemContract.class);
private Path basePath;
@Rule
public TestName methodName = new TestName();
@Before
public void nameThread() {
Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
}
@Override
public void setUp() throws Exception {
Configuration conf = new Configuration();
fs = S3ATestUtils.createTestFileSystem(conf);
basePath = fs.makeQualified(
S3ATestUtils.createTestPath(new Path("/s3afilesystemcontract")));
super.setUp();
}
/**
* This path explicitly places all absolute paths under the per-test suite
* path directory; this allows the test to run in parallel.
* @param pathString path string as input
* @return a qualified path string.
*/
protected Path path(String pathString) {
if (pathString.startsWith("/")) {
return fs.makeQualified(new Path(basePath, pathString));
} else {
return super.path(pathString);
}
}
@Override
protected void tearDown() throws Exception {
if (fs != null) {
fs.delete(path("test"), true);
fs.delete(basePath, true);
}
super.tearDown();
}


@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -28,9 +28,6 @@
import com.amazonaws.services.securitytoken.model.GetSessionTokenResult;
import com.amazonaws.services.securitytoken.model.Credentials;
import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
import org.apache.hadoop.fs.contract.s3a.S3AContract;
import org.apache.hadoop.fs.s3native.S3xLoginHelper;
import org.apache.hadoop.conf.Configuration;
@ -48,9 +45,7 @@
* should only be used against transient filesystems where you don't care about
* the data.
*/
public class ITestS3ATemporaryCredentials extends AbstractFSContractTestBase {
public static final String TEST_STS_ENABLED = "test.fs.s3a.sts.enabled";
public static final String TEST_STS_ENDPOINT = "test.fs.s3a.sts.endpoint";
public class ITestS3ATemporaryCredentials extends AbstractS3ATestBase {
private static final Logger LOG =
LoggerFactory.getLogger(ITestS3ATemporaryCredentials.class);
@ -60,11 +55,6 @@ public class ITestS3ATemporaryCredentials extends AbstractFSContractTestBase {
private static final long TEST_FILE_SIZE = 1024;
@Override
protected AbstractFSContract createContract(Configuration conf) {
return new S3AContract(conf);
}
/**
* Test use of STS for requesting temporary credentials.
*


@ -134,14 +134,33 @@ public interface S3ATestConstants {
*/
int DEFAULT_DIRECTORY_COUNT = 2;
/**
* Default scale test timeout in seconds: {@value}.
*/
int DEFAULT_TEST_TIMEOUT = 30 * 60;
/**
* Default policy on scale tests: {@value}.
*/
boolean DEFAULT_SCALE_TESTS_ENABLED = false;
/**
* Fork ID passed down from maven if the test is running in parallel.
*/
String TEST_UNIQUE_FORK_ID = "test.unique.fork.id";
String TEST_STS_ENABLED = "test.fs.s3a.sts.enabled";
String TEST_STS_ENDPOINT = "test.fs.s3a.sts.endpoint";
/**
* Timeout in Milliseconds for standard tests: {@value}.
*/
int S3A_TEST_TIMEOUT = 10 * 60 * 1000;
/**
* Timeout in Seconds for Scale Tests: {@value}.
*/
int SCALE_TEST_TIMEOUT_SECONDS = 30 * 60;
int SCALE_TEST_TIMEOUT_MILLIS = SCALE_TEST_TIMEOUT_SECONDS * 1000;
/**
* Optional custom endpoint for S3A configuration tests.
* This does <i>not</i> set the endpoint for s3 access elsewhere.
*/
String CONFIGURATION_TEST_ENDPOINT =
"test.fs.s3a.endpoint";
}


@ -21,6 +21,7 @@
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.scale.S3AScaleTestBase;
import org.junit.Assert;
import org.junit.internal.AssumptionViolatedException;
@ -59,7 +60,7 @@ public final class S3ATestUtils {
*/
public static S3AFileSystem createTestFileSystem(Configuration conf)
throws IOException {
return createTestFileSystem(conf, true);
return createTestFileSystem(conf, false);
}
/**
@ -302,6 +303,19 @@ public static void skipIfEncryptionTestsDisabled(
}
}
/**
* Create a test path, using the value of
* {@link S3ATestConstants#TEST_UNIQUE_FORK_ID} if it is set.
* @param defVal default value
* @return a path
*/
public static Path createTestPath(Path defVal) {
String testUniqueForkId = System.getProperty(
S3ATestConstants.TEST_UNIQUE_FORK_ID);
return testUniqueForkId == null ? defVal :
new Path("/" + testUniqueForkId, "test");
}
/**
* Reset all metrics in a list.
* @param metrics metrics to reset


@ -38,6 +38,7 @@
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.s3a.S3AFileStatus;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.Statistic;
import org.apache.hadoop.util.Progressable;
@ -70,27 +71,22 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
private int partitionSize;
@Override
public void setUp() throws Exception {
super.setUp();
public void setup() throws Exception {
super.setup();
final Path testPath = getTestPath();
scaleTestDir = new Path(testPath, "scale");
hugefile = new Path(scaleTestDir, "hugefile");
hugefileRenamed = new Path(scaleTestDir, "hugefileRenamed");
}
@Override
public void tearDown() throws Exception {
// do nothing. Specifically: do not delete the test dir
}
/**
* Note that this can get called before test setup.
* @return the configuration to use.
*/
@Override
protected Configuration createConfiguration() {
Configuration conf = super.createConfiguration();
protected Configuration createScaleConfiguration() {
Configuration conf = super.createScaleConfiguration();
partitionSize = (int)getTestPropertyBytes(conf,
KEY_HUGE_PARTITION_SIZE,
DEFAULT_PARTITION_SIZE);
@ -155,6 +151,7 @@ public void test_010_CreateHugeFile() throws IOException {
// perform the upload.
// there's lots of logging here, so that a tail -f on the output log
// can give a view of what is happening.
S3AFileSystem fs = getFileSystem();
StorageStatistics storageStatistics = fs.getStorageStatistics();
String putRequests = Statistic.OBJECT_PUT_REQUESTS.getSymbol();
String putBytes = Statistic.OBJECT_PUT_BYTES.getSymbol();
@ -286,12 +283,13 @@ private void verifyNoFailures(String operation) {
}
void assumeHugeFileExists() throws IOException {
S3AFileSystem fs = getFileSystem();
ContractTestUtils.assertPathExists(fs, "huge file not created", hugefile);
ContractTestUtils.assertIsFile(fs, hugefile);
}
private void logFSState() {
LOG.info("File System state after operation:\n{}", fs);
LOG.info("File System state after operation:\n{}", getFileSystem());
}
@Test
@ -305,6 +303,7 @@ public void test_040_PositionedReadHugeFile() throws Throwable {
}
String filetype = encrypted ? "encrypted file" : "file";
describe("Positioned reads of %s %s", filetype, hugefile);
S3AFileSystem fs = getFileSystem();
S3AFileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen();
int ops = 0;
@ -344,6 +343,7 @@ public void test_040_PositionedReadHugeFile() throws Throwable {
public void test_050_readHugeFile() throws Throwable {
assumeHugeFileExists();
describe("Reading %s", hugefile);
S3AFileSystem fs = getFileSystem();
S3AFileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen();
long blocks = filesize / uploadBlockSize;
@ -369,6 +369,7 @@ public void test_050_readHugeFile() throws Throwable {
public void test_100_renameHugeFile() throws Throwable {
assumeHugeFileExists();
describe("renaming %s to %s", hugefile, hugefileRenamed);
S3AFileSystem fs = getFileSystem();
S3AFileStatus status = fs.getFileStatus(hugefile);
long filesize = status.getLen();
fs.delete(hugefileRenamed, false);
@ -396,7 +397,7 @@ public void test_100_renameHugeFile() throws Throwable {
public void test_999_DeleteHugeFiles() throws IOException {
deleteHugeFile();
ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
S3AFileSystem fs = getFileSystem();
fs.delete(hugefileRenamed, false);
timer2.end("time to delete %s", hugefileRenamed);
ContractTestUtils.rm(fs, getTestPath(), true, true);
@ -405,7 +406,7 @@ public void test_999_DeleteHugeFiles() throws IOException {
protected void deleteHugeFile() throws IOException {
describe("Deleting %s", hugefile);
NanoTimer timer = new NanoTimer();
fs.delete(hugefile, false);
getFileSystem().delete(hugefile, false);
timer.end("time to delete %s", hugefile);
}


@ -20,9 +20,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.Constants;
import org.junit.Test;
import java.io.IOException;
/**
* Tests file deletion with multi-delete disabled.
@ -30,15 +27,10 @@
public class ITestS3ADeleteFilesOneByOne extends ITestS3ADeleteManyFiles {
@Override
protected Configuration createConfiguration() {
Configuration configuration = super.createConfiguration();
protected Configuration createScaleConfiguration() {
Configuration configuration = super.createScaleConfiguration();
configuration.setBoolean(Constants.ENABLE_MULTI_DELETE, false);
return configuration;
}
@Override
@Test
public void testOpenCreate() throws IOException {
}
}


@ -20,6 +20,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -50,12 +52,12 @@ public class ITestS3ADeleteManyFiles extends S3AScaleTestBase {
*/
@Test
public void testBulkRenameAndDelete() throws Throwable {
final Path scaleTestDir = getTestPath();
final Path scaleTestDir = path("testBulkRenameAndDelete");
final Path srcDir = new Path(scaleTestDir, "src");
final Path finalDir = new Path(scaleTestDir, "final");
final long count = getOperationCount();
final S3AFileSystem fs = getFileSystem();
ContractTestUtils.rm(fs, scaleTestDir, true, false);
fs.mkdirs(srcDir);
fs.mkdirs(finalDir);
@ -114,11 +116,4 @@ public Boolean call() throws IOException {
ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}
@Test
public void testOpenCreate() throws IOException {
final Path scaleTestDir = getTestPath();
final Path srcDir = new Path(scaleTestDir, "opencreate");
ContractTestUtils.createAndVerifyFile(fs, srcDir, 1024);
ContractTestUtils.createAndVerifyFile(fs, srcDir, 50 * 1024);
}
}


@ -19,6 +19,7 @@
package org.apache.hadoop.fs.s3a.scale;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.Statistic;
import org.junit.Test;
import org.slf4j.Logger;
@ -40,8 +41,9 @@ public class ITestS3ADirectoryPerformance extends S3AScaleTestBase {
@Test
public void testListOperations() throws Throwable {
describe("Test recursive list operations");
final Path scaleTestDir = getTestPath();
final Path scaleTestDir = path("testListOperations");
final Path listDir = new Path(scaleTestDir, "lists");
S3AFileSystem fs = getFileSystem();
// scale factor.
int scale = getConf().getInt(KEY_DIRECTORY_COUNT, DEFAULT_DIRECTORY_COUNT);
@ -137,15 +139,16 @@ public void testListOperations() throws Throwable {
@Test
public void testTimeToStatEmptyDirectory() throws Throwable {
describe("Time to stat an empty directory");
Path path = new Path(getTestPath(), "empty");
fs.mkdirs(path);
Path path = path("empty");
getFileSystem().mkdirs(path);
timeToStatPath(path);
}
@Test
public void testTimeToStatNonEmptyDirectory() throws Throwable {
describe("Time to stat a non-empty directory");
Path path = new Path(getTestPath(), "dir");
Path path = path("dir");
S3AFileSystem fs = getFileSystem();
fs.mkdirs(path);
touch(fs, new Path(path, "file"));
timeToStatPath(path);
@ -154,8 +157,8 @@ public void testTimeToStatNonEmptyDirectory() throws Throwable {
@Test
public void testTimeToStatFile() throws Throwable {
describe("Time to stat a simple file");
Path path = new Path(getTestPath(), "file");
touch(fs, path);
Path path = path("file");
touch(getFileSystem(), path);
timeToStatPath(path);
}
@ -167,6 +170,7 @@ public void testTimeToStatRoot() throws Throwable {
private void timeToStatPath(Path path) throws IOException {
describe("Timing getFileStatus(\"%s\")", path);
S3AFileSystem fs = getFileSystem();
MetricDiff metadataRequests =
new MetricDiff(fs, Statistic.OBJECT_METADATA_REQUESTS);
MetricDiff listRequests =


@ -29,8 +29,8 @@
public class ITestS3AHugeFilesClassicOutput extends AbstractSTestS3AHugeFiles {
@Override
protected Configuration createConfiguration() {
final Configuration conf = super.createConfiguration();
protected Configuration createScaleConfiguration() {
final Configuration conf = super.createScaleConfiguration();
conf.setBoolean(Constants.FAST_UPLOAD, false);
return conf;
}


@ -436,7 +436,8 @@ public void testRandomReadOverBuffer() throws Throwable {
describe("read over a buffer, making sure that the requests" +
" spans readahead ranges");
int datasetLen = _32K;
Path dataFile = new Path(getTestPath(), "testReadOverBuffer.bin");
S3AFileSystem fs = getFileSystem();
Path dataFile = path("testReadOverBuffer.bin");
byte[] sourceData = dataset(datasetLen, 0, 64);
// relies on the field 'fs' referring to the R/W FS
writeDataset(fs, dataFile, sourceData, datasetLen, _16K, true);


@ -21,20 +21,15 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
import org.apache.hadoop.fs.s3a.S3AInputStream;
import org.apache.hadoop.fs.s3a.S3AInstrumentation;
import org.apache.hadoop.fs.s3a.S3ATestConstants;
import org.apache.hadoop.fs.s3a.Statistic;
import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.junit.After;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TestName;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -45,25 +40,35 @@
/**
* Base class for scale tests; here is where the common scale configuration
* keys are defined.
* <p>
* Configuration setup is a bit more complex than in the parent classes,
* as the test timeout is desired prior to the {@link #getTestTimeoutMillis()}
* being called to set the test timeout rule; this happens before any of
* the methods tagged with {@code @Before} are invoked.
* <p>
* The algorithm is:
* <ol>
* <li>Create a configuration on demand, via
* {@link #demandCreateConfiguration()}</li>
* <li>Have that return the value of {@link #conf} or create a new one
* if that field is null (and set the field to the created value).</li>
* <li>Override the superclasses {@link #createConfiguration()}
* to return the demand created value; make that method final so that
* subclasses don't break things by overridding it.</li>
* <li>Add a new override point {@link #createScaleConfiguration()}
* to create the config, one which subclasses can (and do) override.</li>
* </ol>
* Bear in mind that this process also takes place during initialization
* of the superclass; the overridden methods are being invoked before
* their instances are fully configured. This is considered
* <i>very bad form</i> in Java code (indeed, in C++ it is actually permitted;
* the base class implementations get invoked instead).
*/
public class S3AScaleTestBase extends Assert implements S3ATestConstants {
@Rule
public final TestName methodName = new TestName();
@Rule
public Timeout testTimeout = createTestTimeout();
@Before
public void nameThread() {
Thread.currentThread().setName("JUnit");
}
public class S3AScaleTestBase extends AbstractS3ATestBase {
public static final int _1KB = 1024;
public static final int _1MB = _1KB * _1KB;
protected S3AFileSystem fs;
protected static final Logger LOG =
LoggerFactory.getLogger(S3AScaleTestBase.class);
@ -71,14 +76,8 @@ public void nameThread() {
private boolean enabled;
/**
* Configuration generator. May be overridden to inject
* some custom options.
* @return a configuration with which to create FS instances
*/
protected Configuration createConfiguration() {
return new Configuration();
}
private Path testPath;
/**
* Get the configuration used to set up the FS.
@ -88,79 +87,72 @@ public Configuration getConf() {
return conf;
}
/**
* Setup. This triggers creation of the configuration.
*/
@Before
public void setUp() throws Exception {
demandCreateConfiguration();
@Override
public void setup() throws Exception {
super.setup();
testPath = path("/tests3ascale");
LOG.debug("Scale test operation count = {}", getOperationCount());
// multipart purges are disabled on the scale tests
fs = createTestFileSystem(conf, false);
// check for the test being enabled
enabled = getTestPropertyBool(
getConf(),
KEY_SCALE_TESTS_ENABLED,
DEFAULT_SCALE_TESTS_ENABLED);
Assume.assumeTrue("Scale test disabled: to enable set property " +
KEY_SCALE_TESTS_ENABLED, enabled);
KEY_SCALE_TESTS_ENABLED, isEnabled());
}
/**
* Create the configuration if it is not already set up.
* Create the configuration if it is not already set up, calling
* {@link #createScaleConfiguration()} to do so.
* @return the configuration.
*/
private synchronized Configuration demandCreateConfiguration() {
if (conf == null) {
conf = createConfiguration();
conf = createScaleConfiguration();
}
return conf;
}
@After
public void tearDown() throws Exception {
ContractTestUtils.rm(fs, getTestPath(), true, true);
/**
* Returns the config created with {@link #demandCreateConfiguration()}.
* Subclasses must override {@link #createScaleConfiguration()}
* in order to customize their configurations.
* @return a configuration with which to create FS instances
*/
protected final Configuration createConfiguration() {
return demandCreateConfiguration();
}
/**
* Override point: create a configuration.
* @return a configuration with which to create FS instances
*/
protected Configuration createScaleConfiguration() {
return new Configuration();
}
protected Path getTestPath() {
String testUniqueForkId = System.getProperty("test.unique.fork.id");
return testUniqueForkId == null ? new Path("/tests3a") :
new Path("/" + testUniqueForkId, "tests3a");
return testPath;
}
protected long getOperationCount() {
return getConf().getLong(KEY_OPERATION_COUNT, DEFAULT_OPERATION_COUNT);
}
/**
* Create the timeout for tests. Some large tests may need a larger value.
* @return the test timeout to use
*/
protected Timeout createTestTimeout() {
demandCreateConfiguration();
return new Timeout(
getTestTimeoutSeconds() * 1000);
}
/**
* Get the test timeout in seconds.
* @return the test timeout as set in system properties or the default.
*/
protected static int getTestTimeoutSeconds() {
return getTestPropertyInt(null,
protected int getTestTimeoutSeconds() {
return getTestPropertyInt(demandCreateConfiguration(),
KEY_TEST_TIMEOUT,
DEFAULT_TEST_TIMEOUT);
SCALE_TEST_TIMEOUT_SECONDS);
}
/**
* Describe a test in the logs.
* @param text text to print
* @param args arguments to format in the printing
*/
protected void describe(String text, Object... args) {
LOG.info("\n\n{}: {}\n",
methodName.getMethodName(),
String.format(text, args));
@Override
protected int getTestTimeoutMillis() {
return getTestTimeoutSeconds() * 1000;
}
/**
@ -189,20 +181,25 @@ protected S3AInstrumentation.InputStreamStatistics getInputStreamStatistics(
* @return the value.
*/
public long gaugeValue(Statistic statistic) {
S3AInstrumentation instrumentation = fs.getInstrumentation();
S3AInstrumentation instrumentation = getFileSystem().getInstrumentation();
MutableGaugeLong gauge = instrumentation.lookupGauge(statistic.getSymbol());
assertNotNull("No gauge " + statistic
+ " in " + instrumentation.dump("", " = ", "\n", true), gauge);
return gauge.value();
}
protected boolean isEnabled() {
/**
* Is the test enabled; this is controlled by the configuration
* and the {@code -Dscale} maven option.
* @return true if the scale tests are enabled.
*/
protected final boolean isEnabled() {
return enabled;
}
/**
* Flag to indicate that this test is being used sequentially. This
* is used by some of the scale tests to validate test time expectations.
* Flag to indicate that this test is being executed in parallel.
* This is used by some of the scale tests to validate test time expectations.
* @return true if the build indicates this test is being run in parallel.
*/
protected boolean isParallelExecution() {

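Putting the algorithm described in the class javadoc to use, a subclass customises its scale options solely by overriding createScaleConfiguration(), as the ITestS3ADeleteFilesOneByOne and ITestS3AHugeFilesClassicOutput changes above show. A minimal sketch (the class name and the tweaked option are invented for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.s3a.scale.S3AScaleTestBase;

/** Hypothetical scale test illustrating the createScaleConfiguration() override point. */
public class ITestS3AExampleScale extends S3AScaleTestBase {
  @Override
  protected Configuration createScaleConfiguration() {
    Configuration conf = super.createScaleConfiguration();
    // bump the operation count; the key is inherited from S3ATestConstants
    conf.setLong(KEY_OPERATION_COUNT, 2 * DEFAULT_OPERATION_COUNT);
    return conf;
  }
}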

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -59,7 +59,7 @@ public void tearDown() throws Exception {
}
protected Path getTestPath() {
return new Path("/tests3afc");
return S3ATestUtils.createTestPath(new Path("/tests3afc"));
}
@Test


@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
@ -24,13 +24,13 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.examples.WordCount;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.S3ATestUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
@ -39,26 +39,26 @@
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.junit.After;
import static org.junit.Assert.assertTrue;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
* Tests that S3A is usable through a YARN application.
*/
public class ITestS3AMiniYarnCluster {
public class ITestS3AMiniYarnCluster extends AbstractS3ATestBase {
private final Configuration conf = new YarnConfiguration();
private S3AFileSystem fs;
private MiniYARNCluster yarnCluster;
private final String rootPath = "/tests/MiniClusterWordCount/";
private Path rootPath;
@Before
public void beforeTest() throws IOException {
@Override
public void setup() throws Exception {
super.setup();
fs = S3ATestUtils.createTestFileSystem(conf);
fs.mkdirs(new Path(rootPath + "input/"));
rootPath = path("MiniClusterWordCount");
Path workingDir = path("working");
fs.setWorkingDirectory(workingDir);
fs.mkdirs(new Path(rootPath, "input/"));
yarnCluster = new MiniYARNCluster("MiniClusterWordCount", // testName
1, // number of node managers
@ -68,17 +68,19 @@ public void beforeTest() throws IOException {
yarnCluster.start();
}
@After
public void afterTest() throws IOException {
fs.delete(new Path(rootPath), true);
yarnCluster.stop();
@Override
public void teardown() throws Exception {
if (yarnCluster != null) {
yarnCluster.stop();
}
super.teardown();
}
@Test
public void testWithMiniCluster() throws Exception {
Path input = new Path(rootPath + "input/in.txt");
Path input = new Path(rootPath, "input/in.txt");
input = input.makeQualified(fs.getUri(), fs.getWorkingDirectory());
Path output = new Path(rootPath + "output/");
Path output = new Path(rootPath, "output/");
output = output.makeQualified(fs.getUri(), fs.getWorkingDirectory());
writeStringToFile(input, "first line\nsecond line\nthird line");
@ -134,15 +136,9 @@ private void writeStringToFile(Path path, String string) throws IOException {
/**
* helper method.
*/
private String readStringFromFile(Path path) {
try (FSDataInputStream in = fs.open(path)) {
long bytesLen = fs.getFileStatus(path).getLen();
byte[] buffer = new byte[(int) bytesLen];
IOUtils.readFully(in, buffer, 0, buffer.length);
return new String(buffer);
} catch (IOException e) {
throw new RuntimeException("Failed to read from [" + path + "]", e);
}
private String readStringFromFile(Path path) throws IOException {
return ContractTestUtils.readBytesToString(fs, path,
(int) fs.getFileStatus(path).getLen());
}
}