Revert "HADOOP-17244. S3A directory delete tombstones dir markers prematurely. (#2280)"

This reverts commit 0c82eb0324.

Change-Id: I6bd100d9de19660b0f28ee0ab16faf747d6d9f05
Author: Steve Loughran
Date:   2020-09-11 18:06:54 +01:00
Parent: 87ff2f5597
Commit: aa80bcb1ec
13 changed files with 30 additions and 201 deletions


@@ -774,13 +774,8 @@ public void renameInternal(final Path src, final Path dst,
       if (dstStatus.isDirectory()) {
         RemoteIterator<FileStatus> list = listStatusIterator(dst);
         if (list != null && list.hasNext()) {
-          FileStatus child = list.next();
-          LOG.debug("Rename {}, {} failing as destination has"
-              + " at least one child: {}",
-              src, dst, child);
           throw new IOException(
-              "Rename cannot overwrite non empty destination directory " + dst
-              + " containing child: " + child.getPath());
+              "Rename cannot overwrite non empty destination directory " + dst);
         }
       }
       delete(dst, false);
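
For orientation, a minimal caller-side sketch of how the restored check behaves (not part of the patch; the FileContext setup and paths are illustrative assumptions):

  import java.io.IOException;
  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Options;
  import org.apache.hadoop.fs.Path;

  public class RenameNonEmptyDirExample {
    public static void main(String[] args) throws IOException {
      FileContext fc = FileContext.getFileContext();
      Path src = new Path("/tmp/renameSrc");
      Path dst = new Path("/tmp/renameDstWithChildren");
      try {
        // Even with OVERWRITE, a non-empty destination directory is rejected.
        fc.rename(src, dst, Options.Rename.OVERWRITE);
      } catch (IOException e) {
        // After the revert the message no longer names the offending child:
        // "Rename cannot overwrite non empty destination directory <dst>"
        System.out.println("rename rejected: " + e.getMessage());
      }
    }
  }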


@@ -1306,33 +1306,14 @@ protected void createFile(Path path) throws IOException {
   protected void rename(Path src, Path dst, boolean srcExists,
       boolean dstExists, Rename... options) throws IOException {
-    IOException ioe = null;
     try {
       fc.rename(src, dst, options);
-    } catch (IOException ex) {
-      // lets not swallow this completely.
-      LOG.warn("Rename result: " + ex, ex);
-      ioe = ex;
-    }
-    // There's a bit of extra work in these assertions, as if they fail
-    // any IOE caught earlier is added as the cause. This
-    // gets the likely root cause to the test report
-    try {
-      LOG.debug("Probing source and destination");
+    } finally {
       Assert.assertEquals("Source exists", srcExists, exists(fc, src));
       Assert.assertEquals("Destination exists", dstExists, exists(fc, dst));
-    } catch (AssertionError e) {
-      if (ioe != null && e.getCause() == null) {
-        e.initCause(ioe);
-      }
-      throw e;
-    }
-    if (ioe != null) {
-      throw ioe;
     }
   }

   private boolean containsPath(Path path, FileStatus[] filteredPaths)
       throws IOException {
     for(int i = 0; i < filteredPaths.length; i ++) {
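
A hedged usage sketch of the reverted helper above, as it might appear inside the same test class (paths and expectations are illustrative; Rename is the class's existing Options.Rename import):

  // Overwrite rename expected to succeed: afterwards the source is gone and
  // the destination exists; the finally block now probes both unconditionally.
  Path renameSrc = new Path("/test/renameSrc");
  Path renameDst = new Path("/test/renameDst");
  rename(renameSrc, renameDst, false /* srcExists after */, true /* dstExists after */,
      Rename.OVERWRITE);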


@@ -1574,7 +1574,7 @@ public void deleteObjectAtPath(final Path path,
   @Override
   @Retries.RetryTranslated
-  public RemoteIterator<S3ALocatedFileStatus> listFilesAndDirectoryMarkers(
+  public RemoteIterator<S3ALocatedFileStatus> listFilesAndEmptyDirectories(
       final Path path,
       final S3AFileStatus status,
       final boolean collectTombstones,

@@ -2079,7 +2079,6 @@ protected void deleteObject(String key)
         DELETE_CONSIDERED_IDEMPOTENT,
         ()-> {
           incrementStatistic(OBJECT_DELETE_REQUESTS);
-          incrementStatistic(OBJECT_DELETE_OBJECTS);
           s3.deleteObject(bucket, key);
           return null;
         });

@@ -2126,14 +2125,9 @@ private void blockRootDelete(String key) throws InvalidRequestException {
   }

   /**
-   * Perform a bulk object delete operation against S3; leaves S3Guard
-   * alone.
+   * Perform a bulk object delete operation.
    * Increments the {@code OBJECT_DELETE_REQUESTS} and write
-   * operation statistics
-   * <p></p>
-   * {@code OBJECT_DELETE_OBJECTS} is updated with the actual number
-   * of objects deleted in the request.
-   * <p></p>
+   * operation statistics.
    * Retry policy: retry untranslated; delete considered idempotent.
    * If the request is throttled, this is logged in the throttle statistics,
    * with the counter set to the number of keys, rather than the number

@@ -2154,10 +2148,9 @@ private DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteRequest)
     incrementWriteOperations();
     BulkDeleteRetryHandler retryHandler =
         new BulkDeleteRetryHandler(createStoreContext());
-    int keyCount = deleteRequest.getKeys().size();
     try(DurationInfo ignored =
         new DurationInfo(LOG, false, "DELETE %d keys",
-            keyCount)) {
+            deleteRequest.getKeys().size())) {
       return invoker.retryUntranslated("delete",
           DELETE_CONSIDERED_IDEMPOTENT,
           (text, e, r, i) -> {

@@ -2166,7 +2159,6 @@ private DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteRequest)
           },
           () -> {
             incrementStatistic(OBJECT_DELETE_REQUESTS, 1);
-            incrementStatistic(OBJECT_DELETE_OBJECTS, keyCount);
             return s3.deleteObjects(deleteRequest);
           });
     } catch (MultiObjectDeleteException e) {


@@ -156,7 +156,6 @@ public class S3AInstrumentation implements Closeable, MetricsSource {
       INVOCATION_RENAME,
       OBJECT_COPY_REQUESTS,
       OBJECT_DELETE_REQUESTS,
-      OBJECT_DELETE_OBJECTS,
       OBJECT_LIST_REQUESTS,
       OBJECT_CONTINUE_LIST_REQUESTS,
       OBJECT_METADATA_REQUESTS,


@@ -85,8 +85,6 @@ public enum Statistic {
       "Calls of rename()"),
   OBJECT_COPY_REQUESTS("object_copy_requests", "Object copy requests"),
   OBJECT_DELETE_REQUESTS("object_delete_requests", "Object delete requests"),
-  OBJECT_DELETE_OBJECTS("object_delete_objects",
-      "Objects deleted in delete requests"),
   OBJECT_LIST_REQUESTS("object_list_requests",
       "Number of object listings made"),
   OBJECT_CONTINUE_LIST_REQUESTS("object_continue_list_requests",
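
For orientation, a hedged sketch of how a test might read the surviving counter back (assumes an initialized S3AFileSystem fs and the test-visible getInstrumentation()/getCounterValue(Statistic) accessors; treat the accessor names as assumptions):

  // Only the per-request counter remains; OBJECT_DELETE_OBJECTS no longer exists.
  long deleteRequests =
      fs.getInstrumentation().getCounterValue(Statistic.OBJECT_DELETE_REQUESTS);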


@@ -23,7 +23,6 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
-import java.util.stream.Collectors;

 import com.amazonaws.services.s3.model.DeleteObjectsRequest;
 import com.amazonaws.services.s3.model.DeleteObjectsResult;
@@ -153,13 +152,10 @@ public class DeleteOperation extends ExecutingStoreOperation<Boolean> {
   /**
    * List of keys built up for the next delete batch.
    */
-  private List<DeleteEntry> keys;
+  private List<DeleteObjectsRequest.KeyVersion> keys;

   /**
-   * List of paths built up for incremental deletion on tree delete.
-   * At the end of the entire delete the full tree is scanned in S3Guard
-   * and tombstones added. For this reason this list of paths <i>must not</i>
-   * include directory markers, as that will break the scan.
+   * List of paths built up for deletion.
    */
   private List<Path> paths;
@@ -327,7 +323,7 @@ protected void deleteDirectoryTree(final Path path,
       // list files including any under tombstones through S3Guard
       LOG.debug("Getting objects for directory prefix {} to delete", dirKey);
       final RemoteIterator<S3ALocatedFileStatus> locatedFiles =
-          callbacks.listFilesAndDirectoryMarkers(path, status,
+          callbacks.listFilesAndEmptyDirectories(path, status,
               false, true);

       // iterate through and delete. The next() call will block when a new S3
@@ -363,10 +359,7 @@ protected void deleteDirectoryTree(final Path path,
         while (objects.hasNext()) {
           // get the next entry in the listing.
           extraFilesDeleted++;
-          S3AFileStatus next = objects.next();
-          LOG.debug("Found Unlisted entry {}", next);
-          queueForDeletion(deletionKey(next), null,
-              next.isDirectory());
+          queueForDeletion(deletionKey(objects.next()), null);
         }
         if (extraFilesDeleted > 0) {
           LOG.debug("Raw S3 Scan found {} extra file(s) to delete",
@@ -409,7 +402,7 @@ private String deletionKey(final S3AFileStatus stat) {
    */
   private void queueForDeletion(
       final S3AFileStatus stat) throws IOException {
-    queueForDeletion(deletionKey(stat), stat.getPath(), stat.isDirectory());
+    queueForDeletion(deletionKey(stat), stat.getPath());
   }

   /**
@@ -420,18 +413,14 @@ private void queueForDeletion(
    *
    * @param key key to delete
    * @param deletePath nullable path of the key
-   * @param isDirMarker is the entry a directory?
    * @throws IOException failure of the previous batch of deletions.
    */
   private void queueForDeletion(final String key,
-      @Nullable final Path deletePath,
-      boolean isDirMarker) throws IOException {
+      @Nullable final Path deletePath) throws IOException {
     LOG.debug("Adding object to delete: \"{}\"", key);
-    keys.add(new DeleteEntry(key, isDirMarker));
+    keys.add(new DeleteObjectsRequest.KeyVersion(key));
     if (deletePath != null) {
-      if (!isDirMarker) {
-        paths.add(deletePath);
-      }
+      paths.add(deletePath);
     }

     if (keys.size() == pageSize) {
@@ -495,7 +484,7 @@ private void deleteObjectAtPath(
    * @return the submitted future or null
    */
   private CompletableFuture<Void> submitDelete(
-      final List<DeleteEntry> keyList,
+      final List<DeleteObjectsRequest.KeyVersion> keyList,
       final List<Path> pathList) {

     if (keyList.isEmpty() && pathList.isEmpty()) {
@@ -525,59 +514,31 @@ private CompletableFuture<Void> submitDelete(
   @Retries.RetryTranslated
   private void asyncDeleteAction(
       final BulkOperationState state,
-      final List<DeleteEntry> keyList,
+      final List<DeleteObjectsRequest.KeyVersion> keyList,
       final List<Path> pathList,
       final boolean auditDeletedKeys)
       throws IOException {
-    List<DeleteObjectsResult.DeletedObject> deletedObjects = new ArrayList<>();
     try (DurationInfo ignored =
             new DurationInfo(LOG, false, "Delete page of keys")) {
       DeleteObjectsResult result = null;
       List<Path> undeletedObjects = new ArrayList<>();
       if (!keyList.isEmpty()) {
-        // first delete the files.
-        List<DeleteObjectsRequest.KeyVersion> files = keyList.stream()
-            .filter(e -> !e.isDirMarker)
-            .map(e -> e.keyVersion)
-            .collect(Collectors.toList());
-        result = Invoker.once("Remove S3 Files",
+        result = Invoker.once("Remove S3 Keys",
             status.getPath().toString(),
             () -> callbacks.removeKeys(
-                files,
+                keyList,
                 false,
                 undeletedObjects,
                 state,
                 !auditDeletedKeys));
-        if (result != null) {
-          deletedObjects.addAll(result.getDeletedObjects());
-        }
-        // now the dirs
-        List<DeleteObjectsRequest.KeyVersion> dirs = keyList.stream()
-            .filter(e -> e.isDirMarker)
-            .map(e -> e.keyVersion)
-            .collect(Collectors.toList());
-        // This is invoked with deleteFakeDir = true, so
-        // S3Guard is not updated.
-        result = Invoker.once("Remove S3 Dir Markers",
-            status.getPath().toString(),
-            () -> callbacks.removeKeys(
-                dirs,
-                true,
-                undeletedObjects,
-                state,
-                !auditDeletedKeys));
-        if (result != null) {
-          deletedObjects.addAll(result.getDeletedObjects());
-        }
       }
       if (!pathList.isEmpty()) {
-        // delete file paths only. This stops tombstones
-        // being added until the final directory cleanup
-        // (HADOOP-17244)
         metadataStore.deletePaths(pathList, state);
       }
-      if (auditDeletedKeys) {
+      if (auditDeletedKeys && result != null) {
         // audit the deleted keys
+        List<DeleteObjectsResult.DeletedObject> deletedObjects =
+            result.getDeletedObjects();
         if (deletedObjects.size() != keyList.size()) {
           // size mismatch
           LOG.warn("Size mismatch in deletion operation. "
@@ -588,7 +549,7 @@ private void asyncDeleteAction(
           for (DeleteObjectsResult.DeletedObject del : deletedObjects) {
             keyList.removeIf(kv -> kv.getKey().equals(del.getKey()));
           }
-          for (DeleteEntry kv : keyList) {
+          for (DeleteObjectsRequest.KeyVersion kv : keyList) {
             LOG.debug("{}", kv.getKey());
           }
         }
@@ -596,31 +557,5 @@ private void asyncDeleteAction(
     }
   }

-  /**
-   * Deletion entry; dir marker state is tracked to control S3Guard
-   * update policy.
-   */
-  private static final class DeleteEntry {
-    private final DeleteObjectsRequest.KeyVersion keyVersion;
-
-    private final boolean isDirMarker;
-
-    private DeleteEntry(final String key, final boolean isDirMarker) {
-      this.keyVersion = new DeleteObjectsRequest.KeyVersion(key);
-      this.isDirMarker = isDirMarker;
-    }
-
-    public String getKey() {
-      return keyVersion.getKey();
-    }
-
-    @Override
-    public String toString() {
-      return "DeleteEntry{" +
-          "key='" + getKey() + '\'' +
-          ", isDirMarker=" + isDirMarker +
-          '}';
-    }
-  }
 }
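
A minimal sketch of the shape a delete page takes once DeleteEntry is gone: keys go straight into the AWS SDK types, with no directory-marker flag attached (bucket and key names are illustrative):

  import java.util.ArrayList;
  import java.util.List;
  import com.amazonaws.services.s3.model.DeleteObjectsRequest;

  // Illustrative only: build a page of KeyVersion entries and a bulk request.
  List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>();
  keys.add(new DeleteObjectsRequest.KeyVersion("data/dir1/file1"));
  keys.add(new DeleteObjectsRequest.KeyVersion("data/dir1/"));  // marker and file alike
  DeleteObjectsRequest request =
      new DeleteObjectsRequest("example-bucket").withKeys(keys);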


@@ -105,7 +105,7 @@ void deleteObjectAtPath(Path path,
       throws IOException;

   /**
-   * Recursive list of files and directory markers.
+   * Recursive list of files and empty directories.
    *
    * @param path path to list from
    * @param status optional status of path to list.

@@ -115,7 +115,7 @@ void deleteObjectAtPath(Path path,
    * @throws IOException failure
    */
   @Retries.RetryTranslated
-  RemoteIterator<S3ALocatedFileStatus> listFilesAndDirectoryMarkers(
+  RemoteIterator<S3ALocatedFileStatus> listFilesAndEmptyDirectories(
       Path path,
       S3AFileStatus status,
       boolean collectTombstones,


@@ -400,7 +400,6 @@ protected void recursiveDirectoryRename() throws IOException {
           destStatus.getPath());
       // Although the dir marker policy doesn't always need to do this,
       // it's simplest just to be consistent here.
-      // note: updates the metastore as well a S3.
       callbacks.deleteObjectAtPath(destStatus.getPath(), dstKey, false, null);
     }

@@ -412,7 +411,7 @@ protected void recursiveDirectoryRename() throws IOException {
         false);

     final RemoteIterator<S3ALocatedFileStatus> iterator =
-        callbacks.listFilesAndDirectoryMarkers(parentPath,
+        callbacks.listFilesAndEmptyDirectories(parentPath,
             sourceStatus,
             true,
             true);


@@ -717,7 +717,7 @@ public DDBPathMetadata get(Path path) throws IOException {
   public DDBPathMetadata get(Path path, boolean wantEmptyDirectoryFlag)
       throws IOException {
     checkPath(path);
-    LOG.debug("Get from table {} in region {}: {} ; wantEmptyDirectory={}",
+    LOG.debug("Get from table {} in region {}: {}. wantEmptyDirectory={}",
         tableName, region, path, wantEmptyDirectoryFlag);
     DDBPathMetadata result = innerGet(path, wantEmptyDirectoryFlag);
     LOG.debug("result of get {} is: {}", path, result);


@@ -32,7 +32,6 @@
 import org.assertj.core.api.Assertions;
 import org.junit.Test;

-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum;
 import org.apache.hadoop.fs.s3a.impl.StoreContext;

@@ -287,27 +286,4 @@ public int read() {
     s3.putObject(putObjectRequest);
   }

-  @Test
-  public void testDirMarkerDelete() throws Throwable {
-    S3AFileSystem fs = getFileSystem();
-    assumeFilesystemHasMetadatastore(getFileSystem());
-    Path baseDir = methodPath();
-    Path subFile = new Path(baseDir, "subdir/file.txt");
-    // adds the s3guard entry
-    fs.mkdirs(baseDir);
-    touch(fs, subFile);
-    // PUT a marker
-    createEmptyObject(fs, fs.pathToKey(baseDir) + "/");
-    fs.delete(baseDir, true);
-    assertPathDoesNotExist("Should have been deleted", baseDir);
-
-    // now create the dir again
-    fs.mkdirs(baseDir);
-    FileStatus fileStatus = fs.getFileStatus(baseDir);
-    Assertions.assertThat(fileStatus)
-        .matches(FileStatus::isDirectory, "Not a directory");
-    Assertions.assertThat(fs.listStatus(baseDir))
-        .describedAs("listing of %s", baseDir)
-        .isEmpty();
-  }
 }


@@ -333,7 +333,7 @@ public void deleteObjectAtPath(
   }

   @Override
-  public RemoteIterator<S3ALocatedFileStatus> listFilesAndDirectoryMarkers(
+  public RemoteIterator<S3ALocatedFileStatus> listFilesAndEmptyDirectories(
       Path path,
       S3AFileStatus status,
       boolean collectTombstones,


@@ -148,7 +148,6 @@ public void setup() throws Exception {
       INVOCATION_COPY_FROM_LOCAL_FILE,
       OBJECT_COPY_REQUESTS,
       OBJECT_DELETE_REQUESTS,
-      OBJECT_DELETE_OBJECTS,
       OBJECT_LIST_REQUESTS,
       OBJECT_METADATA_REQUESTS,
       OBJECT_PUT_BYTES,


@@ -19,18 +19,15 @@
 package org.apache.hadoop.fs.s3a.performance;

-import java.io.FileNotFoundException;
 import java.util.Arrays;
 import java.util.Collection;

-import org.assertj.core.api.Assertions;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3a.S3AFileStatus;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
@@ -40,7 +37,6 @@
 import static org.apache.hadoop.fs.s3a.Statistic.*;
 import static org.apache.hadoop.fs.s3a.performance.OperationCost.*;
 import static org.apache.hadoop.fs.s3a.performance.OperationCostValidator.probe;
-import static org.apache.hadoop.test.LambdaTestUtils.intercept;

 /**
  * Use metrics to assert about the cost of file API calls.
@@ -149,7 +145,7 @@ public void testDeleteFileInDir() throws Throwable {
     boolean rawAndDeleting = isRaw() && isDeleting();
     verifyMetrics(() -> {
       fs.delete(file1, false);
-      return "after fs.delete(file1) " + getMetricSummary();
+      return "after fs.delete(file1simpleFile) " + getMetricSummary();
     },
         // delete file. For keeping: that's it
         probe(rawAndKeeping, OBJECT_METADATA_REQUESTS,
@@ -177,11 +173,7 @@ public void testDeleteFileInDir() throws Throwable {
   public void testDirMarkersSubdir() throws Throwable {
     describe("verify cost of deep subdir creation");

-    Path parent = methodPath();
-    Path subDir = new Path(parent, "1/2/3/4/5/6");
-    S3AFileSystem fs = getFileSystem();
-    int dirsCreated = 2;
-    fs.mkdirs(parent);
+    Path subDir = new Path(methodPath(), "1/2/3/4/5/6");
     // one dir created, possibly a parent removed
     verifyMetrics(() -> {
       mkdirs(subDir);
@@ -195,43 +187,6 @@ public void testDirMarkersSubdir() throws Throwable {
         // delete all possible fake dirs above the subdirectory
         withWhenDeleting(FAKE_DIRECTORIES_DELETED,
             directoriesInPath(subDir) - 1));
-
-    int dirDeleteRequests = 1;
-    int fileDeleteRequests = 0;
-    int totalDeleteRequests = dirDeleteRequests + fileDeleteRequests;
-
-    // now delete the deep tree.
-    verifyMetrics(() ->
-        fs.delete(parent, true),
-
-        // keeping: the parent dir marker needs deletion alongside
-        // the subdir one.
-        with(OBJECT_DELETE_REQUESTS, totalDeleteRequests),
-        withWhenKeeping(OBJECT_DELETE_OBJECTS, dirsCreated),
-
-        // deleting: only the marker at the bottom needs deleting
-        withWhenDeleting(OBJECT_DELETE_OBJECTS, 1));
-
-    // followup with list calls to make sure all is clear.
-    verifyNoListing(parent);
-    verifyNoListing(subDir);
-
-    // now reinstate the directory, which in HADOOP-17244 hitting problems
-    fs.mkdirs(parent);
-    FileStatus[] children = fs.listStatus(parent);
-    Assertions.assertThat(children)
-        .describedAs("Children of %s", parent)
-        .isEmpty();
-  }
-
-  /**
-   * List a path, verify that there are no direct child entries.
-   * @param path path to scan
-   */
-  protected void verifyNoListing(final Path path) throws Exception {
-    intercept(FileNotFoundException.class, () -> {
-      FileStatus[] statuses = getFileSystem().listStatus(path);
-      return Arrays.deepToString(statuses);
-    });
   }

   @Test