From de6b7bc67ace7744adb0320ee7de79cf28259d2d Mon Sep 17 00:00:00 2001
From: Sean Mackrory
Date: Mon, 8 Jul 2019 11:27:07 -0600
Subject: [PATCH] HADOOP-16409. Allow authoritative mode on non-qualified
 paths. Contributed by Sean Mackrory

---
 .../src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java  | 2 --
 .../apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java    | 1 +
 .../main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java    | 3 +--
 .../java/org/apache/hadoop/fs/s3a/ITestAuthoritativePath.java  | 4 ++++
 4 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 9b3a4b1286..3ae4f1fe11 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2421,7 +2421,6 @@ public FileStatus[] innerListStatus(Path f) throws FileNotFoundException,
         result.add(files.next());
       }
       // merge the results. This will update the store as needed
-
       return S3Guard.dirListingUnion(metadataStore, path, result, dirMeta,
           allowAuthoritative, ttlTimeProvider);
     } else {
@@ -3810,7 +3809,6 @@ private RemoteIterator<S3ALocatedFileStatus> innerListFiles(Path f, boolean
       final PathMetadata pm = metadataStore.get(path, true);
       // shouldn't need to check pm.isDeleted() because that will have
       // been caught by getFileStatus above.
-
       MetadataStoreListFilesIterator metadataStoreListFilesIterator =
           new MetadataStoreListFilesIterator(metadataStore, pm,
               allowAuthoritative);
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 3329b54842..9f0631309f 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -1321,6 +1321,7 @@ public void put(
       final DirListingMetadata meta,
       @Nullable final BulkOperationState operationState) throws IOException {
     LOG.debug("Saving to table {} in region {}: {}", tableName, region, meta);
+    // directory path
     Path path = meta.getPath();
     DDBPathMetadata ddbPathMeta =
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
index 97070f8d33..85e4d1264a 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
@@ -787,13 +787,12 @@ public static Collection<String> getAuthoritativePaths(S3AFileSystem fs) {
   public static boolean allowAuthoritative(Path p, S3AFileSystem fs,
       boolean authMetadataStore, Collection<String> authPaths) {
-    String haystack = fs.maybeAddTrailingSlash(p.toString());
+    String haystack = fs.maybeAddTrailingSlash(fs.qualify(p).toString());
     if (authMetadataStore) {
       return true;
     }
     if (!authPaths.isEmpty()) {
       for (String needle : authPaths) {
-
         if (haystack.startsWith(needle)) {
           return true;
         }
       }
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAuthoritativePath.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAuthoritativePath.java
index b7c582308f..c35a5855d5 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAuthoritativePath.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestAuthoritativePath.java
@@ -294,6 +294,10 @@ public void testPrefixVsDirectory() throws Exception {
       Path directoryMatch = new Path(testRoot, "/auth/oritative");
       assertTrue(S3Guard.allowAuthoritative(directoryMatch, fs,
           false, authPaths));
+
+      Path unqualifiedMatch = new Path(testRoot.toUri().getPath(), "/auth/oritative");
+      assertTrue(S3Guard.allowAuthoritative(unqualifiedMatch, fs,
+          false, authPaths));
     } finally {
       cleanUpFS(fs);
     }
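
Note: the core of the change is the one-line fix in S3Guard.allowAuthoritative().
The configured authoritative paths are compared as qualified URIs, so an
unqualified input such as "/auth/oritative" never matched a needle like
"s3a://bucket/auth/" until the input was qualified first. The standalone Java
sketch below illustrates the effect; the bucket URI and both helper methods are
assumed stand-ins for S3AFileSystem.qualify() and
S3AFileSystem.maybeAddTrailingSlash(), not the real Hadoop implementation:

  import java.util.Arrays;
  import java.util.Collection;

  public class AuthPathSketch {
    // Assumed bucket URI, for illustration only.
    static final String FS_URI = "s3a://test-bucket";

    // Stand-in for S3AFileSystem.qualify(): resolve a bucket-relative
    // path against the filesystem's scheme and authority.
    static String qualify(String p) {
      return p.startsWith("s3a://") ? p : FS_URI + p;
    }

    // Stand-in for S3AFileSystem.maybeAddTrailingSlash().
    static String maybeAddTrailingSlash(String p) {
      return p.endsWith("/") ? p : p + "/";
    }

    static boolean allowAuthoritative(String p, Collection<String> authPaths) {
      // The fix: qualify first, so qualified and unqualified inputs
      // normalize to the same haystack before the startsWith() check.
      String haystack = maybeAddTrailingSlash(qualify(p));
      for (String needle : authPaths) {
        if (haystack.startsWith(needle)) {
          return true;
        }
      }
      return false;
    }

    public static void main(String[] args) {
      Collection<String> authPaths = Arrays.asList("s3a://test-bucket/auth/");
      // Both print true. Without qualify(), the second haystack would be
      // "/auth/oritative/", which does not start with
      // "s3a://test-bucket/auth/", so the check would wrongly return false.
      System.out.println(
          allowAuthoritative("s3a://test-bucket/auth/oritative", authPaths));
      System.out.println(allowAuthoritative("/auth/oritative", authPaths));
    }
  }

This mirrors the new test case: new Path(testRoot.toUri().getPath(), ...)
strips the scheme and authority from testRoot, producing exactly the
unqualified form that the prefix match previously rejected.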