HDFS-13255. RBF: Fail when try to remove mount point paths. Contributed by Akira Ajisaka.

Ayush Saxena 2019-05-29 01:15:18 +05:30 committed by Brahma Reddy Battula
parent 8e4267650f
commit ffbb6b6557
10 changed files with 192 additions and 47 deletions

View File

@@ -140,7 +140,7 @@ public ErasureCodingPolicy getErasureCodingPolicy(String src)
     rpcServer.checkOperation(OperationCategory.READ);
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod remoteMethod = new RemoteMethod("getErasureCodingPolicy",
         new Class<?>[] {String.class}, new RemoteParam());
     ErasureCodingPolicy ret = rpcClient.invokeSequential(
@@ -153,7 +153,7 @@ public void setErasureCodingPolicy(String src, String ecPolicyName)
     rpcServer.checkOperation(OperationCategory.WRITE);
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod remoteMethod = new RemoteMethod("setErasureCodingPolicy",
         new Class<?>[] {String.class, String.class},
         new RemoteParam(), ecPolicyName);
@@ -168,7 +168,7 @@ public void unsetErasureCodingPolicy(String src) throws IOException {
     rpcServer.checkOperation(OperationCategory.WRITE);
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod remoteMethod = new RemoteMethod("unsetErasureCodingPolicy",
         new Class<?>[] {String.class}, new RemoteParam());
     if (rpcServer.isInvokeConcurrent(src)) {

View File

@@ -213,13 +213,14 @@ private List<RemoteLocation> getQuotaRemoteLocations(String path)
     if (manager != null) {
       Set<String> childrenPaths = manager.getPaths(path);
       for (String childPath : childrenPaths) {
-        locations.addAll(rpcServer.getLocationsForPath(childPath, true, false));
+        locations.addAll(
+            rpcServer.getLocationsForPath(childPath, false, false));
       }
     }
     if (locations.size() >= 1) {
       return locations;
     } else {
-      locations.addAll(rpcServer.getLocationsForPath(path, true, false));
+      locations.addAll(rpcServer.getLocationsForPath(path, false, false));
       return locations;
     }
   }

View File

@@ -396,7 +396,7 @@ public void setPermission(String src, FsPermission permissions)
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setPermission",
         new Class<?>[] {String.class, FsPermission.class},
         new RemoteParam(), permissions);
@@ -413,7 +413,7 @@ public void setOwner(String src, String username, String groupname)
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setOwner",
         new Class<?>[] {String.class, String.class, String.class},
         new RemoteParam(), username, groupname);
@@ -672,7 +672,7 @@ public boolean mkdirs(String src, FsPermission masked, boolean createParent)
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true);
+        rpcServer.getLocationsForPath(src, false);
     RemoteMethod method = new RemoteMethod("mkdirs",
         new Class<?>[] {String.class, FsPermission.class, boolean.class},
         new RemoteParam(), masked, createParent);
@@ -725,7 +725,7 @@ public DirectoryListing getListing(String src, byte[] startAfter,
     // Locate the dir and fetch the listing
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("getListing",
         new Class<?>[] {String.class, startAfter.getClass(), boolean.class},
         new RemoteParam(), startAfter, needLocation);
@@ -1182,7 +1182,7 @@ public void setTimes(String src, long mtime, long atime) throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setTimes",
         new Class<?>[] {String.class, long.class, long.class},
         new RemoteParam(), mtime, atime);
@@ -1212,7 +1212,7 @@ public String getLinkTarget(String path) throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.READ);
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(path, true, false);
+        rpcServer.getLocationsForPath(path, false, false);
     RemoteMethod method = new RemoteMethod("getLinkTarget",
         new Class<?>[] {String.class}, new RemoteParam());
     return rpcClient.invokeSequential(locations, method, String.class, null);
@@ -1310,7 +1310,7 @@ public void modifyAclEntries(String src, List<AclEntry> aclSpec)
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("modifyAclEntries",
         new Class<?>[] {String.class, List.class},
         new RemoteParam(), aclSpec);
@@ -1328,7 +1328,7 @@ public void removeAclEntries(String src, List<AclEntry> aclSpec)
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("removeAclEntries",
         new Class<?>[] {String.class, List.class},
         new RemoteParam(), aclSpec);
@@ -1345,7 +1345,7 @@ public void removeDefaultAcl(String src) throws IOException {
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("removeDefaultAcl",
         new Class<?>[] {String.class}, new RemoteParam());
     if (rpcServer.isInvokeConcurrent(src)) {
@@ -1361,7 +1361,7 @@ public void removeAcl(String src) throws IOException {
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("removeAcl",
         new Class<?>[] {String.class}, new RemoteParam());
     if (rpcServer.isInvokeConcurrent(src)) {
@@ -1377,7 +1377,7 @@ public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod(
         "setAcl", new Class<?>[] {String.class, List.class},
         new RemoteParam(), aclSpec);
@@ -1407,7 +1407,7 @@ public void createEncryptionZone(String src, String keyName)
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("createEncryptionZone",
         new Class<?>[] {String.class, String.class},
         new RemoteParam(), keyName);
@@ -1454,7 +1454,7 @@ public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setXAttr",
         new Class<?>[] {String.class, XAttr.class, EnumSet.class},
         new RemoteParam(), xAttr, flag);
@@ -1500,7 +1500,7 @@ public void removeXAttr(String src, XAttr xAttr) throws IOException {
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("removeXAttr",
         new Class<?>[] {String.class, XAttr.class}, new RemoteParam(), xAttr);
     if (rpcServer.isInvokeConcurrent(src)) {
@@ -1516,7 +1516,7 @@ public void checkAccess(String path, FsAction mode) throws IOException {
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(path, true, false);
+        rpcServer.getLocationsForPath(path, false, false);
     RemoteMethod method = new RemoteMethod("checkAccess",
         new Class<?>[] {String.class, FsAction.class},
         new RemoteParam(), mode);
@@ -1736,7 +1736,7 @@ private RemoteParam getRenameDestinations(
       throws IOException {
     final List<RemoteLocation> dstLocations =
-        rpcServer.getLocationsForPath(dst, true, false);
+        rpcServer.getLocationsForPath(dst, false, false);
     final Map<RemoteLocation, String> dstMap = new HashMap<>();
     Iterator<RemoteLocation> iterator = srcLocations.iterator();

View File

@@ -1450,7 +1450,8 @@ protected List<RemoteLocation> getLocationsForPath(String path,
    * Get the possible locations of a path in the federated cluster.
    *
    * @param path Path to check.
-   * @param failIfLocked Fail the request if locked (top mount point).
+   * @param failIfLocked Fail the request if there is any mount point under
+   *                     the path.
    * @param needQuotaVerify If need to do the quota verification.
    * @return Prioritized list of locations in the federated cluster.
    * @throws IOException If the location for this path cannot be determined.
@@ -1458,6 +1459,27 @@ protected List<RemoteLocation> getLocationsForPath(String path,
   protected List<RemoteLocation> getLocationsForPath(String path,
       boolean failIfLocked, boolean needQuotaVerify) throws IOException {
     try {
+      if (failIfLocked) {
+        // check if there is any mount point under the path
+        final List<String> mountPoints =
+            this.subclusterResolver.getMountPoints(path);
+        if (mountPoints != null) {
+          StringBuilder sb = new StringBuilder();
+          sb.append("The operation is not allowed because ");
+          if (mountPoints.isEmpty()) {
+            sb.append("the path: ")
+                .append(path)
+                .append(" is a mount point");
+          } else {
+            sb.append("there are mount points: ")
+                .append(String.join(",", mountPoints))
+                .append(" under the path: ")
+                .append(path);
+          }
+          throw new AccessControlException(sb.toString());
+        }
+      }
       // Check the location for this path
       final PathLocation location =
           this.subclusterResolver.getDestinationForPath(path);
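In caller-visible terms: the call sites above that now pass failIfLocked=false only need the resolved locations, while delete and rename keep failIfLocked=true and are therefore rejected whenever the path is a mount point or has mount points beneath it. A minimal client-side sketch of that behavior follows; the router URI and the /data/sub mount entry are illustrative assumptions, not part of the patch.

// Hypothetical illustration only: assumes a router RPC endpoint at
// hdfs://router:8888 and a mount table entry at /data/sub.
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class MountPointDeleteExample {
  public static void main(String[] args) throws Exception {
    FileSystem routerFs =
        FileSystem.get(new URI("hdfs://router:8888"), new Configuration());

    try {
      // /data/sub is a mount point, so the router refuses to remove it.
      routerFs.delete(new Path("/data/sub"), true);
    } catch (AccessControlException e) {
      // "The operation is not allowed because the path: /data/sub is a mount point"
      System.out.println(e.getMessage());
    }

    try {
      // /data has a mount point underneath it, so recursive delete is refused too.
      routerFs.delete(new Path("/data"), true);
    } catch (AccessControlException e) {
      // "The operation is not allowed because there are mount points: sub under the path: /data"
      System.out.println(e.getMessage());
    }
  }
}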

View File

@@ -46,7 +46,7 @@ public void setStoragePolicy(String src, String policyName)
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
     List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setStoragePolicy",
         new Class<?>[] {String.class, String.class},
         new RemoteParam(),
@@ -69,7 +69,7 @@ public void unsetStoragePolicy(String src) throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true);
     List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("unsetStoragePolicy",
         new Class<?>[] {String.class},
         new RemoteParam());

View File

@@ -61,4 +61,14 @@ public void testRmEmptyRootDirNonRecursive() throws IOException {
   public void testRecursiveRootListing() throws IOException {
     // It doesn't apply because we still have the mount points here
   }
+
+  @Override
+  public void testRmRootRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
+
+  @Override
+  public void testRmEmptyRootDirRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
 }

View File

@@ -60,4 +60,14 @@ public void testRmEmptyRootDirNonRecursive() throws IOException {
   public void testRecursiveRootListing() throws IOException {
     // It doesn't apply because we still have the mount points here
   }
+
+  @Override
+  public void testRmRootRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
+
+  @Override
+  public void testRmEmptyRootDirRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
 }

View File

@@ -61,4 +61,14 @@ public void testRmEmptyRootDirNonRecursive() throws IOException {
   public void testRecursiveRootListing() throws IOException {
     // It doesn't apply because we still have the mount points here
   }
+
+  @Override
+  public void testRmRootRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
+
+  @Override
+  public void testRmEmptyRootDirRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
 }

View File

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.federation.router;

 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -51,6 +52,7 @@
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -474,4 +476,86 @@ public void testGetListingWithTrailingSlash() throws IOException {
       nnFs1.delete(new Path("/testlist/tmp1"), true);
     }
   }
+
+  /**
+   * Regression test for HDFS-13255.
+   * Verify that delete fails if the path is a mount point or
+   * there is any mount point under the path.
+   */
+  @Test
+  public void testDeleteMountPoint() throws Exception {
+    try {
+      MountTable addEntry = MountTable.newInstance("/testdelete/subdir",
+          Collections.singletonMap("ns0", "/testdelete/subdir"));
+      assertTrue(addMountTable(addEntry));
+      nnFs0.mkdirs(new Path("/testdelete/subdir"));
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because there are mount points: "
+              + "subdir under the path: /testdelete",
+          () -> routerFs.delete(new Path("/testdelete"), true));
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because there are mount points: "
+              + "subdir under the path: /testdelete",
+          () -> routerFs.delete(new Path("/testdelete"), false));
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because the path: "
+              + "/testdelete/subdir is a mount point",
+          () -> routerFs.delete(new Path("/testdelete/subdir"), true));
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because the path: "
+              + "/testdelete/subdir is a mount point",
+          () -> routerFs.delete(new Path("/testdelete/subdir"), false));
+    } finally {
+      nnFs0.delete(new Path("/testdelete"), true);
+    }
+  }
+
+  /**
+   * Regression test for HDFS-13255.
+   * Verify that rename fails if the src path is a mount point or
+   * there is any mount point under the path.
+   */
+  @Test
+  public void testRenameMountPoint() throws Exception {
+    try {
+      MountTable addEntry = MountTable.newInstance("/testrename1/sub",
+          Collections.singletonMap("ns0", "/testrename1/sub"));
+      assertTrue(addMountTable(addEntry));
+      addEntry = MountTable.newInstance("/testrename2/sub",
+          Collections.singletonMap("ns0", "/testrename2/sub"));
+      assertTrue(addMountTable(addEntry));
+      nnFs0.mkdirs(new Path("/testrename1/sub/sub"));
+      nnFs0.mkdirs(new Path("/testrename2"));
+
+      // Success: rename a directory to a mount point
+      assertTrue(nnFs0.exists(new Path("/testrename1/sub/sub")));
+      assertFalse(nnFs0.exists(new Path("/testrename2/sub")));
+      assertTrue(routerFs.rename(new Path("/testrename1/sub/sub"),
+          new Path("/testrename2")));
+      assertFalse(nnFs0.exists(new Path("/testrename1/sub/sub")));
+      assertTrue(nnFs0.exists(new Path("/testrename2/sub")));
+
+      // Fail: the target already exists
+      nnFs0.mkdirs(new Path("/testrename1/sub/sub"));
+      assertFalse(routerFs.rename(new Path("/testrename1/sub/sub"),
+          new Path("/testrename2")));
+
+      // Fail: the src is a mount point
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because the path: "
+              + "/testrename1/sub is a mount point",
+          () -> routerFs.rename(new Path("/testrename1/sub"),
+              new Path("/testrename2/sub")));
+
+      // Fail: there is a mount point under the src
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because there are mount points: "
+              + "sub under the path: /testrename1",
+          () -> routerFs.rename(new Path("/testrename1"),
+              new Path("/testrename2/sub")));
+    } finally {
+      nnFs0.delete(new Path("/testrename1"), true);
+      nnFs0.delete(new Path("/testrename2"), true);
+    }
+  }
 }

View File

@@ -19,12 +19,14 @@
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
@@ -141,29 +143,35 @@ public void testNamespaceQuotaExceed() throws Exception {
     addMountTable(mountTable2);

     final FileSystem routerFs = routerContext.getFileSystem();
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        boolean isNsQuotaViolated = false;
-        try {
-          // create new directory to trigger NSQuotaExceededException
-          routerFs.mkdirs(new Path("/nsquota/" + UUID.randomUUID()));
-          routerFs.mkdirs(new Path("/nsquota/subdir/" + UUID.randomUUID()));
-        } catch (NSQuotaExceededException e) {
-          isNsQuotaViolated = true;
-        } catch (IOException ignored) {
-        }
-        return isNsQuotaViolated;
+    final List<Path> created = new ArrayList<>();
+    GenericTestUtils.waitFor(() -> {
+      boolean isNsQuotaViolated = false;
+      try {
+        // create new directory to trigger NSQuotaExceededException
+        Path p = new Path("/nsquota/" + UUID.randomUUID());
+        routerFs.mkdirs(p);
+        created.add(p);
+        p = new Path("/nsquota/subdir/" + UUID.randomUUID());
+        routerFs.mkdirs(p);
+        created.add(p);
+      } catch (NSQuotaExceededException e) {
+        isNsQuotaViolated = true;
+      } catch (IOException ignored) {
+      }
+      return isNsQuotaViolated;
     }, 5000, 60000);

     // mkdir in real FileSystem should be okay
     nnFs1.mkdirs(new Path("/testdir1/" + UUID.randomUUID()));
     nnFs2.mkdirs(new Path("/testdir2/" + UUID.randomUUID()));

-    // delete/rename call should be still okay
-    routerFs.delete(new Path("/nsquota"), true);
-    routerFs.rename(new Path("/nsquota/subdir"), new Path("/nsquota/subdir"));
+    // rename/delete call should be still okay
+    assertFalse(created.isEmpty());
+    for (Path src : created) {
+      final Path dst = new Path(src.toString() + "-renamed");
+      routerFs.rename(src, dst);
+      routerFs.delete(dst, true);
+    }
   }

   @Test
@@ -376,7 +384,7 @@ public void testStaleQuotaRemoving() throws Exception {
   /**
    * Remove a mount table entry to the mount table through the admin API.
-   * @param entry Mount table entry to remove.
+   * @param path Mount table entry to remove.
    * @return If it was successfully removed.
    * @throws IOException Problems removing entries.
    */
@@ -677,8 +685,8 @@ public void testQuotaRefreshWhenDestinationNotPresent() throws Exception {
     assertEquals(BLOCK_SIZE, mountQuota2.getSpaceConsumed());

     FileSystem routerFs = routerContext.getFileSystem();
-    // Remove destination directory for the mount entry
-    routerFs.delete(new Path("/setdir1"), true);
+    // Remove file in setdir1. The target directory still exists.
+    routerFs.delete(new Path("/setdir1/file1"), true);
     // Create file
     routerClient.create("/setdir2/file3", true).close();
@@ -699,9 +707,9 @@ public void testQuotaRefreshWhenDestinationNotPresent() throws Exception {
     updatedMountTable = getMountTable("/setdir2");
     mountQuota2 = updatedMountTable.getQuota();
-    // If destination is not present the quota usage should be reset to 0
-    assertEquals(0, cacheQuota1.getFileAndDirectoryCount());
-    assertEquals(0, mountQuota1.getFileAndDirectoryCount());
+    // The quota usage should be reset.
+    assertEquals(1, cacheQuota1.getFileAndDirectoryCount());
+    assertEquals(1, mountQuota1.getFileAndDirectoryCount());
     assertEquals(0, cacheQuota1.getSpaceConsumed());
     assertEquals(0, mountQuota1.getSpaceConsumed());