HDFS-14739. RBF: LS command for mount point shows wrong owner and permission information. Contributed by Jinglun.

Ayush Saxena 2019-10-16 19:26:01 +05:30
parent 090f73a9aa
commit 375224edeb
5 changed files with 125 additions and 28 deletions
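For context, before this change the router built each mount point entry in a directory listing from the child name alone, so listing the parent of a mount point could report the owner, group, and permissions of an unrelated mount table entry. The sketch below is illustrative only and is not part of the commit: the router address, class name, and mount paths (/testA/testB, /testB) are assumptions modeled on the tests in this patch.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListMountPointOwners {
  public static void main(String[] args) throws Exception {
    // Hypothetical router address; substitute the real DFSRouter RPC endpoint.
    FileSystem routerFs = FileSystem.get(
        URI.create("hdfs://router-host:8888"), new Configuration());
    // List a directory that contains a mount point child. With this fix the
    // child "testB" reports the owner/group/mode of the /testA/testB mount
    // entry rather than those of an unrelated /testB entry.
    for (FileStatus status : routerFs.listStatus(new Path("/testA"))) {
      System.out.println(status.getPath().getName() + " " + status.getOwner()
          + " " + status.getGroup() + " " + status.getPermission());
    }
  }
}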

MountTableResolver.java

@@ -423,8 +423,8 @@ public PathLocation lookupLocation(final String str) throws IOException {
     } else {
       // Not found, use default location
       if (!defaultNSEnable) {
-        throw new IOException("Cannot find locations for " + path + ", " +
-            "because the default nameservice is disabled to read or write");
+        throw new RouterResolveException("Cannot find locations for " + path
+            + ", because the default nameservice is disabled to read or write");
       }
       RemoteLocation remoteLocation =
           new RemoteLocation(defaultNameService, path, path);

RouterResolveException.java

@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.resolver;
+
+import java.io.IOException;
+
+/**
+ * Thrown by FileSubclusterResolver when a path can't be resolved.
+ */
+public class RouterResolveException extends IOException {
+
+  /** For java.io.Serializable. */
+  private static final long serialVersionUID = 1L;
+
+  public RouterResolveException(String msg) {
+    super(msg);
+  }
+}

RouterClientProtocol.java

@@ -78,6 +78,7 @@
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.RouterResolveException;
 import org.apache.hadoop.hdfs.server.federation.router.security.RouterSecurityManager;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -745,16 +746,8 @@ public DirectoryListing getListing(String src, byte[] startAfter,
       boolean needLocation) throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.READ);
 
-    // Locate the dir and fetch the listing
-    final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, false, false);
-    RemoteMethod method = new RemoteMethod("getListing",
-        new Class<?>[] {String.class, startAfter.getClass(), boolean.class},
-        new RemoteParam(), startAfter, needLocation);
-    final List<RemoteResult<RemoteLocation, DirectoryListing>> listings =
-        rpcClient.invokeConcurrent(
-            locations, method, false, -1, DirectoryListing.class);
+    List<RemoteResult<RemoteLocation, DirectoryListing>> listings =
+        getListingInt(src, startAfter, needLocation);
     Map<String, HdfsFileStatus> nnListing = new TreeMap<>();
     int totalRemainingEntries = 0;
     int remainingEntries = 0;
@@ -818,7 +811,9 @@ public DirectoryListing getListing(String src, byte[] startAfter,
         if (dates != null && dates.containsKey(child)) {
           date = dates.get(child);
         }
-        HdfsFileStatus dirStatus = getMountPointStatus(child, 0, date);
+        Path childPath = new Path(src, child);
+        HdfsFileStatus dirStatus =
+            getMountPointStatus(childPath.toString(), 0, date);
         // This may overwrite existing listing entries with the mount point
         // TODO don't add if already there?
@@ -2042,6 +2037,31 @@ private long getModifiedTime(Map<String, Long> ret, String path,
     return modTime;
   }
 
+  /**
+   * Get listing on remote locations.
+   */
+  private List<RemoteResult<RemoteLocation, DirectoryListing>> getListingInt(
+      String src, byte[] startAfter, boolean needLocation) throws IOException {
+    try {
+      List<RemoteLocation> locations =
+          rpcServer.getLocationsForPath(src, false, false);
+      // Locate the dir and fetch the listing.
+      if (locations.isEmpty()) {
+        return new ArrayList<>();
+      }
+      RemoteMethod method = new RemoteMethod("getListing",
+          new Class<?>[] {String.class, startAfter.getClass(), boolean.class},
+          new RemoteParam(), startAfter, needLocation);
+      List<RemoteResult<RemoteLocation, DirectoryListing>> listings = rpcClient
+          .invokeConcurrent(locations, method, false, -1,
+              DirectoryListing.class);
+      return listings;
+    } catch (RouterResolveException e) {
+      LOG.debug("Cannot get locations for {}, {}.", src, e.getMessage());
+      return new ArrayList<>();
+    }
+  }
+
   /**
    * Checks if the path is a directory and is supposed to be present in all
    * subclusters.

TestDisableNameservices.java

@@ -21,7 +21,6 @@
 import static org.apache.hadoop.util.Time.monotonicNow;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -50,7 +49,6 @@
 import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -128,6 +126,7 @@ private static void setupNamespace() throws IOException {
     // Add a folder to each namespace
     NamenodeContext nn0 = cluster.getNamenode("ns0", null);
     nn0.getFileSystem().mkdirs(new Path("/dirns0/0"));
+    nn0.getFileSystem().mkdirs(new Path("/dir-ns"));
     NamenodeContext nn1 = cluster.getNamenode("ns1", null);
     nn1.getFileSystem().mkdirs(new Path("/dirns1/1"));
   }
@@ -167,9 +166,10 @@ public void testWithoutDisabling() throws IOException {
     // Return the results from all subclusters even if slow
     FileSystem routerFs = routerContext.getFileSystem();
     FileStatus[] filesStatus = routerFs.listStatus(new Path("/"));
-    assertEquals(2, filesStatus.length);
-    assertEquals("dirns0", filesStatus[0].getPath().getName());
-    assertEquals("dirns1", filesStatus[1].getPath().getName());
+    assertEquals(3, filesStatus.length);
+    assertEquals("dir-ns", filesStatus[0].getPath().getName());
+    assertEquals("dirns0", filesStatus[1].getPath().getName());
+    assertEquals("dirns1", filesStatus[2].getPath().getName());
   }
 
   @Test
@@ -184,14 +184,11 @@ public void testDisabling() throws Exception {
         t < TimeUnit.SECONDS.toMillis(1));
 
     // We should not report anything from ns0
     FileSystem routerFs = routerContext.getFileSystem();
-    FileStatus[] filesStatus = null;
-    try {
-      routerFs.listStatus(new Path("/"));
-      fail("The listStatus call should fail.");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains(
-          "No remote locations available", ioe);
-    }
+    FileStatus[] filesStatus = routerFs.listStatus(new Path("/"));
+    assertEquals(2, filesStatus.length);
+    assertEquals("dirns0", filesStatus[0].getPath().getName());
+    assertEquals("dirns1", filesStatus[1].getPath().getName());
 
     filesStatus = routerFs.listStatus(new Path("/dirns1"));
     assertEquals(1, filesStatus.length);

TestRouterMountTable.java

@@ -269,11 +269,11 @@ public void testGetMountPointStatusWithIOException()
     assertTrue(addMountTable(addEntry));
     addEntry = MountTable.newInstance("/testA/testB",
         Collections.singletonMap("ns0", "/testA/testB"));
+    addEntry.setOwnerName("userB");
+    addEntry.setGroupName("groupB");
     assertTrue(addMountTable(addEntry));
     addEntry = MountTable.newInstance("/testB",
         Collections.singletonMap("ns0", "/test1/testB"));
-    addEntry.setOwnerName("userB");
-    addEntry.setGroupName("groupB");
     assertTrue(addMountTable(addEntry));
 
     assertTrue(nnFs0.mkdirs(new Path("/test1")));
@@ -311,6 +311,44 @@ private void getListing(String testPath)
     assertEquals("groupB", currentGroup);
   }
 
+  @Test
+  public void testListNonExistPath() throws Exception {
+    mountTable.setDefaultNSEnable(false);
+    LambdaTestUtils.intercept(FileNotFoundException.class,
+        "File /base does not exist.",
+        "Expect FileNotFoundException.",
+        () -> routerFs.listStatus(new Path("/base")));
+  }
+
+  @Test
+  public void testListWhenDisableDefaultMountTable() throws IOException {
+    mountTable.setDefaultNSEnable(false);
+    /**
+     * /base/dir1 -> ns0:/base/dir1
+     * /base/dir2 -> ns0:/base/dir2
+     */
+    assertTrue(addMountTable(createEntry("/base/dir1", "ns0", "/base/dir1",
+        "group2", "owner2", (short) 0750)));
+    assertTrue(addMountTable(createEntry("/base/dir2", "ns0", "/base/dir2",
+        "group3", "owner3", (short) 0755)));
+
+    FileStatus[] list = routerFs.listStatus(new Path("/base"));
+    assertEquals(2, list.length);
+    for (FileStatus status : list) {
+      if (status.getPath().toUri().getPath().equals("/base/dir1")) {
+        assertEquals("group2", status.getGroup());
+        assertEquals("owner2", status.getOwner());
+        assertEquals((short) 0750, status.getPermission().toShort());
+      } else if (status.getPath().toUri().getPath().equals("/base/dir2")) {
+        assertEquals("group3", status.getGroup());
+        assertEquals("owner3", status.getOwner());
+        assertEquals((short) 0755, status.getPermission().toShort());
+      } else {
+        fail("list result should be either /base/dir1 or /base/dir2.");
+      }
+    }
+  }
+
   /**
    * Verify permission for a mount point when the actual destination is not
    * present. It returns the permissions of the mount point.
@@ -330,6 +368,16 @@ public void testMountTablePermissionsNoDest() throws IOException {
     assertEquals((short) 0775, list[0].getPermission().toShort());
   }
 
+  private MountTable createEntry(String mountPath, String ns, String remotePath,
+      String group, String owner, short permission) throws IOException {
+    MountTable entry = MountTable
+        .newInstance(mountPath, Collections.singletonMap(ns, remotePath));
+    entry.setGroupName(group);
+    entry.setOwnerName(owner);
+    entry.setMode(FsPermission.createImmutable(permission));
+    return entry;
+  }
+
   /**
    * Verify permission for a mount point when the actual destination present. It
    * returns the permissions of the actual destination pointed by the mount