HADOOP-9761. ViewFileSystem#rename fails when using DistributedFileSystem (Andrew Wang via Colin Patrick McCabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1509874 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Colin McCabe 2013-08-02 21:42:38 +00:00
parent c04a7d974a
commit f216f22915
4 changed files with 22 additions and 23 deletions

View File

@@ -312,6 +312,9 @@ Release 2.3.0 - UNRELEASED
     HADOOP-9582. Non-existent file to "hadoop fs -conf" doesn't throw error
     (Ashwin Shankar via jlowe)
 
+    HADOOP-9761. ViewFileSystem#rename fails when using DistributedFileSystem.
+    (Andrew Wang via Colin Patrick McCabe)
+
 Release 2.1.1-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES

View File

@@ -73,7 +73,9 @@ public T resolve(final FileSystem filesys, final Path path)
     int count = 0;
     T in = null;
     Path p = path;
-    FileSystem fs = FileSystem.getFSofPath(p, filesys.getConf());
+    // Assumes path belongs to this FileSystem.
+    // Callers validate this by passing paths through FileSystem#checkPath
+    FileSystem fs = filesys;
     for (boolean isLink = true; isLink;) {
       try {
         in = doCall(p);

View File

@@ -170,12 +170,11 @@ public Path getHomeDirectory() {
   }
 
   /**
-   * Checks that the passed URI belongs to this filesystem, resolves the path
-   * component against the current working directory if relative, and finally
-   * returns the absolute path component.
+   * Checks that the passed URI belongs to this filesystem and returns
+   * just the path component. Expects a URI with an absolute path.
    *
-   * @param file URI to check and resolve
-   * @return resolved absolute path component of {file}
+   * @param file URI with absolute path
+   * @return path component of {file}
    * @throws IllegalArgumentException if URI does not belong to this DFS
    */
   private String getPathName(Path file) {
@@ -514,15 +513,10 @@ public void concat(Path trg, Path [] psrcs) throws IOException {
   @Override
   public boolean rename(Path src, Path dst) throws IOException {
     statistics.incrementWriteOps(1);
-    // Both Paths have to belong to this DFS
     final Path absSrc = fixRelativePart(src);
     final Path absDst = fixRelativePart(dst);
-    FileSystem srcFS = getFSofPath(absSrc, getConf());
-    FileSystem dstFS = getFSofPath(absDst, getConf());
-    if (!srcFS.getUri().equals(getUri()) ||
-        !dstFS.getUri().equals(getUri())) {
-      throw new IOException("Renames across FileSystems not supported");
-    }
+
     // Try the rename without resolving first
     try {
       return dfs.rename(getPathName(absSrc), getPathName(absDst));
@@ -539,7 +533,8 @@ public Boolean doCall(final Path p)
       @Override
       public Boolean next(final FileSystem fs, final Path p)
           throws IOException {
-        return fs.rename(source, p);
+        // Should just throw an error in FileSystem#checkPath
+        return doCall(p);
       }
     }.resolve(this, absDst);
   }
@@ -553,15 +548,8 @@ public Boolean next(final FileSystem fs, final Path p)
   public void rename(Path src, Path dst, final Options.Rename... options)
       throws IOException {
     statistics.incrementWriteOps(1);
-    // Both Paths have to belong to this DFS
     final Path absSrc = fixRelativePart(src);
     final Path absDst = fixRelativePart(dst);
-    FileSystem srcFS = getFSofPath(absSrc, getConf());
-    FileSystem dstFS = getFSofPath(absDst, getConf());
-    if (!srcFS.getUri().equals(getUri()) ||
-        !dstFS.getUri().equals(getUri())) {
-      throw new IOException("Renames across FileSystems not supported");
-    }
     // Try the rename without resolving first
     try {
       dfs.rename(getPathName(absSrc), getPathName(absDst), options);
@@ -579,7 +567,7 @@ public Void doCall(final Path p)
       @Override
       public Void next(final FileSystem fs, final Path p)
           throws IOException {
-        // Since we know it's this DFS for both, can just call doCall again
+        // Should just throw an error in FileSystem#checkPath
         return doCall(p);
       }
     }.resolve(this, absDst);

View File

@@ -24,8 +24,10 @@
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -69,6 +71,10 @@ public static void clusterSetupAtBegining() throws IOException,
     fHdfs = cluster.getFileSystem(0);
     fHdfs2 = cluster.getFileSystem(1);
 
+    fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        FsConstants.VIEWFS_URI.toString());
+    fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        FsConstants.VIEWFS_URI.toString());
     defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" +
         UserGroupInformation.getCurrentUser().getShortUserName()));