HADOOP-16878. FileUtil.copy() to throw IOException if the source and destination are the same

Contributed by Gabor Bota.
This commit is contained in:
Gabor Bota 2020-10-13 17:17:44 +02:00 committed by GitHub
parent 0507c4160f
commit 59f01a548e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 37 additions and 1 deletions

View File

@ -398,6 +398,12 @@ public static boolean copy(FileSystem srcFS, FileStatus srcStatus,
Configuration conf) throws IOException {
Path src = srcStatus.getPath();
dst = checkDest(src.getName(), dstFS, dst, overwrite);
if (srcFS.makeQualified(src).equals(dstFS.makeQualified(dst))) {
throw new PathOperationException("Source (" + src + ") and destination " +
"(" + dst + ") are equal in the copy command.");
}
if (srcStatus.isDirectory()) {
checkDependencies(srcFS, src, dstFS, dst);
if (!dstFS.mkdirs(dst)) {

View File

@ -34,6 +34,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Before;
import org.junit.BeforeClass;
@ -175,7 +176,20 @@ public void testCopyDirFromWindowsLocalPath() throws Exception {
checkPut(dirPath, targetDir, true);
}
@Test
public void testCopyBetweenFsEqualPath() throws Exception {
  // Regression test for HADOOP-16878: copying a file onto itself must be
  // rejected up front instead of silently truncating/corrupting the source.
  final Path root = new Path(testRootDir, "testPutFile");
  lfs.delete(root, true);
  lfs.mkdirs(root);

  final Path source = new Path(root, "sameSourceTarget");
  lfs.create(source).close();
  final FileStatus sourceStatus = lfs.getFileStatus(source);

  // Source and destination resolve to the same qualified path, so
  // FileUtil.copy() is expected to throw PathOperationException.
  LambdaTestUtils.intercept(PathOperationException.class,
      () -> FileUtil.copy(lfs, sourceStatus, lfs, source, false, true, conf));
}
private void checkPut(Path srcPath, Path targetDir, boolean useWindowsPath)
    throws Exception {
  lfs.delete(targetDir, true);

View File

@ -68,6 +68,7 @@
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem.Statistics.StatisticsData;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
@ -78,6 +79,7 @@
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathOperationException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
import org.apache.hadoop.fs.StorageType;
@ -2497,4 +2499,18 @@ public void testDisallowSnapshotShouldThrowWhenTrashRootExists()
}
}
}
@Test
public void testCopyBetweenFsEqualPath() throws Exception {
  // HADOOP-16878 regression check on HDFS: a copy whose source and
  // destination are the same path must fail with PathOperationException.
  final Configuration conf = getTestConfiguration();
  try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();

    final Path file = new Path("/dir/file");
    fs.create(file).close();
    final FileStatus status = fs.getFileStatus(file);

    LambdaTestUtils.intercept(PathOperationException.class,
        () -> FileUtil.copy(fs, status, fs, file, false, true, conf));
  }
}
}