HDFS-15540. Directories protected from delete can still be moved to the trash. Contributed by Stephen O'Donnell.
Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
(cherry picked from commit 2ffe00fc46)
commit a9ce6001ea
parent 9eaa3520e6
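
For context, here is a minimal, hypothetical sketch (not part of the patch) of the behaviour the change enforces: once a non-empty directory is listed under fs.protected.directories, moving it to the trash is expected to fail just as a direct delete already did. The class name, the /datalake/important path, and the single-process MiniDFSCluster setup below are illustrative assumptions; the NameNode's AccessControlException is surfaced through Trash as an IOException, mirroring the test helper added further down.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

import java.io.IOException;

public class ProtectedDirTrashSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // fs.protected.directories is read by the NameNode; on a real cluster it
    // belongs in the NameNode's hdfs-site.xml. The path below is made up.
    conf.set(CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES, "/datalake/important");
    conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600); // enable trash

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/datalake/important/child")); // non-empty protected dir

      try {
        // Before this change the rename into the trash could succeed even though
        // a direct delete was already blocked; it is now expected to fail.
        Trash.moveToAppropriateTrash(fs, new Path("/datalake/important"), conf);
        System.out.println("Unexpected: protected directory was moved to trash");
      } catch (IOException expected) {
        System.out.println("Expected rejection: " + expected.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}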
@@ -262,6 +262,11 @@ static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
       throws IOException {
     final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
     final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.CREATE_LINK);
+
+    if(fsd.isNonEmptyDirectory(srcIIP)) {
+      DFSUtil.checkProtectedDescendants(fsd, srcIIP);
+    }
+
     if (fsd.isPermissionEnabled()) {
       boolean renameToTrash = false;
       if (null != options &&
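
The new call only runs when the rename source is a non-empty directory, and it is a no-op unless protected directories are configured. A hedged sketch of that configuration follows, using the key constants the test changes below rely on; the helper class, the directory names, and the assumption that DFS_PROTECTED_SUBDIRECTORIES_ENABLE resolves from DFSConfigKeys and extends protection to descendants are illustrative, not taken from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative helper, not part of the patch.
public class ProtectedDirsConf {
  public static Configuration create() {
    Configuration conf = new HdfsConfiguration();
    // Comma-separated list of directories that must not be deleted (or, with
    // this change, moved to the trash) while they are non-empty.
    conf.set(CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES,
        "/apps/warehouse,/apps/metastore");
    // Assumed semantics: also protect descendants of the listed directories,
    // as exercised by testMoveProtectedSubDirsToTrash below.
    conf.setBoolean(DFSConfigKeys.DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);
    return conf;
  }
}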
@@ -26,6 +26,8 @@
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
@@ -36,6 +38,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.*;
 
@@ -284,6 +287,31 @@ public void testDelete() throws Throwable {
     }
   }
 
+  @Test
+  public void testMoveToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path path : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether " + path +
+                  " can be moved to trash",
+              moveToTrash(fs, path, conf),
+              is(testMatrixEntry.canPathBeDeleted(path)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /*
    * Verify that protected directories could not be renamed.
    */
@@ -339,6 +367,33 @@ public void testRenameProtectSubDirs() throws Throwable {
     }
   }
 
+  @Test
+  public void testMoveProtectedSubDirsToTrash() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry :
+        createTestMatrixForProtectSubDirs()) {
+      Configuration conf = new HdfsConfiguration();
+      conf.setBoolean(DFS_PROTECTED_SUBDIRECTORIES_ENABLE, true);
+      conf.setInt(DFSConfigKeys.FS_TRASH_INTERVAL_KEY, 3600);
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether "
+                  + srcPath + " can be moved to trash",
+              moveToTrash(fs, srcPath, conf),
+              is(testMatrixEntry.canPathBeRenamed(srcPath)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testDeleteProtectSubDirs() throws Throwable {
     for (TestMatrixEntry testMatrixEntry :
@@ -465,6 +520,21 @@ private boolean deletePath(FileSystem fs, Path path) throws IOException {
     }
   }
 
+  private boolean moveToTrash(FileSystem fs, Path path, Configuration conf) {
+    try {
+      return Trash.moveToAppropriateTrash(fs, path, conf);
+    } catch (FileNotFoundException fnf) {
+      // fs.delete(...) does not throw an exception if the file does not exist.
+      // The deletePath method in this class, will therefore return true if
+      // there is an attempt to delete a file which does not exist. Therefore
+      // catching this exception and returning true to keep it consistent and
+      // allow tests to work with the same test matrix.
+      return true;
+    } catch (IOException ace) {
+      return false;
+    }
+  }
+
   /**
    * Return true if the path was successfully renamed. False if it
    * failed with AccessControlException. Any other exceptions are