HADOOP-10557. FsShell -cp -pa option for preserving extended ACLs. Contributed by Akira Ajisaka.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1603222 13f79535-47bb-0310-9956-ffa450edef68
commit 6e37dd331b
parent c1eda00e84

@@ -433,6 +433,9 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10666. Remove Copyright /d/d/d/d Apache Software Foundation from
     the source files license header. (Henry Saputra via wang)
 
+    HADOOP-10557. FsShell -cp -pa option for preserving extended ACLs.
+    (Akira Ajisaka via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -25,6 +25,7 @@
 import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.NoSuchElementException;
@@ -39,6 +40,9 @@
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.fs.PathOperationException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 
 /**
@@ -88,7 +92,7 @@ protected void setPreserve(boolean preserve) {
   }
 
   protected static enum FileAttribute {
-    TIMESTAMPS, OWNERSHIP, PERMISSION, XATTR;
+    TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;
 
     public static FileAttribute getAttribute(char symbol) {
       for (FileAttribute attribute : values()) {
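The getAttribute(char) lookup that turns a -p sub-flag into one of these constants is only partly visible in this hunk. As a point of reference, here is a minimal, self-contained sketch of how a letter such as 'a' could resolve to the new ACL value, assuming the flag letter is simply the lowercase first letter of the constant name; the getSymbol() helper below is an illustration, not code from this commit.

```java
import java.util.NoSuchElementException;

// Illustrative sketch only; the enum values mirror the hunk above, but the
// getSymbol() convention is an assumption about how 't','o','p','a','x' map.
enum FileAttributeSketch {
  TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;

  // Assumed convention: the option letter is the first letter of the name.
  char getSymbol() {
    return Character.toLowerCase(name().charAt(0));
  }

  static FileAttributeSketch getAttribute(char symbol) {
    for (FileAttributeSketch attribute : values()) {
      if (attribute.getSymbol() == symbol) {
        return attribute;               // 'a' -> ACL, 'x' -> XATTR, ...
      }
    }
    throw new NoSuchElementException("No attribute for symbol " + symbol);
  }
}
```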
@@ -306,11 +310,22 @@ protected void copyFileToTarget(PathData src, PathData target) throws IOExceptio
         src.stat.getOwner(),
         src.stat.getGroup());
     }
-    if (shouldPreserve(FileAttribute.PERMISSION)) {
+    if (shouldPreserve(FileAttribute.PERMISSION) ||
+        shouldPreserve(FileAttribute.ACL)) {
       target.fs.setPermission(
         target.path,
         src.stat.getPermission());
     }
+    if (shouldPreserve(FileAttribute.ACL)) {
+      FsPermission perm = src.stat.getPermission();
+      if (perm.getAclBit()) {
+        List<AclEntry> srcEntries =
+            src.fs.getAclStatus(src.path).getEntries();
+        List<AclEntry> srcFullEntries =
+            AclUtil.getAclFromPermAndEntries(perm, srcEntries);
+        target.fs.setAcl(target.path, srcFullEntries);
+      }
+    }
     if (shouldPreserve(FileAttribute.XATTR)) {
       Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
       if (srcXAttrs != null) {
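Outside of FsShell, the same preserve-ACL flow can be expressed with the public FileSystem and AclUtil calls the hunk above relies on (getAclStatus, getAclFromPermAndEntries, setAcl). The following is a minimal standalone sketch under that assumption; the class, method, and path names are illustrative only.

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsPermission;

public class PreserveAclSketch {
  // Copy the permission bits and, when present, the full extended ACL from
  // src to dst, mirroring the -pa behaviour sketched in the hunk above.
  static void preservePermissionAndAcl(FileSystem fs, Path src, Path dst)
      throws IOException {
    FsPermission perm = fs.getFileStatus(src).getPermission();
    fs.setPermission(dst, perm);            // base permission bits first
    if (perm.getAclBit()) {                 // only if src actually carries an ACL
      List<AclEntry> extended = fs.getAclStatus(src).getEntries();
      // Rebuild the full ACL: base entries derived from perm + extended entries.
      List<AclEntry> full = AclUtil.getAclFromPermAndEntries(perm, extended);
      fs.setAcl(dst, full);
    }
  }

  public static void main(String[] args) throws Exception {
    // args[0] and args[1] are placeholder source/destination paths.
    FileSystem fs = FileSystem.get(new Configuration());
    preservePermissionAndAcl(fs, new Path(args[0]), new Path(args[1]));
  }
}
```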
@@ -133,16 +133,18 @@ protected void processPath(PathData src) throws IOException {
 
   static class Cp extends CommandWithDestination {
     public static final String NAME = "cp";
-    public static final String USAGE = "[-f] [-p | -p[topx]] <src> ... <dst>";
+    public static final String USAGE = "[-f] [-p | -p[topax]] <src> ... <dst>";
     public static final String DESCRIPTION =
       "Copy files that match the file pattern <src> to a " +
       "destination. When copying multiple files, the destination " +
       "must be a directory. Passing -p preserves status " +
-      "[topx] (timestamps, ownership, permission, XAttr). " +
+      "[topax] (timestamps, ownership, permission, ACLs, XAttr). " +
       "If -p is specified with no <arg>, then preserves " +
-      "timestamps, ownership, permission. Passing -f " +
-      "overwrites the destination if it already exists.\n";
+      "timestamps, ownership, permission. If -pa is specified, " +
+      "then preserves permission also because ACL is a super-set of " +
+      "permission. Passing -f overwrites the destination if it " +
+      "already exists.\n";
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
       popPreserveOption(args);
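popPreserveOption itself is not part of this diff. The sketch below is a rough, hypothetical illustration of how a -p / -ptopax argument could be reduced to the set of preserved flag letters, which the real code would then map through FileAttribute.getAttribute; the actual parsing in CommandWithDestination may differ in detail.

```java
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.Set;

// Hypothetical sketch of -p / -ptopax handling; not the actual FsShell code.
class PreserveOptionSketch {
  static Set<Character> popPreserveOption(LinkedList<String> args) {
    Set<Character> preserved = new LinkedHashSet<>();
    String arg = args.peekFirst();
    if (arg == null || !arg.startsWith("-p")) {
      return preserved;                        // no preserve flag given
    }
    args.removeFirst();
    String flags = arg.substring("-p".length());
    if (flags.isEmpty()) {
      // Bare -p: timestamps, ownership, permission (matches the help text above).
      flags = "top";
    }
    for (char symbol : flags.toCharArray()) {
      // Each letter would be resolved via FileAttribute.getAttribute(symbol),
      // so "-ptopa" ends up preserving TIMESTAMPS, OWNERSHIP, PERMISSION, ACL.
      preserved.add(symbol);
    }
    return preserved;
  }

  public static void main(String[] args) {
    LinkedList<String> argv = new LinkedList<>();
    argv.add("-ptopa");
    argv.add("/src");
    argv.add("/dst");
    System.out.println(popPreserveOption(argv));  // prints [t, o, p, a]
  }
}
```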
@@ -159,7 +159,7 @@ count
 
 cp
 
-   Usage: <<<hdfs dfs -cp [-f] [-p | -p[topx]] URI [URI ...] <dest> >>>
+   Usage: <<<hdfs dfs -cp [-f] [-p | -p[topax]] URI [URI ...] <dest> >>>
 
    Copy files from source to destination. This command allows multiple sources
    as well in which case the destination must be a directory.
@@ -169,8 +169,10 @@ cp
   * The -f option will overwrite the destination if it already exists.
 
   * The -p option will preserve file attributes [topx] (timestamps,
-    ownership, permission, XAttr). If -p is specified with no <arg>,
-    then preserves timestamps, ownership, permission.
+    ownership, permission, ACL, XAttr). If -p is specified with no <arg>,
+    then preserves timestamps, ownership, permission. If -pa is specified,
+    then preserves permission also because ACL is a super-set of
+    permission.
 
   Example:
 
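The documented -ptopa behaviour can also be exercised programmatically, the same way the new tests below drive it, through FsShell and ToolRunner. A small example with placeholder paths:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class CpPreserveAclExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FsShell shell = new FsShell(conf);
    try {
      // Equivalent of: hdfs dfs -cp -ptopa /src/file /dst/file
      // (paths are placeholders; -ptopa = timestamps, ownership, permission, ACL)
      int ret = ToolRunner.run(shell,
          new String[] { "-cp", "-ptopa", "/src/file", "/dst/file" });
      System.exit(ret);
    } finally {
      shell.close();
    }
  }
}
```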
@@ -296,7 +296,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-cp \[-f\] \[-p \| -p\[topx\]\] <src> \.\.\. <dst> :\s*</expected-output>
+          <expected-output>^-cp \[-f\] \[-p \| -p\[topax\]\] <src> \.\.\. <dst> :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -308,15 +308,19 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*\[topx\] \(timestamps, ownership, permission, XAttr\). If -p is specified with no( )*</expected-output>
+          <expected-output>^( |\t)*\[topax\] \(timestamps, ownership, permission, ACLs, XAttr\). If -p is specified( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*<arg>, then preserves timestamps, ownership, permission. Passing -f overwrites( )*</expected-output>
+          <expected-output>^( |\t)*with no <arg>, then preserves timestamps, ownership, permission. If -pa is( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*the destination if it already exists.( )*</expected-output>
+          <expected-output>^( |\t)*specified, then preserves permission also because ACL is a super-set of( )*</expected-output>
         </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*permission. Passing -f overwrites the destination if it already exists.( )*</expected-output>
+        </comparator>
       </comparators>
     </test>
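The RegexpComparator entries above are ordinary java.util.regex patterns applied line by line to the help output. A quick way to sanity-check the updated usage pattern; the sample line here is shaped after the new USAGE string rather than captured output:

```java
import java.util.regex.Pattern;

public class HelpRegexCheck {
  public static void main(String[] args) {
    // Pattern copied from the updated testConf entry above.
    Pattern usage = Pattern.compile(
        "^-cp \\[-f\\] \\[-p \\| -p\\[topax\\]\\] <src> \\.\\.\\. <dst> :\\s*");
    // Assumed sample line, in the shape of FsShell "-help cp" output.
    String line = "-cp [-f] [-p | -p[topax]] <src> ... <dst> :";
    System.out.println(usage.matcher(line).find());   // expected: true
  }
}
```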
@@ -34,6 +34,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -54,10 +55,16 @@
 import org.junit.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.*;
 
+import com.google.common.collect.Lists;
+
 /**
  * This class tests commands from DFSShell.
  */
@@ -1621,11 +1628,13 @@ public void testInvalidShell() throws Exception {
     assertEquals("expected to fail -1", res , -1);
   }
 
-  // Preserve Copy Option is -ptopx (timestamps, ownership, permission, XATTR)
+  // Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
+  // ACLs)
   @Test (timeout = 120000)
   public void testCopyCommandsWithPreserveOption() throws Exception {
     Configuration conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FsShell shell = null;
@@ -1638,6 +1647,14 @@ public void testCopyCommandsWithPreserveOption() throws Exception {
       fs.mkdirs(hdfsTestDir);
       Path src = new Path(hdfsTestDir, "srcfile");
       fs.create(src).close();
+
+      fs.setAcl(src, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+
       FileStatus status = fs.getFileStatus(src);
       final long mtime = status.getModificationTime();
       final long atime = status.getAccessTime();
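The aclEntry(...) calls come from the AclTestHelpers static import added earlier in this file. For reference, an entry such as aclEntry(ACCESS, USER, "foo", ALL) corresponds roughly to building an AclEntry by hand with the standard builder, as sketched below (this shows the general AclEntry API, not the helper's exact body):

```java
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class AclEntrySketch {
  public static void main(String[] args) {
    // Roughly what aclEntry(ACCESS, USER, "foo", ALL) stands for in the test.
    AclEntry entry = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setName("foo")
        .setPermission(FsAction.ALL)
        .build();
    System.out.println(entry);   // e.g. "user:foo:rwx"
  }
}
```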
@@ -1661,41 +1678,93 @@ public void testCopyCommandsWithPreserveOption() throws Exception {
       assertEquals(atime, targetStatus.getAccessTime());
       assertEquals(owner, targetStatus.getOwner());
       assertEquals(group, targetStatus.getGroup());
-      assertTrue(perm.equals(targetStatus.getPermission()));
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
       Map<String, byte[]> xattrs = fs.getXAttrs(target1);
       assertTrue(xattrs.isEmpty());
+      List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
 
       // -ptop
       Path target2 = new Path(hdfsTestDir, "targetfile2");
       argv = new String[] { "-cp", "-ptop", src.toUri().toString(),
           target2.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -p is not working", SUCCESS, ret);
-      targetStatus = fs.getFileStatus(target1);
+      assertEquals("cp -ptop is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target2);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
       assertEquals(owner, targetStatus.getOwner());
       assertEquals(group, targetStatus.getGroup());
-      assertTrue(perm.equals(targetStatus.getPermission()));
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
       xattrs = fs.getXAttrs(target2);
       assertTrue(xattrs.isEmpty());
-
+      acls = fs.getAclStatus(target2).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
       // -ptopx
       Path target3 = new Path(hdfsTestDir, "targetfile3");
       argv = new String[] { "-cp", "-ptopx", src.toUri().toString(),
           target3.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -p is not working", SUCCESS, ret);
-      targetStatus = fs.getFileStatus(target1);
+      assertEquals("cp -ptopx is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target3);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
       assertEquals(owner, targetStatus.getOwner());
       assertEquals(group, targetStatus.getGroup());
-      assertTrue(perm.equals(targetStatus.getPermission()));
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
       xattrs = fs.getXAttrs(target3);
       assertEquals(xattrs.size(), 2);
       assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
       assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+      acls = fs.getAclStatus(target3).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa
+      Path target4 = new Path(hdfsTestDir, "targetfile4");
+      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+          target4.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target4);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target4);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target4).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4));
+
+      // -ptoa (verify -pa option will preserve permissions also)
+      Path target5 = new Path(hdfsTestDir, "targetfile5");
+      argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
+          target5.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptoa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target5);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target5);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target5).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
     } finally {
       if (null != shell) {
         shell.close();
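The -ptopx block above expects exactly two xattrs on the copy; the code that puts them on srcfile sits outside this hunk. Judging from the assertions, the setup amounts to calls of the following shape (the call site itself is an assumption):

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Presumed setup behind the target3 assertions above; names and values are
// taken from the assertions, FileSystem.setXAttr is the standard API for it.
class XAttrSetupSketch {
  static void setUpXAttrs(FileSystem fs, Path src) throws IOException {
    fs.setXAttr(src, "user.a1", new byte[] {0x31, 0x32, 0x33});      // "123"
    fs.setXAttr(src, "trusted.a1", new byte[] {0x31, 0x31, 0x31});   // "111"
  }
}
```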
@@ -1709,6 +1778,90 @@ public void testCopyCommandsWithPreserveOption() throws Exception {
     }
   }
 
+  // Verify cp -pa option will preserve both ACL and sticky bit.
+  @Test (timeout = 120000)
+  public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir =
+        "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-"
+        + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      Path src = new Path(hdfsTestDir, "srcfile");
+      fs.create(src).close();
+
+      fs.setAcl(src, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+      // set sticky bit
+      fs.setPermission(src,
+          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
+
+      FileStatus status = fs.getFileStatus(src);
+      final long mtime = status.getModificationTime();
+      final long atime = status.getAccessTime();
+      final String owner = status.getOwner();
+      final String group = status.getGroup();
+      final FsPermission perm = status.getPermission();
+
+      shell = new FsShell(conf);
+
+      // -p preserves sticky bit and doesn't preserve ACL
+      Path target1 = new Path(hdfsTestDir, "targetfile1");
+      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
+          target1.toUri().toString() };
+      int ret = ToolRunner.run(shell, argv);
+      assertEquals("cp is not working", SUCCESS, ret);
+      FileStatus targetStatus = fs.getFileStatus(target1);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa preserves both sticky bit and ACL
+      Path target2 = new Path(hdfsTestDir, "targetfile2");
+      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+          target2.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target2);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      acls = fs.getAclStatus(target2).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
+    } finally {
+      if (null != shell) {
+        shell.close();
+      }
+      if (null != fs) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
   // force Copy Option is -f
   @Test (timeout = 30000)
   public void testCopyCommandsWithForceOption() throws Exception {
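The four-argument FsPermission constructor used in the new test carries the sticky bit as its final flag. Below is a small sketch, with the permission values copied from the test and the class name purely illustrative, of what that permission object reports:

```java
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class StickyBitSketch {
  public static void main(String[] args) {
    // Same permission the test assigns to srcfile: rwx r-x --x plus sticky bit.
    FsPermission perm = new FsPermission(
        FsAction.ALL, FsAction.READ_EXECUTE, FsAction.EXECUTE, true);
    System.out.println(perm);                  // e.g. "rwxr-x--t" (octal 1751)
    System.out.println(perm.getStickyBit());   // true: preserved by -p and -ptopa
    // getAclBit() reflects whether an extended ACL is attached server-side;
    // it is false on a bare FsPermission object like this one.
    System.out.println(perm.getAclBit());      // false
  }
}
```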