diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3681131f83..2416359141 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -505,6 +505,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6610. TestShortCircuitLocalRead tests sometimes timeout on slow
     machines. (Charles Lamb via wang)
 
+    HDFS-6620. Snapshot docs should specify about preserve options with cp command
+    (Stephen Chu via umamahesh)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
index f809e855b9..eba1d807c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
@@ -97,7 +97,9 @@
   • Listing the files in snapshot s0:
       hdfs dfs -ls /foo/.snapshot/s0
   • Copying a file from snapshot s0:
-      hdfs dfs -cp /foo/.snapshot/s0/bar /tmp
+      hdfs dfs -cp -ptopax /foo/.snapshot/s0/bar /tmp
+      Note that this example uses the preserve option to preserve
+      timestamps, ownership, permission, ACLs and XAttrs.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
index 7042fc914c..35b687aa30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -38,6 +39,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.ToolRunner;
 
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -57,6 +59,7 @@ public class TestXAttrWithSnapshot {
   private static int pathCount = 0;
   private static Path path, snapshotPath;
   private static String snapshotName;
+  private final int SUCCESS = 0;
   // XAttrs
   private static final String name1 = "user.a1";
   private static final byte[] value1 = { 0x31, 0x32, 0x33 };
@@ -351,6 +354,26 @@ public void testRemoveXAttrExceedsQuota() throws Exception {
     hdfs.removeXAttr(filePath, name1);
   }
 
+  /**
+   * Test that users can copy a snapshot while preserving its xattrs.
+   */
+  @Test (timeout = 120000)
+  public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
+    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
+    hdfs.setXAttr(path, name1, value1);
+    hdfs.setXAttr(path, name2, value2);
+    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
+    Path snapshotCopy = new Path(path.toString() + "-copy");
+    String[] argv = new String[] { "-cp", "-px", snapshotPath.toUri().toString(),
+        snapshotCopy.toUri().toString() };
+    int ret = ToolRunner.run(new FsShell(conf), argv);
+    assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
+
+    Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotCopy);
+    assertArrayEquals(value1, xattrs.get(name1));
+    assertArrayEquals(value2, xattrs.get(name2));
+  }
+
   /**
    * Initialize the cluster, wait for it to become active, and get FileSystem
    * instances for our test users.
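
For context, the sketch below strings the documented pieces together outside of the test harness: it marks a directory snapshottable, takes a snapshot, and then copies out of the read-only .snapshot path with the preserve option via FsShell and ToolRunner, the same way the new test drives "-cp -px". This is only an illustrative sketch, not part of the patch: the class name SnapshotCopyExample, the paths /data and /restored, and the snapshot name s0 are made up, and it assumes fs.defaultFS points at an HDFS cluster and that the caller is allowed to run allowSnapshot.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.util.ToolRunner;

    public class SnapshotCopyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS cluster; the cast fails otherwise.
        DistributedFileSystem dfs =
            (DistributedFileSystem) new Path("/").getFileSystem(conf);

        Path dir = new Path("/data");   // hypothetical source directory
        dfs.allowSnapshot(dir);         // admin step: make the directory snapshottable
        dfs.createSnapshot(dir, "s0");  // take snapshot s0

        // Copy out of the read-only snapshot while preserving timestamps,
        // ownership, permission, ACLs and XAttrs, as the updated docs describe.
        String[] argv = { "-cp", "-ptopax", "/data/.snapshot/s0", "/restored" };
        int ret = ToolRunner.run(new FsShell(conf), argv);
        System.exit(ret);               // 0 on success, non-zero if the copy failed
      }
    }

The point of going through FsShell rather than FileSystem.copyFromLocalFile or similar is that the shell's -p flag handles the attribute preservation; which attributes survive the copy is controlled entirely by the letters passed after -p.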