diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 497aa84e76..9ff54e6f2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -2067,23 +2067,7 @@ INodeAttributes getAttributes(INodesInPath iip)
       // first empty component for the root. however file status
       // related calls are expected to strip out the root component according
       // to TestINodeAttributeProvider.
-      // Due to HDFS-15372 the attribute provider should received the resolved
-      // snapshot path. Ie, rather than seeing /d/.snapshot/sn/data it should
-      // see /d/data. However, for the path /d/.snapshot/sn it should see this
-      // full path. If the current inode is the snapshot name, it always has the
-      // same ID as its parent inode, so we can use that to check if it is the
-      // path which needs handled specially.
-      byte[][] components;
-      INodeDirectory parent = node.getParent();
-      if (iip.isSnapshot()
-          && parent != null && parent.getId() != node.getId()) {
-        // For snapshot paths, we always user node.getPathComponents so the
-        // snapshot path is resolved to the real path, unless the last component
-        // is the snapshot name root directory.
-        components = node.getPathComponents();
-      } else {
-        components = iip.getPathComponents();
-      }
+      byte[][] components = iip.getPathComponents();
       components = Arrays.copyOfRange(components, 1, components.length);
       nodeAttrs = ap.getAttributes(components, nodeAttrs);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index e8e292761d..324cd5d441 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -19,7 +19,6 @@

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Stack;
@@ -265,7 +264,7 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
     final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length];
     final byte[][] components = inodesInPath.getPathComponents();
     for (int i = 0; i < inodes.length && inodes[i] != null; i++) {
-      inodeAttrs[i] = getINodeAttrs(inodes[i], snapshotId);
+      inodeAttrs[i] = getINodeAttrs(components, i, inodes[i], snapshotId);
     }

     String path = inodesInPath.getPath();
@@ -315,7 +314,8 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
   void checkPermission(INode inode, int snapshotId, FsAction access)
       throws AccessControlException {
     byte[][] pathComponents = inode.getPathComponents();
-    INodeAttributes nodeAttributes = getINodeAttrs(inode, snapshotId);
+    INodeAttributes nodeAttributes = getINodeAttrs(pathComponents,
+        pathComponents.length - 1, inode, snapshotId);
     try {
       INodeAttributes[] iNodeAttr = {nodeAttributes};
       AccessControlEnforcer enforcer = getAccessControlEnforcer();
@@ -424,31 +424,23 @@ public void checkPermissionWithContext(
         authzContext.getSubAccess(), authzContext.isIgnoreEmptyDir());
   }

-  private INodeAttributes getINodeAttrs(INode inode, int snapshotId) {
+  private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx,
+      INode inode, int snapshotId) {
     INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId);
-    /**
-     * This logic is similar to {@link FSDirectory#getAttributes()} and it
-     * ensures that the attribute provider sees snapshot paths resolved to their
-     * original location. This means the attributeProvider can apply permissions
-     * to the snapshot paths in the same was as the live paths. See HDFS-15372.
-     */
     if (getAttributesProvider() != null) {
+      String[] elements = new String[pathIdx + 1];
       /**
-       * If we have an inode representing a path like /d/.snapshot/snap1
-       * then calling inode.getPathComponents returns [null, d, snap1]. If we
-       * call inode.getFullPathName() it will return /d/.snapshot/snap1. For
-       * this special path (snapshot root) the attribute provider should see:
-       *
-       * [null, d, .snapshot/snap1]
-       *
-       * Using IIP.resolveFromRoot, it will take the inode fullPathName and
-       * construct an IIP object that give the correct components as above.
+       * {@link INode#getPathComponents(String)} returns a null component
+       * for the root only path "/". Assign an empty string if so.
        */
-      INodesInPath iip = INodesInPath.resolveFromRoot(inode);
-      byte[][] components = iip.getPathComponents();
-      components = Arrays.copyOfRange(components, 1, components.length);
-      inodeAttrs = getAttributesProvider()
-          .getAttributes(components, inodeAttrs);
+      if (pathByNameArr.length == 1 && pathByNameArr[0] == null) {
+        elements[0] = "";
+      } else {
+        for (int i = 0; i < elements.length; i++) {
+          elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
+        }
+      }
+      inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs);
     }
     return inodeAttrs;
   }
@@ -504,7 +496,7 @@ private void checkSubAccess(byte[][] components, int pathIdx,
       if (!(cList.isEmpty() && ignoreEmptyDir)) {
         //TODO have to figure this out with inodeattribute provider
         INodeAttributes inodeAttr =
-            getINodeAttrs(d, snapshotId);
+            getINodeAttrs(components, pathIdx, d, snapshotId);
         if (!hasPermission(inodeAttr, access)) {
           throw new AccessControlException(
               toAccessControlString(inodeAttr, d.getFullPathName(), access));
@@ -522,7 +514,7 @@ private void checkSubAccess(byte[][] components, int pathIdx,
       if (inodeAttr.getFsPermission().getStickyBit()) {
         for (INode child : cList) {
           INodeAttributes childInodeAttr =
-              getINodeAttrs(child, snapshotId);
+              getINodeAttrs(components, pathIdx, child, snapshotId);
           if (isStickyBitViolated(inodeAttr, childInodeAttr)) {
             List<byte[]> allComponentList = new ArrayList<>();
             for (int i = 0; i <= pathIdx; ++i) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 8a150f0630..c2cdd48d49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -135,27 +135,6 @@ static INodesInPath resolve(final INodeDirectory startingDir,
     return resolve(startingDir, components, false);
   }

-  /**
-   * Retrieves the existing INodes from a path, starting at the root directory.
-   * The root directory is located by following the parent link in the inode
-   * recursively until the final root inode is found.
-   * The inodes returned will depend upon the output of inode.getFullPathName().
-   * For a snapshot path, like /data/.snapshot/snap1, it will be resolved to:
-   * [null, data, .snapshot/snap1]
-   * For a file in the snapshot, as inode.getFullPathName resolves the snapshot
-   * information, the returned inodes for a path like /data/.snapshot/snap1/d1
-   * would be:
-   * [null, data, d1]
-   * @param inode the {@link INode} to be resolved
-   * @return INodesInPath
-   */
-  static INodesInPath resolveFromRoot(INode inode) {
-    INode[] inodes = getINodes(inode);
-    byte[][] paths = INode.getPathComponents(inode.getFullPathName());
-    INodeDirectory rootDir = inodes[0].asDirectory();
-    return resolve(rootDir, paths);
-  }
-
   static INodesInPath resolve(final INodeDirectory startingDir,
       byte[][] components, final boolean isRaw) {
     Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index 512d102983..776a1981ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -24,6 +24,7 @@
 import java.util.Map;
 import java.util.Set;

+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;

 import org.apache.hadoop.conf.Configuration;
@@ -33,9 +34,9 @@
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Lists;
@@ -81,7 +82,6 @@ public void checkPermission(String fsOwner, String supergroup,
              ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
         }
         CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access);
-        CALLED.add("checkPermission|" + path);
       }

       @Override
@@ -89,13 +89,13 @@ public void checkPermissionWithContext(
           AuthorizationContext authzContext) throws AccessControlException {
         if (authzContext.getAncestorIndex() > 1
             && authzContext.getInodes()[1].getLocalName().equals("user")
-            && authzContext.getInodes()[2].getLocalName().equals("acl")) {
+            && authzContext.getInodes()[2].getLocalName().equals("acl")
+            || runPermissionCheck) {
           this.ace.checkPermissionWithContext(authzContext);
         }
         CALLED.add("checkPermission|" + authzContext.getAncestorAccess()
             + "|" + authzContext.getParentAccess() + "|" + authzContext
             .getAccess());
-        CALLED.add("checkPermission|" + authzContext.getPath());
       }
     }

@@ -112,12 +107,7 @@ public void stop() {
     @Override
     public INodeAttributes getAttributes(String[] pathElements,
         final INodeAttributes inode) {
-      String fullPath = String.join("/", pathElements);
-      if (!fullPath.startsWith("/")) {
-        fullPath = "/" + fullPath;
-      }
       CALLED.add("getAttributes");
-      CALLED.add("getAttributes|"+fullPath);
       final boolean useDefault = useDefault(pathElements);
       final boolean useNullAcl = useNullAclFeature(pathElements);
       return new INodeAttributes() {
@@ -495,109 +490,63 @@ public Void run() throws Exception {
   }

   @Test
-  // HDFS-15372 - Attribute provider should not see the snapshot path as it
-  // should be resolved into the original path name before it hits the provider.
-  public void testAttrProviderSeesResolvedSnapahotPaths() throws Exception {
+  // See HDFS-16132 where an issue was reported after HDFS-15372. The sequence
+  // of operations here causes that change to break and the test fails with:
+  // org.apache.hadoop.ipc.RemoteException(java.lang.AssertionError):
+  // Absolute path required, but got 'foo'
+  //   at org.apache.hadoop.hdfs.server.namenode.INode.checkAbsolutePath
+  //     (INode.java:838)
+  //   at org.apache.hadoop.hdfs.server.namenode.INode.getPathComponents
+  //     (INode.java:813)
+  // After reverting HDFS-15372 the test passes, so including this test in the
+  // revert for future reference.
+  public void testAttrProviderWorksCorrectlyOnRenamedSnapshotPaths()
+      throws Exception {
+    runPermissionCheck = true;
     FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
     DistributedFileSystem hdfs = miniDFS.getFileSystem();
-    final Path userPath = new Path("/user");
-    final Path authz = new Path("/user/authz");
-    final Path authzChild = new Path("/user/authz/child2");
+    final Path parent = new Path("/user");
+    hdfs.mkdirs(parent);
+    fs.setPermission(parent, new FsPermission(HDFS_PERMISSION));
+    final Path sub1 = new Path(parent, "sub1");
+    final Path sub1foo = new Path(sub1, "foo");
+    hdfs.mkdirs(sub1);
+    hdfs.mkdirs(sub1foo);
+    Path f = new Path(sub1foo, "file0");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+    hdfs.allowSnapshot(parent);
+    hdfs.createSnapshot(parent, "s0");

-    fs.mkdirs(userPath);
-    fs.setPermission(userPath, new FsPermission(HDFS_PERMISSION));
-    fs.mkdirs(authz);
-    hdfs.allowSnapshot(userPath);
-    fs.setPermission(authz, new FsPermission(HDFS_PERMISSION));
-    fs.mkdirs(authzChild);
-    fs.setPermission(authzChild, new FsPermission(HDFS_PERMISSION));
-    fs.createSnapshot(userPath, "snapshot_1");
-    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
-        new String[]{"g1"});
+    f = new Path(sub1foo, "file1");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+    f = new Path(sub1foo, "file2");
+    DFSTestUtil.createFile(hdfs, f, 0, (short) 1, 0);
+
+    final Path sub2 = new Path(parent, "sub2");
+    hdfs.mkdirs(sub2);
+    final Path sub2foo = new Path(sub2, "foo");
+    // mv /parent/sub1/foo to /parent/sub2/foo
+    hdfs.rename(sub1foo, sub2foo);
+
+    hdfs.createSnapshot(parent, "s1");
+    hdfs.createSnapshot(parent, "s2");
+
+    final Path sub3 = new Path(parent, "sub3");
+    hdfs.mkdirs(sub3);
+    // mv /parent/sub2/foo to /parent/sub3/foo
+    hdfs.rename(sub2foo, sub3);
+
+    hdfs.delete(sub3, true);
+    UserGroupInformation ugi =
+        UserGroupInformation.createUserForTesting("u1", new String[] {"g1"});
     ugi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {
         FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-        final Path snapChild =
-            new Path("/user/.snapshot/snapshot_1/authz/child2");
-        // Run various methods on the path to access the attributes etc.
-        fs.getAclStatus(snapChild);
-        fs.getContentSummary(snapChild);
-        fs.getFileStatus(snapChild);
-        Assert.assertFalse(CALLED.contains("getAttributes|" +
-            snapChild.toString()));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz/child2"));
-        // The snapshot path should be seen by the permission checker, but when
-        // it checks access, the paths will be resolved so the attributeProvider
-        // only sees the resolved path.
-        Assert.assertTrue(
-            CALLED.contains("checkPermission|" + snapChild.toString()));
+        ((DistributedFileSystem)fs).getSnapshotDiffReport(parent, "s1", "s2");
         CALLED.clear();
-        fs.getAclStatus(new Path("/"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot"));
-        // attribute provider never sees the .snapshot path directly.
-        Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("checkPermission|/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("getAttributes|/user/.snapshot/snapshot_1"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED
-            .contains("checkPermission|/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/authz"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user/authz"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz"));
         return null;
       }
     });
-    // Delete the files / folders covered by the snapshot, then re-check they
-    // are all readable correctly.
-    fs.delete(authz, true);
-    ugi.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot"));
-        Assert.assertTrue(CALLED.contains("checkPermission|/user/.snapshot"));
-        // attribute provider never sees the .snapshot path directly.
-        Assert.assertFalse(CALLED.contains("getAttributes|/user/.snapshot"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("checkPermission|/user/.snapshot/snapshot_1"));
-        Assert.assertTrue(
-            CALLED.contains("getAttributes|/user/.snapshot/snapshot_1"));
-
-        CALLED.clear();
-        fs.getFileStatus(new Path("/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED
-            .contains("checkPermission|/user/.snapshot/snapshot_1/authz"));
-        Assert.assertTrue(CALLED.contains("getAttributes|/user/authz"));
-
-        return null;
-      }
-    });
-
   }
 }
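
The hunks above restore FSPermissionChecker#getINodeAttrs to building the provider's String[] directly from the path components of the INodesInPath being checked, truncated at pathIdx. The following standalone sketch is not part of the patch: the class and method names are illustrative, and plain UTF-8 decoding stands in for DFSUtil.bytes2String so it compiles without Hadoop on the classpath. It shows what that conversion produces, including the special case for the root-only path "/" whose single component is null.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// Standalone illustration of the conversion reinstated in
// FSPermissionChecker#getINodeAttrs (hypothetical class, for reading only).
public class PathElementsSketch {

  static String[] toElements(byte[][] pathByNameArr, int pathIdx) {
    String[] elements = new String[pathIdx + 1];
    if (pathByNameArr.length == 1 && pathByNameArr[0] == null) {
      // Root-only path "/": the single component is null, so the provider
      // sees one empty element rather than a null.
      elements[0] = "";
    } else {
      for (int i = 0; i < elements.length; i++) {
        // The real code calls DFSUtil.bytes2String here.
        elements[i] = new String(pathByNameArr[i], StandardCharsets.UTF_8);
      }
    }
    return elements;
  }

  public static void main(String[] args) {
    // "/user/authz/child2" resolves to a leading empty root component
    // followed by the individual names.
    byte[][] components = {
        new byte[0],
        "user".getBytes(StandardCharsets.UTF_8),
        "authz".getBytes(StandardCharsets.UTF_8),
        "child2".getBytes(StandardCharsets.UTF_8)
    };
    // pathIdx controls how much of the path is passed on: the permission
    // checker calls the provider once per ancestor index.
    System.out.println(Arrays.toString(toElements(components, 1))); // [, user]
    System.out.println(Arrays.toString(toElements(components, 3))); // [, user, authz, child2]
    // The root path "/" arrives as a single null component.
    System.out.println(Arrays.toString(toElements(new byte[][] {null}, 0))); // one empty element
  }
}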
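
On the provider side nothing in the plugin API changes with this revert; a provider keeps receiving the resolved components as strings. Below is a minimal sketch in the spirit of the provider defined in TestINodeAttributeProvider, not shipped code: it only logs the joined path and returns the stored attributes unchanged, and it assumes the INodeAttributeProvider extension points the test also overrides (start, stop and getAttributes(String[], INodeAttributes)).

import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

// Illustrative provider: records the path it is asked about and passes the
// original attributes through. FSDirectory#getAttributes strips the leading
// root component before calling it, while the permission checker passes the
// components starting at the root, so the join below handles both shapes the
// same way the removed test logging did.
public class LoggingAttributeProvider extends INodeAttributeProvider {

  @Override
  public void start() {
    // no initialisation needed for this sketch
  }

  @Override
  public void stop() {
    // nothing to clean up
  }

  @Override
  public INodeAttributes getAttributes(String[] pathElements,
      INodeAttributes inode) {
    // Rebuild a printable path; a leading "/" is added when the first
    // element is a real name rather than the empty root component.
    String fullPath = String.join("/", pathElements);
    if (!fullPath.startsWith("/")) {
      fullPath = "/" + fullPath;
    }
    System.out.println("getAttributes|" + fullPath);
    return inode; // a real provider could wrap or replace the attributes here
  }
}

A provider like this is typically registered with the NameNode through the dfs.namenode.inode.attributes.provider.class setting, which is how TestINodeAttributeProvider wires its own provider into the MiniDFSCluster.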