diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b62644669d..743958d5df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -819,6 +819,9 @@ Release 0.23.0 - Unreleased HDFS-2501. Add version prefix and root methods to webhdfs. (szetszwo) + HDFS-1869. mkdirs should use the supplied permission for all of the created + directories. (Daryn Sharp via szetszwo) + OPTIMIZATIONS HDFS-1458. Improve checkpoint performance by avoiding unnecessary image diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 654c3a231d..9eb7bdec9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -233,7 +234,7 @@ INodeFileUnderConstruction addFile(String path, clientMachine, clientNode); writeLock(); try { - newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE, false); + newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE); } finally { writeUnlock(); } @@ -276,7 +277,7 @@ INode unprotectedAddFile( String path, writeLock(); try { try { - newNode = addNode(path, newNode, diskspace, false); + newNode = addNode(path, newNode, diskspace); if(newNode != null && blocks != null) { int nrBlocks = blocks.length; // Add file->block mapping @@ -303,7 +304,7 @@ INodeDirectory 
addToParent(byte[] src, INodeDirectory parentINode, try { try { newParent = rootDir.addToParent(src, newNode, parentINode, - false, propagateModTime); + propagateModTime); cacheName(newNode); } catch (FileNotFoundException e) { return null; @@ -576,7 +577,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) // add src to the destination dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, - srcChild, UNKNOWN_DISK_SPACE, false); + srcChild, UNKNOWN_DISK_SPACE); if (dstChild != null) { srcChild = null; if (NameNode.stateChangeLog.isDebugEnabled()) { @@ -593,7 +594,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp) // put it back srcChild.setLocalName(srcChildName); addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, srcChild, - UNKNOWN_DISK_SPACE, false); + UNKNOWN_DISK_SPACE); } } NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " @@ -731,7 +732,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, removedSrc.setLocalName(dstComponents[dstInodes.length - 1]); // add src as dst to complete rename dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, - removedSrc, UNKNOWN_DISK_SPACE, false); + removedSrc, UNKNOWN_DISK_SPACE); int filesDeleted = 0; if (dstChild != null) { @@ -759,13 +760,13 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp, // Rename failed - restore src removedSrc.setLocalName(srcChildName); addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, removedSrc, - UNKNOWN_DISK_SPACE, false); + UNKNOWN_DISK_SPACE); } if (removedDst != null) { // Rename failed - restore dst removedDst.setLocalName(dstChildName); addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, removedDst, - UNKNOWN_DISK_SPACE, false); + UNKNOWN_DISK_SPACE); } } NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " @@ -1436,9 +1437,10 @@ static String getFullPathName(INode inode) { * @param src string representation of the path to the directory * 
@param permissions the permission of the directory - * @param inheritPermission if the permission of the directory should inherit - * from its parent or not. The automatically created - * ones always inherit its permission from its parent + * @param inheritPermission if the permission of the directory should inherit + * from its parent or not. u+wx is implicitly added to + * the automatically created directories, and to the + * given directory if inheritPermission is true + * @param now creation time * @return true if the operation succeeds false otherwise * @throws FileNotFoundException if an ancestor or itself is a file @@ -1454,6 +1456,7 @@ boolean mkdirs(String src, PermissionStatus permissions, String[] names = INode.getPathNames(src); byte[][] components = INode.getPathComponents(names); INode[] inodes = new INode[components.length]; + final int lastInodeIndex = inodes.length - 1; writeLock(); try { @@ -1470,12 +1473,44 @@ boolean mkdirs(String src, PermissionStatus permissions, } } + // default to creating parent dirs with the given perms + PermissionStatus parentPermissions = permissions; + + // if not inheriting and it's the last inode, there's no use in + // computing perms that won't be used + if (inheritPermission || (i < lastInodeIndex)) { + // if inheriting (ie. creating a file or symlink), use the parent dir, + // else the supplied permissions + // NOTE: the permissions of the auto-created directories violate posix + FsPermission parentFsPerm = inheritPermission + ? 
inodes[i-1].getFsPermission() : permissions.getPermission(); + + // ensure that the permissions allow user write+execute + if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) { + parentFsPerm = new FsPermission( + parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE), + parentFsPerm.getGroupAction(), + parentFsPerm.getOtherAction() + ); + } + + if (!parentPermissions.getPermission().equals(parentFsPerm)) { + parentPermissions = new PermissionStatus( + parentPermissions.getUserName(), + parentPermissions.getGroupName(), + parentFsPerm + ); + // when inheriting, use same perms for entire path + if (inheritPermission) permissions = parentPermissions; + } + } + // create directories beginning from the first null index for(; i < inodes.length; i++) { pathbuilder.append(Path.SEPARATOR + names[i]); String cur = pathbuilder.toString(); - unprotectedMkdir(inodes, i, components[i], permissions, - inheritPermission || i != components.length-1, now); + unprotectedMkdir(inodes, i, components[i], + (i < lastInodeIndex) ? parentPermissions : permissions, now); if (inodes[i] == null) { return false; } @@ -1506,7 +1541,7 @@ INode unprotectedMkdir(String src, PermissionStatus permissions, rootDir.getExistingPathINodes(components, inodes, false); unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1], - permissions, false, timestamp); + permissions, timestamp); return inodes[inodes.length-1]; } @@ -1515,19 +1550,19 @@ INode unprotectedMkdir(String src, PermissionStatus permissions, * All ancestors exist. Newly created one stored at index pos. */ private void unprotectedMkdir(INode[] inodes, int pos, - byte[] name, PermissionStatus permission, boolean inheritPermission, + byte[] name, PermissionStatus permission, long timestamp) throws QuotaExceededException { assert hasWriteLock(); inodes[pos] = addChild(inodes, pos, new INodeDirectory(name, permission, timestamp), - -1, inheritPermission ); + -1); } /** Add a node child to the namespace. 
The full path name of the node is src. * childDiskspace should be -1, if unknown. * QuotaExceededException is thrown if it violates quota limit */ private T addNode(String src, T child, - long childDiskspace, boolean inheritPermission) + long childDiskspace) throws QuotaExceededException, UnresolvedLinkException { byte[][] components = INode.getPathComponents(src); byte[] path = components[components.length-1]; @@ -1537,8 +1572,7 @@ private T addNode(String src, T child, writeLock(); try { rootDir.getExistingPathINodes(components, inodes, false); - return addChild(inodes, inodes.length-1, child, childDiskspace, - inheritPermission); + return addChild(inodes, inodes.length-1, child, childDiskspace); } finally { writeUnlock(); } @@ -1666,7 +1700,7 @@ protected void verifyFsLimits(INode[] pathComponents, * Its ancestors are stored at [0, pos-1]. * QuotaExceededException is thrown if it violates quota limit */ private T addChild(INode[] pathComponents, int pos, - T child, long childDiskspace, boolean inheritPermission, + T child, long childDiskspace, boolean checkQuota) throws QuotaExceededException { // The filesystem limits are not really quotas, so this check may appear // odd. 
It's because a rename operation deletes the src, tries to add @@ -1689,7 +1723,7 @@ private T addChild(INode[] pathComponents, int pos, throw new NullPointerException("Panic: parent does not exist"); } T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild( - child, inheritPermission, true); + child, true); if (addedNode == null) { updateCount(pathComponents, pos, -counts.getNsCount(), -childDiskspace, true); @@ -1698,18 +1732,16 @@ private T addChild(INode[] pathComponents, int pos, } private T addChild(INode[] pathComponents, int pos, - T child, long childDiskspace, boolean inheritPermission) + T child, long childDiskspace) throws QuotaExceededException { - return addChild(pathComponents, pos, child, childDiskspace, - inheritPermission, true); + return addChild(pathComponents, pos, child, childDiskspace, true); } private T addChildNoQuotaCheck(INode[] pathComponents, - int pos, T child, long childDiskspace, boolean inheritPermission) { + int pos, T child, long childDiskspace) { T inode = null; try { - inode = addChild(pathComponents, pos, child, childDiskspace, - inheritPermission, false); + inode = addChild(pathComponents, pos, child, childDiskspace, false); } catch (QuotaExceededException e) { NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e); } @@ -2119,7 +2151,7 @@ INodeSymlink unprotectedSymlink(String path, String target, long modTime, assert hasWriteLock(); INodeSymlink newNode = new INodeSymlink(target, modTime, atime, perm); try { - newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE, false); + newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE); } catch (UnresolvedLinkException e) { /* All UnresolvedLinkExceptions should have been resolved by now, but we * should re-throw them in case that changes so they are not swallowed diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 7f9a8e1626..7f0c997ee9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -261,25 +261,13 @@ int nextChild(byte[] name) { * Add a child inode to the directory. * * @param node INode to insert - * @param inheritPermission inherit permission from parent? * @param setModTime set modification time for the parent node * not needed when replaying the addition and * the parent already has the proper mod time * @return null if the child with this name already exists; * node, otherwise */ - T addChild(final T node, boolean inheritPermission, - boolean setModTime) { - if (inheritPermission) { - FsPermission p = getFsPermission(); - //make sure the permission has wx for the user - if (!p.getUserAction().implies(FsAction.WRITE_EXECUTE)) { - p = new FsPermission(p.getUserAction().or(FsAction.WRITE_EXECUTE), - p.getGroupAction(), p.getOtherAction()); - } - node.setPermission(p); - } - + T addChild(final T node, boolean setModTime) { if (children == null) { children = new ArrayList(DEFAULT_FILES_PER_DIRECTORY); } @@ -297,31 +285,22 @@ T addChild(final T node, boolean inheritPermission, return node; } - /** - * Equivalent to addNode(path, newNode, false). - * @see #addNode(String, INode, boolean) - */ - T addNode(String path, T newNode) - throws FileNotFoundException, UnresolvedLinkException { - return addNode(path, newNode, false); - } /** * Add new INode to the file tree. * Find the parent and insert * * @param path file path * @param newNode INode to be added - * @param inheritPermission If true, copy the parent's permission to newNode. 
* @return null if the node already exists; inserted INode, otherwise * @throws FileNotFoundException if parent does not exist or * @throws UnresolvedLinkException if any path component is a symbolic link * is not a directory. */ - T addNode(String path, T newNode, boolean inheritPermission + T addNode(String path, T newNode ) throws FileNotFoundException, UnresolvedLinkException { byte[][] pathComponents = getPathComponents(path); if(addToParent(pathComponents, newNode, - inheritPermission, true) == null) + true) == null) return null; return newNode; } @@ -338,13 +317,12 @@ T addNode(String path, T newNode, boolean inheritPermission INodeDirectory addToParent( byte[] localname, INode newNode, INodeDirectory parent, - boolean inheritPermission, boolean propagateModTime ) throws FileNotFoundException, UnresolvedLinkException { // insert into the parent children list newNode.name = localname; - if(parent.addChild(newNode, inheritPermission, propagateModTime) == null) + if(parent.addChild(newNode, propagateModTime) == null) return null; return parent; } @@ -380,7 +358,6 @@ INodeDirectory getParent(byte[][] pathComponents) */ INodeDirectory addToParent( byte[][] pathComponents, INode newNode, - boolean inheritPermission, boolean propagateModTime ) throws FileNotFoundException, UnresolvedLinkException { @@ -391,7 +368,7 @@ INodeDirectory addToParent( byte[][] pathComponents, newNode.name = pathComponents[pathLen-1]; // insert into the parent children list INodeDirectory parent = getParent(pathComponents); - if(parent.addChild(newNode, inheritPermission, propagateModTime) == null) + if(parent.addChild(newNode, propagateModTime) == null) return null; return parent; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java index c81ffa326b..cef0a0db87 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java @@ -165,7 +165,7 @@ private void addChildWithName(String name, Class expected) Class generated = null; try { fs.verifyFsLimits(inodes, 1, child); - rootInode.addChild(child, false, false); + rootInode.addChild(child, false); } catch (QuotaExceededException e) { generated = e.getClass(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java index 03657ab347..e65b9009d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java @@ -111,10 +111,18 @@ public void testCreate() throws Exception { FsPermission dirPerm = new FsPermission((short)0777); fs.mkdirs(new Path("/a1/a2/a3"), dirPerm); - checkPermission(fs, "/a1", inheritPerm); - checkPermission(fs, "/a1/a2", inheritPerm); + checkPermission(fs, "/a1", dirPerm); + checkPermission(fs, "/a1/a2", dirPerm); checkPermission(fs, "/a1/a2/a3", dirPerm); + dirPerm = new FsPermission((short)0123); + FsPermission permission = FsPermission.createImmutable( + (short)(dirPerm.toShort() | 0300)); + fs.mkdirs(new Path("/aa/1/aa/2/aa/3"), dirPerm); + checkPermission(fs, "/aa/1", permission); + checkPermission(fs, "/aa/1/aa/2", permission); + checkPermission(fs, "/aa/1/aa/2/aa/3", dirPerm); + FsPermission filePerm = new FsPermission((short)0444); FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm, true, conf.getInt("io.file.buffer.size", 4096), @@ -126,7 +134,7 @@ public void testCreate() throws Exception { checkPermission(fs, "/b1/b2/b3.txt", filePerm); conf.set(FsPermission.UMASK_LABEL, "022"); - FsPermission 
permission = + permission = FsPermission.createImmutable((short)0666); FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission)); FileSystem.create(fs, new Path("/c1/c2.txt"), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index ea15f381cb..08e5d569a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -139,11 +139,11 @@ public void testGetFullPathName() { assertEquals("f", inf.getFullPathName()); assertEquals("", inf.getLocalParentDir()); - dir.addChild(inf, false, false); + dir.addChild(inf, false); assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName()); assertEquals("d", inf.getLocalParentDir()); - root.addChild(dir, false, false); + root.addChild(dir, false); assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName()); assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());