diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4e4b8ca395..0c1c81398c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -462,6 +462,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7536. Remove unused CryptoCodec in org.apache.hadoop.fs.Hdfs.
     (Yi Liu via wheat9)
 
+    HDFS-7528. Consolidate symlink-related implementation into a single class.
+    (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
new file mode 100644
index 0000000000..3380d0af3d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.util.Time.now;
+
+class FSDirSymlinkOp {
+
+  static HdfsFileStatus createSymlinkInt(
+      FSNamesystem fsn, String target, final String linkArg,
+      PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
+      throws IOException {
+    FSDirectory fsd = fsn.getFSDirectory();
+    String link = linkArg;
+    if (!DFSUtil.isValidName(link)) {
+      throw new InvalidPathException("Invalid link name: " + link);
+    }
+    if (FSDirectory.isReservedName(target) || target.isEmpty()) {
+      throw new InvalidPathException("Invalid target name: " + target);
+    }
+
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("DIR* NameSystem.createSymlink: target="
+          + target + " link=" + link);
+    }
+
+    FSPermissionChecker pc = fsn.getPermissionChecker();
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(link);
+    fsd.writeLock();
+    try {
+      link = fsd.resolvePath(pc, link, pathComponents);
+      final INodesInPath iip = fsd.getINodesInPath4Write(link, false);
+      if (!createParent) {
+        fsd.verifyParentDir(iip, link);
+      }
+      if (!fsd.isValidToCreate(link, iip)) {
+        throw new IOException(
+            "failed to create link " + link +
+            " either because the filename is invalid or the file exists");
+      }
+      if (fsd.isPermissionEnabled()) {
+        fsd.checkAncestorAccess(pc, iip, FsAction.WRITE);
+      }
+      // validate that we have enough inodes.
+      fsn.checkFsObjectLimit();
+
+      // add symbolic link to namespace
+      addSymlink(fsd, link, iip, target, dirPerms, createParent, logRetryCache);
+    } finally {
+      fsd.writeUnlock();
+    }
+    NameNode.getNameNodeMetrics().incrCreateSymlinkOps();
+    return fsd.getAuditFileInfo(link, false);
+  }
+
+  static INodeSymlink unprotectedAddSymlink(
+      FSDirectory fsd, INodesInPath iip, long id, String target, long mtime,
+      long atime, PermissionStatus perm)
+      throws UnresolvedLinkException, QuotaExceededException {
+    assert fsd.hasWriteLock();
+    final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
+        target);
+    return fsd.addINode(iip, symlink) ? symlink : null;
+  }
+
+  /**
+   * Add the given symbolic link to the fs. Record it in the edits log.
+   */
+  private static INodeSymlink addSymlink(
+      FSDirectory fsd, String path, INodesInPath iip, String target,
+      PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
+      throws IOException {
+    final long mtime = now();
+    if (createParent) {
+      INodesInPath parentIIP = iip.getParentINodesInPath();
+      if (parentIIP == null || (parentIIP = FSDirMkdirOp.mkdirsRecursively(fsd,
+          parentIIP, dirPerms, true, mtime)) == null) {
+        return null;
+      } else {
+        iip = INodesInPath.append(parentIIP, null, iip.getLastLocalName());
+      }
+    }
+    final String userName = dirPerms.getUserName();
+    long id = fsd.allocateNewInodeId();
+    PermissionStatus perm = new PermissionStatus(
+        userName, null, FsPermission.getDefault());
+    INodeSymlink newNode =
+        unprotectedAddSymlink(fsd, iip, id, target, mtime, mtime, perm);
+    if (newNode == null) {
+      NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
+      return null;
+    }
+    fsd.getEditLog().logSymlink(path, target, mtime, mtime, newNode,
+        logRetryCache);
+
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("addSymlink: " + path + " is added");
+    }
+    return newNode;
+  }
+}
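Note on the new class: FSDirSymlinkOp is package-private, so it is only reachable through FSNamesystem. A minimal client-side sketch of the path it serves, illustrative rather than part of the patch; it assumes symlinks are enabled (branch-2 disables them by default, and tests re-enable them via FileSystem.enableSymlinks()):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    // Symlinks are exposed through FileContext, not DistributedFileSystem.
    // createSymlink() travels through NameNodeRpcServer and FSNamesystem
    // before reaching FSDirSymlinkOp.createSymlinkInt above.
    public static void makeLink(Configuration conf) throws Exception {
      FileContext fc = FileContext.getFileContext(conf);
      // createParent=true exercises the FSDirMkdirOp.mkdirsRecursively branch
      // in addSymlink() above.
      fc.createSymlink(new Path("/user/alice/data"),
          new Path("/user/alice/link"), true);
    }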
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 3f7310a591..9ddc5c0326 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -860,7 +860,7 @@ static String getFullPathName(INode inode) {
    * @param iip the INodesInPath instance containing all the ancestral INodes
    * @throws QuotaExceededException is thrown if it violates quota limit
    */
-  private boolean addINode(INodesInPath iip, INode child)
+  boolean addINode(INodesInPath iip, INode child)
       throws QuotaExceededException, UnresolvedLinkException {
     child.setLocalName(iip.getLastLocalName());
     cacheName(child);
@@ -1192,29 +1192,6 @@ void reset() {
     }
   }
 
-  /**
-   * Add the specified path into the namespace.
-   */
-  INodeSymlink addSymlink(INodesInPath iip, long id, String target,
-      long mtime, long atime, PermissionStatus perm)
-      throws UnresolvedLinkException, QuotaExceededException {
-    writeLock();
-    try {
-      return unprotectedAddSymlink(iip, id, target, mtime, atime, perm);
-    } finally {
-      writeUnlock();
-    }
-  }
-
-  INodeSymlink unprotectedAddSymlink(INodesInPath iip, long id, String target,
-      long mtime, long atime, PermissionStatus perm)
-      throws UnresolvedLinkException, QuotaExceededException {
-    assert hasWriteLock();
-    final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
-        target);
-    return addINode(iip, symlink) ? symlink : null;
-  }
-
   boolean isInAnEZ(INodesInPath iip)
       throws UnresolvedLinkException, SnapshotAccessControlException {
     readLock();
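The visibility change on addINode (private to package-private) is what allows the INodeSymlink construction to move out of FSDirectory: FSDirSymlinkOp lives in the same package and inserts the new inode itself. Sketch of the resulting call relationship, using the names from this patch:

    // In FSDirSymlinkOp.unprotectedAddSymlink: build the inode, then hand it
    // to FSDirectory through the now package-private addINode.
    final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
        target);
    return fsd.addINode(iip, symlink) ? symlink : null;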
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 397cc6072f..9d08d4e3b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -360,12 +360,12 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
       // add to the file tree
       inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
           lastInodeId);
-      newFile = fsDir.unprotectedAddFile(inodeId,
-          iip, addCloseOp.permissions, addCloseOp.aclEntries,
-          addCloseOp.xAttrs,
-          replication, addCloseOp.mtime, addCloseOp.atime,
-          addCloseOp.blockSize, true, addCloseOp.clientName,
-          addCloseOp.clientMachine, addCloseOp.storagePolicyId);
+      newFile = fsDir.unprotectedAddFile(
+          inodeId, iip, addCloseOp.permissions, addCloseOp.aclEntries,
+          addCloseOp.xAttrs, replication, addCloseOp.mtime,
+          addCloseOp.atime, addCloseOp.blockSize, true,
+          addCloseOp.clientName, addCloseOp.clientMachine,
+          addCloseOp.storagePolicyId);
       iip = INodesInPath.replace(iip, iip.length() - 1, newFile);
 
       fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
@@ -587,9 +587,9 @@ fsDir, renameReservedPathsOnUpgrade(timesOp.path, logVersion),
       final String path =
           renameReservedPathsOnUpgrade(symlinkOp.path, logVersion);
       final INodesInPath iip = fsDir.getINodesInPath(path, false);
-      fsDir.unprotectedAddSymlink(iip, inodeId,
-          symlinkOp.value, symlinkOp.mtime, symlinkOp.atime,
-          symlinkOp.permissionStatus);
+      FSDirSymlinkOp.unprotectedAddSymlink(fsDir, iip, inodeId, symlinkOp.value,
+          symlinkOp.mtime, symlinkOp.atime,
+          symlinkOp.permissionStatus);
 
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(symlinkOp.rpcClientId, symlinkOp.rpcCallId);
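The replay hunk calls the new helper with no extra locking because, by the naming convention in this code base, an "unprotected" method requires its caller to already hold the FSDirectory write lock; FSDirSymlinkOp.unprotectedAddSymlink asserts exactly that, and the edit-log loader already runs under that lock. A sketch of the contract as an ordinary caller would have to satisfy it:

    // Locking contract sketch (illustrative): acquire the directory write
    // lock before calling any unprotected* method, release it afterwards.
    fsDir.writeLock();
    try {
      FSDirSymlinkOp.unprotectedAddSymlink(fsDir, iip, inodeId,
          symlinkOp.value, symlinkOp.mtime, symlinkOp.atime,
          symlinkOp.permissionStatus);
    } finally {
      fsDir.writeUnlock();
    }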
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ab4e4aa0b0..b411c67cba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -168,7 +168,6 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -1918,60 +1917,23 @@ void setTimes(String src, long mtime, long atime) throws IOException {
   void createSymlink(String target, String link,
       PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
       throws IOException {
-    if (!DFSUtil.isValidName(link)) {
-      throw new InvalidPathException("Invalid link name: " + link);
-    }
-    if (FSDirectory.isReservedName(target)) {
-      throw new InvalidPathException("Invalid target name: " + target);
-    }
-
-    try {
-      createSymlinkInt(target, link, dirPerms, createParent, logRetryCache);
-    } catch (AccessControlException e) {
-      logAuditEvent(false, "createSymlink", link, target, null);
-      throw e;
-    }
-  }
-
-  private void createSymlinkInt(String target, final String linkArg,
-      PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
-      throws IOException {
-    String link = linkArg;
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.createSymlink: target="
-          + target + " link=" + link);
-    }
-    HdfsFileStatus resultingStat = null;
-    FSPermissionChecker pc = getPermissionChecker();
+    waitForLoadingFSImage();
+    HdfsFileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(link);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot create symlink " + link);
-      link = dir.resolvePath(pc, link, pathComponents);
-      final INodesInPath iip = dir.getINodesInPath4Write(link, false);
-      if (!createParent) {
-        dir.verifyParentDir(iip, link);
-      }
-      if (!dir.isValidToCreate(link, iip)) {
-        throw new IOException("failed to create link " + link
-            +" either because the filename is invalid or the file exists");
-      }
-      if (isPermissionEnabled) {
-        dir.checkAncestorAccess(pc, iip, FsAction.WRITE);
-      }
-      // validate that we have enough inodes.
-      checkFsObjectLimit();
-
-      // add symbolic link to namespace
-      addSymlink(link, iip, target, dirPerms, createParent, logRetryCache);
-      resultingStat = getAuditFileInfo(link, false);
+      auditStat = FSDirSymlinkOp.createSymlinkInt(this, target, link, dirPerms,
+          createParent, logRetryCache);
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "createSymlink", link, target, null);
+      throw e;
     } finally {
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "createSymlink", linkArg, target, resultingStat);
+    logAuditEvent(true, "createSymlink", link, target, auditStat);
   }
 
   /**
@@ -4360,43 +4322,6 @@ private void closeFile(String path, INodeFile file) {
     }
   }
 
-  /**
-   * Add the given symbolic link to the fs. Record it in the edits log.
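The rewritten createSymlink now matches the shape of the other FSDir*Op delegations in this refactoring series (compare FSDirMkdirOp, used above): mutate the namespace under the FSNamesystem write lock, then sync the edit log after the lock is released. Reduced to its skeleton, as a pattern sketch rather than new code:

    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot create symlink " + link);
      auditStat = FSDirSymlinkOp.createSymlinkInt(this, target, link, dirPerms,
          createParent, logRetryCache);
    } finally {
      writeUnlock();
    }
    // logSync() performs I/O, so it deliberately happens outside the lock to
    // keep the critical section short.
    getEditLog().logSync();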
-   */
-  private INodeSymlink addSymlink(String path, INodesInPath iip, String target,
-      PermissionStatus dirPerms,
-      boolean createParent, boolean logRetryCache)
-      throws UnresolvedLinkException, FileAlreadyExistsException,
-      QuotaExceededException, SnapshotAccessControlException, AclException {
-    waitForLoadingFSImage();
-
-    final long modTime = now();
-    if (createParent) {
-      INodesInPath parentIIP = iip.getParentINodesInPath();
-      if (parentIIP == null || (parentIIP = FSDirMkdirOp.mkdirsRecursively(dir,
-          parentIIP, dirPerms, true, modTime)) == null) {
-        return null;
-      } else {
-        iip = INodesInPath.append(parentIIP, null, iip.getLastLocalName());
-      }
-    }
-    final String userName = dirPerms.getUserName();
-    long id = dir.allocateNewInodeId();
-    INodeSymlink newNode = dir.addSymlink(iip, id, target, modTime, modTime,
-        new PermissionStatus(userName, null, FsPermission.getDefault()));
-    if (newNode == null) {
-      NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
-      return null;
-    }
-    getEditLog().logSymlink(path, target, modTime, modTime, newNode,
-        logRetryCache);
-
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("addSymlink: " + path + " is added");
-    }
-    return newNode;
-  }
-
   /**
    * Periodically calls hasAvailableResources of NameNodeResourceChecker, and if
    * there are found to be insufficient resources available, causes the NN to
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index db22c4be81..824094675d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1142,8 +1142,7 @@ public void createSymlink(String target, String link, FsPermission dirPerms,
       return; // Return previous response
     }
 
-    metrics.incrCreateSymlinkOps();
-    /* We enforce the MAX_PATH_LENGTH limit even though a symlink target 
+    /* We enforce the MAX_PATH_LENGTH limit even though a symlink target
      * URI may refer to a non-HDFS file system. 
      */
     if (!checkPathLength(link)) {
@@ -1151,9 +1150,7 @@ public void createSymlink(String target, String link, FsPermission dirPerms,
           " character limit");
     }
 
-    if ("".equals(target)) {
-      throw new IOException("Invalid symlink target");
-    }
+
     final UserGroupInformation ugi = getRemoteUser();
 
     boolean success = false;
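The two removals in this file are relocations, not deletions: the empty-target check reappears as the target.isEmpty() clause in FSDirSymlinkOp.createSymlinkInt (now throwing InvalidPathException instead of a bare IOException), and the incrCreateSymlinkOps() call moves after the successful namespace update, so rejected requests no longer bump the counter. An illustrative expectation, not a test shipped with this patch (fc as in the earlier sketch):

    // A reserved-name target is now rejected by the consolidated check in
    // FSDirSymlinkOp ("Invalid target name: ...") and surfaces to the client
    // as an IOException from the RPC layer.
    try {
      fc.createSymlink(new Path("/.reserved/raw/x"),
          new Path("/user/alice/badLink"), false);
    } catch (IOException e) {
      // expected: rejected server-side before any metrics increment
    }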