From ede10b8a1f9a4d099c16469f827345cb359cef3d Mon Sep 17 00:00:00 2001
From: Brandon Li
Date: Fri, 13 Sep 2013 04:21:46 +0000
Subject: [PATCH] HDFS-5067 Support symlink operations in NFS gateway.
 Contributed by Brandon Li

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522774 13f79535-47bb-0310-9956-ffa450edef68
---
 .../apache/hadoop/mount/MountResponse.java    |   3 +-
 .../apache/hadoop/nfs/nfs3/IdUserGroup.java   |   4 +
 .../hadoop/nfs/nfs3/Nfs3FileAttributes.java   |  11 +-
 .../nfs/nfs3/request/SYMLINK3Request.java     |   4 +-
 .../nfs/nfs3/response/READLINK3Response.java  |   2 +-
 .../nfs/nfs3/response/SYMLINK3Response.java   |   2 +-
 .../hadoop/hdfs/nfs/nfs3/Nfs3Utils.java       |   7 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java  | 131 +++++++++++++++++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   2 +
 9 files changed, 146 insertions(+), 20 deletions(-)

diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
index a9131e3b50..dd837f5412 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
@@ -40,8 +40,7 @@ public static XDR writeMNTResponse(int status, XDR xdr, int xid,
     RpcAcceptedReply.voidReply(xdr, xid);
     xdr.writeInt(status);
     if (status == MNT_OK) {
-      xdr.writeInt(handle.length);
-      xdr.writeFixedOpaque(handle);
+      xdr.writeVariableOpaque(handle);
       // Only MountV3 returns a list of supported authFlavors
       xdr.writeInt(1);
       xdr.writeInt(AuthFlavor.AUTH_SYS.getValue());
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
index c0be5dd289..e034c66405 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
@@ -149,6 +149,8 @@ synchronized public String getUserName(int uid, String unknown) {
     checkAndUpdateMaps();
     String uname = uidNameMap.get(uid);
     if (uname == null) {
+      LOG.warn("Can't find user name for uid " + uid
+          + ". Use default user name " + unknown);
       uname = unknown;
     }
     return uname;
@@ -158,6 +160,8 @@ synchronized public String getGroupName(int gid, String unknown) {
     checkAndUpdateMaps();
     String gname = gidNameMap.get(gid);
     if (gname == null) {
+      LOG.warn("Can't find group name for gid " + gid
+          + ". Use default group name " + unknown);
       gname = unknown;
     }
     return gname;
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
index 1ba727b447..f8ce1fad88 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
@@ -72,19 +72,18 @@ public String toString() {
   }
 
   public Nfs3FileAttributes() {
-    this(false, 0, (short)0, 0, 0, 0, 0, 0, 0, 0);
+    this(NfsFileType.NFSREG, 0, (short)0, 0, 0, 0, 0, 0, 0, 0);
   }
 
-  public Nfs3FileAttributes(boolean isDir, int nlink, short mode, int uid,
+  public Nfs3FileAttributes(NfsFileType nfsType, int nlink, short mode, int uid,
       int gid, long size, long fsid, long fileid, long mtime, long atime) {
-    this.type = isDir ? NfsFileType.NFSDIR.toValue() : NfsFileType.NFSREG
-        .toValue();
+    this.type = nfsType.toValue();
     this.mode = mode;
-    this.nlink = isDir ? (nlink + 2) : 1;
+    this.nlink = (type == NfsFileType.NFSDIR.toValue()) ? (nlink + 2) : 1;
     this.uid = uid;
     this.gid = gid;
     this.size = size;
-    if(isDir) {
+    if(type == NfsFileType.NFSDIR.toValue()) {
       this.size = getDirSize(nlink);
     }
     this.used = this.size;
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
index 6e9f641cc2..6e74d1aa61 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
@@ -25,9 +25,9 @@
  * SYMLINK3 Request
  */
 public class SYMLINK3Request extends RequestWithHandle {
-  private final String name;
+  private final String name;     // The name of the link
   private final SetAttr3 symAttr;
-  private final String symData;
+  private final String symData;  // It contains the target
 
   public SYMLINK3Request(XDR xdr) throws IOException {
     super(xdr);
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java
index c86f051ece..758895c588 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java
@@ -46,7 +46,7 @@ public XDR send(XDR out, int xid) {
     out.writeBoolean(true); // Attribute follows
     postOpSymlinkAttr.serialize(out);
     if (getStatus() == Nfs3Status.NFS3_OK) {
-      out.writeFixedOpaque(path, path.length);
+      out.writeVariableOpaque(path);
     }
     return out;
   }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java
index 76b937b1e5..a1d245c8ea 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java
@@ -60,9 +60,9 @@ public XDR send(XDR out, int xid) {
     if (this.getStatus() == Nfs3Status.NFS3_OK) {
       out.writeBoolean(true);
       objFileHandle.serialize(out);
+      out.writeBoolean(true);
       objPostOpAttr.serialize(out);
     }
-    out.writeBoolean(true);
     dirWcc.serialize(out);
 
     return out;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
index 592106f5aa..c8509eff6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
@@ -49,7 +49,7 @@ public static String getFileIdPath(long fileId) {
 
   public static HdfsFileStatus getFileStatus(DFSClient client, String fileIdPath)
       throws IOException {
-    return client.getFileInfo(fileIdPath);
+    return client.getFileLinkInfo(fileIdPath);
   }
 
   public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
@@ -59,7 +59,10 @@ public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
      * client takes only the lower 32bit of the fileId and treats it as signed
      * int. When the 32th bit is 1, the client considers it invalid.
      */
-    return new Nfs3FileAttributes(fs.isDir(), fs.getChildrenNum(), fs
+    NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
+    fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
+
+    return new Nfs3FileAttributes(fileType, fs.getChildrenNum(), fs
         .getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
         iug.getGidAllowingUnknown(fs.getGroup()), fs.getLen(), 0 /* fsid */,
         fs.getFileId(), fs.getModificationTime(), fs.getAccessTime());
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index b935119b6b..b6a9b98809 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.nio.ByteBuffer;
@@ -44,6 +45,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.nfs.AccessPrivilege;
 import org.apache.hadoop.nfs.NfsExports;
+import org.apache.hadoop.nfs.NfsFileType;
 import org.apache.hadoop.nfs.NfsTime;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.IdUserGroup;
@@ -65,10 +67,12 @@
 import org.apache.hadoop.nfs.nfs3.request.READ3Request;
 import org.apache.hadoop.nfs.nfs3.request.READDIR3Request;
 import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request;
+import org.apache.hadoop.nfs.nfs3.request.READLINK3Request;
 import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request;
 import org.apache.hadoop.nfs.nfs3.request.RENAME3Request;
 import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request;
 import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request;
+import org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request;
 import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
 import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField;
 import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
@@ -476,9 +480,70 @@ public ACCESS3Response access(XDR xdr,
     }
   }
 
-  public READLINK3Response readlink(XDR xdr,
-      SecurityHandler securityHandler, InetAddress client) {
-    return new READLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP);
+  public READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler,
+      InetAddress client) {
+    READLINK3Response response = new READLINK3Response(Nfs3Status.NFS3_OK);
+
+    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
+      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
+      return response;
+    }
+
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
+    if (dfsClient == null) {
+      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
+      return response;
+    }
+
+    READLINK3Request request = null;
+
+    try {
+      request = new READLINK3Request(xdr);
+    } catch (IOException e) {
+      LOG.error("Invalid READLINK request");
+      return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
+    }
+
+    FileHandle handle = request.getHandle();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("NFS READLINK fileId: " + handle.getFileId());
+    }
+
+    String fileIdPath = Nfs3Utils.getFileIdPath(handle);
+    try {
+      String target = dfsClient.getLinkTarget(fileIdPath);
+
+      Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient,
+          fileIdPath, iug);
+      if (postOpAttr == null) {
+        LOG.info("Can't get path for fileId:" + handle.getFileId());
+        return new READLINK3Response(Nfs3Status.NFS3ERR_STALE);
+      }
+      if (postOpAttr.getType() != NfsFileType.NFSLNK.toValue()) {
+        LOG.error("Not a symlink, fileId:" + handle.getFileId());
+        return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
+      }
+      if (target == null) {
+        LOG.error("Symlink target should not be null, fileId:"
+            + handle.getFileId());
+        return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
+      }
+      if (MAX_READ_TRANSFER_SIZE < target.getBytes().length) {
+        return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr, null);
+      }
+
+      return new READLINK3Response(Nfs3Status.NFS3_OK, postOpAttr,
+          target.getBytes());
+
+    } catch (IOException e) {
+      LOG.warn("Readlink error: " + e.getClass(), e);
+      if (e instanceof FileNotFoundException) {
+        return new READLINK3Response(Nfs3Status.NFS3ERR_STALE);
+      } else if (e instanceof AccessControlException) {
+        return new READLINK3Response(Nfs3Status.NFS3ERR_ACCES);
+      }
+      return new READLINK3Response(Nfs3Status.NFS3ERR_IO);
+    }
   }
 
   @Override
@@ -1121,9 +1186,63 @@ public RENAME3Response rename(XDR xdr,
   }
 
   @Override
-  public SYMLINK3Response symlink(XDR xdr,
-      SecurityHandler securityHandler, InetAddress client) {
-    return new SYMLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP);
+  public SYMLINK3Response symlink(XDR xdr, SecurityHandler securityHandler,
+      InetAddress client) {
+    SYMLINK3Response response = new SYMLINK3Response(Nfs3Status.NFS3_OK);
+
+    if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) {
+      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
+      return response;
+    }
+
+    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
+    if (dfsClient == null) {
+      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
+      return response;
+    }
+
+    SYMLINK3Request request = null;
+    try {
+      request = new SYMLINK3Request(xdr);
+    } catch (IOException e) {
+      LOG.error("Invalid SYMLINK request");
+      response.setStatus(Nfs3Status.NFS3ERR_INVAL);
+      return response;
+    }
+
+    FileHandle dirHandle = request.getHandle();
+    String name = request.getName();
+    String symData = request.getSymData();
+    String linkDirIdPath = Nfs3Utils.getFileIdPath(dirHandle);
+    // Don't do any name check to source path, just leave it to HDFS
+    String linkIdPath = linkDirIdPath + "/" + name;
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("NFS SYMLINK, target: " + symData + " link: " + linkIdPath);
+    }
+
+    try {
+      WccData dirWcc = response.getDirWcc();
+      WccAttr preOpAttr = Nfs3Utils.getWccAttr(dfsClient, linkDirIdPath);
+      dirWcc.setPreOpAttr(preOpAttr);
+
+      dfsClient.createSymlink(symData, linkIdPath, false);
+      // Set symlink attr is considered as to change the attr of the target
+      // file. So no need to set symlink attr here after it's created.
+
+      HdfsFileStatus linkstat = dfsClient.getFileLinkInfo(linkIdPath);
+      Nfs3FileAttributes objAttr = Nfs3Utils.getNfs3FileAttrFromFileStatus(
+          linkstat, iug);
+      dirWcc
+          .setPostOpAttr(Nfs3Utils.getFileAttr(dfsClient, linkDirIdPath, iug));
+
+      return new SYMLINK3Response(Nfs3Status.NFS3_OK, new FileHandle(
+          objAttr.getFileid()), objAttr, dirWcc);
+
+    } catch (IOException e) {
+      LOG.warn("Exception:" + e);
+      response.setStatus(Nfs3Status.NFS3ERR_IO);
+      return response;
+    }
   }
 
   public READDIR3Response link(XDR xdr, SecurityHandler securityHandler,
       InetAddress client) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0627b0ea56..9c79d8dcb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -333,6 +333,8 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-5085. Refactor o.a.h.nfs to support different types of
     authentications. (jing9)
 
+    HDFS-5067 Support symlink operations in NFS gateway. (brandonli)
+
   IMPROVEMENTS
 
     HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may
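
Reviewer note, appended after the patch and not part of it: the new SYMLINK3 and READLINK3 handlers delegate to DFSClient.createSymlink(), getFileLinkInfo() and getLinkTarget(). The sketch below drives the same HDFS symlink primitives through the public FileContext API rather than DFSClient, assuming a cluster and release where HDFS symlinks are enabled; the class name and paths are made up for illustration.

    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Options;
    import org.apache.hadoop.fs.Path;

    public class SymlinkSmokeTest {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS in the loaded configuration points at a running
        // HDFS instance; the paths below are arbitrary examples.
        Configuration conf = new Configuration();
        FileContext fc = FileContext.getFileContext(conf);

        Path target = new Path("/tmp/symlink-target");
        Path link = new Path("/tmp/symlink-link");

        // Create an empty target file (HDFS also allows dangling symlinks).
        fc.create(target, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            Options.CreateOpts.createParent()).close();

        // SYMLINK3 on the gateway side boils down to a createSymlink call.
        fc.createSymlink(target, link, false);

        // READLINK3 resolves the link target; getFileLinkStatus stats the link
        // itself rather than following it, like getFileLinkInfo in the patch.
        FileStatus linkStatus = fc.getFileLinkStatus(link);
        System.out.println("is symlink: " + linkStatus.isSymlink());
        System.out.println("target:     " + fc.getLinkTarget(link));
      }
    }

From an NFS client, the same code paths are reached with plain "ln -s" and "readlink" against the mounted export.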