From 8cb0d4b380e0fd4437310c1dd6ef8b8995cc383d Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Thu, 27 Oct 2011 23:13:26 +0000 Subject: [PATCH] HDFS-2432. Webhdfs: response FORBIDDEN when setReplication on non-files; clear umask before creating a file; throw IllegalArgumentException if setOwner with both owner and group empty; throw FileNotFoundException if getFileStatus on non-existing files; fix bugs in getBlockLocations; and changed getFileChecksum json response root to "FileChecksum". git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1190077 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++ .../web/resources/DatanodeWebHdfsMethods.java | 15 +++--- .../web/resources/NamenodeWebHdfsMethods.java | 15 +++++- .../org/apache/hadoop/hdfs/web/JsonUtil.java | 8 +-- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 44 +++++++++-------- .../web/TestWebHdfsFileSystemContract.java | 49 +++++++++++++++++++ 6 files changed, 104 insertions(+), 33 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5f8a26daf4..9b18636660 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -1208,6 +1208,12 @@ Release 0.23.0 - Unreleased HDFS-2494. Close the streams and DFSClient in DatanodeWebHdfsMethods. (Uma Maheswara Rao G via szetszwo) + HDFS-2432. Webhdfs: response FORBIDDEN when setReplication on non-files; + clear umask before creating a file; throw IllegalArgumentException if + setOwner with both owner and group empty; throw FileNotFoundException if + getFileStatus on non-existing files; fix bugs in getBlockLocations; and + changed getFileChecksum json response root to "FileChecksum". (szetszwo) + BREAKDOWN OF HDFS-1073 SUBTASKS HDFS-1521. Persist transaction ID on disk between NN restarts. 
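The client-visible effect of these changes can be sketched against the ordinary FileSystem API as follows. This is an illustrative snippet only, not part of the patch; the NameNode address (localhost:50070), the test paths, and the explicit registration of the webhdfs scheme are assumptions made for the example.

import java.io.FileNotFoundException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class WebHdfsBehaviorSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // Register the webhdfs scheme explicitly in case it is not in core-default.xml.
    conf.setClass("fs.webhdfs.impl", WebHdfsFileSystem.class, FileSystem.class);
    final FileSystem fs = FileSystem.get(URI.create("webhdfs://localhost:50070"), conf);

    final Path dir = new Path("/test/dir");
    fs.mkdirs(dir);

    // SETREPLICATION on a directory: the server now answers 403 FORBIDDEN instead
    // of 200 OK; at the FileSystem level the call still returns false.
    final boolean replicationSet = fs.setReplication(dir, (short)1);
    System.out.println("setReplication on a directory: " + replicationSet); // false

    // GETFILESTATUS on a missing path: the server itself now answers 404 NOT_FOUND,
    // which the client surfaces as a FileNotFoundException.
    try {
      fs.getFileStatus(new Path(dir, "non-exist"));
    } catch (FileNotFoundException e) {
      System.out.println("expected: " + e);
    }
  }
}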
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java index 213e857a1c..e8c00ca005 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java @@ -48,6 +48,7 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream; import org.apache.hadoop.hdfs.server.datanode.DataNode; @@ -152,6 +153,8 @@ public Response run() throws IOException, URISyntaxException { { final Configuration conf = new Configuration(datanode.getConf()); final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf); + conf.set(FsPermission.UMASK_LABEL, "000"); + final int b = bufferSize.getValue(conf); DFSClient dfsclient = new DFSClient(nnRpcAddr, conf); FSDataOutputStream out = null; @@ -307,12 +310,12 @@ public Response run() throws IOException { final DataNode datanode = (DataNode)context.getAttribute("datanode"); final Configuration conf = new Configuration(datanode.getConf()); final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf); - final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf); switch(op.getValue()) { case OPEN: { final int b = bufferSize.getValue(conf); + final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf); DFSDataInputStream in = null; try { in = new DFSClient.DFSDataInputStream( @@ -355,13 +358,13 @@ public void write(final OutputStream out) throws IOException { case GETFILECHECKSUM: { MD5MD5CRC32FileChecksum checksum = null; - DFSClient client = dfsclient; + DFSClient dfsclient = new DFSClient(nnRpcAddr, conf); try { - checksum = client.getFileChecksum(fullpath); - client.close(); - client = null; + checksum = dfsclient.getFileChecksum(fullpath); + dfsclient.close(); + dfsclient = null; } finally { - IOUtils.cleanup(LOG, client); + IOUtils.cleanup(LOG, dfsclient); } final String js = JsonUtil.toJsonString(checksum); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index c3f771279a..bebd84d849 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -42,7 +42,9 @@ import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; import javax.ws.rs.core.StreamingOutput; +import javax.ws.rs.core.Response.Status; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -318,10 +320,15 @@ public Response run() throws IOException, URISyntaxException { { final boolean b = np.setReplication(fullpath, replication.getValue(conf)); 
final String js = JsonUtil.toJsonString("boolean", b); - return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); + final ResponseBuilder r = b? Response.ok(): Response.status(Status.FORBIDDEN); + return r.entity(js).type(MediaType.APPLICATION_JSON).build(); } case SETOWNER: { + if (owner.getValue() == null && group.getValue() == null) { + throw new IllegalArgumentException("Both owner and group are empty."); + } + np.setOwner(fullpath, owner.getValue(), group.getValue()); return Response.ok().type(MediaType.APPLICATION_JSON).build(); } @@ -487,13 +494,17 @@ public Response run() throws IOException, URISyntaxException { final long offsetValue = offset.getValue(); final Long lengthValue = length.getValue(); final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath, - offsetValue, lengthValue != null? lengthValue: offsetValue + 1); + offsetValue, lengthValue != null? lengthValue: Long.MAX_VALUE); final String js = JsonUtil.toJsonString(locatedblocks); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } case GETFILESTATUS: { final HdfsFileStatus status = np.getFileInfo(fullpath); + if (status == null) { + throw new FileNotFoundException("File does not exist: " + fullpath); + } + final String js = JsonUtil.toJsonString(status, true); return Response.ok(js).type(MediaType.APPLICATION_JSON).build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index 9c5774d49b..d166d63a98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -27,6 +27,7 @@ import java.util.TreeMap; import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtil; @@ -334,7 +335,7 @@ private static Object[] toJsonArray(final List array } else { final Object[] a = new Object[array.size()]; for(int i = 0; i < array.size(); i++) { - a[i] = toJsonMap(array.get(0)); + a[i] = toJsonMap(array.get(i)); } return a; } @@ -436,7 +437,7 @@ public static String toJsonString(final MD5MD5CRC32FileChecksum checksum) { m.put("algorithm", checksum.getAlgorithmName()); m.put("length", checksum.getLength()); m.put("bytes", StringUtils.byteToHexString(checksum.getBytes())); - return toJsonString(MD5MD5CRC32FileChecksum.class, m); + return toJsonString(FileChecksum.class, m); } /** Convert a Json map to a MD5MD5CRC32FileChecksum. 
*/ @@ -446,8 +447,7 @@ public static MD5MD5CRC32FileChecksum toMD5MD5CRC32FileChecksum( return null; } - final Map m = (Map)json.get( - MD5MD5CRC32FileChecksum.class.getSimpleName()); + final Map m = (Map)json.get(FileChecksum.class.getSimpleName()); final String algorithm = (String)m.get("algorithm"); final int length = (int)(long)(Long)m.get("length"); final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes")); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 8f592dc769..14e50e1c42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -154,19 +154,18 @@ private Path makeAbsolute(Path f) { return f.isAbsolute()? f: new Path(workingDir, f); } - @SuppressWarnings("unchecked") - private static T jsonParse(final InputStream in) throws IOException { + private static Map jsonParse(final InputStream in) throws IOException { if (in == null) { throw new IOException("The input stream is null."); } - return (T)JSON.parse(new InputStreamReader(in)); + return (Map)JSON.parse(new InputStreamReader(in)); } - private static void validateResponse(final HttpOpParam.Op op, + private static Map validateResponse(final HttpOpParam.Op op, final HttpURLConnection conn) throws IOException { final int code = conn.getResponseCode(); if (code != op.getExpectedHttpResponseCode()) { - final Map m; + final Map m; try { m = jsonParse(conn.getErrorStream()); } catch(IOException e) { @@ -175,6 +174,10 @@ private static void validateResponse(final HttpOpParam.Op op, + ", message=" + conn.getResponseMessage(), e); } + if (m.get(RemoteException.class.getSimpleName()) == null) { + return m; + } + final RemoteException re = JsonUtil.toRemoteException(m); throw re.unwrapRemoteException(AccessControlException.class, DSQuotaExceededException.class, @@ -185,6 +188,7 @@ private static void validateResponse(final HttpOpParam.Op op, NSQuotaExceededException.class, UnresolvedPathException.class); } + return null; } URL toUrl(final HttpOpParam.Op op, final Path fspath, @@ -235,15 +239,15 @@ private HttpURLConnection httpConnect(final HttpOpParam.Op op, final Path fspath * @param op http operation * @param fspath file system path * @param parameters parameters for the operation - * @return a JSON object, e.g. Object[], Map, etc. + * @return a JSON object, e.g. Object[], Map, etc. * @throws IOException */ - private T run(final HttpOpParam.Op op, final Path fspath, + private Map run(final HttpOpParam.Op op, final Path fspath, final Param... parameters) throws IOException { final HttpURLConnection conn = httpConnect(op, fspath, parameters); - validateResponse(op, conn); try { - return WebHdfsFileSystem.jsonParse(conn.getInputStream()); + final Map m = validateResponse(op, conn); + return m != null? 
m: jsonParse(conn.getInputStream()); } finally { conn.disconnect(); } @@ -258,7 +262,7 @@ private FsPermission applyUMask(FsPermission permission) { private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException { final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS; - final Map json = run(op, f); + final Map json = run(op, f); final HdfsFileStatus status = JsonUtil.toFileStatus(json, true); if (status == null) { throw new FileNotFoundException("File does not exist: " + f); @@ -284,7 +288,7 @@ private FileStatus makeQualified(HdfsFileStatus f, Path parent) { public boolean mkdirs(Path f, FsPermission permission) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.MKDIRS; - final Map json = run(op, f, + final Map json = run(op, f, new PermissionParam(applyUMask(permission))); return (Boolean)json.get("boolean"); } @@ -293,7 +297,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { public boolean rename(final Path src, final Path dst) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.RENAME; - final Map json = run(op, src, + final Map json = run(op, src, new DestinationParam(makeQualified(dst).toUri().getPath())); return (Boolean)json.get("boolean"); } @@ -333,8 +337,7 @@ public boolean setReplication(final Path p, final short replication ) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION; - final Map json = run(op, p, - new ReplicationParam(replication)); + final Map json = run(op, p, new ReplicationParam(replication)); return (Boolean)json.get("boolean"); } @@ -403,7 +406,7 @@ public FSDataOutputStream append(final Path f, final int bufferSize, @Override public boolean delete(Path f, boolean recursive) throws IOException { final HttpOpParam.Op op = DeleteOpParam.Op.DELETE; - final Map json = run(op, f, new RecursiveParam(recursive)); + final Map json = run(op, f, new RecursiveParam(recursive)); return (Boolean)json.get("boolean"); } @@ -428,8 +431,7 @@ public FileStatus[] listStatus(final Path f) throws IOException { //convert FileStatus final FileStatus[] statuses = new FileStatus[array.length]; for(int i = 0; i < array.length; i++) { - @SuppressWarnings("unchecked") - final Map m = (Map)array[i]; + final Map m = (Map)array[i]; statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f); } return statuses; @@ -439,7 +441,7 @@ public FileStatus[] listStatus(final Path f) throws IOException { public Token getDelegationToken(final String renewer ) throws IOException { final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN; - final Map m = run(op, null, new RenewerParam(renewer)); + final Map m = run(op, null, new RenewerParam(renewer)); final Token token = JsonUtil.toDelegationToken(m); token.setService(new Text(getCanonicalServiceName())); return token; @@ -467,7 +469,7 @@ public BlockLocation[] getFileBlockLocations(final Path p, statistics.incrementReadOps(1); final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS; - final Map m = run(op, p, new OffsetParam(offset), + final Map m = run(op, p, new OffsetParam(offset), new LengthParam(length)); return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m)); } @@ -477,7 +479,7 @@ public ContentSummary getContentSummary(final Path p) throws IOException { statistics.incrementReadOps(1); final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY; - final Map m = run(op, p); + final Map m = run(op, p); return JsonUtil.toContentSummary(m); } @@ -487,7 
+489,7 @@ public MD5MD5CRC32FileChecksum getFileChecksum(final Path p statistics.incrementReadOps(1); final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM; - final Map m = run(op, p); + final Map m = run(op, p); return JsonUtil.toMD5MD5CRC32FileChecksum(m); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java index ed3f0cd33a..7d990bded5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java @@ -27,6 +27,8 @@ import java.net.URL; import java.security.PrivilegedExceptionAction; +import javax.servlet.http.HttpServletResponse; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataInputStream; @@ -39,6 +41,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.web.resources.GetOpParam; +import org.apache.hadoop.hdfs.web.resources.HttpOpParam; import org.apache.hadoop.hdfs.web.resources.PutOpParam; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -257,4 +260,50 @@ public void testRootDir() throws IOException { WebHdfsFileSystem.LOG.info("This is expected.", e); } } + + public void testResponseCode() throws IOException { + final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs; + final Path dir = new Path("/test/testUrl"); + assertTrue(webhdfs.mkdirs(dir)); + + {//test set owner with empty parameters + final URL url = webhdfs.toUrl(PutOpParam.Op.SETOWNER, dir); + final HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.connect(); + assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode()); + conn.disconnect(); + } + + {//test set replication on a directory + final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION; + final URL url = webhdfs.toUrl(op, dir); + final HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod(op.getType().toString()); + conn.connect(); + assertEquals(HttpServletResponse.SC_FORBIDDEN, conn.getResponseCode()); + + assertFalse(webhdfs.setReplication(dir, (short)1)); + conn.disconnect(); + } + + {//test get file status for a non-exist file. + final Path p = new Path(dir, "non-exist"); + final URL url = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, p); + final HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.connect(); + assertEquals(HttpServletResponse.SC_NOT_FOUND, conn.getResponseCode()); + conn.disconnect(); + } + + {//test set permission with empty parameters + final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION; + final URL url = webhdfs.toUrl(op, dir); + final HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + conn.setRequestMethod(op.getType().toString()); + conn.connect(); + assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode()); + assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort()); + conn.disconnect(); + } + } }
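For reference, a minimal sketch of a GETFILECHECKSUM response body after the JSON root element is changed from "MD5MD5CRC32FileChecksum" to "FileChecksum", parsed with the same Jetty JSON utility that WebHdfsFileSystem uses. The algorithm, length, and bytes values below are made-up placeholders, not output from a real cluster.

import java.util.Map;
import org.mortbay.util.ajax.JSON;

public class FileChecksumJsonSketch {
  public static void main(String[] args) {
    // The root key now matches FileChecksum.class.getSimpleName().
    final String body = "{\"FileChecksum\":{"
        + "\"algorithm\":\"MD5-of-1MD5-of-512CRC32\","
        + "\"length\":28,"
        + "\"bytes\":\"0123456789abcdef0123456789abcdef0123456789abcdef01234567\"}}";

    final Map<?, ?> json = (Map<?, ?>)JSON.parse(body);
    final Map<?, ?> m = (Map<?, ?>)json.get("FileChecksum");  // was "MD5MD5CRC32FileChecksum"
    System.out.println("algorithm = " + m.get("algorithm"));
    System.out.println("length    = " + m.get("length"));
    System.out.println("bytes     = " + m.get("bytes"));
  }
}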