From 676f488efffd50eb47e75cd750f9bc948b9e12fb Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Mon, 10 Oct 2011 01:49:37 +0000
Subject: [PATCH] HDFS-2404. webhdfs liststatus json response is not correct.
 Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1180757 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  8 ++--
 .../web/resources/NamenodeWebHdfsMethods.java | 10 ++--
 .../org/apache/hadoop/hdfs/web/JsonUtil.java  | 46 ++++++++++---------
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    |  8 ++--
 .../apache/hadoop/hdfs/web/TestJsonUtil.java  |  4 +-
 5 files changed, 40 insertions(+), 36 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 090c9d9f3f..5e6d4e9070 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -86,11 +86,11 @@ Trunk (unreleased changes)
     there are two SC_START_IN_CTOR findbugs warnings.  (szetszwo)
 
     HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector,
-    IOExceptions of stream closures can mask root exceptions.  (Uma Maheswara 
+    IOExceptions of stream closures can mask root exceptions.  (Uma Maheswara
     Rao G via szetszwo)
 
     HDFS-46. Change default namespace quota of root directory from
-    Integer.MAX_VALUE to Long.MAX_VALUE.  (Uma Maheswara Rao G via szetszwo) 
+    Integer.MAX_VALUE to Long.MAX_VALUE.  (Uma Maheswara Rao G via szetszwo)
 
     HDFS-2366. Initialize WebHdfsFileSystem.ugi in object construction.
     (szetszwo)
@@ -104,10 +104,12 @@ Trunk (unreleased changes)
     not include multiple methods of the same name.  (cutting)
 
     HDFS-2403. NamenodeWebHdfsMethods.generateDelegationToken(..) does not use
-    the renewer parameter.  (szetszwo) 
+    the renewer parameter.  (szetszwo)
 
     HDFS-2409. _HOST in dfs.web.authentication.kerberos.principal. (jitendra)
 
+    HDFS-2404. webhdfs liststatus json response is not correct. (suresh)
+
 Release 0.23.0 - Unreleased
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 8c27eca658..c552521cf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -424,7 +424,7 @@ public Response run() throws IOException, URISyntaxException {
     case GETFILESTATUS:
     {
       final HdfsFileStatus status = np.getFileInfo(fullpath);
-      final String js = JsonUtil.toJsonString(status);
+      final String js = JsonUtil.toJsonString(status, true);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case LISTSTATUS:
@@ -480,22 +480,22 @@ private static StreamingOutput getListingStream(final NamenodeProtocols np,
       @Override
       public void write(final OutputStream outstream) throws IOException {
         final PrintStream out = new PrintStream(outstream);
-        out.println("{\"" + HdfsFileStatus[].class.getSimpleName() + "\":[");
+        out.println("{\"" + HdfsFileStatus.class.getSimpleName() + "\":[");
 
         final HdfsFileStatus[] partial = first.getPartialListing();
         if (partial.length > 0) {
-          out.print(JsonUtil.toJsonString(partial[0]));
+          out.print(JsonUtil.toJsonString(partial[0], false));
         }
         for(int i = 1; i < partial.length; i++) {
           out.println(',');
-          out.print(JsonUtil.toJsonString(partial[i]));
+          out.print(JsonUtil.toJsonString(partial[i], false));
         }
 
         for(DirectoryListing curr = first; curr.hasMore(); ) {
           curr = getDirectoryListing(np, p, curr.getLastName());
           for(HdfsFileStatus s : curr.getPartialListing()) {
             out.println(',');
-            out.print(JsonUtil.toJsonString(s));
+            out.print(JsonUtil.toJsonString(s, false));
           }
         }
 
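Note on the hunk just above: the getListingStream() change is the heart of
HDFS-2404. The old stream opened the listing under the key "HdfsFileStatus[]",
and each array element was additionally wrapped as {"HdfsFileStatus":{...}},
because toJsonString() nested every status under the class's simple name (the
same key that toFileStatus() unwraps). With the fix, a LISTSTATUS response
body has roughly the shape below. This is a sketch: the field values are
illustrative, line breaks are added for readability, and the key order is
alphabetical because JsonUtil builds each entry as a TreeMap.

    {"HdfsFileStatus":[
      {"accessTime":1318208529890,"blockSize":67108864,"group":"supergroup",
       "isDir":false,"isSymlink":false,"len":1024,"localName":"bar",
       "modificationTime":1318208529890,"owner":"hdfs","permission":"644",
       "replication":3},
      {"accessTime":0,"blockSize":0,"group":"supergroup","isDir":true,
       "isSymlink":false,"len":0,"localName":"baz",
       "modificationTime":1318208529890,"owner":"hdfs","permission":"755",
       "replication":0}
    ]}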
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index adf639c32b..0efc3d2e8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -133,37 +133,39 @@ private static FsPermission toFsPermission(final String s) {
   }
 
   /** Convert a HdfsFileStatus object to a Json string. */
-  public static String toJsonString(final HdfsFileStatus status) {
+  public static String toJsonString(final HdfsFileStatus status,
+      boolean includeType) {
     if (status == null) {
       return null;
-    } else {
-      final Map m = new TreeMap();
-      m.put("localName", status.getLocalName());
-      m.put("isDir", status.isDir());
-      m.put("isSymlink", status.isSymlink());
-      if (status.isSymlink()) {
-        m.put("symlink", status.getSymlink());
-      }
-
-      m.put("len", status.getLen());
-      m.put("owner", status.getOwner());
-      m.put("group", status.getGroup());
-      m.put("permission", toString(status.getPermission()));
-      m.put("accessTime", status.getAccessTime());
-      m.put("modificationTime", status.getModificationTime());
-      m.put("blockSize", status.getBlockSize());
-      m.put("replication", status.getReplication());
-      return toJsonString(HdfsFileStatus.class, m);
     }
+    final Map m = new TreeMap();
+    m.put("localName", status.getLocalName());
+    m.put("isDir", status.isDir());
+    m.put("isSymlink", status.isSymlink());
+    if (status.isSymlink()) {
+      m.put("symlink", status.getSymlink());
+    }
+
+    m.put("len", status.getLen());
+    m.put("owner", status.getOwner());
+    m.put("group", status.getGroup());
+    m.put("permission", toString(status.getPermission()));
+    m.put("accessTime", status.getAccessTime());
+    m.put("modificationTime", status.getModificationTime());
+    m.put("blockSize", status.getBlockSize());
+    m.put("replication", status.getReplication());
+    return includeType ? toJsonString(HdfsFileStatus.class, m) :
+        JSON.toString(m);
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
-  public static HdfsFileStatus toFileStatus(final Map json) {
+  public static HdfsFileStatus toFileStatus(final Map json, boolean includesType) {
     if (json == null) {
       return null;
     }
-    final Map m = (Map)json.get(HdfsFileStatus.class.getSimpleName());
+    final Map m = includesType ?
+        (Map)json.get(HdfsFileStatus.class.getSimpleName()) : json;
     final String localName = (String) m.get("localName");
     final boolean isDir = (Boolean) m.get("isDir");
     final boolean isSymlink = (Boolean) m.get("isSymlink");
@@ -287,7 +289,7 @@ private static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
       return array;
     }
   }
- 
+
   /** Convert a LocatedBlock to a Json map. */
   private static Map toJsonMap(final LocatedBlock locatedblock
       ) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 27d6fe166e..1cd20f1468 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -253,7 +253,7 @@ private FsPermission applyUMask(FsPermission permission) {
   private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
     final Map json = run(op, f);
-    final HdfsFileStatus status = JsonUtil.toFileStatus(json);
+    final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
     if (status == null) {
       throw new FileNotFoundException("File does not exist: " + f);
     }
@@ -405,14 +405,14 @@ public FileStatus[] listStatus(final Path f) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
     final Map json = run(op, f);
     final Object[] array = (Object[])json.get(
-        HdfsFileStatus[].class.getSimpleName());
+        HdfsFileStatus.class.getSimpleName());
 
     //convert FileStatus
     final FileStatus[] statuses = new FileStatus[array.length];
     for(int i = 0; i < array.length; i++) {
       @SuppressWarnings("unchecked")
       final Map m = (Map)array[i];
-      statuses[i] = makeQualified(JsonUtil.toFileStatus(m), f);
+      statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
     }
     return statuses;
   }
@@ -472,4 +472,4 @@ public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
     final Map m = run(op, p);
     return JsonUtil.toMD5MD5CRC32FileChecksum(m);
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index a4b687d5e7..7f6aa36a6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -46,9 +46,9 @@ public void testHdfsFileStatus() {
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status = " + status);
     System.out.println("fstatus = " + fstatus);
-    final String json = JsonUtil.toJsonString(status);
+    final String json = JsonUtil.toJsonString(status, true);
     System.out.println("json = " + json.replace(",", ",\n "));
-    final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map)JSON.parse(json));
+    final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map)JSON.parse(json), true);
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2 = " + s2);
     System.out.println("fs2 = " + fs2);
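Finally, a minimal sketch (not part of the patch; the wrapper class and method
are hypothetical) of how the revised JsonUtil API is meant to be driven,
mirroring the updated TestJsonUtil above. JSON here is assumed to be Jetty's
org.mortbay.util.ajax.JSON, the parser the test itself uses; passing true
matches the GETFILESTATUS type wrapper, and false matches the bare objects
inside the LISTSTATUS array.

    import java.util.Map;

    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
    import org.apache.hadoop.hdfs.web.JsonUtil;
    import org.mortbay.util.ajax.JSON;

    public class JsonUtilRoundTrip {
      /** Round-trip one status with and without the type wrapper. */
      static void roundTrip(final HdfsFileStatus status) {
        // GETFILESTATUS style: the body is {"HdfsFileStatus":{...}},
        // so serialize and parse with the type wrapper included.
        final String wrapped = JsonUtil.toJsonString(status, true);
        final HdfsFileStatus s1 =
            JsonUtil.toFileStatus((Map)JSON.parse(wrapped), true);

        // LISTSTATUS style: each array element is a bare {...} map,
        // so skip the wrapper on both sides.
        final String bare = JsonUtil.toJsonString(status, false);
        final HdfsFileStatus s2 =
            JsonUtil.toFileStatus((Map)JSON.parse(bare), false);

        System.out.println("wrapped = " + wrapped + " -> " + s1);
        System.out.println("bare    = " + bare + " -> " + s2);
      }
    }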