diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index ca94840973..e025e31efd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -389,6 +389,16 @@ static AclStatus toAclStatus(final Map<?, ?> json) {
     return aclStatusBuilder.build();
   }
 
+  static String getPath(final Map<?, ?> json)
+      throws IOException {
+    if (json == null) {
+      return null;
+    }
+
+    String path = (String) json.get("Path");
+    return path;
+  }
+
   static byte[] getXAttr(final Map<?, ?> json, final String name)
       throws IOException {
     if (json == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index aebd25a5ed..d902738865 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -110,6 +110,7 @@ public class WebHdfsFileSystem extends FileSystem
   protected Text tokenServiceName;
   private RetryPolicy retryPolicy = null;
   private Path workingDir;
+  private Path cachedHomeDirectory;
   private InetSocketAddress nnAddrs[];
   private int currentNNAddrIndex;
   private boolean disallowFallbackToInsecureCluster;
@@ -193,7 +194,7 @@ public synchronized void initialize(URI uri, Configuration conf
           failoverSleepMaxMillis);
     }
 
-    this.workingDir = getHomeDirectory();
+    this.workingDir = makeQualified(new Path(getHomeDirectoryString(ugi)));
     this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
     this.disallowFallbackToInsecureCluster = !conf.getBoolean(
         CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
@@ -267,14 +268,35 @@ protected URI canonicalizeUri(URI uri) {
     return NetUtils.getCanonicalUri(uri, getDefaultPort());
   }
 
-  /** @return the home directory. */
+  /** @return the home directory */
+  @Deprecated
   public static String getHomeDirectoryString(final UserGroupInformation ugi) {
     return "/user/" + ugi.getShortUserName();
   }
 
   @Override
   public Path getHomeDirectory() {
-    return makeQualified(new Path(getHomeDirectoryString(ugi)));
+    if (cachedHomeDirectory == null) {
+      final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY;
+      try {
+        String pathFromDelegatedFS = new FsPathResponseRunner<String>(op, null,
+            new UserParam(ugi)) {
+          @Override
+          String decodeResponse(Map<?, ?> json) throws IOException {
+            return JsonUtilClient.getPath(json);
+          }
+        }.run();
+
+        cachedHomeDirectory = new Path(pathFromDelegatedFS).makeQualified(
+            this.getUri(), null);
+
+      } catch (IOException e) {
+        LOG.error("Unable to get HomeDirectory from original File System", e);
+        cachedHomeDirectory = new Path("/user/" + ugi.getShortUserName())
+            .makeQualified(this.getUri(), null);
+      }
+    }
+    return cachedHomeDirectory;
   }
 
   @Override
@@ -284,12 +306,13 @@ public synchronized Path getWorkingDirectory() {
 
   @Override
   public synchronized void setWorkingDirectory(final Path dir) {
-    String result = makeAbsolute(dir).toUri().getPath();
+    Path absolutePath = makeAbsolute(dir);
+    String result = absolutePath.toUri().getPath();
     if (!DFSUtilClient.isValidName(result)) {
       throw new IllegalArgumentException("Invalid DFS directory name "
           + result);
     }
-    workingDir = makeAbsolute(dir);
+    workingDir = absolutePath;
   }
 
   private Path makeAbsolute(Path f) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b9d9943213..4bd0e8b731 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -938,6 +938,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-4366. Block Replication Policy Implementation May Skip Higher-Priority
     Blocks for Lower-Priority Blocks (Derek Dagit via kihwal)
 
+    HDFS-8542. WebHDFS getHomeDirectory behavior does not match specification.
+    (Kanaka Kumar Avvaru via jghoman)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index d33721c0fd..6e880f0b5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -826,6 +827,8 @@ private Response get(
       final TokenServiceParam tokenService
       ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
+    final Configuration conf = (Configuration) context
+        .getAttribute(JspHelper.CURRENT_CONF);
     final NamenodeProtocols np = getRPCServer(namenode);
 
     switch(op.getValue()) {
@@ -892,11 +895,10 @@ private Response get(
       final String js = JsonUtil.toJsonString(token);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
-    case GETHOMEDIRECTORY:
-    {
-      final String js = JsonUtil.toJsonString(
-          org.apache.hadoop.fs.Path.class.getSimpleName(),
-          WebHdfsFileSystem.getHomeDirectoryString(ugi));
+    case GETHOMEDIRECTORY: {
+      final String js = JsonUtil.toJsonString("Path",
+          FileSystem.get(conf != null ? conf : new Configuration())
+              .getHomeDirectory().toUri().getPath());
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETACLSTATUS: {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 0c963f1c2e..0563f12be6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -18,12 +18,15 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
@@ -557,4 +560,60 @@ public void testWebHdfsOffsetAndLength() throws Exception{
       }
     }
   }
+
+  @Test(timeout = 30000)
+  public void testGetHomeDirectory() throws Exception {
+
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+
+      final URI uri = new URI(WebHdfsConstants.WEBHDFS_SCHEME + "://"
+          + cluster.getHttpUri(0).replace("http://", ""));
+      final Configuration confTemp = new Configuration();
+
+      {
+        WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(uri,
+            confTemp);
+
+        assertEquals(hdfs.getHomeDirectory().toUri().getPath(), webhdfs
+            .getHomeDirectory().toUri().getPath());
+
+        webhdfs.close();
+      }
+
+      {
+        WebHdfsFileSystem webhdfs = createWebHDFSAsTestUser(confTemp, uri,
+            "XXX");
+
+        assertNotEquals(hdfs.getHomeDirectory().toUri().getPath(), webhdfs
+            .getHomeDirectory().toUri().getPath());
+
+        webhdfs.close();
+      }
+
+    } finally {
+      if (cluster != null)
+        cluster.shutdown();
+    }
+  }
+
+  private WebHdfsFileSystem createWebHDFSAsTestUser(final Configuration conf,
+      final URI uri, final String userName) throws Exception {
+
+    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+        userName, new String[] { "supergroup" });
+
+    return ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {
+      @Override
+      public WebHdfsFileSystem run() throws IOException {
+        WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(uri,
+            conf);
+        return webhdfs;
+      }
+    });
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
index bc10bca667..5d2001414e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
@@ -399,7 +399,7 @@ public void testResponseCode() throws IOException {
       final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
       final Map<?, ?> m = WebHdfsTestUtil.connectAndGetJson(
           conn, HttpServletResponse.SC_OK);
-      assertEquals(WebHdfsFileSystem.getHomeDirectoryString(ugi),
+      assertEquals(webhdfs.getHomeDirectory().toUri().getPath(),
           m.get(Path.class.getSimpleName()));
       conn.disconnect();
     }
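
Reviewer note (not part of the patch): a minimal sketch of how a client observes the
changed behavior. The class name and the NameNode HTTP address "localhost:50070" are
assumptions for illustration only; adjust them for the target cluster.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetHomeDirectoryExample {
  public static void main(String[] args) throws Exception {
    // Assumed NameNode HTTP address for a local pseudo-distributed cluster.
    FileSystem webhdfs = FileSystem.get(
        URI.create("webhdfs://localhost:50070"), new Configuration());

    // With this patch, the first call issues a GETHOMEDIRECTORY request and
    // returns the path computed by the server-side FileSystem, instead of the
    // client hardcoding "/user/" + the short user name. The result is cached
    // in the WebHdfsFileSystem instance, so later calls make no further request.
    Path home = webhdfs.getHomeDirectory();
    System.out.println(home);

    webhdfs.close();
  }
}

Per the catch block in WebHdfsFileSystem#getHomeDirectory, an IOException falls back to
the old /user/<shortUserName> default, so a client built with this change still degrades
gracefully against NameNodes that do not serve the delegated path.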