From 4bca22005f48f426b9bc7cf36d435ead470a2590 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Thu, 9 Aug 2012 22:52:56 +0000 Subject: [PATCH 1/7] HADOOP-8681. add support for HTTPS to the web UIs. (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1371525 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../fs/CommonConfigurationKeysPublic.java | 3 + .../org/apache/hadoop/http/HttpConfig.java | 48 ++++++++ .../org/apache/hadoop/http/HttpServer.java | 32 ++++- .../apache/hadoop/security/SecurityUtil.java | 19 ++- .../src/main/resources/core-default.xml | 10 ++ .../apache/hadoop/http/TestSSLHttpServer.java | 114 ++++++++++++++++++ .../server/datanode/DatanodeJspHelper.java | 23 ++-- .../server/namenode/ClusterJspHelper.java | 5 +- .../server/namenode/NamenodeJspHelper.java | 6 +- .../hdfs/server/namenode/TransferFsImage.java | 5 +- .../org/apache/hadoop/hdfs/tools/DFSck.java | 3 +- .../mapreduce/v2/app/webapp/JobBlock.java | 7 +- .../mapreduce/v2/app/webapp/NavBlock.java | 4 +- .../mapreduce/v2/app/webapp/TaskPage.java | 7 +- .../v2/app/webapp/dao/AMAttemptInfo.java | 3 +- .../hadoop/mapreduce/util/HostUtil.java | 7 +- .../mapreduce/v2/hs/webapp/HsJobBlock.java | 15 +-- .../mapreduce/v2/hs/webapp/HsTaskPage.java | 3 +- .../hadoop/mapred/ClientServiceDelegate.java | 3 +- .../rmapp/attempt/RMAppAttemptImpl.java | 7 +- .../resourcemanager/webapp/AppBlock.java | 4 +- .../resourcemanager/webapp/NodesPage.java | 4 +- .../webapp/dao/AppAttemptInfo.java | 4 +- .../resourcemanager/webapp/dao/AppInfo.java | 9 +- .../yarn/server/webproxy/ProxyUriUtils.java | 5 +- .../amfilter/AmFilterInitializer.java | 4 +- 27 files changed, 295 insertions(+), 61 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index adfcf30785..4d4857a55a 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -213,6 +213,8 @@ Branch-2 ( Unreleased changes ) HADOOP-8644. AuthenticatedURL should be able to use SSLFactory. (tucu) + HADOOP-8681. add support for HTTPS to the web UIs. (tucu) + IMPROVEMENTS HADOOP-8340. 
SNAPSHOT build versions should compare as less than their eventual diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 3f16de916f..81d1aa721a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -239,5 +239,8 @@ public class CommonConfigurationKeysPublic { public static final String HADOOP_SECURITY_AUTH_TO_LOCAL = "hadoop.security.auth_to_local"; + public static final String HADOOP_SSL_ENABLED_KEY = "hadoop.ssl.enabled"; + public static final boolean HADOOP_SSL_ENABLED_DEFAULT = false; + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java new file mode 100644 index 0000000000..4ee2f5582f --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.http; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; + +/** + * Singleton to get access to Http related configuration. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class HttpConfig { + private static boolean sslEnabled; + + static { + Configuration conf = new Configuration(); + sslEnabled = conf.getBoolean( + CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, + CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT); + } + + public static boolean isSecure() { + return sslEnabled; + } + + public static String getSchemePrefix() { + return (isSecure()) ? 
"https://" : "http://"; + } + +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index 2f693b4714..de265725c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -24,12 +24,14 @@ import java.net.BindException; import java.net.InetSocketAddress; import java.net.URL; +import java.security.GeneralSecurityException; import java.util.ArrayList; import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.net.ssl.SSLServerSocketFactory; import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -56,6 +58,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.ReflectionUtils; import org.mortbay.io.Buffer; import org.mortbay.jetty.Connector; @@ -105,6 +108,7 @@ public class HttpServer implements FilterContainer { private AccessControlList adminsAcl; + private SSLFactory sslFactory; protected final Server webServer; protected final Connector listener; protected final WebAppContext webAppContext; @@ -208,7 +212,23 @@ public HttpServer(String name, String bindAddress, int port, if(connector == null) { listenerStartedExternally = false; - listener = createBaseListener(conf); + if (HttpConfig.isSecure()) { + sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf); + try { + sslFactory.init(); + } catch (GeneralSecurityException ex) { + throw new IOException(ex); + } + SslSocketConnector sslListener = new SslSocketConnector() { + @Override + protected SSLServerSocketFactory createFactory() throws Exception { + return sslFactory.createSSLServerSocketFactory(); + } + }; + listener = sslListener; + } else { + listener = createBaseListener(conf); + } listener.setHost(bindAddress); listener.setPort(port); } else { @@ -720,6 +740,16 @@ public void stop() throws Exception { exception = addMultiException(exception, e); } + try { + if (sslFactory != null) { + sslFactory.destroy(); + } + } catch (Exception e) { + LOG.error("Error while destroying the SSLFactory" + + webAppContext.getDisplayName(), e); + exception = addMultiException(exception, e); + } + try { // clear & stop webAppContext attributes to avoid memory leaks. 
webAppContext.clearAttributes(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java index 8189cfdb27..2f65892db7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java @@ -40,10 +40,12 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; @@ -65,12 +67,23 @@ public class SecurityUtil { static boolean useIpForTokenService; @VisibleForTesting static HostResolver hostResolver; - + + private static SSLFactory sslFactory; + static { - boolean useIp = new Configuration().getBoolean( + Configuration conf = new Configuration(); + boolean useIp = conf.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT); setTokenServiceUseIp(useIp); + if (HttpConfig.isSecure()) { + sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf); + try { + sslFactory.init(); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } } /** @@ -456,7 +469,7 @@ public static URLConnection openSecureHttpConnection(URL url) throws IOException AuthenticatedURL.Token token = new AuthenticatedURL.Token(); try { - return new AuthenticatedURL().openConnection(url, token); + return new AuthenticatedURL(null, sslFactory).openConnection(url, token); } catch (AuthenticationException e) { throw new IOException("Exception trying to open authenticated connection to " + url, e); diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 93b11c008b..25d5798de9 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -1073,4 +1073,14 @@ + + hadoop.ssl.enabled + false + + Whether to use SSL for the HTTP endpoints. If set to true, the + NameNode, DataNode, ResourceManager, NodeManager, HistoryServer and + MapReduceAppMaster web UIs will be served over HTTPS instead HTTP. + + + diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java new file mode 100644 index 0000000000..f5ab957225 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.http; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.security.ssl.SSLFactory; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import javax.net.ssl.HttpsURLConnection; +import java.io.File; +import java.io.FileWriter; +import java.io.InputStream; +import java.io.Writer; +import java.net.URL; + +/** + * This testcase issues SSL certificates configures the HttpServer to serve + * HTTPS using the created certficates and calls an echo servlet using the + * corresponding HTTPS URL. + */ +public class TestSSLHttpServer extends HttpServerFunctionalTest { + private static final String BASEDIR = + System.getProperty("test.build.dir", "target/test-dir") + "/" + + TestSSLHttpServer.class.getSimpleName(); + + static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class); + private static HttpServer server; + private static URL baseUrl; + + @Before + public void setup() throws Exception { + File base = new File(BASEDIR); + FileUtil.fullyDelete(base); + base.mkdirs(); + String classpathDir = + KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class); + Configuration conf = new Configuration(); + String keystoresDir = new File(BASEDIR).getAbsolutePath(); + String sslConfsDir = + KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class); + KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, false); + conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, true); + + //we do this trick because the MR AppMaster is started in another VM and + //the HttpServer configuration is not loaded from the job.xml but from the + //site.xml files in the classpath + Writer writer = new FileWriter(classpathDir + "/core-site.xml"); + conf.writeXml(writer); + writer.close(); + + conf.setInt(HttpServer.HTTP_MAX_THREADS, 10); + server = createServer("test", conf); + server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class); + server.start(); + baseUrl = new URL("https://localhost:" + server.getPort() + "/"); + LOG.info("HTTP server started: "+ baseUrl); + } + + @After + public void cleanup() throws Exception { + server.stop(); + String classpathDir = + KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class); + new File(classpathDir + "/core-site.xml").delete(); + } + + + @Test + public void testEcho() throws Exception { + assertEquals("a:b\nc:d\n", + readOut(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc<:d\ne:>\n", + readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); + } + + private static String readOut(URL url) throws Exception { + StringBuilder out = new StringBuilder(); + HttpsURLConnection conn = (HttpsURLConnection) url.openConnection(); + SSLFactory sslf = new 
SSLFactory(SSLFactory.Mode.CLIENT, new Configuration()); + sslf.init(); + conn.setSSLSocketFactory(sslf.createSSLSocketFactory()); + InputStream in = conn.getInputStream(); + byte[] buffer = new byte[64 * 1024]; + int len = in.read(buffer); + while (len > 0) { + out.append(new String(buffer, 0, len)); + len = in.read(buffer); + } + return out.toString(); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java index e09440efe5..d19e54e112 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java @@ -19,7 +19,6 @@ import java.io.File; import java.io.IOException; -import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URL; import java.net.URLEncoder; @@ -37,7 +36,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -45,6 +43,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.JspHelper; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -140,7 +139,7 @@ static void generateDirectoryStructure(JspWriter out, DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf); String fqdn = canonicalize(chosenNode.getIpAddr()); int datanodePort = chosenNode.getXferPort(); - String redirectLocation = "http://" + fqdn + ":" + String redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + chosenNode.getInfoPort() + "/browseBlock.jsp?blockId=" + firstBlock.getBlock().getBlockId() + "&blockSize=" + firstBlock.getBlock().getNumBytes() + "&genstamp=" @@ -220,7 +219,7 @@ static void generateDirectoryStructure(JspWriter out, JspHelper.addTableFooter(out); } } - out.print("
Go back to DFS home"); dfs.close(); @@ -296,7 +295,7 @@ static void generateFileDetails(JspWriter out, Long.MAX_VALUE).getLocatedBlocks(); // Add the various links for looking at the file contents // URL for downloading the full file - String downloadUrl = "http://" + req.getServerName() + ":" + String downloadUrl = HttpConfig.getSchemePrefix() + req.getServerName() + ":" + req.getServerPort() + "/streamFile" + ServletUtil.encodePath(filename) + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr, true) + JspHelper.getDelegationTokenUrlParam(tokenString); @@ -314,7 +313,7 @@ static void generateFileDetails(JspWriter out, return; } String fqdn = canonicalize(chosenNode.getIpAddr()); - String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort() + String tailUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + chosenNode.getInfoPort() + "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8") + "&namenodeInfoPort=" + namenodeInfoPort + "&chunkSizeToView=" + chunkSizeToView @@ -363,7 +362,7 @@ static void generateFileDetails(JspWriter out, String datanodeAddr = locs[j].getXferAddr(); datanodePort = locs[j].getXferPort(); fqdn = canonicalize(locs[j].getIpAddr()); - String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort() + String blockUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + locs[j].getInfoPort() + "/browseBlock.jsp?blockId=" + blockidstring + "&blockSize=" + blockSize + "&filename=" + URLEncoder.encode(filename, "UTF-8") @@ -374,7 +373,7 @@ static void generateFileDetails(JspWriter out, + JspHelper.getDelegationTokenUrlParam(tokenString) + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr); - String blockInfoUrl = "http://" + nnCanonicalName + ":" + String blockInfoUrl = HttpConfig.getSchemePrefix() + nnCanonicalName + ":" + namenodeInfoPort + "/block_info_xml.jsp?blockId=" + blockidstring; out.print(" " @@ -385,7 +384,7 @@ static void generateFileDetails(JspWriter out, } out.println(""); out.print("
"); - out.print("
Go back to DFS home"); dfs.close(); @@ -485,7 +484,7 @@ static void generateFileChunks(JspWriter out, HttpServletRequest req, String parent = new File(filename).getParent(); JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, parent, nnAddr); out.print("
"); - out.print(" localPaths, Storage dstStorage, boolean getChecksum) throws IOException { - String str = "http://" + nnHostPort + "/getimage?" + queryString; + String str = HttpConfig.getSchemePrefix() + nnHostPort + "/getimage?" + + queryString; LOG.info("Opening connection to " + str); // // open connection to remote server diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java index 7124876aba..566d77a5fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; @@ -226,7 +227,7 @@ private String getCurrentNamenodeAddress() throws IOException { } private int doWork(final String[] args) throws IOException { - final StringBuilder url = new StringBuilder("http://"); + final StringBuilder url = new StringBuilder(HttpConfig.getSchemePrefix()); String namenodeAddress = getCurrentNamenodeAddress(); if (namenodeAddress == null) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java index c0d7de0f64..6b80c8c7d0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java @@ -20,7 +20,6 @@ import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID; import static org.apache.hadoop.yarn.util.StringHelper.join; -import static org.apache.hadoop.yarn.util.StringHelper.ujoin; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD; @@ -31,6 +30,7 @@ import java.util.Date; import java.util.List; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.app.AppContext; @@ -40,8 +40,6 @@ import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; @@ -106,7 +104,8 @@ public class JobBlock extends HtmlBlock { table.tr(). td(String.valueOf(attempt.getAttemptId())). td(new Date(attempt.getStartTime()).toString()). 
- td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()), + td().a(".nodelink", url(HttpConfig.getSchemePrefix(), + attempt.getNodeHttpAddress()), attempt.getNodeHttpAddress())._(). td().a(".logslink", url(attempt.getLogsLink()), "logs")._(). diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java index 56a0a2f4c0..941b7b0b96 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; @@ -62,7 +63,8 @@ public class NavBlock extends HtmlBlock { li().a(url("conf", jobid), "Configuration")._(). li().a(url("tasks", jobid, "m"), "Map tasks")._(). li().a(url("tasks", jobid, "r"), "Reduce tasks")._(). - li().a(".logslink", url("http://", nodeHttpAddress, "node", + li().a(".logslink", url(HttpConfig.getSchemePrefix(), + nodeHttpAddress, "node", "containerlogs", thisAmInfo.getContainerId().toString(), app.getJob().getUserName()), "AM Logs")._()._(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java index e83a957158..90f082a231 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java @@ -27,6 +27,7 @@ import java.util.Collection; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; import org.apache.hadoop.util.StringUtils; @@ -93,13 +94,15 @@ protected void render(Block html) { nodeTd._("N/A"); } else { nodeTd. - a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr); + a(".nodelink", url(HttpConfig.getSchemePrefix(), + nodeHttpAddr), nodeHttpAddr); } nodeTd._(); if (containerId != null) { String containerIdStr = ta.getAssignedContainerIdStr(); row.td(). 
- a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs", + a(".logslink", url(HttpConfig.getSchemePrefix(), + nodeHttpAddr, "node", "containerlogs", containerIdStr, app.getJob().getUserName()), "logs")._(); } else { row.td()._("N/A")._(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java index 96e2f1d4eb..8dcb7c5bf2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java @@ -24,6 +24,7 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; @@ -63,7 +64,7 @@ public AMAttemptInfo(AMInfo amInfo, String jobId, String user) { ContainerId containerId = amInfo.getContainerId(); if (containerId != null) { this.containerId = containerId.toString(); - this.logsLink = join("http://" + nodeHttpAddress, + this.logsLink = join(HttpConfig.getSchemePrefix() + nodeHttpAddress, ujoin("node", "containerlogs", this.containerId, user)); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java index 83bbbe9239..0a42bb73a2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.http.HttpConfig; @Private @Unstable @@ -33,9 +34,9 @@ public class HostUtil { * @return the taskLogUrl */ public static String getTaskLogUrl(String taskTrackerHostName, - String httpPort, String taskAttemptID) { - return ("http://" + taskTrackerHostName + ":" + httpPort - + "/tasklog?attemptid=" + taskAttemptID); + String httpPort, String taskAttemptID) { + return (HttpConfig.getSchemePrefix() + taskTrackerHostName + ":" + + httpPort + "/tasklog?attemptid=" + taskAttemptID); } public static String convertTrackerNameToHostName(String trackerName) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java index b21218e822..25b22f0d2a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java @@ -21,28 +21,18 @@ import com.google.inject.Inject; import java.util.Date; import java.util.List; -import java.util.Map; -import org.apache.hadoop.mapreduce.JobACL; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; -import org.apache.hadoop.mapreduce.v2.api.records.JobReport; -import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; -import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; -import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; -import org.apache.hadoop.mapreduce.v2.app.job.Task; -import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo; import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo; import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; -import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.ResponseInfo; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; @@ -142,7 +132,8 @@ public class HsJobBlock extends HtmlBlock { table.tr((odd = !odd) ? _ODD : _EVEN). td(String.valueOf(attempt.getAttemptId())). td(new Date(attempt.getStartTime()).toString()). - td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()), + td().a(".nodelink", url(HttpConfig.getSchemePrefix(), + attempt.getNodeHttpAddress()), attempt.getNodeHttpAddress())._(). td().a(".logslink", url(attempt.getShortLogsLink()), "logs")._(). diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java index 5e4b701b30..9807b1f4a9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java @@ -29,6 +29,7 @@ import java.util.Collection; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; @@ -143,7 +144,7 @@ protected void render(Block html) { td.br().$title(String.valueOf(sortId))._(). // sorting _(taid)._().td(ta.getState().toString()).td().a(".nodelink", - "http://"+ nodeHttpAddr, + HttpConfig.getSchemePrefix()+ nodeHttpAddr, nodeRackName + "/" + nodeHttpAddr); td._(); row.td(). 
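Note: the hunks above all apply the same mechanical change — every hard-coded "http://" prefix in the web UI and JSP helper code is replaced by HttpConfig.getSchemePrefix(), so generated links become https:// URLs when hadoop.ssl.enabled is true. A minimal sketch of the resulting pattern follows (buildNodeLink and the host, port, and path values are made up for illustration; only HttpConfig.getSchemePrefix() comes from this patch):

import org.apache.hadoop.http.HttpConfig;

public class SchemePrefixExample {
  // Builds a scheme-aware link the same way the JSP helpers above do:
  // "http://..." when hadoop.ssl.enabled is false, "https://..." when true.
  static String buildNodeLink(String host, int infoPort, String path) {
    return HttpConfig.getSchemePrefix() + host + ":" + infoPort + path;
  }

  public static void main(String[] args) {
    // Hypothetical datanode address, for illustration only.
    System.out.println(buildNodeLink("datanode-1.example.com", 50075, "/browseDirectory.jsp"));
  }
}

Since HttpConfig reads hadoop.ssl.enabled once in a static initializer, the scheme is fixed per JVM rather than per request.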
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java index 19d83a8190..f2eb71c2e9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java @@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.mapreduce.MRJobConfig; @@ -393,7 +394,7 @@ public JobStatus getJobStatus(JobID oldJobID) throws IOException { String url = StringUtils.isNotEmpty(historyTrackingUrl) ? historyTrackingUrl : trackingUrl; if (!UNAVAILABLE.equals(url)) { - url = "http://" + url; + url = HttpConfig.getSchemePrefix() + url; } jobStatus = TypeConverter.fromYarn(report, url); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index f7a9f92567..f4187f483c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -33,6 +33,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; @@ -393,13 +394,13 @@ private String generateProxyUriWithoutScheme( final String trackingUriWithoutScheme) { this.readLock.lock(); try { - URI trackingUri = trackingUriWithoutScheme == null ? null : + URI trackingUri = trackingUriWithoutScheme == null ? 
null : ProxyUriUtils.getUriFromAMUrl(trackingUriWithoutScheme); URI proxyUri = ProxyUriUtils.getUriFromAMUrl(proxy); - URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri, + URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri, applicationAttemptId.getApplicationId()); //We need to strip off the scheme to have it match what was there before - return result.toASCIIString().substring(7); + return result.toASCIIString().substring(HttpConfig.getSchemePrefix().length()); } catch (URISyntaxException e) { LOG.warn("Could not proxify "+trackingUriWithoutScheme,e); return trackingUriWithoutScheme; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java index 3dcd2f0268..c3593de53f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java @@ -30,6 +30,7 @@ import com.google.inject.Inject; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; @@ -137,7 +138,8 @@ protected void render(Block html) { table.tr((odd = !odd) ? _ODD : _EVEN). td(String.valueOf(attemptInfo.getAttemptId())). td(Times.format(attemptInfo.getStartTime())). - td().a(".nodelink", url("http://", attemptInfo.getNodeHttpAddress()), + td().a(".nodelink", url(HttpConfig.getSchemePrefix(), + attemptInfo.getNodeHttpAddress()), attemptInfo.getNodeHttpAddress())._(). td().a(".logslink", url(attemptInfo.getLogsLink()), "logs")._(). _(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index a9aafc5dbb..18167c89cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -26,6 +26,7 @@ import java.util.Collection; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -118,7 +119,8 @@ protected void render(Block html) { row.td()._("N/A")._(); } else { String httpAddress = info.getNodeHTTPAddress(); - row.td().a("http://" + httpAddress, httpAddress)._(); + row.td().a(HttpConfig.getSchemePrefix() + httpAddress, + httpAddress)._(); } row.td(info.getHealthStatus()). td().br().$title(String.valueOf(info.getLastHealthUpdate()))._(). 
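Note: the RMAppAttemptImpl hunk above also replaces the hard-coded result.toASCIIString().substring(7) with substring(HttpConfig.getSchemePrefix().length()), because the prefix being stripped is no longer always the 7-character "http://". A small standalone illustration (the prefix value and proxy URI below are made up; in the real code the length comes from HttpConfig.getSchemePrefix()):

public class StripSchemeExample {
  public static void main(String[] args) {
    // Stand-in for HttpConfig.getSchemePrefix() when hadoop.ssl.enabled=true.
    String prefix = "https://";
    // Made-up proxy URI, for illustration only.
    String proxied = prefix + "rm.example.com:8088/proxy/application_1346_0001/";
    // substring(7) only strips "http://"; using the prefix length also
    // handles the 8-character "https://" prefix.
    System.out.println(proxied.substring(prefix.length()));
  }
}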
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java index 5ad726e3b3..61b4880e13 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java @@ -23,6 +23,7 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.util.ConverterUtils; @@ -55,7 +56,8 @@ public AppAttemptInfo(RMAppAttempt attempt) { this.containerId = masterContainer.getId().toString(); this.nodeHttpAddress = masterContainer.getNodeHttpAddress(); this.nodeId = masterContainer.getNodeId().toString(); - this.logsLink = join("http://", masterContainer.getNodeHttpAddress(), + this.logsLink = join(HttpConfig.getSchemePrefix(), + masterContainer.getNodeHttpAddress(), "/node", "/containerlogs/", ConverterUtils.toString(masterContainer.getId()), "/", attempt.getSubmissionContext().getUser()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java index 47a263ded8..8a38278e56 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java @@ -24,6 +24,7 @@ import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; @@ -87,10 +88,10 @@ public AppInfo(RMApp app, Boolean hasAccess) { this.trackingUI = this.trackingUrlIsNotReady ? "UNASSIGNED" : (app .getFinishTime() == 0 ? "ApplicationMaster" : "History"); if (!trackingUrlIsNotReady) { - this.trackingUrl = join("http://", trackingUrl); + this.trackingUrl = join(HttpConfig.getSchemePrefix(), trackingUrl); } this.trackingUrlPretty = trackingUrlIsNotReady ? 
"UNASSIGNED" : join( - "http://", trackingUrl); + HttpConfig.getSchemePrefix(), trackingUrl); this.applicationId = app.getApplicationId(); this.appIdNum = String.valueOf(app.getApplicationId().getId()); this.id = app.getApplicationId().toString(); @@ -104,7 +105,6 @@ public AppInfo(RMApp app, Boolean hasAccess) { } this.finalStatus = app.getFinalApplicationStatus(); this.clusterId = ResourceManager.clusterTimeStamp; - if (hasAccess) { this.startedTime = app.getStartTime(); this.finishedTime = app.getFinishTime(); @@ -116,7 +116,8 @@ public AppInfo(RMApp app, Boolean hasAccess) { Container masterContainer = attempt.getMasterContainer(); if (masterContainer != null) { this.amContainerLogsExist = true; - String url = join("http://", masterContainer.getNodeHttpAddress(), + String url = join(HttpConfig.getSchemePrefix(), + masterContainer.getNodeHttpAddress(), "/node", "/containerlogs/", ConverterUtils.toString(masterContainer.getId()), "/", app.getUser()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java index 61e31eee93..7545fc0d6e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.yarn.api.records.ApplicationId; public class ProxyUriUtils { @@ -138,8 +139,8 @@ public static URI getProxyUri(URI originalUri, URI proxyUri, * @return a URI with an http scheme * @throws URISyntaxException if the url is not formatted correctly. 
*/ - public static URI getUriFromAMUrl(String noSchemeUrl) + public static URI getUriFromAMUrl(String noSchemeUrl) throws URISyntaxException { - return new URI("http://"+noSchemeUrl); + return new URI(HttpConfig.getSchemePrefix() + noSchemeUrl); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java index fdd7a70ffc..bc43d51e29 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java @@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.FilterContainer; import org.apache.hadoop.http.FilterInitializer; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -37,7 +38,8 @@ public void initFilter(FilterContainer container, Configuration conf) { String proxy = YarnConfiguration.getProxyHostAndPort(conf); String[] parts = proxy.split(":"); params.put(AmIpFilter.PROXY_HOST, parts[0]); - params.put(AmIpFilter.PROXY_URI_BASE, "http://"+proxy+ + params.put(AmIpFilter.PROXY_URI_BASE, + HttpConfig.getSchemePrefix() + proxy + System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV)); container.addFilter(FILTER_NAME, FILTER_CLASS, params); } From bcb715459c44d08bd02a800505a8abc6c6d7af86 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Fri, 10 Aug 2012 03:42:36 +0000 Subject: [PATCH 2/7] HDFS-3758. TestFuseDFS test failing. Contributed by Colin Patrick McCabe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1371555 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../native/fuse-dfs/test/TestFuseDFS.java | 55 ++++++++++++++++--- 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ac693b91ff..221f2b574d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -583,6 +583,8 @@ Branch-2 ( Unreleased changes ) HDFS-3721. hsync support broke wire compatibility. (todd and atm) + HDFS-3758. TestFuseDFS test failing. (Colin Patrick McCabe via eli) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. 
HDFS portion of ZK-based FailoverController (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/TestFuseDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/TestFuseDFS.java index c9827da145..dcb666fd65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/TestFuseDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/TestFuseDFS.java @@ -44,6 +44,7 @@ public class TestFuseDFS { private static MiniDFSCluster cluster; private static FileSystem fs; + private static Process fuseProcess; private static Runtime r; private static String mountPoint; @@ -137,8 +138,28 @@ private static void checkFile(File f, String expectedContents) assertEquals("File content differs", expectedContents, s); } + private static class RedirectToStdoutThread extends Thread { + private InputStream is; + + RedirectToStdoutThread(InputStream is) { + this.is = is; + } + public void run() { + try { + InputStreamReader isr = new InputStreamReader(is); + BufferedReader br = new BufferedReader(isr); + String line=null; + while ( (line = br.readLine()) != null) { + LOG.error("FUSE_LINE:" + line); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } + /** Run a fuse-dfs process to mount the given DFS */ - private static void establishMount(URI uri) throws IOException { + private static Process establishMount(URI uri) throws IOException { Runtime r = Runtime.getRuntime(); String cp = System.getProperty("java.class.path"); @@ -163,6 +184,8 @@ private static void establishMount(URI uri) throws IOException { "-obig_writes", // Allow >4kb writes "-oentry_timeout=0.1", // Don't cache dents long "-oattribute_timeout=0.1", // Don't cache attributes long + "-ononempty", // Don't complain about junk in mount point + "-f", // Don't background the process "-ordbuffer=32768", // Read buffer size in kb "rw" }; @@ -178,17 +201,35 @@ private static void establishMount(URI uri) throws IOException { execAssertSucceeds("mkdir -p " + mountPoint); // Mount the mini cluster - try { - Process fuseProcess = r.exec(mountCmd, env); - assertEquals(0, fuseProcess.waitFor()); - } catch (InterruptedException ie) { - fail("Failed to mount"); + String cmdStr = ""; + for (String c : mountCmd) { + cmdStr += (" " + c); } + LOG.info("now mounting with:" + cmdStr); + Process fuseProcess = r.exec(mountCmd, env); + RedirectToStdoutThread stdoutThread = + new RedirectToStdoutThread(fuseProcess.getInputStream()); + RedirectToStdoutThread stderrThread = + new RedirectToStdoutThread(fuseProcess.getErrorStream()); + stdoutThread.start(); + stderrThread.start(); + // Wait for fusermount to start up, so that we know we're operating on the + // FUSE FS when we run the tests. 
+ try { + Thread.sleep(50000); + } catch (InterruptedException e) { + } + return fuseProcess; } /** Tear down the fuse-dfs process and mount */ private static void teardownMount() throws IOException { execWaitRet("fusermount -u " + mountPoint); + try { + assertEquals(0, fuseProcess.waitFor()); // fuse_dfs should exit cleanly + } catch (InterruptedException e) { + fail("interrupted while waiting for fuse_dfs process to exit."); + } } @BeforeClass @@ -200,7 +241,7 @@ public static void startUp() throws IOException { cluster = new MiniDFSCluster.Builder(conf).build(); cluster.waitClusterUp(); fs = cluster.getFileSystem(); - establishMount(fs.getUri()); + fuseProcess = establishMount(fs.getUri()); } @AfterClass From bbe0e823db1778e542397ed3d279293fad5d5a8a Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Mon, 13 Aug 2012 18:42:51 +0000 Subject: [PATCH 3/7] Revert HDFS-3719. See discussion there and HDFS-3770 for more info. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1372544 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 --- .../org/apache/hadoop/hdfs/TestFileConcurrentReader.java | 8 ++++++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 221f2b574d..5aced5e81f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -570,9 +570,6 @@ Branch-2 ( Unreleased changes ) HDFS-3756. DelegationTokenFetcher creates 2 HTTP connections, the second one not properly configured. (tucu) - HDFS-3719. Re-enable append-related tests in TestFileConcurrentReader. - (Andrew Wang via atm) - HDFS-3579. libhdfs: fix exception handling. (Colin Patrick McCabe via atm) HDFS-3754. BlockSender doesn't shutdown ReadaheadPool threads. (eli) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java index 003dc6fbf4..97659eeab3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java @@ -288,8 +288,10 @@ public void testUnfinishedBlockCRCErrorTransferToVerySmallWrite() runTestUnfinishedBlockCRCError(true, SyncType.SYNC, SMALL_WRITE_SIZE); } + // fails due to issue w/append, disable + @Ignore @Test - public void testUnfinishedBlockCRCErrorTransferToAppend() + public void _testUnfinishedBlockCRCErrorTransferToAppend() throws IOException { runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE); } @@ -305,8 +307,10 @@ public void testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite() runTestUnfinishedBlockCRCError(false, SyncType.SYNC, SMALL_WRITE_SIZE); } + // fails due to issue w/append, disable + @Ignore @Test - public void testUnfinishedBlockCRCErrorNormalTransferAppend() + public void _testUnfinishedBlockCRCErrorNormalTransferAppend() throws IOException { runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE); } From 7aacfd5a087bb2f4662ff99776e96ca8faec4e33 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 13 Aug 2012 19:36:31 +0000 Subject: [PATCH 4/7] HDFS-3789. JournalManager#format() should be able to throw IOException. Contributed by Ivan Kelly. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1372566 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop/contrib/bkjournal/BookKeeperJournalManager.java | 2 +- .../apache/hadoop/hdfs/server/namenode/FileJournalManager.java | 2 +- .../org/apache/hadoop/hdfs/server/namenode/JournalManager.java | 2 +- .../org/apache/hadoop/hdfs/server/namenode/JournalSet.java | 2 +- .../hadoop/hdfs/server/namenode/TestGenericJournalConf.java | 2 +- 6 files changed, 8 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5aced5e81f..178073822b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -114,6 +114,9 @@ Trunk (unreleased changes) HDFS-3695. Genericize format() to non-file JournalManagers. (todd) + HDFS-3789. JournalManager#format() should be able to throw IOException + (Ivan Kelly via todd) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java index 8636fb7246..380db25703 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java @@ -272,7 +272,7 @@ public void processResult(int rc, String path, Object ctx, String name) { } @Override - public void format(NamespaceInfo ns) { + public void format(NamespaceInfo ns) throws IOException { // Currently, BKJM automatically formats itself when first accessed. // TODO: change over to explicit formatting so that the admin can // clear out the BK storage when reformatting a cluster. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index 64bc607c32..d9acff9944 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -79,7 +79,7 @@ public FileJournalManager(StorageDirectory sd, public void close() throws IOException {} @Override - public void format(NamespaceInfo ns) { + public void format(NamespaceInfo ns) throws IOException { // Formatting file journals is done by the StorageDirectory // format code, since they may share their directory with // checkpoints, etc. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java index 6fe5aad4ea..c95cb206ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java @@ -41,7 +41,7 @@ public interface JournalManager extends Closeable, FormatConfirmable { * Format the underlying storage, removing any previously * stored data. 
*/ - void format(NamespaceInfo ns); + void format(NamespaceInfo ns) throws IOException; /** * Begin writing to a new segment of the log stream, which starts at diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index 7dc9b71f06..db64e63874 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -174,7 +174,7 @@ public boolean isRequired() { } @Override - public void format(NamespaceInfo nsInfo) { + public void format(NamespaceInfo nsInfo) throws IOException { // The iteration is done by FSEditLog itself throw new UnsupportedOperationException(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java index 6096946b7e..a941ae424d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java @@ -154,7 +154,7 @@ public DummyJournalManager(Configuration conf, URI u, } @Override - public void format(NamespaceInfo nsInfo) { + public void format(NamespaceInfo nsInfo) throws IOException { formatCalled = true; } From 783a0f4d8b033e33e462ed2c671bc82fad032004 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Mon, 13 Aug 2012 20:27:14 +0000 Subject: [PATCH 5/7] Amend HADOOP-8659. Native libraries must build with soft-float ABI for Oracle JVM on ARM. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1372583 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/src/JNIFlags.cmake | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake index 52c3c8ec4c..9ed2bf559f 100644 --- a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake +++ b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake @@ -18,17 +18,18 @@ cmake_minimum_required(VERSION 2.6 FATAL_ERROR) -find_package(JNI REQUIRED) - # If JVM_ARCH_DATA_MODEL is 32, compile all binaries as 32-bit. # This variable is set by maven. if (JVM_ARCH_DATA_MODEL EQUAL 32) # Force 32-bit code generation on amd64/x86_64, ppc64, sparc64 if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_SYSTEM_PROCESSOR MATCHES ".*64") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32") set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32") endif () if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64") + # Set CMAKE_SYSTEM_PROCESSOR to ensure that find_package(JNI) will use + # the 32-bit version of libjvm.so. 
set(CMAKE_SYSTEM_PROCESSOR "i686") endif () endif (JVM_ARCH_DATA_MODEL EQUAL 32) @@ -63,3 +64,5 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux" endif () endif (READELF MATCHES "NOTFOUND") endif (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux") + +find_package(JNI REQUIRED) From c9ed8342f5273a57eebe9a0fc2d162216b338b7d Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 13 Aug 2012 20:52:17 +0000 Subject: [PATCH 6/7] Move HDFS-2330 and HDFS-3190 to branch-2 section, since they have been backported from trunk. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1372605 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 178073822b..95ef1ded2a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -104,9 +104,6 @@ Trunk (unreleased changes) HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers (todd) - HDFS-3190. Simple refactors in existing NN code to assist - QuorumJournalManager extension. (todd) - HDFS-3630 Modify TestPersistBlocks to use both flush and hflush (sanjay) HDFS-3768. Exception in TestJettyHelper is incorrect. @@ -130,10 +127,6 @@ Trunk (unreleased changes) HDFS-2314. MRV1 test compilation broken after HDFS-2197 (todd) - HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector, - IOExceptions of stream closures can mask root exceptions. (Uma Maheswara - Rao G via szetszwo) - HDFS-46. Change default namespace quota of root directory from Integer.MAX_VALUE to Long.MAX_VALUE. (Uma Maheswara Rao G via szetszwo) @@ -383,6 +376,9 @@ Branch-2 ( Unreleased changes ) HDFS-3634. Add self-contained, mavenized fuse_dfs test. (Colin Patrick McCabe via atm) + HDFS-3190. Simple refactors in existing NN code to assist + QuorumJournalManager extension. (todd) + OPTIMIZATIONS HDFS-2982. Startup performance suffers when there are many edit log @@ -585,6 +581,10 @@ Branch-2 ( Unreleased changes ) HDFS-3758. TestFuseDFS test failing. (Colin Patrick McCabe via eli) + HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector, + IOExceptions of stream closures can mask root exceptions. (Uma Maheswara + Rao G via szetszwo) + BREAKDOWN OF HDFS-3042 SUBTASKS HDFS-2185. HDFS portion of ZK-based FailoverController (todd) From c93185df660aa4fbb7885794550177286f9f3029 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Mon, 13 Aug 2012 21:26:39 +0000 Subject: [PATCH 7/7] HDFS-3276. initializeSharedEdits should have a -nonInteractive flag. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1372628 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hadoop/hdfs/server/namenode/NameNode.java | 19 +++++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 95ef1ded2a..e146638810 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -379,6 +379,8 @@ Branch-2 ( Unreleased changes ) HDFS-3190. Simple refactors in existing NN code to assist QuorumJournalManager extension. (todd) + HDFS-3276. initializeSharedEdits should have a -nonInteractive flag (todd) + OPTIMIZATIONS HDFS-2982. 
Startup performance suffers when there are many edit log diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 1aae5873bc..38c8415165 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -894,7 +894,10 @@ private static void printUsage() { StartupOption.ROLLBACK.getName() + "] | [" + StartupOption.FINALIZE.getName() + "] | [" + StartupOption.IMPORT.getName() + "] | [" + - StartupOption.INITIALIZESHAREDEDITS.getName() + "] | [" + + StartupOption.INITIALIZESHAREDEDITS.getName() + + " [" + StartupOption.FORCE.getName() + "] [" + + StartupOption.NONINTERACTIVE.getName() + "]" + + "] | [" + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName() + " ] ]"); @@ -964,6 +967,16 @@ private static StartupOption parseArguments(String args[]) { return startOpt; } else if (StartupOption.INITIALIZESHAREDEDITS.getName().equalsIgnoreCase(cmd)) { startOpt = StartupOption.INITIALIZESHAREDEDITS; + for (i = i + 1 ; i < argsLen; i++) { + if (StartupOption.NONINTERACTIVE.getName().equals(args[i])) { + startOpt.setInteractiveFormat(false); + } else if (StartupOption.FORCE.getName().equals(args[i])) { + startOpt.setForceFormat(true); + } else { + LOG.fatal("Invalid argument: " + args[i]); + return null; + } + } return startOpt; } else if (StartupOption.RECOVER.getName().equalsIgnoreCase(cmd)) { if (startOpt != StartupOption.REGULAR) { @@ -1073,7 +1086,9 @@ public static NameNode createNameNode(String argv[], Configuration conf) return null; // avoid warning } case INITIALIZESHAREDEDITS: { - boolean aborted = initializeSharedEdits(conf, false, true); + boolean aborted = initializeSharedEdits(conf, + startOpt.getForceFormat(), + startOpt.getInteractiveFormat()); terminate(aborted ? 1 : 0); return null; // avoid warning }
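The HDFS-3789 change in this series lets JournalManager implementations report storage failures from format() instead of having to swallow them or wrap them in unchecked exceptions. The fragment below is not part of the patches above; it is a minimal, hypothetical sketch (the class name, the local-directory storage model, and the deleteRecursively helper are illustrative assumptions) of how a pluggable journal could use the revised signature. Only format() is shown; a real plugin would also have to implement the remaining methods of the JournalManager interface.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

/** Illustrative only: shows the post-HDFS-3789 format() contract. */
public class ExampleJournalManager /* implements JournalManager */ {

  private final File storageDir;

  public ExampleJournalManager(File storageDir) {
    this.storageDir = storageDir;
  }

  /** Remove any previously stored data, failing loudly if storage cannot be cleared. */
  public void format(NamespaceInfo ns) throws IOException {
    if (storageDir.exists() && !deleteRecursively(storageDir)) {
      // Before this change the interface forced implementations to hide this
      // failure; now it propagates to the formatting code in the NameNode.
      throw new IOException("Unable to clear journal storage at " + storageDir);
    }
    if (!storageDir.mkdirs()) {
      throw new IOException("Unable to create journal storage at " + storageDir
          + " for namespace " + ns.getNamespaceID());
    }
  }

  private static boolean deleteRecursively(File f) {
    File[] children = f.listFiles();
    if (children != null) {
      for (File child : children) {
        if (!deleteRecursively(child)) {
          return false;
        }
      }
    }
    return f.delete();
  }
}

Separately, once HDFS-3276 (the last patch above) is applied, the shared edits directory can be formatted without prompting by passing the new flag shown in printUsage(), e.g. hdfs namenode -initializeSharedEdits -nonInteractive (the -force flag is also accepted, as the usage string shows).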