diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index adfcf30785..4d4857a55a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -213,6 +213,8 @@ Branch-2 ( Unreleased changes )
HADOOP-8644. AuthenticatedURL should be able to use SSLFactory. (tucu)
+ HADOOP-8681. add support for HTTPS to the web UIs. (tucu)
+
IMPROVEMENTS
HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
diff --git a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake
index 52c3c8ec4c..9ed2bf559f 100644
--- a/hadoop-common-project/hadoop-common/src/JNIFlags.cmake
+++ b/hadoop-common-project/hadoop-common/src/JNIFlags.cmake
@@ -18,17 +18,18 @@
cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
-find_package(JNI REQUIRED)
-
# If JVM_ARCH_DATA_MODEL is 32, compile all binaries as 32-bit.
# This variable is set by maven.
if (JVM_ARCH_DATA_MODEL EQUAL 32)
# Force 32-bit code generation on amd64/x86_64, ppc64, sparc64
if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_SYSTEM_PROCESSOR MATCHES ".*64")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32")
set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -m32")
endif ()
if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64")
+ # Set CMAKE_SYSTEM_PROCESSOR to ensure that find_package(JNI) will use
+ # the 32-bit version of libjvm.so.
set(CMAKE_SYSTEM_PROCESSOR "i686")
endif ()
endif (JVM_ARCH_DATA_MODEL EQUAL 32)
@@ -63,3 +64,5 @@ if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux"
endif ()
endif (READELF MATCHES "NOTFOUND")
endif (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux")
+
+find_package(JNI REQUIRED)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 3f16de916f..81d1aa721a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -239,5 +239,8 @@ public class CommonConfigurationKeysPublic {
public static final String HADOOP_SECURITY_AUTH_TO_LOCAL =
"hadoop.security.auth_to_local";
+ public static final String HADOOP_SSL_ENABLED_KEY = "hadoop.ssl.enabled";
+ public static final boolean HADOOP_SSL_ENABLED_DEFAULT = false;
+
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
new file mode 100644
index 0000000000..4ee2f5582f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+
+/**
+ * Singleton to get access to Http related configuration.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class HttpConfig {
+ private static boolean sslEnabled;
+
+ static {
+ Configuration conf = new Configuration();
+ sslEnabled = conf.getBoolean(
+ CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
+ CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
+ }
+
+ public static boolean isSecure() {
+ return sslEnabled;
+ }
+
+ public static String getSchemePrefix() {
+ return (isSecure()) ? "https://" : "http://";
+ }
+
+}
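
A minimal usage sketch (not part of the patch; host and port are made up) of how callers consume HttpConfig when building web UI links. Because the flag is read once from a default Configuration in a static initializer, the scheme prefix is fixed for the lifetime of the process and cannot be toggled per request:

    // Hypothetical caller, for illustration only; host and port are made up.
    package org.apache.hadoop.example;

    import org.apache.hadoop.http.HttpConfig;

    public class SchemePrefixExample {
      public static void main(String[] args) {
        String nodeHttpAddress = "datanode-1.example.com:50075";
        // Prints http://... with hadoop.ssl.enabled=false (the default),
        // https://... when the flag is true in the loaded site files.
        System.out.println(HttpConfig.getSchemePrefix()
            + nodeHttpAddress + "/browseDirectory.jsp");
      }
    }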
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index 2f693b4714..de265725c0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -24,12 +24,14 @@
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.URL;
+import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import javax.net.ssl.SSLServerSocketFactory;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
@@ -56,6 +58,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.util.ReflectionUtils;
import org.mortbay.io.Buffer;
import org.mortbay.jetty.Connector;
@@ -105,6 +108,7 @@ public class HttpServer implements FilterContainer {
private AccessControlList adminsAcl;
+ private SSLFactory sslFactory;
protected final Server webServer;
protected final Connector listener;
protected final WebAppContext webAppContext;
@@ -208,7 +212,23 @@ public HttpServer(String name, String bindAddress, int port,
if(connector == null) {
listenerStartedExternally = false;
- listener = createBaseListener(conf);
+ if (HttpConfig.isSecure()) {
+ sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
+ try {
+ sslFactory.init();
+ } catch (GeneralSecurityException ex) {
+ throw new IOException(ex);
+ }
+ SslSocketConnector sslListener = new SslSocketConnector() {
+ @Override
+ protected SSLServerSocketFactory createFactory() throws Exception {
+ return sslFactory.createSSLServerSocketFactory();
+ }
+ };
+ listener = sslListener;
+ } else {
+ listener = createBaseListener(conf);
+ }
listener.setHost(bindAddress);
listener.setPort(port);
} else {
@@ -720,6 +740,16 @@ public void stop() throws Exception {
exception = addMultiException(exception, e);
}
+ try {
+ if (sslFactory != null) {
+ sslFactory.destroy();
+ }
+ } catch (Exception e) {
+ LOG.error("Error while destroying the SSLFactory"
+ + webAppContext.getDisplayName(), e);
+ exception = addMultiException(exception, e);
+ }
+
try {
// clear & stop webAppContext attributes to avoid memory leaks.
webAppContext.clearAttributes();
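
For context, a minimal sketch (not part of the patch) of the server-side SSLFactory lifecycle that HttpServer now drives internally. It assumes keystore and truststore settings are supplied the way SSLFactory expects them (by default from an ssl-server.xml resource on the classpath):

    // Sketch only: mirrors the listener setup and stop() cleanup above.
    package org.apache.hadoop.example;

    import javax.net.ssl.SSLServerSocketFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.ssl.SSLFactory;

    public class ServerSslSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
        sslFactory.init(); // loads keystore/truststore material
        try {
          SSLServerSocketFactory ssf =
              sslFactory.createSSLServerSocketFactory();
          System.out.println("server socket factory ready: " + ssf);
        } finally {
          sslFactory.destroy(); // as HttpServer.stop() now does
        }
      }
    }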
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 8189cfdb27..2f65892db7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -40,10 +40,12 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
@@ -65,12 +67,23 @@ public class SecurityUtil {
static boolean useIpForTokenService;
@VisibleForTesting
static HostResolver hostResolver;
-
+
+ private static SSLFactory sslFactory;
+
static {
- boolean useIp = new Configuration().getBoolean(
+ Configuration conf = new Configuration();
+ boolean useIp = conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP,
CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT);
setTokenServiceUseIp(useIp);
+ if (HttpConfig.isSecure()) {
+ sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
+ try {
+ sslFactory.init();
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
}
/**
@@ -456,7 +469,7 @@ public static URLConnection openSecureHttpConnection(URL url) throws IOException
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
try {
- return new AuthenticatedURL().openConnection(url, token);
+ return new AuthenticatedURL(null, sslFactory).openConnection(url, token);
} catch (AuthenticationException e) {
throw new IOException("Exception trying to open authenticated connection to "
+ url, e);
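
A hypothetical caller of the updated openSecureHttpConnection(); the URL below is made up. When hadoop.ssl.enabled is true, the class-level client SSLFactory initialized above supplies the trust material for the AuthenticatedURL:

    // Hypothetical caller; the URL is made up.
    package org.apache.hadoop.example;

    import java.io.InputStream;
    import java.net.URL;
    import java.net.URLConnection;
    import org.apache.hadoop.security.SecurityUtil;

    public class SecureHttpExample {
      public static void main(String[] args) throws Exception {
        URLConnection conn = SecurityUtil.openSecureHttpConnection(
            new URL("https://namenode.example.com:50470/jmx"));
        InputStream in = conn.getInputStream();
        try {
          System.out.println("connected, content type: "
              + conn.getContentType());
        } finally {
          in.close();
        }
      }
    }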
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 93b11c008b..25d5798de9 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1073,4 +1073,14 @@
+<property>
+  <name>hadoop.ssl.enabled</name>
+  <value>false</value>
+  <description>
+    Whether to use SSL for the HTTP endpoints. If set to true, the
+    NameNode, DataNode, ResourceManager, NodeManager, HistoryServer and
+    MapReduceAppMaster web UIs will be served over HTTPS instead of HTTP.
+  </description>
+</property>
+
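
A sketch of toggling the new key programmatically, as TestSSLHttpServer below does. Note that HttpConfig reads the flag from a fresh Configuration in a static initializer, so the value must be visible in the classpath site files (e.g. core-site.xml) before that class loads; setting it on an unrelated Configuration instance does not affect HttpConfig:

    // Sketch; mirrors what TestSSLHttpServer does before writing core-site.xml.
    package org.apache.hadoop.example;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    public class EnableSslExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(
            CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, true);
        System.out.println("hadoop.ssl.enabled = " + conf.getBoolean(
            CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
            CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT));
      }
    }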
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
new file mode 100644
index 0000000000..f5ab957225
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import javax.net.ssl.HttpsURLConnection;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.InputStream;
+import java.io.Writer;
+import java.net.URL;
+
+/**
+ * This testcase issues SSL certificates, configures the HttpServer to serve
+ * HTTPS using the created certificates, and calls an echo servlet using the
+ * corresponding HTTPS URL.
+ */
+public class TestSSLHttpServer extends HttpServerFunctionalTest {
+ private static final String BASEDIR =
+ System.getProperty("test.build.dir", "target/test-dir") + "/" +
+ TestSSLHttpServer.class.getSimpleName();
+
+ static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
+ private static HttpServer server;
+ private static URL baseUrl;
+
+ @Before
+ public void setup() throws Exception {
+ File base = new File(BASEDIR);
+ FileUtil.fullyDelete(base);
+ base.mkdirs();
+ String classpathDir =
+ KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
+ Configuration conf = new Configuration();
+ String keystoresDir = new File(BASEDIR).getAbsolutePath();
+ String sslConfsDir =
+ KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
+ KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, false);
+ conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, true);
+
+ //we do this trick because the MR AppMaster is started in another VM and
+ //the HttpServer configuration is not loaded from the job.xml but from the
+ //site.xml files in the classpath
+ Writer writer = new FileWriter(classpathDir + "/core-site.xml");
+ conf.writeXml(writer);
+ writer.close();
+
+ conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
+ server = createServer("test", conf);
+ server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
+ server.start();
+ baseUrl = new URL("https://localhost:" + server.getPort() + "/");
+ LOG.info("HTTP server started: "+ baseUrl);
+ }
+
+ @After
+ public void cleanup() throws Exception {
+ server.stop();
+ String classpathDir =
+ KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
+ new File(classpathDir + "/core-site.xml").delete();
+ }
+
+
+ @Test
+ public void testEcho() throws Exception {
+ assertEquals("a:b\nc:d\n",
+ readOut(new URL(baseUrl, "/echo?a=b&c=d")));
+ assertEquals("a:b\nc<:d\ne:>\n",
+ readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>")));
+ }
+
+ private static String readOut(URL url) throws Exception {
+ StringBuilder out = new StringBuilder();
+ HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
+ SSLFactory sslf = new SSLFactory(SSLFactory.Mode.CLIENT, new Configuration());
+ sslf.init();
+ conn.setSSLSocketFactory(sslf.createSSLSocketFactory());
+ InputStream in = conn.getInputStream();
+ byte[] buffer = new byte[64 * 1024];
+ int len = in.read(buffer);
+ while (len > 0) {
+ out.append(new String(buffer, 0, len));
+ len = in.read(buffer);
+ }
+ return out.toString();
+ }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ac693b91ff..e146638810 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -104,9 +104,6 @@ Trunk (unreleased changes)
HDFS-3573. Supply NamespaceInfo when instantiating JournalManagers (todd)
- HDFS-3190. Simple refactors in existing NN code to assist
- QuorumJournalManager extension. (todd)
-
HDFS-3630 Modify TestPersistBlocks to use both flush and hflush (sanjay)
HDFS-3768. Exception in TestJettyHelper is incorrect.
@@ -114,6 +111,9 @@ Trunk (unreleased changes)
HDFS-3695. Genericize format() to non-file JournalManagers. (todd)
+ HDFS-3789. JournalManager#format() should be able to throw IOException
+ (Ivan Kelly via todd)
+
OPTIMIZATIONS
BUG FIXES
@@ -127,10 +127,6 @@ Trunk (unreleased changes)
HDFS-2314. MRV1 test compilation broken after HDFS-2197 (todd)
- HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector,
- IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
- Rao G via szetszwo)
-
HDFS-46. Change default namespace quota of root directory from
Integer.MAX_VALUE to Long.MAX_VALUE. (Uma Maheswara Rao G via szetszwo)
@@ -380,6 +376,11 @@ Branch-2 ( Unreleased changes )
HDFS-3634. Add self-contained, mavenized fuse_dfs test. (Colin Patrick
McCabe via atm)
+ HDFS-3190. Simple refactors in existing NN code to assist
+ QuorumJournalManager extension. (todd)
+
+ HDFS-3276. initializeSharedEdits should have a -nonInteractive flag (todd)
+
OPTIMIZATIONS
HDFS-2982. Startup performance suffers when there are many edit log
@@ -570,9 +571,6 @@ Branch-2 ( Unreleased changes )
HDFS-3756. DelegationTokenFetcher creates 2 HTTP connections, the second
one not properly configured. (tucu)
- HDFS-3719. Re-enable append-related tests in TestFileConcurrentReader.
- (Andrew Wang via atm)
-
HDFS-3579. libhdfs: fix exception handling. (Colin Patrick McCabe via atm)
HDFS-3754. BlockSender doesn't shutdown ReadaheadPool threads. (eli)
@@ -583,6 +581,12 @@ Branch-2 ( Unreleased changes )
HDFS-3721. hsync support broke wire compatibility. (todd and atm)
+ HDFS-3758. TestFuseDFS test failing. (Colin Patrick McCabe via eli)
+
+ HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector,
+ IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
+ Rao G via szetszwo)
+
BREAKDOWN OF HDFS-3042 SUBTASKS
HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
index 8636fb7246..380db25703 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
@@ -272,7 +272,7 @@ public void processResult(int rc, String path, Object ctx, String name) {
}
@Override
- public void format(NamespaceInfo ns) {
+ public void format(NamespaceInfo ns) throws IOException {
// Currently, BKJM automatically formats itself when first accessed.
// TODO: change over to explicit formatting so that the admin can
// clear out the BK storage when reformatting a cluster.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
index e09440efe5..d19e54e112 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
@@ -19,7 +19,6 @@
import java.io.File;
import java.io.IOException;
-import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
@@ -37,7 +36,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -45,6 +43,7 @@
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -140,7 +139,7 @@ static void generateDirectoryStructure(JspWriter out,
DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
String fqdn = canonicalize(chosenNode.getIpAddr());
int datanodePort = chosenNode.getXferPort();
- String redirectLocation = "http://" + fqdn + ":"
+ String redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":"
+ chosenNode.getInfoPort() + "/browseBlock.jsp?blockId="
+ firstBlock.getBlock().getBlockId() + "&blockSize="
+ firstBlock.getBlock().getNumBytes() + "&genstamp="
@@ -220,7 +219,7 @@ static void generateDirectoryStructure(JspWriter out,
JspHelper.addTableFooter(out);
}
}
- out.print("
Go back to DFS home");
dfs.close();
@@ -296,7 +295,7 @@ static void generateFileDetails(JspWriter out,
Long.MAX_VALUE).getLocatedBlocks();
// Add the various links for looking at the file contents
// URL for downloading the full file
- String downloadUrl = "http://" + req.getServerName() + ":"
+ String downloadUrl = HttpConfig.getSchemePrefix() + req.getServerName() + ":"
+ req.getServerPort() + "/streamFile" + ServletUtil.encodePath(filename)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr, true)
+ JspHelper.getDelegationTokenUrlParam(tokenString);
@@ -314,7 +313,7 @@ static void generateFileDetails(JspWriter out,
return;
}
String fqdn = canonicalize(chosenNode.getIpAddr());
- String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
+ String tailUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + chosenNode.getInfoPort()
+ "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
+ "&namenodeInfoPort=" + namenodeInfoPort
+ "&chunkSizeToView=" + chunkSizeToView
@@ -363,7 +362,7 @@ static void generateFileDetails(JspWriter out,
String datanodeAddr = locs[j].getXferAddr();
datanodePort = locs[j].getXferPort();
fqdn = canonicalize(locs[j].getIpAddr());
- String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
+ String blockUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + locs[j].getInfoPort()
+ "/browseBlock.jsp?blockId=" + blockidstring
+ "&blockSize=" + blockSize
+ "&filename=" + URLEncoder.encode(filename, "UTF-8")
@@ -374,7 +373,7 @@ static void generateFileDetails(JspWriter out,
+ JspHelper.getDelegationTokenUrlParam(tokenString)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
- String blockInfoUrl = "http://" + nnCanonicalName + ":"
+ String blockInfoUrl = HttpConfig.getSchemePrefix() + nnCanonicalName + ":"
+ namenodeInfoPort
+ "/block_info_xml.jsp?blockId=" + blockidstring;
out.print("
  | "
@@ -385,7 +384,7 @@ static void generateFileDetails(JspWriter out,
}
out.println("");
out.print(" ");
- out.print(" Go back to DFS home");
dfs.close();
@@ -485,7 +484,7 @@ static void generateFileChunks(JspWriter out, HttpServletRequest req,
String parent = new File(filename).getParent();
JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, parent, nnAddr);
out.print(" ");
- out.print(" localPaths,
Storage dstStorage, boolean getChecksum) throws IOException {
- String str = "http://" + nnHostPort + "/getimage?" + queryString;
+ String str = HttpConfig.getSchemePrefix() + nnHostPort + "/getimage?" +
+ queryString;
LOG.info("Opening connection to " + str);
//
// open connection to remote server
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 7124876aba..566d77a5fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
@@ -226,7 +227,7 @@ private String getCurrentNamenodeAddress() throws IOException {
}
private int doWork(final String[] args) throws IOException {
- final StringBuilder url = new StringBuilder("http://");
+ final StringBuilder url = new StringBuilder(HttpConfig.getSchemePrefix());
String namenodeAddress = getCurrentNamenodeAddress();
if (namenodeAddress == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/TestFuseDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/TestFuseDFS.java
index c9827da145..dcb666fd65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/TestFuseDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/TestFuseDFS.java
@@ -44,6 +44,7 @@ public class TestFuseDFS {
private static MiniDFSCluster cluster;
private static FileSystem fs;
+ private static Process fuseProcess;
private static Runtime r;
private static String mountPoint;
@@ -137,8 +138,28 @@ private static void checkFile(File f, String expectedContents)
assertEquals("File content differs", expectedContents, s);
}
+ private static class RedirectToStdoutThread extends Thread {
+ private InputStream is;
+
+ RedirectToStdoutThread(InputStream is) {
+ this.is = is;
+ }
+ public void run() {
+ try {
+ InputStreamReader isr = new InputStreamReader(is);
+ BufferedReader br = new BufferedReader(isr);
+ String line=null;
+ while ( (line = br.readLine()) != null) {
+ LOG.error("FUSE_LINE:" + line);
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
/** Run a fuse-dfs process to mount the given DFS */
- private static void establishMount(URI uri) throws IOException {
+ private static Process establishMount(URI uri) throws IOException {
Runtime r = Runtime.getRuntime();
String cp = System.getProperty("java.class.path");
@@ -163,6 +184,8 @@ private static void establishMount(URI uri) throws IOException {
"-obig_writes", // Allow >4kb writes
"-oentry_timeout=0.1", // Don't cache dents long
"-oattribute_timeout=0.1", // Don't cache attributes long
+ "-ononempty", // Don't complain about junk in mount point
+ "-f", // Don't background the process
"-ordbuffer=32768", // Read buffer size in kb
"rw"
};
@@ -178,17 +201,35 @@ private static void establishMount(URI uri) throws IOException {
execAssertSucceeds("mkdir -p " + mountPoint);
// Mount the mini cluster
- try {
- Process fuseProcess = r.exec(mountCmd, env);
- assertEquals(0, fuseProcess.waitFor());
- } catch (InterruptedException ie) {
- fail("Failed to mount");
+ String cmdStr = "";
+ for (String c : mountCmd) {
+ cmdStr += (" " + c);
}
+ LOG.info("now mounting with:" + cmdStr);
+ Process fuseProcess = r.exec(mountCmd, env);
+ RedirectToStdoutThread stdoutThread =
+ new RedirectToStdoutThread(fuseProcess.getInputStream());
+ RedirectToStdoutThread stderrThread =
+ new RedirectToStdoutThread(fuseProcess.getErrorStream());
+ stdoutThread.start();
+ stderrThread.start();
+ // Wait for fusermount to start up, so that we know we're operating on the
+ // FUSE FS when we run the tests.
+ try {
+ Thread.sleep(50000);
+ } catch (InterruptedException e) {
+ }
+ return fuseProcess;
}
/** Tear down the fuse-dfs process and mount */
private static void teardownMount() throws IOException {
execWaitRet("fusermount -u " + mountPoint);
+ try {
+ assertEquals(0, fuseProcess.waitFor()); // fuse_dfs should exit cleanly
+ } catch (InterruptedException e) {
+ fail("interrupted while waiting for fuse_dfs process to exit.");
+ }
}
@BeforeClass
@@ -200,7 +241,7 @@ public static void startUp() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitClusterUp();
fs = cluster.getFileSystem();
- establishMount(fs.getUri());
+ fuseProcess = establishMount(fs.getUri());
}
@AfterClass
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
index 003dc6fbf4..97659eeab3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
@@ -288,8 +288,10 @@ public void testUnfinishedBlockCRCErrorTransferToVerySmallWrite()
runTestUnfinishedBlockCRCError(true, SyncType.SYNC, SMALL_WRITE_SIZE);
}
+ // fails due to issue w/append, disable
+ @Ignore
@Test
- public void testUnfinishedBlockCRCErrorTransferToAppend()
+ public void _testUnfinishedBlockCRCErrorTransferToAppend()
throws IOException {
runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
}
@@ -305,8 +307,10 @@ public void testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite()
runTestUnfinishedBlockCRCError(false, SyncType.SYNC, SMALL_WRITE_SIZE);
}
+ // fails due to issue w/append, disable
+ @Ignore
@Test
- public void testUnfinishedBlockCRCErrorNormalTransferAppend()
+ public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
throws IOException {
runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
index 6096946b7e..a941ae424d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
@@ -154,7 +154,7 @@ public DummyJournalManager(Configuration conf, URI u,
}
@Override
- public void format(NamespaceInfo nsInfo) {
+ public void format(NamespaceInfo nsInfo) throws IOException {
formatCalled = true;
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
index c0d7de0f64..6b80c8c7d0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
@@ -20,7 +20,6 @@
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
import static org.apache.hadoop.yarn.util.StringHelper.join;
-import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
@@ -31,6 +30,7 @@
import java.util.Date;
import java.util.List;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
@@ -40,8 +40,6 @@
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
@@ -106,7 +104,8 @@ public class JobBlock extends HtmlBlock {
table.tr().
td(String.valueOf(attempt.getAttemptId())).
td(new Date(attempt.getStartTime()).toString()).
- td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()),
+ td().a(".nodelink", url(HttpConfig.getSchemePrefix(),
+ attempt.getNodeHttpAddress()),
attempt.getNodeHttpAddress())._().
td().a(".logslink", url(attempt.getLogsLink()),
"logs")._().
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
index 56a0a2f4c0..941b7b0b96 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
@@ -24,6 +24,7 @@
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@@ -62,7 +63,8 @@ public class NavBlock extends HtmlBlock {
li().a(url("conf", jobid), "Configuration")._().
li().a(url("tasks", jobid, "m"), "Map tasks")._().
li().a(url("tasks", jobid, "r"), "Reduce tasks")._().
- li().a(".logslink", url("http://", nodeHttpAddress, "node",
+ li().a(".logslink", url(HttpConfig.getSchemePrefix(),
+ nodeHttpAddress, "node",
"containerlogs", thisAmInfo.getContainerId().toString(),
app.getJob().getUserName()),
"AM Logs")._()._();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
index e83a957158..90f082a231 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
@@ -27,6 +27,7 @@
import java.util.Collection;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.util.StringUtils;
@@ -93,13 +94,15 @@ protected void render(Block html) {
nodeTd._("N/A");
} else {
nodeTd.
- a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr);
+ a(".nodelink", url(HttpConfig.getSchemePrefix(),
+ nodeHttpAddr), nodeHttpAddr);
}
nodeTd._();
if (containerId != null) {
String containerIdStr = ta.getAssignedContainerIdStr();
row.td().
- a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs",
+ a(".logslink", url(HttpConfig.getSchemePrefix(),
+ nodeHttpAddr, "node", "containerlogs",
containerIdStr, app.getJob().getUserName()), "logs")._();
} else {
row.td()._("N/A")._();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java
index 96e2f1d4eb..8dcb7c5bf2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java
@@ -24,6 +24,7 @@
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
@@ -63,7 +64,7 @@ public AMAttemptInfo(AMInfo amInfo, String jobId, String user) {
ContainerId containerId = amInfo.getContainerId();
if (containerId != null) {
this.containerId = containerId.toString();
- this.logsLink = join("http://" + nodeHttpAddress,
+ this.logsLink = join(HttpConfig.getSchemePrefix() + nodeHttpAddress,
ujoin("node", "containerlogs", this.containerId, user));
}
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java
index 83bbbe9239..0a42bb73a2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/HostUtil.java
@@ -20,6 +20,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.http.HttpConfig;
@Private
@Unstable
@@ -33,9 +34,9 @@ public class HostUtil {
* @return the taskLogUrl
*/
public static String getTaskLogUrl(String taskTrackerHostName,
- String httpPort, String taskAttemptID) {
- return ("http://" + taskTrackerHostName + ":" + httpPort
- + "/tasklog?attemptid=" + taskAttemptID);
+ String httpPort, String taskAttemptID) {
+ return (HttpConfig.getSchemePrefix() + taskTrackerHostName + ":" +
+ httpPort + "/tasklog?attemptid=" + taskAttemptID);
}
public static String convertTrackerNameToHostName(String trackerName) {
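
For illustration, a hypothetical call to the updated helper; host, port and attempt id are made-up values:

    // Hypothetical call; host, port and attempt id are made up.
    package org.apache.hadoop.example;

    import org.apache.hadoop.mapreduce.util.HostUtil;

    public class TaskLogUrlExample {
      public static void main(String[] args) {
        // With hadoop.ssl.enabled=false this prints
        // http://tt-1.example.com:50060/tasklog?attemptid=attempt_1345_0001_m_000000_0
        System.out.println(HostUtil.getTaskLogUrl(
            "tt-1.example.com", "50060", "attempt_1345_0001_m_000000_0"));
      }
    }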
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
index b21218e822..25b22f0d2a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java
@@ -21,28 +21,18 @@
import com.google.inject.Inject;
import java.util.Date;
import java.util.List;
-import java.util.Map;
-import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
-import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
-import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
-import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.NodeId;
-import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@@ -142,7 +132,8 @@ public class HsJobBlock extends HtmlBlock {
table.tr((odd = !odd) ? _ODD : _EVEN).
td(String.valueOf(attempt.getAttemptId())).
td(new Date(attempt.getStartTime()).toString()).
- td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()),
+ td().a(".nodelink", url(HttpConfig.getSchemePrefix(),
+ attempt.getNodeHttpAddress()),
attempt.getNodeHttpAddress())._().
td().a(".logslink", url(attempt.getShortLogsLink()),
"logs")._().
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
index 5e4b701b30..9807b1f4a9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
@@ -29,6 +29,7 @@
import java.util.Collection;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
@@ -143,7 +144,7 @@ protected void render(Block html) {
td.br().$title(String.valueOf(sortId))._(). // sorting
_(taid)._().td(ta.getState().toString()).td().a(".nodelink",
- "http://"+ nodeHttpAddr,
+ HttpConfig.getSchemePrefix()+ nodeHttpAddr,
nodeRackName + "/" + nodeHttpAddr);
td._();
row.td().
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 19d83a8190..f2eb71c2e9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -32,6 +32,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -393,7 +394,7 @@ public JobStatus getJobStatus(JobID oldJobID) throws IOException {
String url = StringUtils.isNotEmpty(historyTrackingUrl)
? historyTrackingUrl : trackingUrl;
if (!UNAVAILABLE.equals(url)) {
- url = "http://" + url;
+ url = HttpConfig.getSchemePrefix() + url;
}
jobStatus = TypeConverter.fromYarn(report, url);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index f7a9f92567..f4187f483c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -33,6 +33,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -393,13 +394,13 @@ private String generateProxyUriWithoutScheme(
final String trackingUriWithoutScheme) {
this.readLock.lock();
try {
- URI trackingUri = trackingUriWithoutScheme == null ? null :
+ URI trackingUri = trackingUriWithoutScheme == null ? null :
ProxyUriUtils.getUriFromAMUrl(trackingUriWithoutScheme);
URI proxyUri = ProxyUriUtils.getUriFromAMUrl(proxy);
- URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri,
+ URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri,
applicationAttemptId.getApplicationId());
//We need to strip off the scheme to have it match what was there before
- return result.toASCIIString().substring(7);
+ return result.toASCIIString().substring(HttpConfig.getSchemePrefix().length());
} catch (URISyntaxException e) {
LOG.warn("Could not proxify "+trackingUriWithoutScheme,e);
return trackingUriWithoutScheme;
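
The substring change matters because the proxy URI is now rebuilt with whichever scheme is active, so the number of characters to strip is no longer always 7 ("http://") but may be 8 ("https://"). A self-contained sketch with a made-up URI:

    // Self-contained sketch with a made-up URI.
    package org.apache.hadoop.example;

    import java.net.URI;
    import org.apache.hadoop.http.HttpConfig;

    public class StripSchemeExample {
      public static void main(String[] args) {
        URI result = URI.create(HttpConfig.getSchemePrefix()
            + "proxy.example.com:8088/proxy/application_1345_0001");
        // Strips "http://" (7 chars) or "https://" (8 chars), whichever
        // prefix is active, leaving a scheme-less tracking URL.
        System.out.println(result.toASCIIString()
            .substring(HttpConfig.getSchemePrefix().length()));
      }
    }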
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
index 3dcd2f0268..c3593de53f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppBlock.java
@@ -30,6 +30,7 @@
import com.google.inject.Inject;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
@@ -137,7 +138,8 @@ protected void render(Block html) {
table.tr((odd = !odd) ? _ODD : _EVEN).
td(String.valueOf(attemptInfo.getAttemptId())).
td(Times.format(attemptInfo.getStartTime())).
- td().a(".nodelink", url("http://", attemptInfo.getNodeHttpAddress()),
+ td().a(".nodelink", url(HttpConfig.getSchemePrefix(),
+ attemptInfo.getNodeHttpAddress()),
attemptInfo.getNodeHttpAddress())._().
td().a(".logslink", url(attemptInfo.getLogsLink()), "logs")._().
_();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index a9aafc5dbb..18167c89cc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -26,6 +26,7 @@
import java.util.Collection;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -118,7 +119,8 @@ protected void render(Block html) {
row.td()._("N/A")._();
} else {
String httpAddress = info.getNodeHTTPAddress();
- row.td().a("http://" + httpAddress, httpAddress)._();
+ row.td().a(HttpConfig.getSchemePrefix() + httpAddress,
+ httpAddress)._();
}
row.td(info.getHealthStatus()).
td().br().$title(String.valueOf(info.getLastHealthUpdate()))._().
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
index 5ad726e3b3..61b4880e13 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
@@ -23,6 +23,7 @@
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -55,7 +56,8 @@ public AppAttemptInfo(RMAppAttempt attempt) {
this.containerId = masterContainer.getId().toString();
this.nodeHttpAddress = masterContainer.getNodeHttpAddress();
this.nodeId = masterContainer.getNodeId().toString();
- this.logsLink = join("http://", masterContainer.getNodeHttpAddress(),
+ this.logsLink = join(HttpConfig.getSchemePrefix(),
+ masterContainer.getNodeHttpAddress(),
"/node", "/containerlogs/",
ConverterUtils.toString(masterContainer.getId()),
"/", attempt.getSubmissionContext().getUser());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 47a263ded8..8a38278e56 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -24,6 +24,7 @@
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -87,10 +88,10 @@ public AppInfo(RMApp app, Boolean hasAccess) {
this.trackingUI = this.trackingUrlIsNotReady ? "UNASSIGNED" : (app
.getFinishTime() == 0 ? "ApplicationMaster" : "History");
if (!trackingUrlIsNotReady) {
- this.trackingUrl = join("http://", trackingUrl);
+ this.trackingUrl = join(HttpConfig.getSchemePrefix(), trackingUrl);
}
this.trackingUrlPretty = trackingUrlIsNotReady ? "UNASSIGNED" : join(
- "http://", trackingUrl);
+ HttpConfig.getSchemePrefix(), trackingUrl);
this.applicationId = app.getApplicationId();
this.appIdNum = String.valueOf(app.getApplicationId().getId());
this.id = app.getApplicationId().toString();
@@ -104,7 +105,6 @@ public AppInfo(RMApp app, Boolean hasAccess) {
}
this.finalStatus = app.getFinalApplicationStatus();
this.clusterId = ResourceManager.clusterTimeStamp;
-
if (hasAccess) {
this.startedTime = app.getStartTime();
this.finishedTime = app.getFinishTime();
@@ -116,7 +116,8 @@ public AppInfo(RMApp app, Boolean hasAccess) {
Container masterContainer = attempt.getMasterContainer();
if (masterContainer != null) {
this.amContainerLogsExist = true;
- String url = join("http://", masterContainer.getNodeHttpAddress(),
+ String url = join(HttpConfig.getSchemePrefix(),
+ masterContainer.getNodeHttpAddress(),
"/node", "/containerlogs/",
ConverterUtils.toString(masterContainer.getId()),
"/", app.getUser());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java
index 61e31eee93..7545fc0d6e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java
@@ -27,6 +27,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.yarn.api.records.ApplicationId;
public class ProxyUriUtils {
@@ -138,8 +139,8 @@ public static URI getProxyUri(URI originalUri, URI proxyUri,
* @return a URI with an http scheme
* @throws URISyntaxException if the url is not formatted correctly.
*/
- public static URI getUriFromAMUrl(String noSchemeUrl)
+ public static URI getUriFromAMUrl(String noSchemeUrl)
throws URISyntaxException {
- return new URI("http://"+noSchemeUrl);
+ return new URI(HttpConfig.getSchemePrefix() + noSchemeUrl);
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java
index fdd7a70ffc..bc43d51e29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
+import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -37,7 +38,8 @@ public void initFilter(FilterContainer container, Configuration conf) {
String proxy = YarnConfiguration.getProxyHostAndPort(conf);
String[] parts = proxy.split(":");
params.put(AmIpFilter.PROXY_HOST, parts[0]);
- params.put(AmIpFilter.PROXY_URI_BASE, "http://"+proxy+
+ params.put(AmIpFilter.PROXY_URI_BASE,
+ HttpConfig.getSchemePrefix() + proxy +
System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV));
container.addFilter(FILTER_NAME, FILTER_CLASS, params);
}
|