diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 12255852a9..7a898741fa 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -3,6 +3,7 @@ Hadoop Change Log Trunk (unreleased changes) INCOMPATIBLE CHANGES + HADOOP-7920. Remove Avro Rpc. (suresh) NEW FEATURES HADOOP-7773. Add support for protocol buffer based RPC engine. @@ -136,6 +137,10 @@ Trunk (unreleased changes) HADOOP-7913 Fix bug in ProtoBufRpcEngine (sanjay) + HADOOP-7810. move hadoop archive to core from tools. (tucu) + + HADOOP-7892. IPC logs too verbose after "RpcKind" introduction (todd) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index 1dc545a274..f477d385a5 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -239,11 +239,6 @@ avro compile - - org.apache.avro - avro-ipc - compile - net.sf.kosmosfs kfs @@ -282,7 +277,6 @@ generate-test-sources schema - protocol diff --git a/hadoop-mapreduce-project/src/tools/org/apache/hadoop/fs/HarFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java similarity index 100% rename from hadoop-mapreduce-project/src/tools/org/apache/hadoop/fs/HarFileSystem.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroRpcEngine.java deleted file mode 100644 index 8fec3d22b8..0000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroRpcEngine.java +++ /dev/null @@ -1,269 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ipc; - -import java.io.Closeable; -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.HashMap; - -import javax.net.SocketFactory; - -import org.apache.avro.ipc.Responder; -import org.apache.avro.ipc.Transceiver; -import org.apache.avro.ipc.reflect.ReflectRequestor; -import org.apache.avro.ipc.reflect.ReflectResponder; -import org.apache.avro.ipc.specific.SpecificRequestor; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.SecretManager; -import org.apache.hadoop.security.token.TokenIdentifier; - -/** Tunnel Avro-format RPC requests over a Hadoop {@link RPC} connection. This - * does not give cross-language wire compatibility, since the Hadoop RPC wire - * format is non-standard, but it does permit use of Avro's protocol versioning - * features for inter-Java RPCs. */ -@InterfaceStability.Evolving -public class AvroRpcEngine implements RpcEngine { - private static final Log LOG = LogFactory.getLog(RPC.class); - - private static int VERSION = 1; - - // the implementation we tunnel through - private static final RpcEngine ENGINE = new WritableRpcEngine(); - - /** Tunnel an Avro RPC request and response through Hadoop's RPC. */ - private static interface TunnelProtocol extends VersionedProtocol { - //WritableRpcEngine expects a versionID in every protocol. - public static final long versionID = VERSION; - /** All Avro methods and responses go through this. */ - BufferListWritable call(String protocol, BufferListWritable request) - throws IOException; - } - - /** A Writable that holds a List, The Avro RPC Transceiver's - * basic unit of data transfer.*/ - private static class BufferListWritable implements Writable { - private List buffers; - - public BufferListWritable() {} // required for RPC Writables - - public BufferListWritable(List buffers) { - this.buffers = buffers; - } - - public void readFields(DataInput in) throws IOException { - int size = in.readInt(); - buffers = new ArrayList(size); - for (int i = 0; i < size; i++) { - int length = in.readInt(); - ByteBuffer buffer = ByteBuffer.allocate(length); - in.readFully(buffer.array(), 0, length); - buffers.add(buffer); - } - } - - public void write(DataOutput out) throws IOException { - out.writeInt(buffers.size()); - for (ByteBuffer buffer : buffers) { - out.writeInt(buffer.remaining()); - out.write(buffer.array(), buffer.position(), buffer.remaining()); - } - } - } - - /** An Avro RPC Transceiver that tunnels client requests through Hadoop - * RPC. 
*/ - private static class ClientTransceiver extends Transceiver { - private TunnelProtocol tunnel; - private InetSocketAddress remote; - private String protocol; - - public ClientTransceiver(InetSocketAddress addr, - UserGroupInformation ticket, - Configuration conf, SocketFactory factory, - int rpcTimeout, String protocol) - throws IOException { - this.tunnel = ENGINE.getProxy(TunnelProtocol.class, VERSION, - addr, ticket, conf, factory, - rpcTimeout).getProxy(); - this.remote = addr; - this.protocol = protocol; - } - - public String getRemoteName() { return remote.toString(); } - - public List transceive(List request) - throws IOException { - return tunnel.call(protocol, new BufferListWritable(request)).buffers; - } - - public List readBuffers() throws IOException { - throw new UnsupportedOperationException(); - } - - public void writeBuffers(List buffers) throws IOException { - throw new UnsupportedOperationException(); - } - - public void close() throws IOException { - RPC.stopProxy(tunnel); - } - } - - /** Construct a client-side proxy object that implements the named protocol, - * talking to a server at the named address. - * @param */ - @SuppressWarnings("unchecked") - public ProtocolProxy getProxy(Class protocol, long clientVersion, - InetSocketAddress addr, UserGroupInformation ticket, - Configuration conf, SocketFactory factory, - int rpcTimeout) - throws IOException { - return new ProtocolProxy(protocol, - (T)Proxy.newProxyInstance( - protocol.getClassLoader(), - new Class[] { protocol }, - new Invoker(protocol, addr, ticket, conf, factory, rpcTimeout)), - false); - } - - private class Invoker implements InvocationHandler, Closeable { - private final ClientTransceiver tx; - private final SpecificRequestor requestor; - public Invoker(Class protocol, InetSocketAddress addr, - UserGroupInformation ticket, Configuration conf, - SocketFactory factory, - int rpcTimeout) throws IOException { - this.tx = new ClientTransceiver(addr, ticket, conf, factory, rpcTimeout, - protocol.getName()); - this.requestor = createRequestor(protocol, tx); - } - @Override public Object invoke(Object proxy, Method method, Object[] args) - throws Throwable { - return requestor.invoke(proxy, method, args); - } - public void close() throws IOException { - tx.close(); - } - } - - protected SpecificRequestor createRequestor(Class protocol, - Transceiver transeiver) throws IOException { - return new ReflectRequestor(protocol, transeiver); - } - - protected Responder createResponder(Class iface, Object impl) { - return new ReflectResponder(iface, impl); - } - - /** An Avro RPC Responder that can process requests passed via Hadoop RPC. 
*/ - private class TunnelResponder implements TunnelProtocol { - private Map responders = - new HashMap(); - - public void addProtocol(Class iface, Object impl) { - responders.put(iface.getName(), createResponder(iface, impl)); - } - - @Override - public long getProtocolVersion(String protocol, long version) - throws IOException { - return VERSION; - } - - @Override - public ProtocolSignature getProtocolSignature( - String protocol, long version, int clientMethodsHashCode) - throws IOException { - return ProtocolSignature.getProtocolSignature - (clientMethodsHashCode, VERSION, TunnelProtocol.class); - } - - public BufferListWritable call(String protocol, BufferListWritable request) - throws IOException { - Responder responder = responders.get(protocol); - if (responder == null) - throw new IOException("No responder for: "+protocol); - return new BufferListWritable(responder.respond(request.buffers)); - } - - } - - public Object[] call(Method method, Object[][] params, - InetSocketAddress[] addrs, UserGroupInformation ticket, - Configuration conf) throws IOException { - throw new UnsupportedOperationException(); - } - - private class Server extends WritableRpcEngine.Server { - private TunnelResponder responder = new TunnelResponder(); - - public Server(Class iface, Object impl, String bindAddress, - int port, int numHandlers, int numReaders, - int queueSizePerHandler, boolean verbose, - Configuration conf, - SecretManager secretManager - ) throws IOException { - super((Class)null, new Object(), conf, - bindAddress, port, numHandlers, numReaders, - queueSizePerHandler, verbose, secretManager); - // RpcKind is WRITABLE since Avro is tunneled through WRITABLE - super.addProtocol(RpcKind.RPC_WRITABLE, TunnelProtocol.class, responder); - responder.addProtocol(iface, impl); - } - - - @Override - public Server - addProtocol(RpcKind rpcKind, Class protocolClass, Object protocolImpl) - throws IOException { - responder.addProtocol(protocolClass, protocolImpl); - return this; - } - } - - /** Construct a server for a protocol implementation instance listening on a - * port and address. 
*/ - public RPC.Server getServer(Class iface, Object impl, String bindAddress, - int port, int numHandlers, int numReaders, - int queueSizePerHandler, boolean verbose, - Configuration conf, - SecretManager secretManager - ) throws IOException { - return new Server - (iface, impl, bindAddress, port, numHandlers, numReaders, - queueSizePerHandler, verbose, conf, secretManager); - } - -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcPayloadHeader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcPayloadHeader.java index 430e0a9dea..6e97159fb4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcPayloadHeader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcPayloadHeader.java @@ -56,9 +56,8 @@ static RpcPayloadOperation readFields(DataInput in) throws IOException { public enum RpcKind { RPC_BUILTIN ((short) 1), // Used for built in calls by tests RPC_WRITABLE ((short) 2), // Use WritableRpcEngine - RPC_PROTOCOL_BUFFER ((short) 3), // Use ProtobufRpcEngine - RPC_AVRO ((short) 4); // Use AvroRpcEngine - static final short MAX_INDEX = RPC_AVRO.value; // used for array size + RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine + final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size private static final short FIRST_INDEX = RPC_BUILTIN.value; private final short value; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 52ea35c522..b9220a6df5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -170,7 +170,7 @@ public static void registerProtocolEngine(RpcKind rpcKind, throw new IllegalArgumentException("ReRegistration of rpcKind: " + rpcKind); } - LOG.info("rpcKind=" + rpcKind + + LOG.debug("rpcKind=" + rpcKind + ", rpcRequestWrapperClass=" + rpcRequestWrapperClass + ", rpcInvoker=" + rpcInvoker); } diff --git a/hadoop-common-project/hadoop-common/src/test/avro/AvroSpecificTestProtocol.avpr b/hadoop-common-project/hadoop-common/src/test/avro/AvroSpecificTestProtocol.avpr deleted file mode 100644 index 18960c1de4..0000000000 --- a/hadoop-common-project/hadoop-common/src/test/avro/AvroSpecificTestProtocol.avpr +++ /dev/null @@ -1,42 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -{ - "protocol" : "AvroSpecificTestProtocol", - "namespace" : "org.apache.hadoop.ipc", - - "messages" : { - "echo" : { - "request" : [ { - "name" : "message", - "type" : "string" - } ], - "response" : "string" - }, - - "add" : { - "request" : [ { - "name" : "arg1", - "type" : "int" - }, { - "name" : "arg2", - "type" : "int", - "default" : 0 - } ], - "response" : "int" - } - } -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java similarity index 100% rename from hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/TestHarFileSystem.java rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAvroRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAvroRpc.java deleted file mode 100644 index 5ce3359428..0000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestAvroRpc.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ipc; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; - -import java.io.IOException; -import java.net.InetSocketAddress; - -import javax.security.sasl.Sasl; - -import junit.framework.Assert; -import junit.framework.TestCase; - -import org.apache.avro.AvroRemoteException; -import org.apache.avro.util.Utf8; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind; -import org.apache.hadoop.ipc.TestSaslRPC.CustomSecurityInfo; -import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier; -import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.SaslRpcServer; -import org.apache.hadoop.security.SecurityInfo; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.fs.CommonConfigurationKeys; - -/** Unit tests for AvroRpc. 
*/ -public class TestAvroRpc extends TestCase { - private static final String ADDRESS = "0.0.0.0"; - - public static final Log LOG = - LogFactory.getLog(TestAvroRpc.class); - - int datasize = 1024*100; - int numThreads = 50; - - public TestAvroRpc(String name) { super(name); } - - public static interface EmptyProtocol {} - public static class EmptyImpl implements EmptyProtocol {} - - public static class TestImpl implements AvroTestProtocol { - - public void ping() {} - - public String echo(String value) { return value; } - - public int add(int v1, int v2) { return v1 + v2; } - - public int error() throws Problem { - throw new Problem(); - } - } - - public void testReflect() throws Exception { - testReflect(false); - } - - public void testSecureReflect() throws Exception { - testReflect(true); - } - - public void testSpecific() throws Exception { - testSpecific(false); - } - - public void testSecureSpecific() throws Exception { - testSpecific(true); - } - - private void testReflect(boolean secure) throws Exception { - Configuration conf = new Configuration(); - TestTokenSecretManager sm = null; - if (secure) { - makeSecure(conf); - sm = new TestTokenSecretManager(); - } - UserGroupInformation.setConfiguration(conf); - RPC.setProtocolEngine(conf, EmptyProtocol.class, AvroRpcEngine.class); - RPC.setProtocolEngine(conf, AvroTestProtocol.class, AvroRpcEngine.class); - RPC.Server server = RPC.getServer(EmptyProtocol.class, new EmptyImpl(), - ADDRESS, 0, 5, true, conf, sm); - server.addProtocol(RpcKind.RPC_WRITABLE, - AvroTestProtocol.class, new TestImpl()); - - try { - server.start(); - InetSocketAddress addr = NetUtils.getConnectAddress(server); - - if (secure) { - addToken(sm, addr); - //QOP must be auth - Assert.assertEquals("auth", SaslRpcServer.SASL_PROPS.get(Sasl.QOP)); - } - - AvroTestProtocol proxy = - (AvroTestProtocol)RPC.getProxy(AvroTestProtocol.class, 0, addr, conf); - - proxy.ping(); - - String echo = proxy.echo("hello world"); - assertEquals("hello world", echo); - - int intResult = proxy.add(1, 2); - assertEquals(3, intResult); - - boolean caught = false; - try { - proxy.error(); - } catch (AvroRemoteException e) { - if(LOG.isDebugEnabled()) { - LOG.debug("Caught " + e); - } - caught = true; - } - assertTrue(caught); - - } finally { - resetSecurity(); - server.stop(); - } - } - - private void makeSecure(Configuration conf) { - conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - conf.set("hadoop.rpc.socket.factory.class.default", ""); - //Avro doesn't work with security annotations on protocol. 
- //Avro works ONLY with custom security context - SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo()); - } - - private void resetSecurity() { - SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]); - } - - private void addToken(TestTokenSecretManager sm, - InetSocketAddress addr) throws IOException { - final UserGroupInformation current = UserGroupInformation.getCurrentUser(); - - TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current - .getUserName())); - Token token = new Token(tokenId, - sm); - Text host = new Text(addr.getAddress().getHostAddress() + ":" - + addr.getPort()); - token.setService(host); - LOG.info("Service IP address for token is " + host); - current.addToken(token); - } - - private void testSpecific(boolean secure) throws Exception { - Configuration conf = new Configuration(); - TestTokenSecretManager sm = null; - if (secure) { - makeSecure(conf); - sm = new TestTokenSecretManager(); - } - UserGroupInformation.setConfiguration(conf); - RPC.setProtocolEngine(conf, AvroSpecificTestProtocol.class, - AvroSpecificRpcEngine.class); - Server server = RPC.getServer(AvroSpecificTestProtocol.class, - new AvroSpecificTestProtocolImpl(), ADDRESS, 0, 5, true, - conf, sm); - try { - server.start(); - InetSocketAddress addr = NetUtils.getConnectAddress(server); - - if (secure) { - addToken(sm, addr); - //QOP must be auth - Assert.assertEquals("auth", SaslRpcServer.SASL_PROPS.get(Sasl.QOP)); - } - - AvroSpecificTestProtocol proxy = - (AvroSpecificTestProtocol)RPC.getProxy(AvroSpecificTestProtocol.class, - 0, addr, conf); - - CharSequence echo = proxy.echo("hello world"); - assertEquals("hello world", echo.toString()); - - int intResult = proxy.add(1, 2); - assertEquals(3, intResult); - - } finally { - resetSecurity(); - server.stop(); - } - } - - public static class AvroSpecificTestProtocolImpl implements - AvroSpecificTestProtocol { - - @Override - public int add(int arg1, int arg2) throws AvroRemoteException { - return arg1 + arg2; - } - - @Override - public CharSequence echo(CharSequence msg) throws AvroRemoteException { - return msg; - } - - } - -} diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 8ae1563541..fbe56b6395 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -269,6 +269,13 @@ + + + + org.apache.maven.plugins + maven-eclipse-plugin + 2.6 + org.apache.maven.plugins maven-surefire-plugin diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml index 824edc3210..299d6f8634 100644 --- a/hadoop-hdfs-project/pom.xml +++ b/hadoop-hdfs-project/pom.xml @@ -30,6 +30,7 @@ hadoop-hdfs hadoop-hdfs-httpfs + hadoop-hdfs/src/contrib/bkjournal diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 7edc98da35..6748d60a7f 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -3,6 +3,7 @@ Hadoop MapReduce Change Log Trunk (unreleased changes) INCOMPATIBLE CHANGES + MAPREDUCE-3545. Remove Avro RPC. (suresh) NEW FEATURES @@ -77,6 +78,12 @@ Trunk (unreleased changes) MAPREDUCE-3389. MRApps loads the 'mrapp-generated-classpath' file with classpath from the build machine. (tucu) + MAPREDUCE-3544. gridmix build is broken, requires hadoop-archives to be added as + ivy dependency. (tucu) + + MAPREDUCE-3557. MR1 test fail to compile because of missing hadoop-archives dependency. 
+ (tucu) + Release 0.23.1 - Unreleased INCOMPATIBLE CHANGES @@ -85,6 +92,9 @@ Release 0.23.1 - Unreleased MAPREDUCE-3121. NodeManager should handle disk-failures (Ravi Gummadi via mahadev) + MAPREDUCE-2863. Support web services for YARN and MR components. (Thomas + Graves via vinodkv) + IMPROVEMENTS MAPREDUCE-3297. Moved log related components into yarn-common so that @@ -276,6 +286,22 @@ Release 0.23.1 - Unreleased MAPREDUCE-3537. Fix race condition in DefaultContainerExecutor which led to container localization occuring in wrong directories. (acmurthy) + MAPREDUCE-3542. Support "FileSystemCounter" legacy counter group name for + compatibility. (tomwhite) + + MAPREDUCE-3426. Fixed MR AM in uber mode to write map intermediate outputs + in the correct directory to work properly in secure mode. (Hitesh Shah via + vinodkv) + + MAPREDUCE-3541. Fix broken TestJobQueueClient test. (Ravi Prakash via + mahadev) + + MAPREDUCE-3398. Fixed log aggregation to work correctly in secure mode. + (Siddharth Seth via vinodkv) + + MAPREDUCE-3530. Fixed an NPE occuring during scheduling in the + ResourceManager. (Arun C Murthy via vinodkv) + Release 0.23.0 - 2011-11-01 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java index d2400f053f..cb3e80b8b2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java @@ -22,20 +22,19 @@ import java.io.File; import java.io.IOException; import java.io.PrintStream; -import java.net.URI; import java.util.HashSet; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FSError; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.JobCounter; import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.TypeConverter; @@ -47,13 +46,12 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.Job; -import org.apache.hadoop.mapreduce.v2.app.job.Task; -import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.YarnException; +import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.service.AbstractService; /** @@ -80,7 +78,10 @@ public LocalContainerLauncher(AppContext context, super(LocalContainerLauncher.class.getName()); 
this.context = context; this.umbilical = umbilical; - // umbilical: MRAppMaster creates (taskAttemptListener), passes to us (TODO/FIXME: pointless to use RPC to talk to self; should create LocalTaskAttemptListener or similar: implement umbilical protocol but skip RPC stuff) + // umbilical: MRAppMaster creates (taskAttemptListener), passes to us + // (TODO/FIXME: pointless to use RPC to talk to self; should create + // LocalTaskAttemptListener or similar: implement umbilical protocol + // but skip RPC stuff) try { curFC = FileContext.getFileContext(curDir.toURI()); @@ -152,7 +153,6 @@ public void handle(ContainerLauncherEvent event) { * ]] * - runs Task (runSubMap() or runSubReduce()) * - TA can safely send TA_UPDATE since in RUNNING state - * [modulo possible TA-state-machine race noted below: CHECK (TODO)] */ private class SubtaskRunner implements Runnable { @@ -162,6 +162,7 @@ private class SubtaskRunner implements Runnable { SubtaskRunner() { } + @SuppressWarnings("unchecked") @Override public void run() { ContainerLauncherEvent event = null; @@ -183,7 +184,7 @@ public void run() { ContainerRemoteLaunchEvent launchEv = (ContainerRemoteLaunchEvent)event; - TaskAttemptId attemptID = launchEv.getTaskAttemptID(); //FIXME: can attemptID ever be null? (only if retrieved over umbilical?) + TaskAttemptId attemptID = launchEv.getTaskAttemptID(); Job job = context.getAllJobs().get(attemptID.getTaskId().getJobId()); int numMapTasks = job.getTotalMaps(); @@ -204,7 +205,6 @@ public void run() { // port number is set to -1 in this case. context.getEventHandler().handle( new TaskAttemptContainerLaunchedEvent(attemptID, -1)); - //FIXME: race condition here? or do we have same kind of lock on TA handler => MapTask can't send TA_UPDATE before TA_CONTAINER_LAUNCHED moves TA to RUNNING state? (probably latter) if (numMapTasks == 0) { doneWithMaps = true; @@ -259,6 +259,7 @@ public void run() { } } + @SuppressWarnings("deprecation") private void runSubtask(org.apache.hadoop.mapred.Task task, final TaskType taskType, TaskAttemptId attemptID, @@ -270,6 +271,19 @@ private void runSubtask(org.apache.hadoop.mapred.Task task, try { JobConf conf = new JobConf(getConfig()); + conf.set(JobContext.TASK_ID, task.getTaskID().toString()); + conf.set(JobContext.TASK_ATTEMPT_ID, classicAttemptID.toString()); + conf.setBoolean(JobContext.TASK_ISMAP, (taskType == TaskType.MAP)); + conf.setInt(JobContext.TASK_PARTITION, task.getPartition()); + conf.set(JobContext.ID, task.getJobID().toString()); + + // Use the AM's local dir env to generate the intermediate step + // output files + String[] localSysDirs = StringUtils.getTrimmedStrings( + System.getenv(ApplicationConstants.LOCAL_DIR_ENV)); + conf.setStrings(MRConfig.LOCAL_DIR, localSysDirs); + LOG.info(MRConfig.LOCAL_DIR + " for uber task: " + + conf.get(MRConfig.LOCAL_DIR)); // mark this as an uberized subtask so it can set task counter // (longer-term/FIXME: could redefine as job counter and send @@ -285,12 +299,12 @@ private void runSubtask(org.apache.hadoop.mapred.Task task, if (doneWithMaps) { LOG.error("CONTAINER_REMOTE_LAUNCH contains a map task (" + attemptID + "), but should be finished with maps"); - // throw new RuntimeException() (FIXME: what's appropriate here?) + throw new RuntimeException(); } MapTask map = (MapTask)task; + map.setConf(conf); - //CODE-REVIEWER QUESTION: why not task.getConf() or map.getConf() instead of conf? do we need Task's localizeConfiguration() run on this first? 
map.run(conf, umbilical); if (renameOutputs) { @@ -305,19 +319,23 @@ private void runSubtask(org.apache.hadoop.mapred.Task task, } else /* TaskType.REDUCE */ { if (!doneWithMaps) { - //check if event-queue empty? whole idea of counting maps vs. checking event queue is a tad wacky...but could enforce ordering (assuming no "lost events") at LocalMRAppMaster [CURRENT BUG(?): doesn't send reduce event until maps all done] + // check if event-queue empty? whole idea of counting maps vs. + // checking event queue is a tad wacky...but could enforce ordering + // (assuming no "lost events") at LocalMRAppMaster [CURRENT BUG(?): + // doesn't send reduce event until maps all done] LOG.error("CONTAINER_REMOTE_LAUNCH contains a reduce task (" + attemptID + "), but not yet finished with maps"); - // throw new RuntimeException() (FIXME) // or push reduce event back onto end of queue? (probably former) + throw new RuntimeException(); } - ReduceTask reduce = (ReduceTask)task; - // a.k.a. "mapreduce.jobtracker.address" in LocalJobRunner: // set framework name to local to make task local conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME); conf.set(MRConfig.MASTER_ADDRESS, "local"); // bypass shuffle + ReduceTask reduce = (ReduceTask)task; + reduce.setConf(conf); + reduce.run(conf, umbilical); //relocalize(); // needed only if more than one reducer supported (is MAPREDUCE-434 fixed yet?) } @@ -334,18 +352,7 @@ private void runSubtask(org.apache.hadoop.mapred.Task task, try { if (task != null) { // do cleanup for the task -// if (childUGI == null) { // no need to job into doAs block - task.taskCleanup(umbilical); -// } else { -// final Task taskFinal = task; -// childUGI.doAs(new PrivilegedExceptionAction() { -// @Override -// public Object run() throws Exception { -// taskFinal.taskCleanup(umbilical); -// return null; -// } -// }); -// } + task.taskCleanup(umbilical); } } catch (Exception e) { LOG.info("Exception cleaning up: " @@ -354,51 +361,21 @@ private void runSubtask(org.apache.hadoop.mapred.Task task, // Report back any failures, for diagnostic purposes ByteArrayOutputStream baos = new ByteArrayOutputStream(); exception.printStackTrace(new PrintStream(baos)); -// if (classicAttemptID != null) { - umbilical.reportDiagnosticInfo(classicAttemptID, baos.toString()); -// } + umbilical.reportDiagnosticInfo(classicAttemptID, baos.toString()); throw new RuntimeException(); } catch (Throwable throwable) { LOG.fatal("Error running local (uberized) 'child' : " + StringUtils.stringifyException(throwable)); -// if (classicAttemptID != null) { - Throwable tCause = throwable.getCause(); - String cause = (tCause == null) - ? throwable.getMessage() - : StringUtils.stringifyException(tCause); - umbilical.fatalError(classicAttemptID, cause); -// } + Throwable tCause = throwable.getCause(); + String cause = (tCause == null) + ? throwable.getMessage() + : StringUtils.stringifyException(tCause); + umbilical.fatalError(classicAttemptID, cause); throw new RuntimeException(); - - } finally { -/* -FIXME: do we need to do any of this stuff? (guessing not since not in own JVM) - RPC.stopProxy(umbilical); - DefaultMetricsSystem.shutdown(); - // Shutting down log4j of the child-vm... - // This assumes that on return from Task.run() - // there is no more logging done. - LogManager.shutdown(); - */ } } - -/* FIXME: may not need renameMapOutputForReduce() anymore? TEST! 
- -${local.dir}/usercache/$user/appcache/$appId/$contId/ == $cwd for containers; -contains launch_container.sh script, which, when executed, creates symlinks and -sets up env - "$local.dir"/usercache/$user/appcache/$appId/$contId/file.out - "$local.dir"/usercache/$user/appcache/$appId/$contId/file.out.idx (?) - "$local.dir"/usercache/$user/appcache/$appId/output/$taskId/ is where file.out* is moved after MapTask done - - OHO! no further need for this at all? $taskId is unique per subtask - now => should work fine to leave alone. TODO: test with teragen or - similar - */ - /** * Within the _local_ filesystem (not HDFS), all activity takes place within * a single subdir (${local.dir}/usercache/$user/appcache/$appId/$contId/), @@ -409,14 +386,21 @@ private void runSubtask(org.apache.hadoop.mapred.Task task, * filenames instead of "file.out". (All of this is entirely internal, * so there are no particular compatibility issues.) */ + @SuppressWarnings("deprecation") private void renameMapOutputForReduce(JobConf conf, TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException { FileSystem localFs = FileSystem.getLocal(conf); // move map output to reduce input Path mapOut = subMapOutputFile.getOutputFile(); + FileStatus mStatus = localFs.getFileStatus(mapOut); Path reduceIn = subMapOutputFile.getInputFileForWrite( - TypeConverter.fromYarn(mapId).getTaskID(), localFs.getLength(mapOut)); + TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen()); + if (LOG.isDebugEnabled()) { + LOG.debug("Renaming map output file for task attempt " + + mapId.toString() + " from original location " + mapOut.toString() + + " to destination " + reduceIn.toString()); + } if (!localFs.mkdirs(reduceIn.getParent())) { throw new IOException("Mkdirs failed to create " + reduceIn.getParent().toString()); @@ -429,8 +413,7 @@ private void renameMapOutputForReduce(JobConf conf, TaskAttemptId mapId, * Also within the local filesystem, we need to restore the initial state * of the directory as much as possible. Compare current contents against * the saved original state and nuke everything that doesn't belong, with - * the exception of the renamed map outputs (see above). -FIXME: do we really need to worry about renamed map outputs, or already moved to output dir on commit? if latter, fix comment + * the exception of the renamed map outputs. * * Any jobs that go out of their way to rename or delete things from the * local directory are considered broken and deserve what they get... 
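[Editor's note, not part of the patch] For readers skimming the LocalContainerLauncher hunk above: the map-output rename step now determines the output file size through a FileStatus lookup instead of the removed localFs.getLength(mapOut) call. A minimal, self-contained sketch of that pattern follows; the class and method names are illustrative only and do not appear in the patch.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MapOutputLengthSketch {
  /** Return the length of a local map output file, as the patched code does. */
  public static long mapOutputLength(Configuration conf, Path mapOut)
      throws IOException {
    FileSystem localFs = FileSystem.getLocal(conf);     // local FS, as in the hunk
    FileStatus status = localFs.getFileStatus(mapOut);  // replaces localFs.getLength(mapOut)
    return status.getLen();                             // value passed to getInputFileForWrite(...)
  }
}
```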
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java index 689671a6fe..de77711956 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java @@ -236,6 +236,13 @@ public static List getVMCommand( getTaskLogFile(TaskLog.LogName.PROFILE) ) ); + if (task.isMapTask()) { + vargs.add(conf.get(MRJobConfig.TASK_MAP_PROFILE_PARAMS, "")); + } + else { + vargs.add(conf.get(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "")); + } + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 800dfa9d36..f9de4bc503 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -156,6 +156,7 @@ public class MRAppMaster extends CompositeService { private OutputCommitter committer; private JobEventDispatcher jobEventDispatcher; private boolean inRecovery = false; + private SpeculatorEventDispatcher speculatorEventDispatcher; private Job job; private Credentials fsTokens = new Credentials(); // Filled during init @@ -265,8 +266,9 @@ public void init(final Configuration conf) { addIfService(speculator); } + speculatorEventDispatcher = new SpeculatorEventDispatcher(conf); dispatcher.register(Speculator.EventType.class, - new SpeculatorEventDispatcher(conf)); + speculatorEventDispatcher); // service to allocate containers from RM (if non-uber) or to fake it (uber) containerAllocator = createContainerAllocator(clientService, context); @@ -386,7 +388,7 @@ public void handle(JobFinishEvent event) { // This will also send the final report to the ResourceManager LOG.info("Calling stop for all the services"); stop(); - + // Send job-end notification try { LOG.info("Job end notification started for jobID : " @@ -401,14 +403,14 @@ public void handle(JobFinishEvent event) { } catch (Throwable t) { LOG.warn("Graceful stop failed ", t); } - + // Cleanup staging directory try { cleanupStagingDir(); } catch(IOException io) { LOG.warn("Failed to delete staging dir"); } - + //Bring the process down by force. //Not needed after HADOOP-7140 LOG.info("Exiting MR AppMaster..GoodBye!"); @@ -790,10 +792,6 @@ public void start() { // job-init to be done completely here. jobEventDispatcher.handle(initJobEvent); - // send init to speculator. This won't yest start as dispatcher isn't - // started yet. - dispatcher.getEventHandler().handle( - new SpeculatorEvent(job.getID(), clock.getTime())); // JobImpl's InitTransition is done (call above is synchronous), so the // "uber-decision" (MR-1220) has been made. Query job and switch to @@ -801,9 +799,15 @@ public void start() { // and container-launcher services/event-handlers). 
if (job.isUber()) { + speculatorEventDispatcher.disableSpeculation(); LOG.info("MRAppMaster uberizing job " + job.getID() - + " in local container (\"uber-AM\")."); + + " in local container (\"uber-AM\") on node " + + nmHost + ":" + nmPort + "."); } else { + // send init to speculator only for non-uber jobs. + // This won't yet start as dispatcher isn't started yet. + dispatcher.getEventHandler().handle( + new SpeculatorEvent(job.getID(), clock.getTime())); LOG.info("MRAppMaster launching normal, non-uberized, multi-container " + "job " + job.getID() + "."); } @@ -865,17 +869,24 @@ public void handle(TaskAttemptEvent event) { private class SpeculatorEventDispatcher implements EventHandler { private final Configuration conf; + private volatile boolean disabled; public SpeculatorEventDispatcher(Configuration config) { this.conf = config; } @Override public void handle(SpeculatorEvent event) { - if (conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false) - || conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false)) { + if (!disabled && + (conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false) + || conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false))) { // Speculator IS enabled, direct the event to there. speculator.handle(event); } } + + public void disableSpeculation() { + disabled = true; + } + } private static void validateInputParam(String value, String param) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index 0c27d23dc7..2d787a6776 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -151,7 +151,7 @@ public void start() { + ":" + server.getPort()); LOG.info("Instantiated MRClientService at " + this.bindAddress); try { - webApp = WebApps.$for("mapreduce", AppContext.class, appContext).with(conf). + webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws").with(conf). start(new AMWebApp()); } catch (Exception e) { LOG.error("Webapps failed to start. 
Ignoring for now:", e); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index dd19ed07b9..9291075e6d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -54,6 +54,7 @@ import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent; import org.apache.hadoop.mapreduce.lib.chain.ChainMapper; import org.apache.hadoop.mapreduce.lib.chain.ChainReducer; +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.security.TokenCache; import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier; import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager; @@ -583,13 +584,13 @@ public JobReport getReport() { if (getState() == JobState.NEW) { return MRBuilderUtils.newJobReport(jobId, jobName, username, state, appSubmitTime, startTime, finishTime, setupProgress, 0.0f, 0.0f, - cleanupProgress, remoteJobConfFile.toString(), amInfos); + cleanupProgress, remoteJobConfFile.toString(), amInfos, isUber); } return MRBuilderUtils.newJobReport(jobId, jobName, username, state, appSubmitTime, startTime, finishTime, setupProgress, computeProgress(mapTasks), computeProgress(reduceTasks), - cleanupProgress, remoteJobConfFile.toString(), amInfos); + cleanupProgress, remoteJobConfFile.toString(), amInfos, isUber); } finally { readLock.unlock(); } @@ -812,6 +813,129 @@ public List getAMInfos() { return amInfos; } + /** + * Decide whether job can be run in uber mode based on various criteria. + * @param dataInputLength Total length for all splits + */ + private void makeUberDecision(long dataInputLength) { + //FIXME: need new memory criterion for uber-decision (oops, too late here; + // until AM-resizing supported, + // must depend on job client to pass fat-slot needs) + // these are no longer "system" settings, necessarily; user may override + int sysMaxMaps = conf.getInt(MRJobConfig.JOB_UBERTASK_MAXMAPS, 9); + + //FIXME: handling multiple reduces within a single AM does not seem to + //work. 
+ // int sysMaxReduces = + // job.conf.getInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 1); + int sysMaxReduces = 1; + + long sysMaxBytes = conf.getLong(MRJobConfig.JOB_UBERTASK_MAXBYTES, + conf.getLong("dfs.block.size", 64*1024*1024)); //FIXME: this is + // wrong; get FS from [File?]InputFormat and default block size from that + + long sysMemSizeForUberSlot = + conf.getInt(MRJobConfig.MR_AM_VMEM_MB, + MRJobConfig.DEFAULT_MR_AM_VMEM_MB); + + boolean uberEnabled = + conf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); + boolean smallNumMapTasks = (numMapTasks <= sysMaxMaps); + boolean smallNumReduceTasks = (numReduceTasks <= sysMaxReduces); + boolean smallInput = (dataInputLength <= sysMaxBytes); + // ignoring overhead due to UberAM and statics as negligible here: + boolean smallMemory = + ( (Math.max(conf.getLong(MRJobConfig.MAP_MEMORY_MB, 0), + conf.getLong(MRJobConfig.REDUCE_MEMORY_MB, 0)) + <= sysMemSizeForUberSlot) + || (sysMemSizeForUberSlot == JobConf.DISABLED_MEMORY_LIMIT)); + boolean notChainJob = !isChainJob(conf); + + // User has overall veto power over uberization, or user can modify + // limits (overriding system settings and potentially shooting + // themselves in the head). Note that ChainMapper/Reducer are + // fundamentally incompatible with MR-1220; they employ a blocking + // queue between the maps/reduces and thus require parallel execution, + // while "uber-AM" (MR AM + LocalContainerLauncher) loops over tasks + // and thus requires sequential execution. + isUber = uberEnabled && smallNumMapTasks && smallNumReduceTasks + && smallInput && smallMemory && notChainJob; + + if (isUber) { + LOG.info("Uberizing job " + jobId + ": " + numMapTasks + "m+" + + numReduceTasks + "r tasks (" + dataInputLength + + " input bytes) will run sequentially on single node."); + + // make sure reduces are scheduled only after all map are completed + conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, + 1.0f); + // uber-subtask attempts all get launched on same node; if one fails, + // probably should retry elsewhere, i.e., move entire uber-AM: ergo, + // limit attempts to 1 (or at most 2? probably not...) + conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1); + conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1); + + // disable speculation + conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false); + conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false); + } else { + StringBuilder msg = new StringBuilder(); + msg.append("Not uberizing ").append(jobId).append(" because:"); + if (!uberEnabled) + msg.append(" not enabled;"); + if (!smallNumMapTasks) + msg.append(" too many maps;"); + if (!smallNumReduceTasks) + msg.append(" too many reduces;"); + if (!smallInput) + msg.append(" too much input;"); + if (!smallMemory) + msg.append(" too much RAM;"); + if (!notChainJob) + msg.append(" chainjob"); + LOG.info(msg.toString()); + } + } + + /** + * ChainMapper and ChainReducer must execute in parallel, so they're not + * compatible with uberization/LocalContainerLauncher (100% sequential). 
+ */ + private boolean isChainJob(Configuration conf) { + boolean isChainJob = false; + try { + String mapClassName = conf.get(MRJobConfig.MAP_CLASS_ATTR); + if (mapClassName != null) { + Class mapClass = Class.forName(mapClassName); + if (ChainMapper.class.isAssignableFrom(mapClass)) + isChainJob = true; + } + } catch (ClassNotFoundException cnfe) { + // don't care; assume it's not derived from ChainMapper + } + try { + String reduceClassName = conf.get(MRJobConfig.REDUCE_CLASS_ATTR); + if (reduceClassName != null) { + Class reduceClass = Class.forName(reduceClassName); + if (ChainReducer.class.isAssignableFrom(reduceClass)) + isChainJob = true; + } + } catch (ClassNotFoundException cnfe) { + // don't care; assume it's not derived from ChainReducer + } + return isChainJob; + } + + /* + private int getBlockSize() { + String inputClassName = conf.get(MRJobConfig.INPUT_FORMAT_CLASS_ATTR); + if (inputClassName != null) { + Class inputClass - Class.forName(inputClassName); + if (FileInputFormat) + } + } + */ + public static class InitTransition implements MultipleArcTransition { @@ -863,81 +987,8 @@ public JobState transition(JobImpl job, JobEvent event) { inputLength += taskSplitMetaInfo[i].getInputDataLength(); } - //FIXME: need new memory criterion for uber-decision (oops, too late here; - // until AM-resizing supported, must depend on job client to pass fat-slot needs) - // these are no longer "system" settings, necessarily; user may override - int sysMaxMaps = job.conf.getInt(MRJobConfig.JOB_UBERTASK_MAXMAPS, 9); - int sysMaxReduces = - job.conf.getInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 1); - long sysMaxBytes = job.conf.getLong(MRJobConfig.JOB_UBERTASK_MAXBYTES, - job.conf.getLong("dfs.block.size", 64*1024*1024)); //FIXME: this is - // wrong; get FS from [File?]InputFormat and default block size from that - //long sysMemSizeForUberSlot = JobTracker.getMemSizeForReduceSlot(); - // FIXME [could use default AM-container memory size...] - - boolean uberEnabled = - job.conf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); - boolean smallNumMapTasks = (job.numMapTasks <= sysMaxMaps); - boolean smallNumReduceTasks = (job.numReduceTasks <= sysMaxReduces); - boolean smallInput = (inputLength <= sysMaxBytes); - boolean smallMemory = true; //FIXME (see above) - // ignoring overhead due to UberTask and statics as negligible here: - // FIXME && (Math.max(memoryPerMap, memoryPerReduce) <= sysMemSizeForUberSlot - // || sysMemSizeForUberSlot == JobConf.DISABLED_MEMORY_LIMIT) - boolean notChainJob = !isChainJob(job.conf); - - // User has overall veto power over uberization, or user can modify - // limits (overriding system settings and potentially shooting - // themselves in the head). Note that ChainMapper/Reducer are - // fundamentally incompatible with MR-1220; they employ a blocking - - // User has overall veto power over uberization, or user can modify - // limits (overriding system settings and potentially shooting - // themselves in the head). Note that ChainMapper/Reducer are - // fundamentally incompatible with MR-1220; they employ a blocking - // queue between the maps/reduces and thus require parallel execution, - // while "uber-AM" (MR AM + LocalContainerLauncher) loops over tasks - // and thus requires sequential execution. 
- job.isUber = uberEnabled && smallNumMapTasks && smallNumReduceTasks - && smallInput && smallMemory && notChainJob; - - if (job.isUber) { - LOG.info("Uberizing job " + job.jobId + ": " + job.numMapTasks + "m+" - + job.numReduceTasks + "r tasks (" + inputLength - + " input bytes) will run sequentially on single node."); - //TODO: also note which node? - - // make sure reduces are scheduled only after all map are completed - job.conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, - 1.0f); - // uber-subtask attempts all get launched on same node; if one fails, - // probably should retry elsewhere, i.e., move entire uber-AM: ergo, - // limit attempts to 1 (or at most 2? probably not...) - job.conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1); - job.conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1); - - // disable speculation: makes no sense to speculate an entire job - //canSpeculateMaps = canSpeculateReduces = false; // [TODO: in old - //version, ultimately was from conf.getMapSpeculativeExecution(), - //conf.getReduceSpeculativeExecution()] - } else { - StringBuilder msg = new StringBuilder(); - msg.append("Not uberizing ").append(job.jobId).append(" because:"); - if (!uberEnabled) - msg.append(" not enabled;"); - if (!smallNumMapTasks) - msg.append(" too many maps;"); - if (!smallNumReduceTasks) - msg.append(" too many reduces;"); - if (!smallInput) - msg.append(" too much input;"); - if (!smallMemory) - msg.append(" too much RAM;"); - if (!notChainJob) - msg.append(" chainjob"); - LOG.info(msg.toString()); - } - + job.makeUberDecision(inputLength); + job.taskAttemptCompletionEvents = new ArrayList( job.numMapTasks + job.numReduceTasks + 10); @@ -1008,35 +1059,6 @@ protected void setup(JobImpl job) throws IOException { } } - /** - * ChainMapper and ChainReducer must execute in parallel, so they're not - * compatible with uberization/LocalContainerLauncher (100% sequential). 
- */ - boolean isChainJob(Configuration conf) { - boolean isChainJob = false; - try { - String mapClassName = conf.get(MRJobConfig.MAP_CLASS_ATTR); - if (mapClassName != null) { - Class mapClass = Class.forName(mapClassName); - if (ChainMapper.class.isAssignableFrom(mapClass)) - isChainJob = true; - } - } catch (ClassNotFoundException cnfe) { - // don't care; assume it's not derived from ChainMapper - } - try { - String reduceClassName = conf.get(MRJobConfig.REDUCE_CLASS_ATTR); - if (reduceClassName != null) { - Class reduceClass = Class.forName(reduceClassName); - if (ChainReducer.class.isAssignableFrom(reduceClass)) - isChainJob = true; - } - } catch (ClassNotFoundException cnfe) { - // don't care; assume it's not derived from ChainReducer - } - return isChainJob; - } - private void createMapTasks(JobImpl job, long inputLength, TaskSplitMetaInfo[] splits) { for (int i=0; i < job.numMapTasks; ++i) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java index f0ce272bb8..f9e58b7ecc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java @@ -60,8 +60,8 @@ public class LocalContainerAllocator extends RMCommunicator private static final Log LOG = LogFactory.getLog(LocalContainerAllocator.class); + @SuppressWarnings("rawtypes") private final EventHandler eventHandler; -// private final ApplicationId appID; private AtomicInteger containerCount = new AtomicInteger(); private long retryInterval; private long retrystartTime; @@ -73,8 +73,6 @@ public LocalContainerAllocator(ClientService clientService, AppContext context) { super(clientService, context); this.eventHandler = context.getEventHandler(); -// this.appID = context.getApplicationID(); - } @Override @@ -88,6 +86,7 @@ public void init(Configuration conf) { retrystartTime = System.currentTimeMillis(); } + @SuppressWarnings("unchecked") @Override protected synchronized void heartbeat() throws Exception { AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest( @@ -124,6 +123,7 @@ protected synchronized void heartbeat() throws Exception { } } + @SuppressWarnings("unchecked") @Override public void handle(ContainerAllocatorEvent event) { if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index 68d9c2462b..5028355acf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.security.PrivilegedAction; import java.util.Map; +import 
java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -65,7 +66,7 @@ public abstract class RMCommunicator extends AbstractService { private int rmPollInterval;//millis protected ApplicationId applicationId; protected ApplicationAttemptId applicationAttemptId; - private volatile boolean stopped; + private AtomicBoolean stopped; protected Thread allocatorThread; protected EventHandler eventHandler; protected AMRMProtocol scheduler; @@ -88,6 +89,7 @@ public RMCommunicator(ClientService clientService, AppContext context) { this.eventHandler = context.getEventHandler(); this.applicationId = context.getApplicationID(); this.applicationAttemptId = context.getApplicationAttemptId(); + this.stopped = new AtomicBoolean(false); } @Override @@ -213,7 +215,10 @@ protected Resource getMaxContainerCapability() { @Override public void stop() { - stopped = true; + if (stopped.getAndSet(true)) { + // return if already stopped + return; + } allocatorThread.interrupt(); try { allocatorThread.join(); @@ -228,7 +233,7 @@ protected void startAllocatorThread() { allocatorThread = new Thread(new Runnable() { @Override public void run() { - while (!stopped && !Thread.currentThread().isInterrupted()) { + while (!stopped.get() && !Thread.currentThread().isInterrupted()) { try { Thread.sleep(rmPollInterval); try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebApp.java index 55601180a3..e3b62c71ec 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebApp.java @@ -18,8 +18,9 @@ package org.apache.hadoop.mapreduce.v2.app.webapp; -import static org.apache.hadoop.yarn.util.StringHelper.*; +import static org.apache.hadoop.yarn.util.StringHelper.pajoin; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.WebApp; /** @@ -29,6 +30,9 @@ public class AMWebApp extends WebApp implements AMParams { @Override public void setup() { + bind(JAXBContextResolver.class); + bind(GenericExceptionHandler.class); + bind(AMWebServices.class); route("/", AppController.class); route("/app", AppController.class); route(pajoin("/job", JOB_ID), AppController.class, "job"); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java new file mode 100644 index 0000000000..72ee762fc6 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java @@ -0,0 +1,362 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.v2.app.webapp; + +import java.io.IOException; + +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response.Status; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.JobACL; +import org.apache.hadoop.mapreduce.v2.api.records.JobId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskType; +import org.apache.hadoop.mapreduce.v2.app.AppContext; +import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.job.Task; +import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobsInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo; +import org.apache.hadoop.mapreduce.v2.util.MRApps; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.YarnException; +import org.apache.hadoop.yarn.webapp.BadRequestException; +import org.apache.hadoop.yarn.webapp.NotFoundException; + +import com.google.inject.Inject; + +@Path("/ws/v1/mapreduce") +public class AMWebServices { + private final AppContext appCtx; + private final App app; + private final Configuration conf; + + @Inject + public AMWebServices(final App app, final AppContext context, + final Configuration conf) { + this.appCtx = context; + this.app = app; + this.conf = conf; + } + + Boolean hasAccess(Job job, HttpServletRequest request) { + UserGroupInformation callerUgi = UserGroupInformation + .createRemoteUser(request.getRemoteUser()); + if (!job.checkAccess(callerUgi, JobACL.VIEW_JOB)) { + return false; + } + return true; + } + + /** + * check for job access. 
+ * + * @param job + * the job that is being accessed + */ + void checkAccess(Job job, HttpServletRequest request) { + if (!hasAccess(job, request)) { + throw new WebApplicationException(Status.UNAUTHORIZED); + } + } + + @GET + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public AppInfo get() { + return getAppInfo(); + } + + @GET + @Path("/info") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public AppInfo getAppInfo() { + return new AppInfo(this.app, this.app.context); + } + + @GET + @Path("/jobs") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public JobsInfo getJobs(@Context HttpServletRequest hsr) { + JobsInfo allJobs = new JobsInfo(); + for (Job job : appCtx.getAllJobs().values()) { + // getAllJobs only gives you a partial we want a full + Job fullJob = appCtx.getJob(job.getID()); + if (fullJob == null) { + continue; + } + allJobs.add(new JobInfo(fullJob, hasAccess(fullJob, hsr))); + } + return allJobs; + } + + @GET + @Path("/jobs/{jobid}") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public JobInfo getJob(@Context HttpServletRequest hsr, + @PathParam("jobid") String jid) { + JobId jobId = MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + return new JobInfo(job, hasAccess(job, hsr)); + + } + + @GET + @Path("/jobs/{jobid}/counters") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public JobCounterInfo getJobCounters(@Context HttpServletRequest hsr, + @PathParam("jobid") String jid) { + JobId jobId = MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + checkAccess(job, hsr); + return new JobCounterInfo(this.appCtx, job); + } + + @GET + @Path("/jobs/{jobid}/tasks/{taskid}/counters") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public JobTaskCounterInfo getSingleTaskCounters( + @Context HttpServletRequest hsr, @PathParam("jobid") String jid, + @PathParam("taskid") String tid) { + JobId jobId = MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = this.appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + checkAccess(job, hsr); + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + return new JobTaskCounterInfo(task); + } + + @GET + @Path("/jobs/{jobid}/conf") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public ConfInfo getJobConf(@Context HttpServletRequest hsr, + @PathParam("jobid") String jid) { + JobId jobId = MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + checkAccess(job, hsr); + ConfInfo info; + try { + info = new ConfInfo(job, this.conf); + } catch (IOException e) { + throw new NotFoundException("unable 
to load configuration for job: " + jid); + } + return info; + } + + @GET + @Path("/jobs/{jobid}/tasks") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public TasksInfo getJobTasks(@Context HttpServletRequest hsr, + @PathParam("jobid") String jid, @QueryParam("type") String type) { + Job job = this.appCtx.getJob(MRApps.toJobID(jid)); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + checkAccess(job, hsr); + TasksInfo allTasks = new TasksInfo(); + for (Task task : job.getTasks().values()) { + TaskType ttype = null; + if (type != null && !type.isEmpty()) { + try { + ttype = MRApps.taskType(type); + } catch (YarnException e) { + throw new BadRequestException("tasktype must be either m or r"); } + } + if (ttype != null && task.getType() != ttype) { + continue; + } + allTasks.add(new TaskInfo(task)); + } + return allTasks; + } + + @GET + @Path("/jobs/{jobid}/tasks/{taskid}") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public TaskInfo getJobTask(@Context HttpServletRequest hsr, + @PathParam("jobid") String jid, @PathParam("taskid") String tid) { + Job job = this.appCtx.getJob(MRApps.toJobID(jid)); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + checkAccess(job, hsr); + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + return new TaskInfo(task); + + } + + @GET + @Path("/jobs/{jobid}/tasks/{taskid}/attempts") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr, + @PathParam("jobid") String jid, @PathParam("taskid") String tid) { + TaskAttemptsInfo attempts = new TaskAttemptsInfo(); + Job job = this.appCtx.getJob(MRApps.toJobID(jid)); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + checkAccess(job, hsr); + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + for (TaskAttempt ta : task.getAttempts().values()) { + if (ta != null) { + if (task.getType() == TaskType.REDUCE) { + attempts.add(new ReduceTaskAttemptInfo(ta, task.getType())); + } else { + attempts.add(new TaskAttemptInfo(ta, task.getType(), true)); + } + } + } + return attempts; + } + + @GET + @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr, + @PathParam("jobid") String jid, @PathParam("taskid") String tid, + @PathParam("attemptid") String attId) { + Job job = this.appCtx.getJob(MRApps.toJobID(jid)); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + checkAccess(job, hsr); + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + TaskAttemptId attemptId = MRApps.toTaskAttemptID(attId); + if (attemptId == null) { + throw new 
NotFoundException("task attempt id " + attId + + " not found or invalid"); + } + TaskAttempt ta = task.getAttempt(attemptId); + if (ta == null) { + throw new NotFoundException("Error getting info on task attempt id " + + attId); + } + if (task.getType() == TaskType.REDUCE) { + return new ReduceTaskAttemptInfo(ta, task.getType()); + } else { + return new TaskAttemptInfo(ta, task.getType(), true); + } + } + + @GET + @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters( + @Context HttpServletRequest hsr, @PathParam("jobid") String jid, + @PathParam("taskid") String tid, @PathParam("attemptid") String attId) { + JobId jobId = MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = this.appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + checkAccess(job, hsr); + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + TaskAttemptId attemptId = MRApps.toTaskAttemptID(attId); + if (attemptId == null) { + throw new NotFoundException("task attempt id " + attId + + " not found or invalid"); + } + TaskAttempt ta = task.getAttempt(attemptId); + if (ta == null) { + throw new NotFoundException("Error getting info on task attempt id " + + attId); + } + return new JobTaskAttemptCounterInfo(ta); + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java index aa0d89c034..45ad63f0fb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java @@ -32,6 +32,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -72,13 +73,14 @@ protected AppController(App app, Configuration conf, RequestContext ctx) { * Render the /info page with an overview of current application. */ public void info() { + AppInfo info = new AppInfo(app, app.context); info("Application Master Overview"). - _("Application ID:", $(APP_ID)). - _("Application Name:", app.context.getApplicationName()). - _("User:", app.context.getUser()). - _("Started on:", Times.format(app.context.getStartTime())). + _("Application ID:", info.getId()). + _("Application Name:", info.getName()). + _("User:", info.getUser()). + _("Started on:", Times.format(info.getStartTime())). 
_("Elasped: ", org.apache.hadoop.util.StringUtils.formatTime( - Times.elapsed(app.context.getStartTime(), 0))); + info.getElapsedTime() )); render(InfoPage.class); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java index edd1d9746e..303c8a3b00 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java @@ -22,14 +22,14 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH; import java.io.IOException; -import java.util.Map; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; @@ -71,11 +71,8 @@ public class ConfBlock extends HtmlBlock { } Path confPath = job.getConfFile(); try { - //Read in the configuration file and put it in a key/value table. - FileContext fc = FileContext.getFileContext(confPath.toUri(), conf); - Configuration jobConf = new Configuration(false); - jobConf.addResource(fc.open(confPath)); - + ConfInfo info = new ConfInfo(job, this.conf); + html.div().h3(confPath.toString())._(); TBODY> tbody = html. // Tasks table @@ -87,10 +84,10 @@ public class ConfBlock extends HtmlBlock { _(). _(). tbody(); - for(Map.Entry entry : jobConf) { + for (ConfEntryInfo entry : info.getProperties()) { tbody. tr(). - td(entry.getKey()). + td(entry.getName()). td(entry.getValue()). _(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java new file mode 100644 index 0000000000..ec1c151d8b --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java @@ -0,0 +1,77 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Arrays;
+
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+import com.google.inject.Singleton;
+
+import javax.ws.rs.ext.ContextResolver;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterGroupInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobsInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterGroupInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo;
+
+@Singleton
+@Provider
+public class JAXBContextResolver implements ContextResolver<JAXBContext> {
+
+  private JAXBContext context;
+  private final Set<Class> types;
+
+  // you have to specify all the dao classes here
+  private final Class[] cTypes = {AppInfo.class, CounterInfo.class,
+      JobTaskAttemptCounterInfo.class, JobTaskCounterInfo.class,
+      TaskCounterGroupInfo.class, ConfInfo.class, JobCounterInfo.class,
+      TaskCounterInfo.class, CounterGroupInfo.class, JobInfo.class,
+      JobsInfo.class, ReduceTaskAttemptInfo.class, TaskAttemptInfo.class,
+      TaskInfo.class, TasksInfo.class, TaskAttemptsInfo.class,
+      ConfEntryInfo.class};
+
+  public JAXBContextResolver() throws Exception {
+    this.types = new HashSet<Class>(Arrays.asList(cTypes));
+    this.context = new JSONJAXBContext(JSONConfiguration.natural().
+        rootUnwrapping(false).build(), cTypes);
+  }
+
+  @Override
+  public JAXBContext getContext(Class objectType) {
+    return (types.contains(objectType)) ?
context : null; + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java index 4969a76cb4..fec0a50c5c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java @@ -18,47 +18,32 @@ package org.apache.hadoop.mapreduce.v2.app.webapp; -import com.google.inject.Inject; +import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID; +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH; + import java.util.Date; -import java.util.Map; import org.apache.hadoop.mapreduce.v2.api.records.JobId; -import org.apache.hadoop.mapreduce.v2.api.records.JobReport; -import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; -import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; -import org.apache.hadoop.mapreduce.v2.app.job.Task; -import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; -import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*; -import static org.apache.hadoop.yarn.util.StringHelper.*; -import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; + +import com.google.inject.Inject; public class JobBlock extends HtmlBlock { final AppContext appContext; - int runningMapTasks = 0; - int pendingMapTasks = 0; - int runningReduceTasks = 0; - int pendingReduceTasks = 0; - - int newMapAttempts = 0; - int runningMapAttempts = 0; - int killedMapAttempts = 0; - int failedMapAttempts = 0; - int successfulMapAttempts = 0; - int newReduceAttempts = 0; - int runningReduceAttempts = 0; - int killedReduceAttempts = 0; - int failedReduceAttempts = 0; - int successfulReduceAttempts = 0; - @Inject JobBlock(AppContext appctx) { appContext = appctx; } @@ -77,23 +62,13 @@ public class JobBlock extends HtmlBlock { p()._("Sorry, ", jid, " not found.")._(); return; } - JobReport jobReport = job.getReport(); - String mapPct = percent(jobReport.getMapProgress()); - String reducePct = percent(jobReport.getReduceProgress()); - int mapTasks = job.getTotalMaps(); - int mapTasksComplete = job.getCompletedMaps(); - int reduceTasks = job.getTotalReduces(); - int reducesTasksComplete = job.getCompletedReduces(); - long startTime = jobReport.getStartTime(); - long finishTime = jobReport.getFinishTime(); - 
countTasksAndAttempts(job); + JobInfo jinfo = new JobInfo(job, true); info("Job Overview"). - _("Job Name:", job.getName()). - _("State:", job.getState()). - _("Uberized:", job.isUber()). - _("Started:", new Date(startTime)). - _("Elapsed:", StringUtils.formatTime( - Times.elapsed(startTime, finishTime))); + _("Job Name:", jinfo.getName()). + _("State:", jinfo.getState()). + _("Uberized:", jinfo.isUberized()). + _("Started:", new Date(jinfo.getStartTime())). + _("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime())); html. _(InfoBlock.class). div(_INFO_WRAP). @@ -112,25 +87,25 @@ public class JobBlock extends HtmlBlock { a(url("tasks", jid, "m"), "Map")._(). td(). div(_PROGRESSBAR). - $title(join(mapPct, '%')). // tooltip + $title(join(jinfo.getMapProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). - $style(join("width:", mapPct, '%'))._()._()._(). - td(String.valueOf(mapTasks)). - td(String.valueOf(pendingMapTasks)). - td(String.valueOf(runningMapTasks)). - td(String.valueOf(mapTasksComplete))._(). + $style(join("width:", jinfo.getMapProgressPercent(), '%'))._()._()._(). + td(String.valueOf(jinfo.getMapsTotal())). + td(String.valueOf(jinfo.getMapsPending())). + td(String.valueOf(jinfo.getMapsRunning())). + td(String.valueOf(jinfo.getMapsCompleted()))._(). tr(_EVEN). th(). a(url("tasks", jid, "r"), "Reduce")._(). td(). div(_PROGRESSBAR). - $title(join(reducePct, '%')). // tooltip + $title(join(jinfo.getReduceProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). - $style(join("width:", reducePct, '%'))._()._()._(). - td(String.valueOf(reduceTasks)). - td(String.valueOf(pendingReduceTasks)). - td(String.valueOf(runningReduceTasks)). - td(String.valueOf(reducesTasksComplete))._() + $style(join("width:", jinfo.getReduceProgressPercent(), '%'))._()._()._(). + td(String.valueOf(jinfo.getReducesTotal())). + td(String.valueOf(jinfo.getReducesPending())). + td(String.valueOf(jinfo.getReducesRunning())). + td(String.valueOf(jinfo.getReducesCompleted()))._() ._(). // Attempts table @@ -145,110 +120,41 @@ public class JobBlock extends HtmlBlock { tr(_ODD). th("Maps"). td().a(url("attempts", jid, "m", - TaskAttemptStateUI.NEW.toString()), - String.valueOf(newMapAttempts))._(). + TaskAttemptStateUI.NEW.toString()), + String.valueOf(jinfo.getNewMapAttempts()))._(). td().a(url("attempts", jid, "m", - TaskAttemptStateUI.RUNNING.toString()), - String.valueOf(runningMapAttempts))._(). + TaskAttemptStateUI.RUNNING.toString()), + String.valueOf(jinfo.getRunningMapAttempts()))._(). td().a(url("attempts", jid, "m", - TaskAttemptStateUI.FAILED.toString()), - String.valueOf(failedMapAttempts))._(). + TaskAttemptStateUI.FAILED.toString()), + String.valueOf(jinfo.getFailedMapAttempts()))._(). td().a(url("attempts", jid, "m", - TaskAttemptStateUI.KILLED.toString()), - String.valueOf(killedMapAttempts))._(). + TaskAttemptStateUI.KILLED.toString()), + String.valueOf(jinfo.getKilledMapAttempts()))._(). td().a(url("attempts", jid, "m", - TaskAttemptStateUI.SUCCESSFUL.toString()), - String.valueOf(successfulMapAttempts))._(). + TaskAttemptStateUI.SUCCESSFUL.toString()), + String.valueOf(jinfo.getSuccessfulMapAttempts()))._(). _(). tr(_EVEN). th("Reduces"). td().a(url("attempts", jid, "r", - TaskAttemptStateUI.NEW.toString()), - String.valueOf(newReduceAttempts))._(). + TaskAttemptStateUI.NEW.toString()), + String.valueOf(jinfo.getNewReduceAttempts()))._(). td().a(url("attempts", jid, "r", - TaskAttemptStateUI.RUNNING.toString()), - String.valueOf(runningReduceAttempts))._(). 
+ TaskAttemptStateUI.RUNNING.toString()), + String.valueOf(jinfo.getRunningReduceAttempts()))._(). td().a(url("attempts", jid, "r", - TaskAttemptStateUI.FAILED.toString()), - String.valueOf(failedReduceAttempts))._(). + TaskAttemptStateUI.FAILED.toString()), + String.valueOf(jinfo.getFailedReduceAttempts()))._(). td().a(url("attempts", jid, "r", - TaskAttemptStateUI.KILLED.toString()), - String.valueOf(killedReduceAttempts))._(). + TaskAttemptStateUI.KILLED.toString()), + String.valueOf(jinfo.getKilledReduceAttempts()))._(). td().a(url("attempts", jid, "r", - TaskAttemptStateUI.SUCCESSFUL.toString()), - String.valueOf(successfulReduceAttempts))._(). + TaskAttemptStateUI.SUCCESSFUL.toString()), + String.valueOf(jinfo.getSuccessfulReduceAttempts()))._(). _(). _(). _(); } - private void countTasksAndAttempts(Job job) { - Map tasks = job.getTasks(); - for (Task task : tasks.values()) { - switch (task.getType()) { - case MAP: - // Task counts - switch (task.getState()) { - case RUNNING: - ++runningMapTasks; - break; - case SCHEDULED: - ++pendingMapTasks; - break; - } - break; - case REDUCE: - // Task counts - switch (task.getState()) { - case RUNNING: - ++runningReduceTasks; - break; - case SCHEDULED: - ++pendingReduceTasks; - break; - } - break; - } - - // Attempts counts - Map attempts = task.getAttempts(); - for (TaskAttempt attempt : attempts.values()) { - - int newAttempts = 0, running = 0, successful = 0, failed = 0, killed =0; - - if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) { - ++newAttempts; - } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt - .getState())) { - ++running; - } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt - .getState())) { - ++successful; - } else if (TaskAttemptStateUI.FAILED - .correspondsTo(attempt.getState())) { - ++failed; - } else if (TaskAttemptStateUI.KILLED - .correspondsTo(attempt.getState())) { - ++killed; - } - - switch (task.getType()) { - case MAP: - newMapAttempts += newAttempts; - runningMapAttempts += running; - successfulMapAttempts += successful; - failedMapAttempts += failed; - killedMapAttempts += killed; - break; - case REDUCE: - newReduceAttempts += newAttempts; - runningReduceAttempts += running; - successfulReduceAttempts += successful; - failedReduceAttempts += failed; - killedReduceAttempts += killed; - break; - } - } - } - } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java index 5e56295160..720219ece1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java @@ -18,18 +18,19 @@ package org.apache.hadoop.mapreduce.v2.app.webapp; -import com.google.inject.Inject; +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE; -import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; -import org.apache.hadoop.mapreduce.v2.util.MRApps; 
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; -import static org.apache.hadoop.yarn.util.StringHelper.*; -import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; +import com.google.inject.Inject; public class JobsBlock extends HtmlBlock { final AppContext appContext; @@ -54,38 +55,31 @@ public class JobsBlock extends HtmlBlock { th("Reduces Total"). th("Reduces Completed")._()._(). tbody(); - for (Job job : appContext.getAllJobs().values()) { - String jobID = MRApps.toString(job.getID()); - JobReport report = job.getReport(); - String mapPct = percent(report.getMapProgress()); - String mapsTotal = String.valueOf(job.getTotalMaps()); - String mapsCompleted = String.valueOf(job.getCompletedMaps()); - String reducePct = percent(report.getReduceProgress()); - String reduceTotal = String.valueOf(job.getTotalReduces()); - String reduceCompleted = String.valueOf(job.getCompletedReduces()); + for (Job j : appContext.getAllJobs().values()) { + JobInfo job = new JobInfo(j, false); tbody. tr(). td(). - span().$title(String.valueOf(job.getID().getId()))._(). // for sorting - a(url("job", jobID), jobID)._(). - td(job.getName().toString()). - td(job.getState().toString()). + span().$title(String.valueOf(job.getId()))._(). // for sorting + a(url("job", job.getId()), job.getId())._(). + td(job.getName()). + td(job.getState()). td(). - span().$title(mapPct)._(). // for sorting + span().$title(job.getMapProgressPercent())._(). // for sorting div(_PROGRESSBAR). - $title(join(mapPct, '%')). // tooltip + $title(join(job.getMapProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). - $style(join("width:", mapPct, '%'))._()._()._(). - td(mapsTotal). - td(mapsCompleted). + $style(join("width:", job.getMapProgressPercent(), '%'))._()._()._(). + td(String.valueOf(job.getMapsTotal())). + td(String.valueOf(job.getMapsCompleted())). td(). - span().$title(reducePct)._(). // for sorting + span().$title(job.getReduceProgressPercent())._(). // for sorting div(_PROGRESSBAR). - $title(join(reducePct, '%')). // tooltip + $title(join(job.getReduceProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). - $style(join("width:", reducePct, '%'))._()._()._(). - td(reduceTotal). - td(reduceCompleted)._(); + $style(join("width:", job.getReduceProgressPercent(), '%'))._()._()._(). + td(String.valueOf(job.getReducesTotal())). 
+ td(String.valueOf(job.getReducesCompleted()))._(); } tbody._()._(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java index c5f581ac46..3d70c3cbb3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java @@ -18,23 +18,29 @@ package org.apache.hadoop.mapreduce.v2.app.webapp; +import static org.apache.hadoop.yarn.util.StringHelper.percent; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; + import java.util.Collection; -import com.google.common.base.Joiner; -import com.google.inject.Inject; - import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; -import org.apache.hadoop.mapreduce.v2.util.MRApps; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; -import static org.apache.hadoop.yarn.util.StringHelper.*; -import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; + +import com.google.inject.Inject; public class TaskPage extends AppView { @@ -66,24 +72,26 @@ protected void render(Block html) { th(".tsh", "Elapsed"). th(".note", "Note")._()._(). tbody(); - for (TaskAttempt ta : getTaskAttempts()) { - String taid = MRApps.toString(ta.getID()); - String progress = percent(ta.getProgress()); - ContainerId containerId = ta.getAssignedContainerID(); + for (TaskAttempt attempt : getTaskAttempts()) { + TaskAttemptInfo ta = new TaskAttemptInfo(attempt, true); + String taid = ta.getId(); + String progress = percent(ta.getProgress() / 100); + ContainerId containerId = ta.getAssignedContainerId(); - String nodeHttpAddr = ta.getNodeHttpAddress(); - long startTime = ta.getLaunchTime(); + String nodeHttpAddr = ta.getNode(); + long startTime = ta.getStartTime(); long finishTime = ta.getFinishTime(); - long elapsed = Times.elapsed(startTime, finishTime); + long elapsed = ta.getElapsedTime(); + String diag = ta.getNote() == null ? "" : ta.getNote(); TD>>> nodeTd = tbody. tr(). td(".id", taid). td(".progress", progress). - td(".state", ta.getState().toString()). + td(".state", ta.getState()). td(). 
a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr); if (containerId != null) { - String containerIdStr = ConverterUtils.toString(containerId); + String containerIdStr = ta.getAssignedContainerIdStr(); nodeTd._(" "). a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs", containerIdStr, app.getJob().getUserName()), "logs"); @@ -92,7 +100,7 @@ protected void render(Block html) { td(".ts", Times.format(startTime)). td(".ts", Times.format(finishTime)). td(".dt", StringUtils.formatTime(elapsed)). - td(".note", Joiner.on('\n').join(ta.getDiagnostics()))._(); + td(".note", diag)._(); } tbody._()._(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java index 7247761d65..ccef4625eb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java @@ -18,21 +18,24 @@ package org.apache.hadoop.mapreduce.v2.app.webapp; -import com.google.inject.Inject; +import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE; +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.util.StringHelper.percent; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE; -import org.apache.hadoop.mapreduce.v2.api.records.TaskReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.Task; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; -import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*; -import static org.apache.hadoop.yarn.util.StringHelper.*; -import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; +import com.google.inject.Inject; public class TasksBlock extends HtmlBlock { final App app; @@ -67,16 +70,16 @@ public class TasksBlock extends HtmlBlock { if (type != null && task.getType() != type) { continue; } - String tid = MRApps.toString(task.getID()); - TaskReport report = task.getReport(); - String pct = percent(report.getProgress()); - long startTime = report.getStartTime(); - long finishTime = report.getFinishTime(); - long elapsed = Times.elapsed(startTime, finishTime); + TaskInfo info = new TaskInfo(task); + String tid = info.getId(); + String pct = percent(info.getProgress() / 100); + long startTime = info.getStartTime(); + long finishTime = info.getFinishTime(); + long elapsed = info.getElapsedTime(); tbody. tr(). td(). - br().$title(String.valueOf(task.getID().getId()))._(). // sorting + br().$title(String.valueOf(info.getTaskNum()))._(). // sorting a(url("task", tid), tid)._(). td(). br().$title(pct)._(). 
@@ -84,7 +87,7 @@ public class TasksBlock extends HtmlBlock { $title(join(pct, '%')). // tooltip div(_PROGRESSBAR_VALUE). $style(join("width:", pct, '%'))._()._()._(). - td(report.getTaskState().toString()). + td(info.getState()). td(). br().$title(String.valueOf(startTime))._(). _(Times.format(startTime))._(). diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AppInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AppInfo.java new file mode 100644 index 0000000000..3e9a7e1b80 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AppInfo.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.mapreduce.v2.app.AppContext; +import org.apache.hadoop.mapreduce.v2.app.webapp.App; +import org.apache.hadoop.yarn.util.Times; + +@XmlRootElement(name = "info") +@XmlAccessorType(XmlAccessType.FIELD) +public class AppInfo { + + protected String appId; + protected String name; + protected String user; + protected String hostname; + protected long startedOn; + protected long elapsedTime; + + public AppInfo() { + } + + public AppInfo(App app, AppContext context) { + this.appId = context.getApplicationID().toString(); + this.name = context.getApplicationName().toString(); + this.user = context.getUser().toString(); + this.startedOn = context.getStartTime(); + this.elapsedTime = Times.elapsed(context.getStartTime(), 0); + } + + public String getId() { + return this.appId; + } + + public String getName() { + return this.name; + } + + public String getUser() { + return this.user; + } + + public long getStartTime() { + return this.startedOn; + } + + public long getElapsedTime() { + return this.elapsedTime; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfEntryInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfEntryInfo.java new file mode 100644 index 0000000000..5cfa40aa1a --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfEntryInfo.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software 
Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class ConfEntryInfo {
+
+  protected String name;
+  protected String value;
+
+  public ConfEntryInfo() {
+  }
+
+  public ConfEntryInfo(String key, String value) {
+    this.name = key;
+    this.value = value;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public String getValue() {
+    return this.value;
+  }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java
new file mode 100644
index 0000000000..0cc7bc46f8
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Map; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapreduce.v2.app.job.Job; + +@XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) +public class ConfInfo { + + protected String path; + protected ArrayList property; + + public ConfInfo() { + } + + public ConfInfo(Job job, Configuration conf) throws IOException { + + Path confPath = job.getConfFile(); + this.property = new ArrayList(); + // Read in the configuration file and put it in a key/value table. + FileContext fc = FileContext.getFileContext(confPath.toUri(), conf); + Configuration jobConf = new Configuration(false); + jobConf.addResource(fc.open(confPath)); + this.path = confPath.toString(); + for (Map.Entry entry : jobConf) { + this.property.add(new ConfEntryInfo(entry.getKey(), entry.getValue())); + } + + } + + public ArrayList getProperties() { + return this.property; + } + + public String getPath() { + return this.path; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterGroupInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterGroupInfo.java new file mode 100644 index 0000000000..99009ca1f6 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterGroupInfo.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.mapreduce.v2.api.records.Counter; +import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup; + +@XmlRootElement(name = "counterGroup") +@XmlAccessorType(XmlAccessType.FIELD) +public class CounterGroupInfo { + + protected String counterGroupName; + @XmlElement(name = "counter") + protected ArrayList counter; + + public CounterGroupInfo() { + } + + public CounterGroupInfo(String name, CounterGroup g, CounterGroup mg, + CounterGroup rg) { + this.counterGroupName = name; + this.counter = new ArrayList(); + + for (Counter c : g.getAllCounters().values()) { + Counter mc = mg == null ? null : mg.getCounter(c.getName()); + Counter rc = rg == null ? null : rg.getCounter(c.getName()); + CounterInfo cinfo = new CounterInfo(c, mc, rc); + this.counter.add(cinfo); + } + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterInfo.java new file mode 100644 index 0000000000..97c3563d62 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterInfo.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.mapreduce.v2.api.records.Counter; + +@XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) +public class CounterInfo { + + protected String counterName; + protected long totalCounterValue; + protected long mapCounterValue; + protected long reduceCounterValue; + + public CounterInfo() { + } + + public CounterInfo(Counter counter, Counter mc, Counter rc) { + this.counterName = counter.getName(); + this.totalCounterValue = counter.getValue(); + this.mapCounterValue = mc == null ? 0 : mc.getValue(); + this.reduceCounterValue = rc == null ? 
0 : rc.getValue(); + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java new file mode 100644 index 0000000000..6276e6a443 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.util.ArrayList; +import java.util.Map; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup; +import org.apache.hadoop.mapreduce.v2.api.records.Counters; +import org.apache.hadoop.mapreduce.v2.api.records.TaskId; +import org.apache.hadoop.mapreduce.v2.app.AppContext; +import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.job.Task; +import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; +import org.apache.hadoop.mapreduce.v2.util.MRApps; + +@XmlRootElement(name = "jobCounters") +@XmlAccessorType(XmlAccessType.FIELD) +public class JobCounterInfo { + + @XmlTransient + protected Counters total = null; + @XmlTransient + protected Counters map = null; + @XmlTransient + protected Counters reduce = null; + + protected String id; + protected ArrayList counterGroups; + + public JobCounterInfo() { + } + + public JobCounterInfo(AppContext ctx, Job job) { + getCounters(ctx, job); + counterGroups = new ArrayList(); + this.id = MRApps.toString(job.getID()); + + int numGroups = 0; + + if (total != null) { + for (CounterGroup g : total.getAllCounterGroups().values()) { + if (g != null) { + CounterGroup mg = map == null ? null : map.getCounterGroup(g + .getName()); + CounterGroup rg = reduce == null ? 
null : reduce.getCounterGroup(g + .getName()); + ++numGroups; + + CounterGroupInfo cginfo = new CounterGroupInfo(g.getName(), g, mg, rg); + counterGroups.add(cginfo); + } + } + } + } + + private void getCounters(AppContext ctx, Job job) { + total = JobImpl.newCounters(); + if (job == null) { + return; + } + map = JobImpl.newCounters(); + reduce = JobImpl.newCounters(); + // Get all types of counters + Map tasks = job.getTasks(); + for (Task t : tasks.values()) { + Counters counters = t.getCounters(); + JobImpl.incrAllCounters(total, counters); + switch (t.getType()) { + case MAP: + JobImpl.incrAllCounters(map, counters); + break; + case REDUCE: + JobImpl.incrAllCounters(reduce, counters); + break; + } + } + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java new file mode 100644 index 0000000000..c46fc07dd6 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java @@ -0,0 +1,349 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import static org.apache.hadoop.yarn.util.StringHelper.percent; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.mapreduce.JobACL; +import org.apache.hadoop.mapreduce.v2.api.records.JobReport; +import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskId; +import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.job.Task; +import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.util.MRApps; +import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.yarn.util.Times; + +@XmlRootElement(name = "job") +@XmlAccessorType(XmlAccessType.FIELD) +public class JobInfo { + + // ok for any user to see + protected long startTime; + protected long finishTime; + protected long elapsedTime; + protected String id; + protected String name; + protected String user; + protected String state; + protected int mapsTotal; + protected int mapsCompleted; + protected float mapProgress; + protected int reducesTotal; + protected int reducesCompleted; + protected float reduceProgress; + + @XmlTransient + protected String mapProgressPercent; + @XmlTransient + protected String reduceProgressPercent; + + // these should only be seen if acls allow + protected int mapsPending; + protected int mapsRunning; + protected int reducesPending; + protected int reducesRunning; + protected boolean uberized; + protected String diagnostics; + protected int newReduceAttempts = 0; + protected int runningReduceAttempts = 0; + protected int failedReduceAttempts = 0; + protected int killedReduceAttempts = 0; + protected int successfulReduceAttempts = 0; + protected int newMapAttempts = 0; + protected int runningMapAttempts = 0; + protected int failedMapAttempts = 0; + protected int killedMapAttempts = 0; + protected int successfulMapAttempts = 0; + protected ArrayList acls; + + @XmlTransient + protected int numMaps; + @XmlTransient + protected int numReduces; + + public JobInfo() { + } + + public JobInfo(Job job, Boolean hasAccess) { + this.id = MRApps.toString(job.getID()); + JobReport report = job.getReport(); + countTasksAndAttempts(job); + this.startTime = report.getStartTime(); + this.finishTime = report.getFinishTime(); + this.elapsedTime = Times.elapsed(this.startTime, this.finishTime); + if (this.elapsedTime == -1) { + this.elapsedTime = 0; + } + this.name = job.getName().toString(); + this.user = job.getUserName(); + this.state = job.getState().toString(); + this.mapsTotal = job.getTotalMaps(); + this.mapsCompleted = job.getCompletedMaps(); + this.mapProgress = report.getMapProgress() * 100; + this.mapProgressPercent = percent(report.getMapProgress()); + this.reducesTotal = job.getTotalReduces(); + this.reducesCompleted = job.getCompletedReduces(); + this.reduceProgress = report.getReduceProgress() * 100; + this.reduceProgressPercent = percent(report.getReduceProgress()); + + this.acls = new ArrayList(); + if (hasAccess) { + this.uberized = job.isUber(); + + List diagnostics = job.getDiagnostics(); + if (diagnostics != null && !diagnostics.isEmpty()) { + StringBuffer b = new 
StringBuffer(); + for (String diag : diagnostics) { + b.append(diag); + } + this.diagnostics = b.toString(); + } + + Map allacls = job.getJobACLs(); + if (allacls != null) { + for (Map.Entry entry : allacls.entrySet()) { + this.acls.add(new ConfEntryInfo(entry.getKey().getAclName(), entry + .getValue().getAclString())); + } + } + } + } + + public int getNewReduceAttempts() { + return this.newReduceAttempts; + } + + public int getKilledReduceAttempts() { + return this.killedReduceAttempts; + } + + public int getFailedReduceAttempts() { + return this.failedReduceAttempts; + } + + public int getRunningReduceAttempts() { + return this.runningReduceAttempts; + } + + public int getSuccessfulReduceAttempts() { + return this.successfulReduceAttempts; + } + + public int getNewMapAttempts() { + return this.newMapAttempts; + } + + public int getKilledMapAttempts() { + return this.killedMapAttempts; + } + + public ArrayList getAcls() { + return acls; + } + + public int getFailedMapAttempts() { + return this.failedMapAttempts; + } + + public int getRunningMapAttempts() { + return this.runningMapAttempts; + } + + public int getSuccessfulMapAttempts() { + return this.successfulMapAttempts; + } + + public int getReducesCompleted() { + return this.reducesCompleted; + } + + public int getReducesTotal() { + return this.reducesTotal; + } + + public int getReducesPending() { + return this.reducesPending; + } + + public int getReducesRunning() { + return this.reducesRunning; + } + + public int getMapsCompleted() { + return this.mapsCompleted; + } + + public int getMapsTotal() { + return this.mapsTotal; + } + + public int getMapsPending() { + return this.mapsPending; + } + + public int getMapsRunning() { + return this.mapsRunning; + } + + public String getState() { + return this.state; + } + + public String getUser() { + return this.user; + } + + public String getName() { + return this.name; + } + + public String getId() { + return this.id; + } + + public long getStartTime() { + return this.startTime; + } + + public long getElapsedTime() { + return this.elapsedTime; + } + + public long getFinishTime() { + return this.finishTime; + } + + public boolean isUberized() { + return this.uberized; + } + + public String getdiagnostics() { + return this.diagnostics; + } + + public float getMapProgress() { + return this.mapProgress; + } + + public String getMapProgressPercent() { + return this.mapProgressPercent; + } + + public float getReduceProgress() { + return this.reduceProgress; + } + + public String getReduceProgressPercent() { + return this.reduceProgressPercent; + } + + /** + * Go through a job and update the member variables with counts for + * information to output in the page. + * + * @param job + * the job to get counts for. 
+ */ + private void countTasksAndAttempts(Job job) { + numReduces = 0; + numMaps = 0; + final Map tasks = job.getTasks(); + if (tasks == null) { + return; + } + for (Task task : tasks.values()) { + switch (task.getType()) { + case MAP: + // Task counts + switch (task.getState()) { + case RUNNING: + ++this.mapsRunning; + break; + case SCHEDULED: + ++this.mapsPending; + break; + } + break; + case REDUCE: + // Task counts + switch (task.getState()) { + case RUNNING: + ++this.reducesRunning; + break; + case SCHEDULED: + ++this.reducesPending; + break; + } + break; + } + // Attempts counts + Map attempts = task.getAttempts(); + int newAttempts, running, successful, failed, killed; + for (TaskAttempt attempt : attempts.values()) { + + newAttempts = 0; + running = 0; + successful = 0; + failed = 0; + killed = 0; + if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) { + ++newAttempts; + } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) { + ++running; + } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt + .getState())) { + ++successful; + } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) { + ++failed; + } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) { + ++killed; + } + + switch (task.getType()) { + case MAP: + this.newMapAttempts += newAttempts; + this.runningMapAttempts += running; + this.successfulMapAttempts += successful; + this.failedMapAttempts += failed; + this.killedMapAttempts += killed; + break; + case REDUCE: + this.newReduceAttempts += newAttempts; + this.runningReduceAttempts += running; + this.successfulReduceAttempts += successful; + this.failedReduceAttempts += failed; + this.killedReduceAttempts += killed; + break; + } + } + } + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskAttemptCounterInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskAttemptCounterInfo.java new file mode 100644 index 0000000000..da23b7a24d --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskAttemptCounterInfo.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup; +import org.apache.hadoop.mapreduce.v2.api.records.Counters; +import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.util.MRApps; + +@XmlRootElement(name = "JobTaskAttemptCounters") +@XmlAccessorType(XmlAccessType.FIELD) +public class JobTaskAttemptCounterInfo { + + @XmlTransient + protected Counters total = null; + + protected String id; + protected ArrayList<TaskCounterGroupInfo> taskCounterGroups; + + public JobTaskAttemptCounterInfo() { + } + + public JobTaskAttemptCounterInfo(TaskAttempt taskattempt) { + + long value = 0; + this.id = MRApps.toString(taskattempt.getID()); + total = taskattempt.getCounters(); + taskCounterGroups = new ArrayList<TaskCounterGroupInfo>(); + if (total != null) { + for (CounterGroup g : total.getAllCounterGroups().values()) { + if (g != null) { + TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g); + if (cginfo != null) { + taskCounterGroups.add(cginfo); + } + } + } + } + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskCounterInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskCounterInfo.java new file mode 100644 index 0000000000..7ba57f13a0 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskCounterInfo.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup; +import org.apache.hadoop.mapreduce.v2.api.records.Counters; +import org.apache.hadoop.mapreduce.v2.app.job.Task; +import org.apache.hadoop.mapreduce.v2.util.MRApps; + +@XmlRootElement(name = "jobTaskCounters") +@XmlAccessorType(XmlAccessType.FIELD) +public class JobTaskCounterInfo { + + @XmlTransient + protected Counters total = null; + + protected String id; + protected ArrayList taskCounterGroups; + + public JobTaskCounterInfo() { + } + + public JobTaskCounterInfo(Task task) { + total = task.getCounters(); + this.id = MRApps.toString(task.getID()); + taskCounterGroups = new ArrayList(); + if (total != null) { + for (CounterGroup g : total.getAllCounterGroups().values()) { + if (g != null) { + TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g); + taskCounterGroups.add(cginfo); + } + } + } + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobsInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobsInfo.java new file mode 100644 index 0000000000..0e83362b4c --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobsInfo.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by joblicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "jobs") +@XmlAccessorType(XmlAccessType.FIELD) +public class JobsInfo { + + protected ArrayList<JobInfo> job = new ArrayList<JobInfo>(); + + public JobsInfo() { + } // JAXB needs this + + public void add(JobInfo jobInfo) { + job.add(jobInfo); + } + + public ArrayList<JobInfo> getJobs() { + return job; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ReduceTaskAttemptInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ReduceTaskAttemptInfo.java new file mode 100644 index 0000000000..4d44d11b41 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ReduceTaskAttemptInfo.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.mapreduce.v2.api.records.TaskType; +import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.yarn.util.Times; + +@XmlRootElement(name = "taskAttempt") +@XmlAccessorType(XmlAccessType.FIELD) +public class ReduceTaskAttemptInfo extends TaskAttemptInfo { + + protected long shuffleFinishTime; + protected long mergeFinishTime; + protected long elapsedShuffleTime; + protected long elapsedMergeTime; + protected long elapsedReduceTime; + + public ReduceTaskAttemptInfo() { + } + + public ReduceTaskAttemptInfo(TaskAttempt ta, TaskType type) { + super(ta, type, false); + + this.shuffleFinishTime = ta.getShuffleFinishTime(); + this.mergeFinishTime = ta.getSortFinishTime(); + this.elapsedShuffleTime = Times.elapsed(this.startTime, + this.shuffleFinishTime, false); + if (this.elapsedShuffleTime == -1) { + this.elapsedShuffleTime = 0; + } + this.elapsedMergeTime = Times.elapsed(this.shuffleFinishTime, + this.mergeFinishTime, false); + if (this.elapsedMergeTime == -1) { + this.elapsedMergeTime = 0; + } + this.elapsedReduceTime = Times.elapsed(this.mergeFinishTime, + this.finishTime, false); + if (this.elapsedReduceTime == -1) { + this.elapsedReduceTime = 0; + } + } + + public long getShuffleFinishTime() { + return this.shuffleFinishTime; + } + + public long getMergeFinishTime() { + return this.mergeFinishTime; + } + + public long getElapsedShuffleTime() { + return this.elapsedShuffleTime; + } + + public long getElapsedMergeTime() { + return this.elapsedMergeTime; + } + + public long getElapsedReduceTime() { + return this.elapsedReduceTime; + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptInfo.java new file mode 100644 index 0000000000..231e36bdf0 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptInfo.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlSeeAlso; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.mapreduce.v2.api.records.TaskType; +import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.util.MRApps; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.util.Times; + +@XmlRootElement(name = "taskAttempt") +@XmlSeeAlso({ ReduceTaskAttemptInfo.class }) +@XmlAccessorType(XmlAccessType.FIELD) +public class TaskAttemptInfo { + + protected long startTime; + protected long finishTime; + protected long elapsedTime; + protected float progress; + protected String id; + protected String rack; + protected String state; + protected String nodeHttpAddress; + protected String diagnostics; + protected String type; + protected String assignedContainerId; + + @XmlTransient + protected ContainerId assignedContainer; + + public TaskAttemptInfo() { + } + + public TaskAttemptInfo(TaskAttempt ta, Boolean isRunning) { + this(ta, TaskType.MAP, isRunning); + } + + public TaskAttemptInfo(TaskAttempt ta, TaskType type, Boolean isRunning) { + this.type = type.toString(); + this.id = MRApps.toString(ta.getID()); + this.nodeHttpAddress = ta.getNodeHttpAddress(); + this.startTime = ta.getLaunchTime(); + this.finishTime = ta.getFinishTime(); + this.assignedContainerId = ConverterUtils.toString(ta + .getAssignedContainerID()); + this.assignedContainer = ta.getAssignedContainerID(); + this.progress = ta.getProgress() * 100; + this.state = ta.getState().toString(); + this.elapsedTime = Times + .elapsed(this.startTime, this.finishTime, isRunning); + if (this.elapsedTime == -1) { + this.elapsedTime = 0; + } + List diagnostics = ta.getDiagnostics(); + if (diagnostics != null && !diagnostics.isEmpty()) { + StringBuffer b = new StringBuffer(); + for (String diag : diagnostics) { + b.append(diag); + } + this.diagnostics = b.toString(); + } + this.rack = ta.getNodeRackName(); + } + + public String getAssignedContainerIdStr() { + return this.assignedContainerId; + } + + public ContainerId getAssignedContainerId() { + return this.assignedContainer; + } + + public String getState() { + return this.state; + } + + public String getId() { + return this.id; + } + + public long getStartTime() { + return this.startTime; + } + + public long getFinishTime() { + return this.finishTime; + } + + public float getProgress() { + return this.progress; + } + + public long getElapsedTime() { + return this.elapsedTime; + } + + public String getNode() { + return this.nodeHttpAddress; + } + + public String getRack() { + return this.rack; + } + + public String getNote() { + return this.diagnostics; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java new file mode 100644 index 0000000000..b8a48fe489 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java @@ -0,0 
+1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by taskattemptlicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "taskattempts") +@XmlAccessorType(XmlAccessType.FIELD) +public class TaskAttemptsInfo { + + protected ArrayList taskattempt = new ArrayList(); + + public TaskAttemptsInfo() { + } // JAXB needs this + + public void add(TaskAttemptInfo taskattemptInfo) { + taskattempt.add(taskattemptInfo); + } + + public ArrayList getTaskAttempts() { + return taskattempt; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterGroupInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterGroupInfo.java new file mode 100644 index 0000000000..fa9dfcb7a4 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterGroupInfo.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.mapreduce.v2.api.records.Counter; +import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup; + +@XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) +public class TaskCounterGroupInfo { + + protected String counterGroupName; + protected ArrayList<TaskCounterInfo> counter; + + public TaskCounterGroupInfo() { + } + + public TaskCounterGroupInfo(String name, CounterGroup g) { + this.counterGroupName = name; + this.counter = new ArrayList<TaskCounterInfo>(); + + for (Counter c : g.getAllCounters().values()) { + TaskCounterInfo cinfo = new TaskCounterInfo(c.getName(), c.getValue()); + this.counter.add(cinfo); + } + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterInfo.java new file mode 100644 index 0000000000..24d8395f08 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterInfo.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "counter") +@XmlAccessorType(XmlAccessType.FIELD) +public class TaskCounterInfo { + + protected String name; + protected long value; + + public TaskCounterInfo() { + } + + public TaskCounterInfo(String name, long value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name; + } + + public long getValue() { + return value; + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskInfo.java new file mode 100644 index 0000000000..d38635af2c --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskInfo.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; +import org.apache.hadoop.mapreduce.v2.api.records.TaskReport; +import org.apache.hadoop.mapreduce.v2.api.records.TaskType; +import org.apache.hadoop.mapreduce.v2.app.job.Task; +import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.util.MRApps; +import org.apache.hadoop.yarn.util.Times; + +@XmlRootElement(name = "task") +@XmlAccessorType(XmlAccessType.FIELD) +public class TaskInfo { + + protected long startTime; + protected long finishTime; + protected long elapsedTime; + protected float progress; + protected String id; + protected String state; + protected String type; + protected String successfulAttempt; + + @XmlTransient + int taskNum; + + @XmlTransient + TaskAttempt successful; + + public TaskInfo() { + } + + public TaskInfo(Task task) { + TaskType ttype = task.getType(); + this.type = ttype.toString(); + TaskReport report = task.getReport(); + this.startTime = report.getStartTime(); + this.finishTime = report.getFinishTime(); + this.elapsedTime = Times.elapsed(this.startTime, this.finishTime, false); + if (this.elapsedTime == -1) { + this.elapsedTime = 0; + } + this.state = report.getTaskState().toString(); + this.progress = report.getProgress() * 100; + this.id = MRApps.toString(task.getID()); + this.taskNum = task.getID().getId(); + this.successful = getSuccessfulAttempt(task); + if (successful != null) { + this.successfulAttempt = MRApps.toString(successful.getID()); + } else { + this.successfulAttempt = ""; + } + } + + public float getProgress() { + return this.progress; + } + + public String getState() { + return this.state; + } + + public String getId() { + return this.id; + } + + public int getTaskNum() { + return this.taskNum; + } + + public long getStartTime() { + return this.startTime; + } + + public long getFinishTime() { + return this.finishTime; + } + + public long getElapsedTime() { + return this.elapsedTime; + } + + public String getSuccessfulAttempt() { + return this.successfulAttempt; + } + + public TaskAttempt getSuccessful() { + return this.successful; + } + + private TaskAttempt getSuccessfulAttempt(Task task) { + for (TaskAttempt attempt : task.getAttempts().values()) { + if (attempt.getState() == TaskAttemptState.SUCCEEDED) { + return attempt; + } + } + return null; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TasksInfo.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TasksInfo.java new file mode 100644 index 0000000000..75c9adfe37 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TasksInfo.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by tasklicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.app.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "tasks") +@XmlAccessorType(XmlAccessType.FIELD) +public class TasksInfo { + + protected ArrayList task = new ArrayList(); + + public TasksInfo() { + } // JAXB needs this + + public void add(TaskInfo taskInfo) { + task.add(taskInfo); + } + + public ArrayList getTasks() { + return task; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java index 9c59269ec6..27fcec2c6a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java @@ -183,6 +183,7 @@ private void verifyJobReport(JobReport jr) { Assert.assertEquals(1, amInfo.getContainerId().getApplicationAttemptId() .getAttemptId()); Assert.assertTrue(amInfo.getStartTime() > 0); + Assert.assertEquals(false, jr.isUber()); } private void verifyTaskAttemptReport(TaskAttemptReport tar) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java index 812393c1b5..c9436e5645 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java @@ -118,7 +118,7 @@ public void testSimple() throws Exception { Job mockJob = mock(Job.class); 
when(mockJob.getReport()).thenReturn( MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, - 0, 0, 0, 0, 0, 0, "jobfile", null)); + 0, 0, 0, 0, 0, 0, "jobfile", null, false)); MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob); @@ -195,7 +195,7 @@ public void testResource() throws Exception { Job mockJob = mock(Job.class); when(mockJob.getReport()).thenReturn( MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, - 0, 0, 0, 0, 0, 0, "jobfile", null)); + 0, 0, 0, 0, 0, 0, "jobfile", null, false)); MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob); @@ -261,7 +261,7 @@ public void testMapReduceScheduling() throws Exception { Job mockJob = mock(Job.class); when(mockJob.getReport()).thenReturn( MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, - 0, 0, 0, 0, 0, 0, "jobfile", null)); + 0, 0, 0, 0, 0, 0, "jobfile", null, false)); MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob); @@ -375,7 +375,7 @@ void setProgress(float setupProgress, float mapProgress, public JobReport getReport() { return MRBuilderUtils.newJobReport(this.jobId, "job", "user", JobState.RUNNING, 0, 0, 0, this.setupProgress, this.mapProgress, - this.reduceProgress, this.cleanupProgress, "jobfile", null); + this.reduceProgress, this.cleanupProgress, "jobfile", null, false); } } @@ -511,7 +511,7 @@ public void testBlackListedNodes() throws Exception { Job mockJob = mock(Job.class); when(mockJob.getReport()).thenReturn( MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, - 0, 0, 0, 0, 0, 0, "jobfile", null)); + 0, 0, 0, 0, 0, 0, "jobfile", null, false)); MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob); @@ -610,7 +610,7 @@ public void testBlackListedNodesWithSchedulingToThatNode() throws Exception { Job mockJob = mock(Job.class); when(mockJob.getReport()).thenReturn( MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, - 0, 0, 0, 0, 0, 0, "jobfile", null)); + 0, 0, 0, 0, 0, 0, "jobfile", null, false)); MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index f82c1d5833..303c488d70 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -754,8 +754,6 @@ public MyAppMaster(Clock clock) { } class MyAppContext implements AppContext { - // I'll be making Avro objects by hand. Please don't do that very often. 
- private final ApplicationAttemptId myAppAttemptID; private final ApplicationId myApplicationID; private final JobId myJobID; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/avro/MRClientProtocol.genavro b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/avro/MRClientProtocol.genavro deleted file mode 100644 index fdf98ab696..0000000000 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/avro/MRClientProtocol.genavro +++ /dev/null @@ -1,153 +0,0 @@ -@namespace("org.apache.hadoop.mapreduce.v2.api") -protocol MRClientProtocol { - - import idl "./yarn/yarn-api/src/main/avro/yarn-types.genavro"; - - enum TaskType { - MAP, - REDUCE - } - - record JobID { - org.apache.hadoop.yarn.ApplicationID appID; - int id; - } - - record TaskID { - JobID jobID; - TaskType taskType; - int id; - } - - record TaskAttemptID { - TaskID taskID; - int id; - } - - enum TaskState { - NEW, - SCHEDULED, - RUNNING, - SUCCEEDED, - FAILED, - KILL_WAIT, - KILLED - } - - enum Phase { - STARTING, - MAP, - SHUFFLE, - SORT, - REDUCE, - CLEANUP - } - - record Counter { - string name; - string displayName; - long value; - } - - record CounterGroup { - string name; - string displayname; - map counters; - } - - record Counters { - map groups; - } - - record TaskReport { - TaskID id; - TaskState state; - float progress; - long startTime; - long finishTime; - Counters counters; - array runningAttempts; - union{TaskAttemptID, null} successfulAttempt; - array diagnostics; - } - - enum TaskAttemptState { - NEW, - UNASSIGNED, - ASSIGNED, - RUNNING, - COMMIT_PENDING, - SUCCESS_CONTAINER_CLEANUP, - SUCCEEDED, - FAIL_CONTAINER_CLEANUP, - FAIL_TASK_CLEANUP, - FAILED, - KILL_CONTAINER_CLEANUP, - KILL_TASK_CLEANUP, - KILLED - } - - record TaskAttemptReport { - TaskAttemptID id; - TaskAttemptState state; - float progress; - long startTime; - long finishTime; - Counters counters; - string diagnosticInfo; - string stateString; - Phase phase; - } - - enum JobState { - NEW, - INITED, - RUNNING, - SUCCEEDED, - FAILED, - KILL_WAIT, - KILLED, - ERROR - } - - record JobReport { - JobID id; - JobState state; - float mapProgress; - float reduceProgress; - float cleanupProgress; - float setupProgress; - long startTime; - long finishTime; - } - - enum TaskAttemptCompletionEventStatus { - FAILED, - KILLED, - SUCCEEDED, - OBSOLETE, - TIPFAILED - } - - record TaskAttemptCompletionEvent { - TaskAttemptID attemptId; - TaskAttemptCompletionEventStatus status; - string mapOutputServerAddress; - int attemptRunTime; - int eventId; - } - - JobReport getJobReport(JobID jobID) throws org.apache.hadoop.yarn.YarnRemoteException; - TaskReport getTaskReport(TaskID taskID) throws org.apache.hadoop.yarn.YarnRemoteException; - TaskAttemptReport getTaskAttemptReport(TaskAttemptID taskAttemptID) throws org.apache.hadoop.yarn.YarnRemoteException; - Counters getCounters(JobID jobID) throws org.apache.hadoop.yarn.YarnRemoteException; - array getTaskAttemptCompletionEvents(JobID jobID, int fromEventId, int maxEvents) throws org.apache.hadoop.yarn.YarnRemoteException; - array getTaskReports(JobID jobID, TaskType taskType) throws org.apache.hadoop.yarn.YarnRemoteException; - array getDiagnostics(TaskAttemptID taskAttemptID) throws org.apache.hadoop.yarn.YarnRemoteException; - - void killJob(JobID jobID) throws org.apache.hadoop.yarn.YarnRemoteException; - void killTask(TaskID taskID) throws org.apache.hadoop.yarn.YarnRemoteException; - void 
killTaskAttempt(TaskAttemptID taskAttemptID) throws org.apache.hadoop.yarn.YarnRemoteException; - void failTaskAttempt(TaskAttemptID taskAttemptID) throws org.apache.hadoop.yarn.YarnRemoteException; - -} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java index f20fbf934a..d3ebee62b2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java @@ -288,7 +288,7 @@ public static JobStatus fromYarn(JobReport jobreport, String trackingUrl) { .getMapProgress(), jobreport.getReduceProgress(), jobreport .getCleanupProgress(), fromYarn(jobreport.getJobState()), jobPriority, jobreport.getUser(), jobreport.getJobName(), jobreport - .getJobFile(), trackingUrl); + .getJobFile(), trackingUrl, jobreport.isUber()); jobStatus.setFailureInfo(jobreport.getDiagnostics()); return jobStatus; } @@ -421,7 +421,7 @@ public static JobStatus fromYarn(ApplicationReport application, TypeConverter.fromYarn(application.getYarnApplicationState()), org.apache.hadoop.mapreduce.JobPriority.NORMAL, application.getUser(), application.getName(), - application.getQueue(), jobFile, trackingUrl + application.getQueue(), jobFile, trackingUrl, false ); jobStatus.setSchedulingInfo(trackingUrl); // Set AM tracking url jobStatus.setStartTime(application.getStartTime()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java index 469c425feb..b2f2cc1fc8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java @@ -36,6 +36,7 @@ public interface JobReport { public abstract String getDiagnostics(); public abstract String getJobFile(); public abstract List getAMInfos(); + public abstract boolean isUber(); public abstract void setJobId(JobId jobId); public abstract void setJobState(JobState jobState); @@ -52,4 +53,5 @@ public interface JobReport { public abstract void setDiagnostics(String diagnostics); public abstract void setJobFile(String jobFile); public abstract void setAMInfos(List amInfos); + public abstract void setIsUber(boolean isUber); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java index 41e46c3391..1b16c864f8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java @@ -332,4 +332,16 @@ private JobStateProto convertToProtoFormat(JobState e) { private JobState convertFromProtoFormat(JobStateProto e) { return MRProtoUtils.convertFromProtoFormat(e); } + + @Override + public synchronized boolean isUber() { + JobReportProtoOrBuilder p = viaProto ? proto : builder; + return p.getIsUber(); + } + + @Override + public synchronized void setIsUber(boolean isUber) { + maybeInitBuilder(); + builder.setIsUber(isUber); + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java index 109028205d..2b5b21c867 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java @@ -60,7 +60,8 @@ public static TaskAttemptId newTaskAttemptId(TaskId taskId, int attemptId) { public static JobReport newJobReport(JobId jobId, String jobName, String userName, JobState state, long submitTime, long startTime, long finishTime, float setupProgress, float mapProgress, float reduceProgress, - float cleanupProgress, String jobFile, List amInfos) { + float cleanupProgress, String jobFile, List amInfos, + boolean isUber) { JobReport report = Records.newRecord(JobReport.class); report.setJobId(jobId); report.setJobName(jobName); @@ -75,6 +76,7 @@ public static JobReport newJobReport(JobId jobId, String jobName, report.setReduceProgress(reduceProgress); report.setJobFile(jobFile); report.setAMInfos(amInfos); + report.setIsUber(isUber); return report; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto index 3390b7ad84..95345ac816 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto @@ -152,6 +152,7 @@ message JobReportProto { optional string jobFile = 13; repeated AMInfoProto am_infos = 14; optional int64 submit_time = 15; + optional bool is_uber = 16 [default = false]; } message AMInfoProto { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java index c7834adb7e..44669ec686 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java @@ -109,7 +109,14 @@ public int run(String[] argv) throws Exception { return exitcode; } +// format and print information about the passed in job queue. 
+ void printJobQueueInfo(JobQueueInfo jobQueueInfo, Writer writer) + throws IOException { + printJobQueueInfo(jobQueueInfo, writer, ""); + } + // format and print information about the passed in job queue. + @SuppressWarnings("deprecation") void printJobQueueInfo(JobQueueInfo jobQueueInfo, Writer writer, String prefix) throws IOException { if (jobQueueInfo == null) { @@ -136,7 +143,7 @@ void printJobQueueInfo(JobQueueInfo jobQueueInfo, Writer writer, private void displayQueueList() throws IOException { JobQueueInfo[] rootQueues = jc.getRootQueues(); for (JobQueueInfo queue : rootQueues) { - printJobQueueInfo(queue, new PrintWriter(System.out), ""); + printJobQueueInfo(queue, new PrintWriter(System.out)); } } @@ -174,7 +181,7 @@ private void displayQueueInfo(String queue, boolean showJobs) System.out.println("Queue \"" + queue + "\" does not exist."); return; } - printJobQueueInfo(jobQueueInfo, new PrintWriter(System.out), ""); + printJobQueueInfo(jobQueueInfo, new PrintWriter(System.out)); if (showJobs && (jobQueueInfo.getChildren() == null || jobQueueInfo.getChildren().size() == 0)) { JobStatus[] jobs = jc.getJobsFromQueue(queue); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java index e5add2139f..c10a4c0a64 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java @@ -97,7 +97,7 @@ public JobStatus(JobID jobid, float mapProgress, float reduceProgress, String user, String jobName, String jobFile, String trackingUrl) { this(jobid, mapProgress, reduceProgress, cleanupProgress, runState, - JobPriority.NORMAL, user, jobName, jobFile, trackingUrl); + JobPriority.NORMAL, user, jobName, jobFile, trackingUrl); } /** @@ -135,7 +135,8 @@ public JobStatus(JobID jobid, float mapProgress, float reduceProgress, String user, String jobName, String jobFile, String trackingUrl) { this(jobid, 0.0f, mapProgress, reduceProgress, - cleanupProgress, runState, jp, user, jobName, jobFile, trackingUrl); + cleanupProgress, runState, jp, user, jobName, jobFile, + trackingUrl); } /** @@ -157,10 +158,57 @@ public JobStatus(JobID jobid, float setupProgress, float mapProgress, int runState, JobPriority jp, String user, String jobName, String jobFile, String trackingUrl) { this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress, - runState, jp, - user, jobName, "default", jobFile, trackingUrl); + runState, jp, user, jobName, "default", jobFile, trackingUrl); } + + /** + * Create a job status object for a given jobid. + * @param jobid The jobid of the job + * @param setupProgress The progress made on the setup + * @param mapProgress The progress made on the maps + * @param reduceProgress The progress made on the reduces + * @param cleanupProgress The progress made on the cleanup + * @param runState The current state of the job + * @param jp Priority of the job. + * @param user userid of the person who submitted the job. + * @param jobName user-specified job name. + * @param jobFile job configuration file. + * @param trackingUrl link to the web-ui for details of the job. 
+ * @param isUber Whether job running in uber mode + */ + public JobStatus(JobID jobid, float setupProgress, float mapProgress, + float reduceProgress, float cleanupProgress, + int runState, JobPriority jp, String user, String jobName, + String jobFile, String trackingUrl, boolean isUber) { + this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress, + runState, jp, user, jobName, "default", jobFile, trackingUrl, isUber); + } + /** + * Create a job status object for a given jobid. + * @param jobid The jobid of the job + * @param setupProgress The progress made on the setup + * @param mapProgress The progress made on the maps + * @param reduceProgress The progress made on the reduces + * @param cleanupProgress The progress made on the cleanup + * @param runState The current state of the job + * @param jp Priority of the job. + * @param user userid of the person who submitted the job. + * @param jobName user-specified job name. + * @param queue job queue name. + * @param jobFile job configuration file. + * @param trackingUrl link to the web-ui for details of the job. + */ + public JobStatus(JobID jobid, float setupProgress, float mapProgress, + float reduceProgress, float cleanupProgress, + int runState, JobPriority jp, + String user, String jobName, String queue, + String jobFile, String trackingUrl) { + this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress, + runState, jp, + user, jobName, queue, jobFile, trackingUrl, false); + } + /** * Create a job status object for a given jobid. * @param jobid The jobid of the job @@ -175,25 +223,25 @@ public JobStatus(JobID jobid, float setupProgress, float mapProgress, * @param queue job queue name. * @param jobFile job configuration file. * @param trackingUrl link to the web-ui for details of the job. 
+ * @param isUber Whether job running in uber mode */ public JobStatus(JobID jobid, float setupProgress, float mapProgress, float reduceProgress, float cleanupProgress, int runState, JobPriority jp, String user, String jobName, String queue, - String jobFile, String trackingUrl) { + String jobFile, String trackingUrl, boolean isUber) { super(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress, getEnum(runState), org.apache.hadoop.mapreduce.JobPriority.valueOf(jp.name()), - user, jobName, queue, jobFile, trackingUrl); + user, jobName, queue, jobFile, trackingUrl, isUber); } - public static JobStatus downgrade(org.apache.hadoop.mapreduce.JobStatus stat){ JobStatus old = new JobStatus(JobID.downgrade(stat.getJobID()), stat.getSetupProgress(), stat.getMapProgress(), stat.getReduceProgress(), stat.getCleanupProgress(), stat.getState().getValue(), JobPriority.valueOf(stat.getPriority().name()), stat.getUsername(), stat.getJobName(), stat.getJobFile(), - stat.getTrackingUrl()); + stat.getTrackingUrl(), stat.isUber()); old.setStartTime(stat.getStartTime()); old.setFinishTime(stat.getFinishTime()); old.setSchedulingInfo(stat.getSchedulingInfo()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java index 5e92baa8b6..e4351536c8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java @@ -467,6 +467,7 @@ public String toString() { sb.append("Job File: ").append(status.getJobFile()).append("\n"); sb.append("Job Tracking URL : ").append(status.getTrackingUrl()); sb.append("\n"); + sb.append("Uber job : ").append(status.isUber()).append("\n"); sb.append("map() completion: "); sb.append(status.getMapProgress()).append("\n"); sb.append("reduce() completion: "); @@ -1268,12 +1269,20 @@ public boolean monitorAndPrintJob() Job.getProgressPollInterval(clientConf); /* make sure to report full progress after the job is done */ boolean reportedAfterCompletion = false; + boolean reportedUberMode = false; while (!isComplete() || !reportedAfterCompletion) { if (isComplete()) { reportedAfterCompletion = true; } else { Thread.sleep(progMonitorPollIntervalMillis); } + if (status.getState() == JobStatus.State.PREP) { + continue; + } + if (!reportedUberMode) { + reportedUberMode = true; + LOG.info("Job " + jobId + " running in uber mode : " + isUber()); + } String report = (" map " + StringUtils.formatPercent(mapProgress(), 0)+ " reduce " + @@ -1497,4 +1506,10 @@ public static void setTaskOutputFilter(Configuration conf, conf.set(Job.OUTPUT_FILTER, newValue.toString()); } + public boolean isUber() throws IOException, InterruptedException { + ensureState(JobState.RUNNING); + updateStatus(); + return status.isUber(); + } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java index 6edda66ca1..bdd5a299ee 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java @@ -97,7 +97,7 @@ public int getValue() { private int usedMem; private int reservedMem; private int neededMem; - + private boolean isUber; /** */ @@ -115,17 +115,17 @@ public JobStatus() { * @param jp Priority of the job. * @param user userid of the person who submitted the job. * @param jobName user-specified job name. - * @param jobFile job configuration file. + * @param jobFile job configuration file. * @param trackingUrl link to the web-ui for details of the job. */ public JobStatus(JobID jobid, float setupProgress, float mapProgress, - float reduceProgress, float cleanupProgress, + float reduceProgress, float cleanupProgress, State runState, JobPriority jp, String user, String jobName, String jobFile, String trackingUrl) { this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress, - runState, jp, user, jobName, "default", jobFile, trackingUrl); + runState, jp, user, jobName, "default", jobFile, trackingUrl, false); } - + /** * Create a job status object for a given jobid. * @param jobid The jobid of the job @@ -138,14 +138,39 @@ public JobStatus(JobID jobid, float setupProgress, float mapProgress, * @param user userid of the person who submitted the job. * @param jobName user-specified job name. * @param queue queue name - * @param jobFile job configuration file. + * @param jobFile job configuration file. * @param trackingUrl link to the web-ui for details of the job. */ public JobStatus(JobID jobid, float setupProgress, float mapProgress, - float reduceProgress, float cleanupProgress, - State runState, JobPriority jp, - String user, String jobName, String queue, + float reduceProgress, float cleanupProgress, + State runState, JobPriority jp, + String user, String jobName, String queue, String jobFile, String trackingUrl) { + this(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress, + runState, jp, user, jobName, queue, jobFile, trackingUrl, false); + } + + /** + * Create a job status object for a given jobid. + * @param jobid The jobid of the job + * @param setupProgress The progress made on the setup + * @param mapProgress The progress made on the maps + * @param reduceProgress The progress made on the reduces + * @param cleanupProgress The progress made on the cleanup + * @param runState The current state of the job + * @param jp Priority of the job. + * @param user userid of the person who submitted the job. + * @param jobName user-specified job name. + * @param queue queue name + * @param jobFile job configuration file. + * @param trackingUrl link to the web-ui for details of the job. 
+ * @param isUber Whether job running in uber mode + */ + public JobStatus(JobID jobid, float setupProgress, float mapProgress, + float reduceProgress, float cleanupProgress, + State runState, JobPriority jp, + String user, String jobName, String queue, + String jobFile, String trackingUrl, boolean isUber) { this.jobid = jobid; this.setupProgress = setupProgress; this.mapProgress = mapProgress; @@ -161,8 +186,9 @@ public JobStatus(JobID jobid, float setupProgress, float mapProgress, this.jobName = jobName; this.jobFile = jobFile; this.trackingUrl = trackingUrl; + this.isUber = isUber; } - + /** * Sets the map progress of this job @@ -411,6 +437,7 @@ public synchronized void write(DataOutput out) throws IOException { Text.writeString(out, jobName); Text.writeString(out, trackingUrl); Text.writeString(out, jobFile); + out.writeBoolean(isUber); // Serialize the job's ACLs out.writeInt(jobACLs.size()); @@ -438,6 +465,7 @@ public synchronized void readFields(DataInput in) throws IOException { this.jobName = Text.readString(in); this.trackingUrl = Text.readString(in); this.jobFile = Text.readString(in); + this.isUber = in.readBoolean(); // De-serialize the job's ACLs int numACLs = in.readInt(); @@ -562,9 +590,26 @@ public void setNeededMem(int n) { this.neededMem = n; } + /** + * Whether job running in uber mode + * @return job in uber-mode + */ + public synchronized boolean isUber() { + return isUber; + } + + /** + * Set uber-mode flag + * @param isUber Whether job running in uber-mode + */ + public synchronized void setUber(boolean isUber) { + this.isUber = isUber; + } + public String toString() { StringBuffer buffer = new StringBuffer(); buffer.append("job-id : " + jobid); + buffer.append("uber-mode : " + isUber); buffer.append("map-progress : " + mapProgress); buffer.append("reduce-progress : " + reduceProgress); buffer.append("cleanup-progress : " + cleanupProgress); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java index dd189e2eaf..621787981d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java @@ -150,6 +150,10 @@ public interface MRJobConfig { public static final String NUM_REDUCE_PROFILES = "mapreduce.task.profile.reduces"; + public static final String TASK_MAP_PROFILE_PARAMS = "mapreduce.task.profile.map.params"; + + public static final String TASK_REDUCE_PROFILE_PARAMS = "mapreduce.task.profile.reduce.params"; + public static final String TASK_TIMEOUT = "mapreduce.task.timeout"; public static final String TASK_ID = "mapreduce.task.id"; @@ -298,12 +302,6 @@ public interface MRJobConfig { "mapreduce.job.ubertask.maxreduces"; public static final String JOB_UBERTASK_MAXBYTES = "mapreduce.job.ubertask.maxbytes"; - public static final String UBERTASK_JAVA_OPTS = - "mapreduce.ubertask.child.java.opts"; // or mapreduce.uber.java.opts? - public static final String UBERTASK_ULIMIT = - "mapreduce.ubertask.child.ulimit"; // or mapreduce.uber.ulimit? - public static final String UBERTASK_ENV = - "mapreduce.ubertask.child.env"; // or mapreduce.uber.env? 
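The two profiling keys added to MRJobConfig above take a JVM command-line fragment per task type. A rough sketch of wiring them into a job configuration follows; it assumes the pre-existing mapreduce.task.profile switch, and the hprof strings are example values only, not defaults from this patch.

    // Sketch only: the hprof argument strings are illustrative, not shipped defaults.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class ProfileParamsSketch {
      public static Configuration configureProfiling(Configuration conf) {
        conf.setBoolean(MRJobConfig.TASK_PROFILE, true);   // pre-existing switch, assumed here
        conf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS,
            "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s");
        conf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS,
            "-agentlib:hprof=cpu=samples,force=n,thread=y,verbose=n,file=%s");
        return conf;
      }
    }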
public static final String MR_PREFIX = "yarn.app.mapreduce."; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java index d7c12375b1..b09aaef793 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java @@ -76,6 +76,7 @@ enum GroupType { FRAMEWORK, FILESYSTEM }; TaskCounter.class.getName()); legacyMap.put("org.apache.hadoop.mapred.JobInProgress$Counter", JobCounter.class.getName()); + legacyMap.put("FileSystemCounter", FileSystemCounter.class.getName()); } private final Limits limits = new Limits(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java index 3390fb7be2..a12db54afc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java @@ -353,7 +353,7 @@ private void handleJobSubmittedEvent(JobSubmittedEvent event) { * The class where job information is aggregated into after parsing */ public static class JobInfo { - String errorInfo = "None"; + String errorInfo = ""; long submitTime; long finishTime; JobID jobid; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java index 4ce1c8159d..bdd5bed4f3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.text.ParseException; +import org.apache.hadoop.mapred.Counters.Counter; +import org.apache.hadoop.mapreduce.FileSystemCounter; import org.apache.hadoop.mapreduce.JobCounter; import org.apache.hadoop.mapreduce.TaskCounter; import org.junit.Test; @@ -102,6 +104,7 @@ public void testLegacyNames() { Counters counters = new Counters(); counters.incrCounter(TaskCounter.MAP_INPUT_RECORDS, 1); counters.incrCounter(JobCounter.DATA_LOCAL_MAPS, 1); + counters.findCounter("file", FileSystemCounter.BYTES_READ).increment(1); assertEquals("New name", 1, counters.findCounter( TaskCounter.class.getName(), "MAP_INPUT_RECORDS").getValue()); @@ -114,6 +117,14 @@ public void testLegacyNames() { assertEquals("Legacy name", 1, counters.findCounter( "org.apache.hadoop.mapred.JobInProgress$Counter", "DATA_LOCAL_MAPS").getValue()); + + assertEquals("New name", 1, counters.findCounter( + 
FileSystemCounter.class.getName(), "FILE_BYTES_READ").getValue()); + assertEquals("New name and method", 1, counters.findCounter("file", + FileSystemCounter.BYTES_READ).getValue()); + assertEquals("Legacy name", 1, counters.findCounter( + "FileSystemCounter", + "FILE_BYTES_READ").getValue()); } public static void main(String[] args) throws IOException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobQueueClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobQueueClient.java index 636b12a89d..491516995f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobQueueClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobQueueClient.java @@ -45,7 +45,7 @@ public void testPrintJobQueueInfo() throws IOException { ByteArrayOutputStream bbos = new ByteArrayOutputStream(); PrintWriter writer = new PrintWriter(bbos); - queueClient.printJobQueueInfo(parent, writer, ""); + queueClient.printJobQueueInfo(parent, writer); Assert.assertTrue("printJobQueueInfo did not print grandchild's name", bbos.toString().contains("GrandChildQueue")); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java index f18cbe3318..7121620906 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobMonitorAndPrint.java @@ -63,17 +63,20 @@ public void setUp() throws IOException { when(cluster.getConf()).thenReturn(conf); when(cluster.getClient()).thenReturn(clientProtocol); JobStatus jobStatus = new JobStatus(new JobID("job_000", 1), 0f, 0f, 0f, 0f, - State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-jobfile", "tmp-url"); + State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", + "tmp-jobfile", "tmp-url"); job = Job.getInstance(cluster, jobStatus, conf); job = spy(job); } @Test public void testJobMonitorAndPrint() throws Exception { - JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f, 0.1f, 0f, - State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-jobfile", "tmp-url"); - JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f, 1f, 1f, - State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname", "tmp-jobfile", "tmp-url"); + JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f, + 0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname", + "tmp-queue", "tmp-jobfile", "tmp-url", true); + JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f, + 1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname", + "tmp-queue", "tmp-jobfile", "tmp-url", true); doAnswer( new Answer() { @@ -102,15 +105,21 @@ public TaskCompletionEvent[] answer(InvocationOnMock invocation) String line; boolean foundHundred = false; boolean foundComplete = false; - String match_1 = "map 100% reduce 100%"; - String 
match_2 = "completed successfully"; + boolean foundUber = false; + String match_1 = "uber mode : true"; + String match_2 = "map 100% reduce 100%"; + String match_3 = "completed successfully"; while ((line = r.readLine()) != null) { - foundHundred = line.contains(match_1); + if (line.contains(match_1)) { + foundUber = true; + } + foundHundred = line.contains(match_2); if (foundHundred) break; } line = r.readLine(); - foundComplete = line.contains(match_2); + foundComplete = line.contains(match_3); + assertTrue(foundUber); assertTrue(foundHundred); assertTrue(foundComplete); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java index ca4ab183ad..c1b308935a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java @@ -107,6 +107,7 @@ public CompletedJob(Configuration conf, JobId jobId, Path historyFile, report.setTrackingUrl(JobHistoryUtils.getHistoryUrl(conf, TypeConverter .toYarn(TypeConverter.fromYarn(jobId)).getAppId())); report.setAMInfos(getAMInfos()); + report.setIsUber(isUber()); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index e9877736e0..c4af33ad58 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -27,12 +27,11 @@ import java.util.Arrays; import java.util.Collection; -import org.apache.hadoop.ipc.Server; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; @@ -79,14 +78,14 @@ import org.apache.hadoop.yarn.webapp.WebApps; /** - * This module is responsible for talking to the + * This module is responsible for talking to the * JobClient (user facing). * */ public class HistoryClientService extends AbstractService { private static final Log LOG = LogFactory.getLog(HistoryClientService.class); - + private MRClientProtocol protocolHandler; private Server server; private WebApp webApp; @@ -118,22 +117,22 @@ public void start() { server = rpc.getServer(MRClientProtocol.class, protocolHandler, address, conf, null, - conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT, + conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT, JHAdminConfig.DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT)); - + // Enable service authorization? 
if (conf.getBoolean( - CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, + CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) { server.refreshServiceAcl(conf, new MRAMPolicyProvider()); } - + server.start(); this.bindAddress = NetUtils.createSocketAddr(hostNameResolved.getHostAddress() + ":" + server.getPort()); LOG.info("Instantiated MRClientService at " + this.bindAddress); - + super.start(); } @@ -141,7 +140,7 @@ private void initializeWebApp(Configuration conf) { webApp = new HsWebApp(history); String bindAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS); - WebApps.$for("jobhistory", this).with(conf).at(bindAddress).start(webApp); + WebApps.$for("jobhistory", HistoryClientService.class, this, "ws").with(conf).at(bindAddress).start(webApp); } @Override @@ -158,7 +157,7 @@ public void stop() { private class MRClientProtocolHandler implements MRClientProtocol { private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); - + private Job verifyAndGetJob(final JobId jobID) throws YarnRemoteException { UserGroupInformation loginUgi = null; Job job = null; @@ -194,7 +193,7 @@ public GetCountersResponse getCounters(GetCountersRequest request) throws YarnRe response.setCounters(job.getCounters()); return response; } - + @Override public GetJobReportResponse getJobReport(GetJobReportRequest request) throws YarnRemoteException { JobId jobId = request.getJobId(); @@ -227,23 +226,23 @@ public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(Get JobId jobId = request.getJobId(); int fromEventId = request.getFromEventId(); int maxEvents = request.getMaxEvents(); - + Job job = verifyAndGetJob(jobId); GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class); response.addAllCompletionEvents(Arrays.asList(job.getTaskAttemptCompletionEvents(fromEventId, maxEvents))); return response; } - + @Override public KillJobResponse killJob(KillJobRequest request) throws YarnRemoteException { throw RPCUtil.getRemoteException("Invalid operation on completed job"); } - + @Override public KillTaskResponse killTask(KillTaskRequest request) throws YarnRemoteException { throw RPCUtil.getRemoteException("Invalid operation on completed job"); } - + @Override public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) throws YarnRemoteException { throw RPCUtil.getRemoteException("Invalid operation on completed job"); @@ -252,15 +251,15 @@ public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) t @Override public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request) throws YarnRemoteException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - + Job job = verifyAndGetJob(taskAttemptId.getTaskId().getJobId()); - + GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class); response.addAllDiagnostics(job.getTask(taskAttemptId.getTaskId()).getAttempt(taskAttemptId).getDiagnostics()); return response; } - @Override + @Override public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) throws YarnRemoteException { throw RPCUtil.getRemoteException("Invalid operation on completed job"); } @@ -269,7 +268,7 @@ public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) t public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request) throws YarnRemoteException { JobId jobId = 
request.getJobId(); TaskType taskType = request.getTaskType(); - + GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class); Job job = verifyAndGetJob(jobId); Collection tasks = job.getTasks(taskType).values(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java index 4c9f667b5b..0542765c2a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsAboutPage.java @@ -21,7 +21,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; -import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -45,8 +45,9 @@ public class HsAboutPage extends HsView { * @return AttemptsBlock.class */ @Override protected Class content() { + HistoryInfo info = new HistoryInfo(); info("History Server"). - _("BuildVersion", VersionInfo.getBuildVersion()); + _("BuildVersion", info.getHadoopBuildVersion() + " on " + info.getHadoopVersionBuiltOn()); return InfoBlock.class; } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java index bf6c8450bb..972b295cae 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java @@ -34,6 +34,9 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; import org.apache.hadoop.security.authorize.AccessControlList; @@ -56,19 +59,6 @@ public class HsJobBlock extends HtmlBlock { final AppContext appContext; - int killedMapAttempts = 0; - int failedMapAttempts = 0; - int successfulMapAttempts = 0; - int killedReduceAttempts = 0; - int failedReduceAttempts = 0; - int successfulReduceAttempts = 0; - long avgMapTime = 0; - long avgReduceTime = 0; - long avgShuffleTime = 0; - long avgSortTime = 0; - int numMaps; - int numReduces; - @Inject HsJobBlock(AppContext appctx) { appContext = appctx; } @@ -85,37 +75,30 @@ public class HsJobBlock extends HtmlBlock { return; } JobId jobID = MRApps.toJobID(jid); - Job job = appContext.getJob(jobID); - if (job == null) { + Job j = appContext.getJob(jobID); + if (j == null) { 
html. p()._("Sorry, ", jid, " not found.")._(); return; } - Map acls = job.getJobACLs(); - List amInfos = job.getAMInfos(); - JobReport jobReport = job.getReport(); - int mapTasks = job.getTotalMaps(); - int mapTasksComplete = job.getCompletedMaps(); - int reduceTasks = job.getTotalReduces(); - int reducesTasksComplete = job.getCompletedReduces(); - long startTime = jobReport.getStartTime(); - long finishTime = jobReport.getFinishTime(); - countTasksAndAttempts(job); + List amInfos = j.getAMInfos(); + JobInfo job = new JobInfo(j); ResponseInfo infoBlock = info("Job Overview"). _("Job Name:", job.getName()). _("User Name:", job.getUserName()). _("Queue:", job.getQueueName()). _("State:", job.getState()). _("Uberized:", job.isUber()). - _("Started:", new Date(startTime)). - _("Finished:", new Date(finishTime)). + _("Started:", new Date(job.getStartTime())). + _("Finished:", new Date(job.getFinishTime())). _("Elapsed:", StringUtils.formatTime( - Times.elapsed(startTime, finishTime, false))); + Times.elapsed(job.getStartTime(), job.getFinishTime(), false))); String amString = amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters"; - List diagnostics = job.getDiagnostics(); + // todo - switch to use JobInfo + List diagnostics = j.getDiagnostics(); if(diagnostics != null && !diagnostics.isEmpty()) { StringBuffer b = new StringBuffer(); for(String diag: diagnostics) { @@ -124,18 +107,17 @@ public class HsJobBlock extends HtmlBlock { infoBlock._("Diagnostics:", b.toString()); } - if(numMaps > 0) { - infoBlock._("Average Map Time", StringUtils.formatTime(avgMapTime)); + if(job.getNumMaps() > 0) { + infoBlock._("Average Map Time", StringUtils.formatTime(job.getAvgMapTime())); } - if(numReduces > 0) { - infoBlock._("Average Reduce Time", StringUtils.formatTime(avgReduceTime)); - infoBlock._("Average Shuffle Time", StringUtils.formatTime(avgShuffleTime)); - infoBlock._("Average Merge Time", StringUtils.formatTime(avgSortTime)); + if(job.getNumReduces() > 0) { + infoBlock._("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime())); + infoBlock._("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime())); + infoBlock._("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime())); } - for(Map.Entry entry : acls.entrySet()) { - infoBlock._("ACL "+entry.getKey().getAclName()+":", - entry.getValue().getAclString()); + for (ConfEntryInfo entry : job.getAcls()) { + infoBlock._("ACL "+entry.getName()+":", entry.getValue()); } DIV div = html. _(InfoBlock.class). @@ -154,18 +136,14 @@ public class HsJobBlock extends HtmlBlock { th(_TH, "Logs"). _(); for (AMInfo amInfo : amInfos) { - String nodeHttpAddress = amInfo.getNodeManagerHost() + - ":" + amInfo.getNodeManagerHttpPort(); - NodeId nodeId = BuilderUtils.newNodeId( - amInfo.getNodeManagerHost(), amInfo.getNodeManagerPort()); - + AMAttemptInfo attempt = new AMAttemptInfo(amInfo, + job.getId(), job.getUserName(), "", ""); table.tr(). - td(String.valueOf(amInfo.getAppAttemptId().getAttemptId())). - td(new Date(amInfo.getStartTime()).toString()). - td().a(".nodelink", url("http://", nodeHttpAddress), - nodeHttpAddress)._(). - td().a(".logslink", url("logs", nodeId.toString(), - amInfo.getContainerId().toString(), jid, job.getUserName()), + td(String.valueOf(attempt.getAttemptId())). + td(new Date(attempt.getStartTime()).toString()). + td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()), + attempt.getNodeHttpAddress())._(). + td().a(".logslink", url(attempt.getShortLogsLink()), "logs")._(). 
_(); } @@ -184,13 +162,13 @@ public class HsJobBlock extends HtmlBlock { tr(_ODD). th(). a(url("tasks", jid, "m"), "Map")._(). - td(String.valueOf(mapTasks)). - td(String.valueOf(mapTasksComplete))._(). + td(String.valueOf(String.valueOf(job.getMapsTotal()))). + td(String.valueOf(String.valueOf(job.getMapsCompleted())))._(). tr(_EVEN). th(). a(url("tasks", jid, "r"), "Reduce")._(). - td(String.valueOf(reduceTasks)). - td(String.valueOf(reducesTasksComplete))._() + td(String.valueOf(String.valueOf(job.getReducesTotal()))). + td(String.valueOf(String.valueOf(job.getReducesCompleted())))._() ._(). // Attempts table @@ -204,99 +182,27 @@ public class HsJobBlock extends HtmlBlock { th("Maps"). td().a(url("attempts", jid, "m", TaskAttemptStateUI.FAILED.toString()), - String.valueOf(failedMapAttempts))._(). + String.valueOf(job.getFailedMapAttempts()))._(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.KILLED.toString()), - String.valueOf(killedMapAttempts))._(). + String.valueOf(job.getKilledMapAttempts()))._(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.SUCCESSFUL.toString()), - String.valueOf(successfulMapAttempts))._(). + String.valueOf(job.getSuccessfulMapAttempts()))._(). _(). tr(_EVEN). th("Reduces"). td().a(url("attempts", jid, "r", TaskAttemptStateUI.FAILED.toString()), - String.valueOf(failedReduceAttempts))._(). + String.valueOf(job.getFailedReduceAttempts()))._(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.KILLED.toString()), - String.valueOf(killedReduceAttempts))._(). + String.valueOf(job.getKilledReduceAttempts()))._(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.SUCCESSFUL.toString()), - String.valueOf(successfulReduceAttempts))._(). + String.valueOf(job.getSuccessfulReduceAttempts()))._(). _(). _(). _(); } - - /** - * Go through a job and update the member variables with counts for - * information to output in the page. - * @param job the job to get counts for. 
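The per-attempt counting and averaging logic being removed here now lives in the history-server JobInfo DAO that HsJobBlock constructs above. A small illustrative sketch, not part of the patch, of reading those derived values; someJob stands in for a Job obtained from the history AppContext.

    // Sketch only: "someJob" is a placeholder for a Job loaded from the history context.
    import org.apache.hadoop.mapreduce.v2.app.job.Job;
    import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;

    public class JobInfoSketch {
      static void printSummary(Job someJob) {
        JobInfo info = new JobInfo(someJob);
        System.out.println(info.getName() + " [" + info.getState() + "]");
        System.out.println("maps:    " + info.getMapsCompleted() + "/" + info.getMapsTotal());
        System.out.println("reduces: " + info.getReducesCompleted() + "/" + info.getReducesTotal());
        System.out.println("avg map time (ms):      " + info.getAvgMapTime());
        System.out.println("failed map attempts:    " + info.getFailedMapAttempts());
        System.out.println("killed reduce attempts: " + info.getKilledReduceAttempts());
      }
    }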
- */ - private void countTasksAndAttempts(Job job) { - numReduces = 0; - numMaps = 0; - Map tasks = job.getTasks(); - for (Task task : tasks.values()) { - // Attempts counts - Map attempts = task.getAttempts(); - for (TaskAttempt attempt : attempts.values()) { - - int successful = 0, failed = 0, killed =0; - - if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) { - //Do Nothing - } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt - .getState())) { - //Do Nothing - } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt - .getState())) { - ++successful; - } else if (TaskAttemptStateUI.FAILED - .correspondsTo(attempt.getState())) { - ++failed; - } else if (TaskAttemptStateUI.KILLED - .correspondsTo(attempt.getState())) { - ++killed; - } - - switch (task.getType()) { - case MAP: - successfulMapAttempts += successful; - failedMapAttempts += failed; - killedMapAttempts += killed; - if(attempt.getState() == TaskAttemptState.SUCCEEDED) { - numMaps++; - avgMapTime += (attempt.getFinishTime() - - attempt.getLaunchTime()); - } - break; - case REDUCE: - successfulReduceAttempts += successful; - failedReduceAttempts += failed; - killedReduceAttempts += killed; - if(attempt.getState() == TaskAttemptState.SUCCEEDED) { - numReduces++; - avgShuffleTime += (attempt.getShuffleFinishTime() - - attempt.getLaunchTime()); - avgSortTime += attempt.getSortFinishTime() - - attempt.getLaunchTime(); - avgReduceTime += (attempt.getFinishTime() - - attempt.getShuffleFinishTime()); - } - break; - } - } - } - - if(numMaps > 0) { - avgMapTime = avgMapTime / numMaps; - } - - if(numReduces > 0) { - avgReduceTime = avgReduceTime / numReduces; - avgShuffleTime = avgShuffleTime / numReduces; - avgSortTime = avgSortTime / numReduces; - } - } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java index 0a6b9692a4..a6aa4cb66d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobsBlock.java @@ -21,10 +21,9 @@ import java.text.SimpleDateFormat; import java.util.Date; -import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; -import org.apache.hadoop.mapreduce.v2.util.MRApps; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; @@ -38,8 +37,8 @@ */ public class HsJobsBlock extends HtmlBlock { final AppContext appContext; - static final SimpleDateFormat dateFormat = - new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z"); + static final SimpleDateFormat dateFormat = + new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z"); @Inject HsJobsBlock(AppContext appCtx) { appContext = appCtx; @@ -68,28 +67,21 @@ public class HsJobsBlock extends HtmlBlock { th("Reduces Completed")._()._(). 
tbody(); LOG.info("Getting list of all Jobs."); - for (Job job : appContext.getAllJobs().values()) { - String jobID = MRApps.toString(job.getID()); - JobReport report = job.getReport(); - String mapsTotal = String.valueOf(job.getTotalMaps()); - String mapsCompleted = String.valueOf(job.getCompletedMaps()); - String reduceTotal = String.valueOf(job.getTotalReduces()); - String reduceCompleted = String.valueOf(job.getCompletedReduces()); - long startTime = report.getStartTime(); - long finishTime = report.getFinishTime(); + for (Job j : appContext.getAllJobs().values()) { + JobInfo job = new JobInfo(j); tbody. tr(). - td(dateFormat.format(new Date(startTime))). - td(dateFormat.format(new Date(finishTime))). - td().a(url("job", jobID), jobID)._(). - td(job.getName().toString()). + td(dateFormat.format(new Date(job.getStartTime()))). + td(dateFormat.format(new Date(job.getFinishTime()))). + td().a(url("job", job.getId()), job.getId())._(). + td(job.getName()). td(job.getUserName()). td(job.getQueueName()). - td(job.getState().toString()). - td(mapsTotal). - td(mapsCompleted). - td(reduceTotal). - td(reduceCompleted)._(); + td(job.getState()). + td(String.valueOf(job.getMapsTotal())). + td(String.valueOf(job.getMapsCompleted())). + td(String.valueOf(job.getReducesTotal())). + td(String.valueOf(job.getReducesCompleted()))._(); } tbody._(). tfoot(). diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java index 54ddfbae95..a51d9884b7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java @@ -20,12 +20,13 @@ import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE; -import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; -import org.apache.hadoop.mapreduce.v2.api.records.TaskReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.webapp.App; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.util.Times; @@ -65,7 +66,7 @@ public class HsTasksBlock extends HtmlBlock { if (!symbol.isEmpty()) { type = MRApps.taskType(symbol); } - + THEAD> thead = html.table("#tasks").thead(); //Create the spanning row int attemptColSpan = type == TaskType.REDUCE ? 8 : 3; @@ -74,7 +75,7 @@ public class HsTasksBlock extends HtmlBlock { th().$colspan(attemptColSpan).$class("ui-state-default"). _("Successful Attempt")._(). _(); - + TR>> theadRow = thead. tr(). th("Name"). @@ -83,33 +84,33 @@ public class HsTasksBlock extends HtmlBlock { th("Finish Time"). th("Elapsed Time"). 
th("Start Time"); //Attempt - + if(type == TaskType.REDUCE) { theadRow.th("Shuffle Finish Time"); //Attempt theadRow.th("Merge Finish Time"); //Attempt } - + theadRow.th("Finish Time"); //Attempt - + if(type == TaskType.REDUCE) { theadRow.th("Elapsed Time Shuffle"); //Attempt theadRow.th("Elapsed Time Merge"); //Attempt theadRow.th("Elapsed Time Reduce"); //Attempt } theadRow.th("Elapsed Time"); //Attempt - + TBODY> tbody = theadRow._()._().tbody(); for (Task task : app.getJob().getTasks().values()) { if (type != null && task.getType() != type) { continue; } - String tid = MRApps.toString(task.getID()); - - TaskReport report = task.getReport(); - long startTime = report.getStartTime(); - long finishTime = report.getFinishTime(); - long elapsed = Times.elapsed(startTime, finishTime, false); - + TaskInfo info = new TaskInfo(task); + String tid = info.getId(); + + long startTime = info.getStartTime(); + long finishTime = info.getFinishTime(); + long elapsed = info.getElapsedTime(); + long attemptStartTime = -1; long shuffleFinishTime = -1; long sortFinishTime = -1; @@ -118,30 +119,31 @@ public class HsTasksBlock extends HtmlBlock { long elapsedSortTime = -1;; long elapsedReduceTime = -1; long attemptElapsed = -1; - TaskAttempt successful = getSuccessfulAttempt(task); + TaskAttempt successful = info.getSuccessful(); if(successful != null) { - attemptStartTime = successful.getLaunchTime(); - attemptFinishTime = successful.getFinishTime(); + TaskAttemptInfo ta; if(type == TaskType.REDUCE) { - shuffleFinishTime = successful.getShuffleFinishTime(); - sortFinishTime = successful.getSortFinishTime(); - elapsedShuffleTime = - Times.elapsed(attemptStartTime, shuffleFinishTime, false); - elapsedSortTime = - Times.elapsed(shuffleFinishTime, sortFinishTime, false); - elapsedReduceTime = - Times.elapsed(sortFinishTime, attemptFinishTime, false); + ReduceTaskAttemptInfo rta = new ReduceTaskAttemptInfo(successful, type); + shuffleFinishTime = rta.getShuffleFinishTime(); + sortFinishTime = rta.getMergeFinishTime(); + elapsedShuffleTime = rta.getElapsedShuffleTime(); + elapsedSortTime = rta.getElapsedMergeTime(); + elapsedReduceTime = rta.getElapsedReduceTime(); + ta = rta; + } else { + ta = new TaskAttemptInfo(successful, type, false); } - attemptElapsed = - Times.elapsed(attemptStartTime, attemptFinishTime, false); + attemptStartTime = ta.getStartTime(); + attemptFinishTime = ta.getFinishTime(); + attemptElapsed = ta.getElapsedTime(); } - + TR>> row = tbody.tr(); row. td(). - br().$title(String.valueOf(task.getID().getId()))._(). // sorting + br().$title(String.valueOf(info.getTaskNum()))._(). // sorting a(url("task", tid), tid)._(). - td(report.getTaskState().toString()). + td(info.getState()). td(). br().$title(String.valueOf(startTime))._(). _(Times.format(startTime))._(). @@ -166,7 +168,7 @@ public class HsTasksBlock extends HtmlBlock { td(). br().$title(String.valueOf(attemptFinishTime))._(). _(Times.format(attemptFinishTime))._(); - + if(type == TaskType.REDUCE) { row.td(). br().$title(String.valueOf(elapsedShuffleTime))._(). @@ -178,7 +180,7 @@ public class HsTasksBlock extends HtmlBlock { br().$title(String.valueOf(elapsedReduceTime))._(). _(formatTime(elapsedReduceTime))._(); } - + row.td(). br().$title(String.valueOf(attemptElapsed))._(). 
_(formatTime(attemptElapsed))._(); @@ -194,7 +196,7 @@ public class HsTasksBlock extends HtmlBlock { .$type(InputType.text).$name("elapsed_time").$value("Elapsed Time")._() ._().th().input("search_init").$type(InputType.text) .$name("attempt_start_time").$value("Start Time")._()._(); - + if(type == TaskType.REDUCE) { footRow.th().input("search_init").$type(InputType.text) .$name("shuffle_time").$value("Shuffle Time")._()._(); @@ -216,20 +218,12 @@ public class HsTasksBlock extends HtmlBlock { footRow.th().input("search_init").$type(InputType.text) .$name("attempt_elapsed").$value("Elapsed Time")._()._(); - + footRow._()._()._(); } private String formatTime(long elapsed) { return elapsed < 0 ? "N/A" : StringUtils.formatTime(elapsed); } - - private TaskAttempt getSuccessfulAttempt(Task task) { - for(TaskAttempt attempt: task.getAttempts().values()) { - if(attempt.getState() == TaskAttemptState.SUCCEEDED) { - return attempt; - } - } - return null; - } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java index a9f08c5dc0..71f1e30a28 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebApp.java @@ -27,6 +27,7 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.webapp.AMParams; import org.apache.hadoop.mapreduce.v2.hs.HistoryContext; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.WebApp; public class HsWebApp extends WebApp implements AMParams { @@ -39,6 +40,9 @@ public HsWebApp(HistoryContext history) { @Override public void setup() { + bind(HsWebServices.class); + bind(JAXBContextResolver.class); + bind(GenericExceptionHandler.class); bind(AppContext.class).toInstance(history); route("/", HsController.class); route("/app", HsController.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java new file mode 100644 index 0000000000..8463f27d94 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java @@ -0,0 +1,469 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.v2.hs.webapp; + +import java.io.IOException; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.UriInfo; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; +import org.apache.hadoop.mapreduce.v2.api.records.JobId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskType; +import org.apache.hadoop.mapreduce.v2.app.AppContext; +import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.job.Task; +import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptsInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo; +import org.apache.hadoop.mapreduce.v2.util.MRApps; +import org.apache.hadoop.yarn.YarnException; +import org.apache.hadoop.yarn.webapp.BadRequestException; +import org.apache.hadoop.yarn.webapp.NotFoundException; +import org.apache.hadoop.yarn.webapp.WebApp; + +import com.google.inject.Inject; + +@Path("/ws/v1/history") +public class HsWebServices { + private final AppContext appCtx; + private WebApp webapp; + private final Configuration conf; + + @Context + UriInfo uriInfo; + + @Inject + public HsWebServices(final AppContext appCtx, final Configuration conf, + final WebApp webapp) { + this.appCtx = appCtx; + this.conf = conf; + this.webapp = webapp; + } + + @GET + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public HistoryInfo get() { + return getHistoryInfo(); + } + + @GET + @Path("/info") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public HistoryInfo getHistoryInfo() { + return new HistoryInfo(); + } + + @GET + @Path("/mapreduce/jobs") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public JobsInfo getJobs(@QueryParam("user") String userQuery, + @QueryParam("limit") String count, + @QueryParam("queue") String queueQuery, + @QueryParam("startedTimeBegin") String startedBegin, + @QueryParam("startedTimeEnd") String startedEnd, + @QueryParam("finishedTimeBegin") String finishBegin, + @QueryParam("finishedTimeEnd") String finishEnd) { + JobsInfo allJobs = new JobsInfo(); + long num = 0; + boolean checkCount = false; + boolean checkStart = false; + boolean checkEnd = 
false;
+    long countNum = 0;
+
+    // set values suitable in case both of begin/end not specified
+    long sBegin = 0;
+    long sEnd = Long.MAX_VALUE;
+    long fBegin = 0;
+    long fEnd = Long.MAX_VALUE;
+
+    if (count != null && !count.isEmpty()) {
+      checkCount = true;
+      try {
+        countNum = Long.parseLong(count);
+      } catch (NumberFormatException e) {
+        throw new BadRequestException(e.getMessage());
+      }
+      if (countNum <= 0) {
+        throw new BadRequestException("limit value must be greater than 0");
+      }
+    }
+
+    if (startedBegin != null && !startedBegin.isEmpty()) {
+      checkStart = true;
+      try {
+        sBegin = Long.parseLong(startedBegin);
+      } catch (NumberFormatException e) {
+        throw new BadRequestException(e.getMessage());
+      }
+      if (sBegin < 0) {
+        throw new BadRequestException("startedTimeBegin must be greater than 0");
+      }
+    }
+    if (startedEnd != null && !startedEnd.isEmpty()) {
+      checkStart = true;
+      try {
+        sEnd = Long.parseLong(startedEnd);
+      } catch (NumberFormatException e) {
+        throw new BadRequestException(e.getMessage());
+      }
+      if (sEnd < 0) {
+        throw new BadRequestException("startedTimeEnd must be greater than 0");
+      }
+    }
+    if (sBegin > sEnd) {
+      throw new BadRequestException(
+          "startedTimeEnd must be greater than startedTimeBegin");
+    }
+
+    if (finishBegin != null && !finishBegin.isEmpty()) {
+      checkEnd = true;
+      try {
+        fBegin = Long.parseLong(finishBegin);
+      } catch (NumberFormatException e) {
+        throw new BadRequestException(e.getMessage());
+      }
+      if (fBegin < 0) {
+        throw new BadRequestException("finishTimeBegin must be greater than 0");
+      }
+    }
+    if (finishEnd != null && !finishEnd.isEmpty()) {
+      checkEnd = true;
+      try {
+        fEnd = Long.parseLong(finishEnd);
+      } catch (NumberFormatException e) {
+        throw new BadRequestException(e.getMessage());
+      }
+      if (fEnd < 0) {
+        throw new BadRequestException("finishTimeEnd must be greater than 0");
+      }
+    }
+    if (fBegin > fEnd) {
+      throw new BadRequestException(
+          "finishTimeEnd must be greater than finishTimeBegin");
+    }
+
+    for (Job job : appCtx.getAllJobs().values()) {
+      if (checkCount && num == countNum) {
+        break;
+      }
+
+      // getAllJobs only gives a partial job; fetch the full one
+      Job fullJob = appCtx.getJob(job.getID());
+      if (fullJob == null) {
+        continue;
+      }
+
+      JobInfo jobInfo = new JobInfo(fullJob);
+      // can't really validate queue is a valid one since queues could change
+      if (queueQuery != null && !queueQuery.isEmpty()) {
+        if (!jobInfo.getQueueName().equals(queueQuery)) {
+          continue;
+        }
+      }
+
+      if (userQuery != null && !userQuery.isEmpty()) {
+        if (!jobInfo.getUserName().equals(userQuery)) {
+          continue;
+        }
+      }
+
+      if (checkStart
+          && (jobInfo.getStartTime() < sBegin || jobInfo.getStartTime() > sEnd)) {
+        continue;
+      }
+      if (checkEnd
+          && (jobInfo.getFinishTime() < fBegin || jobInfo.getFinishTime() > fEnd)) {
+        continue;
+      }
+
+      allJobs.add(jobInfo);
+      num++;
+    }
+    return allJobs;
+  }
+
+  @GET
+  @Path("/mapreduce/jobs/{jobid}")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public JobInfo getJob(@PathParam("jobid") String jid) {
+    JobId jobId = MRApps.toJobID(jid);
+    if (jobId == null) {
+      throw new NotFoundException("job, " + jid + ", is not found");
+    }
+    Job job = appCtx.getJob(jobId);
+    if (job == null) {
+      throw new NotFoundException("job, " + jid + ", is not found");
+    }
+    return new JobInfo(job);
+  }
+
+  @GET
+  @Path("/mapreduce/jobs/{jobid}/attempts")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
+    JobId jobId =
MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + AMAttemptsInfo amAttempts = new AMAttemptsInfo(); + for (AMInfo amInfo : job.getAMInfos()) { + AMAttemptInfo attempt = new AMAttemptInfo(amInfo, MRApps.toString(job + .getID()), job.getUserName(), uriInfo.getBaseUri().toString(), + webapp.name()); + amAttempts.add(attempt); + } + return amAttempts; + } + + @GET + @Path("/mapreduce/jobs/{jobid}/counters") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public JobCounterInfo getJobCounters(@PathParam("jobid") String jid) { + JobId jobId = MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + return new JobCounterInfo(this.appCtx, job); + } + + @GET + @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/counters") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public JobTaskCounterInfo getSingleTaskCounters( + @PathParam("jobid") String jid, @PathParam("taskid") String tid) { + JobId jobId = MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = this.appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + return new JobTaskCounterInfo(task); + } + + @GET + @Path("/mapreduce/jobs/{jobid}/conf") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public ConfInfo getJobConf(@PathParam("jobid") String jid) { + JobId jobId = MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + ConfInfo info; + try { + info = new ConfInfo(job, this.conf); + } catch (IOException e) { + throw new NotFoundException("unable to load configuration for job: " + + jid); + } + + return info; + } + + @GET + @Path("/mapreduce/jobs/{jobid}/tasks") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public TasksInfo getJobTasks(@PathParam("jobid") String jid, + @QueryParam("type") String type) { + Job job = this.appCtx.getJob(MRApps.toJobID(jid)); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + TasksInfo allTasks = new TasksInfo(); + for (Task task : job.getTasks().values()) { + TaskType ttype = null; + if (type != null && !type.isEmpty()) { + try { + ttype = MRApps.taskType(type); + } catch (YarnException e) { + throw new BadRequestException("tasktype must be either m or r"); + } + } + if (ttype != null && task.getType() != ttype) { + continue; + } + allTasks.add(new TaskInfo(task)); + } + return allTasks; + } + + @GET + @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public TaskInfo getJobTask(@PathParam("jobid") String jid, + @PathParam("taskid") String tid) { + Job job = 
this.appCtx.getJob(MRApps.toJobID(jid)); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + return new TaskInfo(task); + + } + + @GET + @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public TaskAttemptsInfo getJobTaskAttempts(@PathParam("jobid") String jid, + @PathParam("taskid") String tid) { + TaskAttemptsInfo attempts = new TaskAttemptsInfo(); + Job job = this.appCtx.getJob(MRApps.toJobID(jid)); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + for (TaskAttempt ta : task.getAttempts().values()) { + if (ta != null) { + if (task.getType() == TaskType.REDUCE) { + attempts.add(new ReduceTaskAttemptInfo(ta, task.getType())); + } else { + attempts.add(new TaskAttemptInfo(ta, task.getType(), false)); + } + } + } + return attempts; + } + + @GET + @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public TaskAttemptInfo getJobTaskAttemptId(@PathParam("jobid") String jid, + @PathParam("taskid") String tid, @PathParam("attemptid") String attId) { + Job job = this.appCtx.getJob(MRApps.toJobID(jid)); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + TaskAttemptId attemptId = MRApps.toTaskAttemptID(attId); + if (attemptId == null) { + throw new NotFoundException("task attempt id " + attId + + " not found or invalid"); + } + TaskAttempt ta = task.getAttempt(attemptId); + if (ta == null) { + throw new NotFoundException("Error getting info on task attempt id " + + attId); + } + if (task.getType() == TaskType.REDUCE) { + return new ReduceTaskAttemptInfo(ta, task.getType()); + } else { + return new TaskAttemptInfo(ta, task.getType(), false); + } + } + + @GET + @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters( + @PathParam("jobid") String jid, @PathParam("taskid") String tid, + @PathParam("attemptid") String attId) { + JobId jobId = MRApps.toJobID(jid); + if (jobId == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + Job job = this.appCtx.getJob(jobId); + if (job == null) { + throw new NotFoundException("job, " + jid + ", is not found"); + } + TaskId taskID = MRApps.toTaskID(tid); + if (taskID == null) { + throw new NotFoundException("taskid " + tid + " not found or invalid"); + } + Task task = job.getTask(taskID); + if (task == null) { + throw new NotFoundException("task not found with id " + tid); + } + TaskAttemptId attemptId = 
MRApps.toTaskAttemptID(attId); + if (attemptId == null) { + throw new NotFoundException("task attempt id " + attId + + " not found or invalid"); + } + TaskAttempt ta = task.getAttempt(attemptId); + if (ta == null) { + throw new NotFoundException("Error getting info on task attempt id " + + attId); + } + return new JobTaskAttemptCounterInfo(ta); + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/JAXBContextResolver.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/JAXBContextResolver.java new file mode 100644 index 0000000000..13c557e80b --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/JAXBContextResolver.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.v2.hs.webapp; + +import com.google.inject.Singleton; +import com.sun.jersey.api.json.JSONConfiguration; +import com.sun.jersey.api.json.JSONJAXBContext; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import javax.ws.rs.ext.ContextResolver; +import javax.ws.rs.ext.Provider; +import javax.xml.bind.JAXBContext; + +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterGroupInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterGroupInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterInfo; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptsInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; +import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo; + +@Singleton +@Provider +public class JAXBContextResolver implements ContextResolver { + + private JAXBContext context; + private final Set types; + + // you have to specify all the dao classes here + private final Class[] cTypes = { 
HistoryInfo.class, JobInfo.class, + JobsInfo.class, TasksInfo.class, TaskAttemptsInfo.class, ConfInfo.class, + CounterInfo.class, JobTaskCounterInfo.class, + JobTaskAttemptCounterInfo.class, + TaskCounterInfo.class, JobCounterInfo.class, ReduceTaskAttemptInfo.class, + TaskAttemptInfo.class, TaskAttemptsInfo.class, CounterGroupInfo.class, + TaskCounterGroupInfo.class, + AMAttemptInfo.class, AMAttemptsInfo.class}; + + public JAXBContextResolver() throws Exception { + this.types = new HashSet(Arrays.asList(cTypes)); + this.context = new JSONJAXBContext(JSONConfiguration.natural() + .rootUnwrapping(false).build(), cTypes); + } + + @Override + public JAXBContext getContext(Class objectType) { + return (types.contains(objectType)) ? context : null; + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptInfo.java new file mode 100644 index 0000000000..d4cf3686b5 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptInfo.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
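// Illustrative sketch (not part of this patch): how a client might exercise the
// JSON endpoints added above, e.g. the per-job counters resource. The host, the
// 19888 port and the "ws/v1/history" root are assumptions about how the history
// server mounts these resources; the job id is a placeholder. A 404 for an
// unknown job is rendered by the GenericExceptionHandler further below.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class JobCountersClientSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://historyserver.example.com:19888/ws/v1/history"
        + "/mapreduce/jobs/job_1325026111346_0001/counters");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");   // or application/xml
    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"));
    StringBuilder body = new StringBuilder();
    for (String line; (line = in.readLine()) != null; ) {
      body.append(line);
    }
    in.close();
    System.out.println(body);
  }
}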
+ */ +package org.apache.hadoop.mapreduce.v2.hs.webapp.dao; + +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.util.StringHelper.ujoin; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.util.BuilderUtils; + +@XmlRootElement(name = "amAttempt") +@XmlAccessorType(XmlAccessType.FIELD) +public class AMAttemptInfo { + + protected String nodeHttpAddress; + protected String nodeId; + protected int id; + protected long startTime; + protected String containerId; + protected String logsLink; + + @XmlTransient + protected String shortLogsLink; + + public AMAttemptInfo() { + } + + public AMAttemptInfo(AMInfo amInfo, String jobId, String user, String host, + String pathPrefix) { + this.nodeHttpAddress = amInfo.getNodeManagerHost() + ":" + + amInfo.getNodeManagerHttpPort(); + NodeId nodeId = BuilderUtils.newNodeId(amInfo.getNodeManagerHost(), + amInfo.getNodeManagerPort()); + this.nodeId = nodeId.toString(); + this.id = amInfo.getAppAttemptId().getAttemptId(); + this.startTime = amInfo.getStartTime(); + this.containerId = amInfo.getContainerId().toString(); + this.logsLink = join( + host, + pathPrefix, + ujoin("logs", nodeId.toString(), amInfo.getContainerId().toString(), + jobId, user)); + this.shortLogsLink = ujoin("logs", nodeId.toString(), amInfo + .getContainerId().toString(), jobId, user); + } + + public String getNodeHttpAddress() { + return this.nodeHttpAddress; + } + + public String getNodeId() { + return this.nodeId; + } + + public int getAttemptId() { + return this.id; + } + + public long getStartTime() { + return this.startTime; + } + + public String getContainerId() { + return this.containerId; + } + + public String getLogsLink() { + return this.logsLink; + } + + public String getShortLogsLink() { + return this.shortLogsLink; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptsInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptsInfo.java new file mode 100644 index 0000000000..ee092b8c44 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptsInfo.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by joblicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
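// Illustrative sketch (not part of this patch): the JAXBContextResolver above
// builds a JSONJAXBContext in "natural" notation with root unwrapping disabled,
// so the DAO beans serialize consistently whether Jersey renders them as JSON
// or XML. A minimal standalone use of that same configuration, here applied to
// the AMAttemptInfo DAO introduced above:
import java.io.StringWriter;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
import com.sun.jersey.api.json.JSONMarshaller;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo;

public class NaturalJsonSketch {
  public static void main(String[] args) throws Exception {
    JSONJAXBContext ctx = new JSONJAXBContext(
        JSONConfiguration.natural().rootUnwrapping(false).build(),
        AMAttemptInfo.class);
    JSONMarshaller marshaller = ctx.createJSONMarshaller();
    StringWriter out = new StringWriter();
    marshaller.marshallToJSON(new AMAttemptInfo(), out);
    System.out.println(out);  // e.g. {"amAttempt":{...}}
  }
}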
+ */ +package org.apache.hadoop.mapreduce.v2.hs.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "attempts") +@XmlAccessorType(XmlAccessType.FIELD) +public class AMAttemptsInfo { + + protected ArrayList attempt = new ArrayList(); + + public AMAttemptsInfo() { + } // JAXB needs this + + public void add(AMAttemptInfo info) { + this.attempt.add(info); + } + + public ArrayList getAttempts() { + return this.attempt; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/HistoryInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/HistoryInfo.java new file mode 100644 index 0000000000..54cff30266 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/HistoryInfo.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.mapreduce.v2.hs.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.util.VersionInfo; + +@XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) +public class HistoryInfo { + + protected String hadoopVersion; + protected String hadoopBuildVersion; + protected String hadoopVersionBuiltOn; + + public HistoryInfo() { + this.hadoopVersion = VersionInfo.getVersion(); + this.hadoopBuildVersion = VersionInfo.getBuildVersion(); + this.hadoopVersionBuiltOn = VersionInfo.getDate(); + } + + public String getHadoopVersion() { + return this.hadoopVersion; + } + + public String getHadoopBuildVersion() { + return this.hadoopBuildVersion; + } + + public String getHadoopVersionBuiltOn() { + return this.hadoopVersionBuiltOn; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java new file mode 100644 index 0000000000..bcc9d31b40 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobInfo.java @@ -0,0 +1,295 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
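// Illustrative sketch (not part of this patch): HistoryInfo above is a plain
// JAXB bean, so the XML flavour of a web services response can be bound back
// into the DAO with the standard JAXB runtime. The XML literal below is a
// hand-written assumption of the element names implied by the field names, and
// the version strings are placeholders.
import java.io.StringReader;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Unmarshaller;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo;

public class HistoryInfoBindSketch {
  public static void main(String[] args) throws Exception {
    String xml = "<historyInfo>"
        + "<hadoopVersion>0.24.0-SNAPSHOT</hadoopVersion>"
        + "<hadoopBuildVersion>0.24.0-SNAPSHOT from source</hadoopBuildVersion>"
        + "<hadoopVersionBuiltOn>2011-12-01</hadoopVersionBuiltOn>"
        + "</historyInfo>";
    Unmarshaller u = JAXBContext.newInstance(HistoryInfo.class)
        .createUnmarshaller();
    HistoryInfo info = (HistoryInfo) u.unmarshal(new StringReader(xml));
    System.out.println(info.getHadoopVersion());
  }
}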
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.hs.webapp.dao; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.mapreduce.JobACL; +import org.apache.hadoop.mapreduce.v2.api.records.JobReport; +import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; +import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; +import org.apache.hadoop.mapreduce.v2.api.records.TaskId; +import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.job.Task; +import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo; +import org.apache.hadoop.mapreduce.v2.hs.CompletedJob; +import org.apache.hadoop.mapreduce.v2.util.MRApps; +import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; +import org.apache.hadoop.security.authorize.AccessControlList; + +@XmlRootElement(name = "job") +@XmlAccessorType(XmlAccessType.FIELD) +public class JobInfo { + + protected long startTime; + protected long finishTime; + protected String id; + protected String name; + protected String queue; + protected String user; + protected String state; + protected int mapsTotal; + protected int mapsCompleted; + protected int reducesTotal; + protected int reducesCompleted; + protected boolean uberized; + protected String diagnostics; + protected long avgMapTime = 0; + protected long avgReduceTime = 0; + protected long avgShuffleTime = 0; + protected long avgMergeTime = 0; + protected int failedReduceAttempts = 0; + protected int killedReduceAttempts = 0; + protected int successfulReduceAttempts = 0; + protected int failedMapAttempts = 0; + protected int killedMapAttempts = 0; + protected int successfulMapAttempts = 0; + protected ArrayList acls; + + @XmlTransient + protected int numMaps; + @XmlTransient + protected int numReduces; + + public JobInfo() { + } + + public JobInfo(Job job) { + this.id = MRApps.toString(job.getID()); + JobReport report = job.getReport(); + countTasksAndAttempts(job); + this.mapsTotal = job.getTotalMaps(); + this.mapsCompleted = job.getCompletedMaps(); + this.reducesTotal = job.getTotalReduces(); + this.reducesCompleted = job.getCompletedReduces(); + this.startTime = report.getStartTime(); + this.finishTime = report.getFinishTime(); + this.name = job.getName().toString(); + this.queue = job.getQueueName(); + this.user = job.getUserName(); + this.state = job.getState().toString(); + this.uberized = job.isUber(); + List diagnostics = job.getDiagnostics(); + if (diagnostics != null && !diagnostics.isEmpty()) { + StringBuffer b = new StringBuffer(); + for 
(String diag : diagnostics) { + b.append(diag); + } + this.diagnostics = b.toString(); + } + + this.acls = new ArrayList(); + if (job instanceof CompletedJob) { + Map allacls = job.getJobACLs(); + if (allacls != null) { + for (Map.Entry entry : allacls.entrySet()) { + this.acls.add(new ConfEntryInfo(entry.getKey().getAclName(), entry + .getValue().getAclString())); + } + } + } + } + + public long getNumMaps() { + return numMaps; + } + + public long getNumReduces() { + return numReduces; + } + + public long getAvgMapTime() { + return avgMapTime; + } + + public long getAvgReduceTime() { + return avgReduceTime; + } + + public long getAvgShuffleTime() { + return avgShuffleTime; + } + + public long getAvgMergeTime() { + return avgMergeTime; + } + + public long getFailedReduceAttempts() { + return failedReduceAttempts; + } + + public long getKilledReduceAttempts() { + return killedReduceAttempts; + } + + public long getSuccessfulReduceAttempts() { + return successfulReduceAttempts; + } + + public long getFailedMapAttempts() { + return failedMapAttempts; + } + + public long getKilledMapAttempts() { + return killedMapAttempts; + } + + public long getSuccessfulMapAttempts() { + return successfulMapAttempts; + } + + public ArrayList getAcls() { + return acls; + } + + public int getReducesCompleted() { + return this.reducesCompleted; + } + + public int getReducesTotal() { + return this.reducesTotal; + } + + public int getMapsCompleted() { + return this.mapsCompleted; + } + + public int getMapsTotal() { + return this.mapsTotal; + } + + public String getState() { + return this.state; + } + + public String getUserName() { + return this.user; + } + + public String getName() { + return this.name; + } + + public String getQueueName() { + return this.queue; + } + + public String getId() { + return this.id; + } + + public long getStartTime() { + return this.startTime; + } + + public long getFinishTime() { + return this.finishTime; + } + + public boolean isUber() { + return this.uberized; + } + + public String getDiagnostics() { + return this.diagnostics; + } + + /** + * Go through a job and update the member variables with counts for + * information to output in the page. + * + * @param job + * the job to get counts for. 
+ */ + private void countTasksAndAttempts(Job job) { + numReduces = 0; + numMaps = 0; + final Map tasks = job.getTasks(); + if (tasks == null) { + return; + } + for (Task task : tasks.values()) { + // Attempts counts + Map attempts = task.getAttempts(); + int successful, failed, killed; + for (TaskAttempt attempt : attempts.values()) { + + successful = 0; + failed = 0; + killed = 0; + if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) { + // Do Nothing + } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) { + // Do Nothing + } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt + .getState())) { + ++successful; + } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) { + ++failed; + } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) { + ++killed; + } + + switch (task.getType()) { + case MAP: + successfulMapAttempts += successful; + failedMapAttempts += failed; + killedMapAttempts += killed; + if (attempt.getState() == TaskAttemptState.SUCCEEDED) { + numMaps++; + avgMapTime += (attempt.getFinishTime() - attempt.getLaunchTime()); + } + break; + case REDUCE: + successfulReduceAttempts += successful; + failedReduceAttempts += failed; + killedReduceAttempts += killed; + if (attempt.getState() == TaskAttemptState.SUCCEEDED) { + numReduces++; + avgShuffleTime += (attempt.getShuffleFinishTime() - attempt + .getLaunchTime()); + avgMergeTime += attempt.getSortFinishTime() + - attempt.getLaunchTime(); + avgReduceTime += (attempt.getFinishTime() - attempt + .getShuffleFinishTime()); + } + break; + } + } + } + + if (numMaps > 0) { + avgMapTime = avgMapTime / numMaps; + } + + if (numReduces > 0) { + avgReduceTime = avgReduceTime / numReduces; + avgShuffleTime = avgShuffleTime / numReduces; + avgMergeTime = avgMergeTime / numReduces; + } + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobsInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobsInfo.java new file mode 100644 index 0000000000..cb17c77c6b --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/JobsInfo.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by joblicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
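// Illustrative sketch (not part of this patch): the shape of the averaging done
// in countTasksAndAttempts() above, reduced to the map side only. Times are in
// milliseconds and the sample values are made up.
public class AvgMapTimeSketch {
  public static void main(String[] args) {
    long[][] succeededMapAttempts = {
        // {launchTime, finishTime} per SUCCEEDED map attempt
        {1000L, 5000L},
        {1200L, 4200L},
    };
    long avgMapTime = 0;
    int numMaps = 0;
    for (long[] attempt : succeededMapAttempts) {
      numMaps++;
      avgMapTime += attempt[1] - attempt[0];  // finishTime - launchTime
    }
    if (numMaps > 0) {
      avgMapTime = avgMapTime / numMaps;      // integer average, as in the patch
    }
    System.out.println("avgMapTime=" + avgMapTime);  // prints 3500
  }
}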
+ */ +package org.apache.hadoop.mapreduce.v2.hs.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "jobs") +@XmlAccessorType(XmlAccessType.FIELD) +public class JobsInfo { + + protected ArrayList job = new ArrayList(); + + public JobsInfo() { + } // JAXB needs this + + public void add(JobInfo jobInfo) { + this.job.add(jobInfo); + } + + public ArrayList getJobs() { + return this.job; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java index c388759a4b..b17cb427d3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java @@ -168,7 +168,7 @@ public void testReconnectOnAMRestart() throws IOException { GetJobReportResponse jobReportResponse1 = mock(GetJobReportResponse.class); when(jobReportResponse1.getJobReport()).thenReturn( MRBuilderUtils.newJobReport(jobId, "jobName-firstGen", "user", - JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null)); + JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null, false)); // First AM returns a report with jobName firstGen and simulates AM shutdown // on second invocation. @@ -180,7 +180,7 @@ public void testReconnectOnAMRestart() throws IOException { GetJobReportResponse jobReportResponse2 = mock(GetJobReportResponse.class); when(jobReportResponse2.getJobReport()).thenReturn( MRBuilderUtils.newJobReport(jobId, "jobName-secondGen", "user", - JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null)); + JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "anything", null, false)); // Second AM generation returns a report with jobName secondGen MRClientProtocol secondGenAMProxy = mock(MRClientProtocol.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestUberAM.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestUberAM.java index f2f87b07b6..868c2d5ae3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestUberAM.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestUberAM.java @@ -49,6 +49,7 @@ public static void setup() throws IOException { } @Override + @Test public void testSleepJob() throws IOException, InterruptedException, ClassNotFoundException { if (mrCluster != null) { @@ -84,6 +85,7 @@ protected void verifySleepJobCounters(Job job) throws InterruptedException, } @Override + @Test public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException { super.testRandomWriter(); @@ -101,6 +103,7 @@ protected void verifyRandomWriterCounters(Job job) } @Override + @Test public void testFailingMapper() throws IOException, InterruptedException, ClassNotFoundException { 
LOG.info("\n\n\nStarting uberized testFailingMapper()."); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml index 2094a8879b..8eaef23c7e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml @@ -238,7 +238,7 @@ com.google.inject.extensions guice-servlet - 2.0 + 3.0 junit diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/AMRMProtocol.genavro b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/AMRMProtocol.genavro deleted file mode 100644 index d36922e951..0000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/AMRMProtocol.genavro +++ /dev/null @@ -1,27 +0,0 @@ -@namespace("org.apache.hadoop.yarn") -protocol AMRMProtocol { - - import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro"; - - // Scheduler - record Priority { - int priority; - } - - record ResourceRequest { - Priority priority; - string hostName; - Resource capability; - int numContainers; - } - record AMResponse { - boolean reboot; - int responseId; - array containers; - } - - void registerApplicationMaster(ApplicationMaster applicationMaster) throws YarnRemoteException; - void finishApplicationMaster(ApplicationMaster applicationMaster) throws YarnRemoteException; - AMResponse allocate(ApplicationStatus status, array ask, array release) throws YarnRemoteException; - - } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/ClientRMProtocol.genavro b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/ClientRMProtocol.genavro deleted file mode 100644 index a37fc03085..0000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/ClientRMProtocol.genavro +++ /dev/null @@ -1,45 +0,0 @@ -@namespace("org.apache.hadoop.yarn") -protocol ClientRMProtocol { - - import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro"; - - record Priority { - int priority; - } - - record ApplicationSubmissionContext { - ApplicationID applicationId; - union {null, string} applicationName; - Resource masterCapability; // TODO: Needs RM validation - - //all the files required by the container to run the ApplicationMaster - //KEY-> destination dir name - //VALUE-> source path - map resources; - union {null, map} resources_todo; - - // TODO - Remove fsTokens (url encoded) - union {null, array} fsTokens; - union {null, bytes} fsTokens_todo; - - //env to be set before launching the command for ApplicationMaster - //KEY-> env variable name - //VALUE -> env variable value. - map environment; - //command-line of the container that is going to launch the ApplicationMaster. - array command; - union {null, string} queue; - union {null, Priority} priority; - string user; // TODO: Shouldn't pass it like this. 
- } - - record YarnClusterMetrics { - int numNodeManagers; - } - - ApplicationID getNewApplicationId() throws YarnRemoteException; - ApplicationMaster getApplicationMaster(ApplicationID applicationId) throws YarnRemoteException; - void submitApplication(ApplicationSubmissionContext context) throws YarnRemoteException; - void finishApplication(ApplicationID applicationId) throws YarnRemoteException; - YarnClusterMetrics getClusterMetrics() throws YarnRemoteException; -} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/ContainerManager.genavro b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/ContainerManager.genavro deleted file mode 100644 index 0ba1fb85f8..0000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/ContainerManager.genavro +++ /dev/null @@ -1,37 +0,0 @@ -@namespace("org.apache.hadoop.yarn") -protocol ContainerManager { - - import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro"; - - record ContainerLaunchContext { - ContainerID id; - string user; // TODO: Shouldn't pass it like this. - Resource resource; // TODO: Needs RM validation - union {null, map} resources; - - union {null, bytes} containerTokens; // FileSystem related and other application specific tokens. - union {null, map} serviceData; - - //env to be set before launching the command - //KEY-> env variable name - //VALUE -> env variable value. - map env; - - //commandline to launch the container. All resources are downloaded in the - //working directory of the command. - array command; - } - - record ContainerStatus { - ContainerID containerID; - ContainerState state; - int exitStatus; - } - - void startContainer(ContainerLaunchContext container) throws YarnRemoteException; - void stopContainer(ContainerID containerID) throws YarnRemoteException; - void cleanupContainer(ContainerID containerID) throws YarnRemoteException; - - ContainerStatus getContainerStatus(ContainerID containerID) throws YarnRemoteException; - -} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/yarn-types.genavro b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/yarn-types.genavro deleted file mode 100644 index 51d20773a6..0000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/avro/yarn-types.genavro +++ /dev/null @@ -1,109 +0,0 @@ -@namespace("org.apache.hadoop.yarn") -protocol types { - - record ApplicationID { - int id; - long clusterTimeStamp; - } - - record ContainerID { - ApplicationID appID; // the application id to which this container belong. - int id;// unique string for this application - } - - error YarnRemoteException { - union { null, string } message; - union { null, string } trace; //stackTrace - union { null, YarnRemoteException } cause; - } - - record Resource { - int memory; - //int diskspace; - } - - // State of the container on the ContainerManager. 
- enum ContainerState { - INTIALIZING, - RUNNING, - COMPLETE - } - - record ContainerToken { - bytes identifier; - bytes password; - string kind; - string service; - } - - record Container { - ContainerID id; - string hostName; - Resource resource; - ContainerState state; - union {ContainerToken, null} containerToken; - } - - enum ApplicationState { - PENDING, - ALLOCATING, - ALLOCATED, - EXPIRED_PENDING, - LAUNCHING, - LAUNCHED, - RUNNING, - PAUSED, - CLEANUP, - COMPLETED, - KILLED, - FAILED - } - - record ApplicationStatus { - int responseID; // TODO: This should be renamed as previousResponseID - ApplicationID applicationId; - float progress; - long lastSeen; - } - - record ApplicationMaster { - ApplicationID applicationId; - union { null, string } host; - int rpcPort; - int httpPort; - ApplicationStatus status; - ApplicationState state; - union { null, string } clientToken; - } - - record URL { - string scheme; - union { null, string } host; - int port; - string file; - } - - enum LocalResourceVisibility { - // accessible to applications from all users - PUBLIC, - // accessible only to applications from the submitting user - PRIVATE, - // accessible only to this application - APPLICATION - } - - enum LocalResourceType { - // an archive to be expanded - ARCHIVE, - // uninterpreted file - FILE - } - - record LocalResource { - URL resource; - long size; - long timestamp; - LocalResourceType type; - LocalResourceVisibility state; - } -} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java index a9066070f5..c4d0d78ea5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java @@ -24,7 +24,6 @@ * This is the API for the applications comprising of constants that YARN sets * up for the applications and the containers. * - * TODO: Should also be defined in avro/pb IDLs * TODO: Investigate the semantics and security of each cross-boundary refs. 
*/ public interface ApplicationConstants { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index fa4ffa1656..1133d78816 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -91,12 +91,7 @@ public class YarnConfiguration extends Configuration { public static final String RM_CLIENT_THREAD_COUNT = RM_PREFIX + "client.thread-count"; public static final int DEFAULT_RM_CLIENT_THREAD_COUNT = 10; - - /** The expiry interval for application master reporting.*/ - public static final String RM_AM_EXPIRY_INTERVAL_MS = - RM_PREFIX + "am.liveness-monitor.expiry-interval-ms"; - public static final int DEFAULT_RM_AM_EXPIRY_INTERVAL_MS = 600000; - + /** The Kerberos principal for the resource manager.*/ public static final String RM_PRINCIPAL = RM_PREFIX + "principal"; @@ -126,7 +121,17 @@ public class YarnConfiguration extends Configuration { public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8025; public static final String DEFAULT_RM_RESOURCE_TRACKER_ADDRESS = "0.0.0.0:" + DEFAULT_RM_RESOURCE_TRACKER_PORT; - + + /** The expiry interval for application master reporting.*/ + public static final String RM_AM_EXPIRY_INTERVAL_MS = + YARN_PREFIX + "am.liveness-monitor.expiry-interval-ms"; + public static final int DEFAULT_RM_AM_EXPIRY_INTERVAL_MS = 600000; + + /** How long to wait until a node manager is considered dead.*/ + public static final String RM_NM_EXPIRY_INTERVAL_MS = + YARN_PREFIX + "nm.liveness-monitor.expiry-interval-ms"; + public static final int DEFAULT_RM_NM_EXPIRY_INTERVAL_MS = 600000; + /** Are acls enabled.*/ public static final String YARN_ACL_ENABLE = YARN_PREFIX + "acl.enable"; @@ -160,12 +165,7 @@ public class YarnConfiguration extends Configuration { /** The keytab for the resource manager.*/ public static final String RM_KEYTAB = RM_PREFIX + "keytab"; - - /** How long to wait until a node manager is considered dead.*/ - public static final String RM_NM_EXPIRY_INTERVAL_MS = - RM_PREFIX + "nm.liveness-monitor.expiry-interval-ms"; - public static final int DEFAULT_RM_NM_EXPIRY_INTERVAL_MS = 600000; - + /** How long to wait until a container is considered dead.*/ public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = RM_PREFIX + "rm.container-allocation.expiry-interval-ms"; @@ -293,10 +293,16 @@ public class YarnConfiguration extends Configuration { public static final String NM_LOG_DIRS = NM_PREFIX + "log-dirs"; public static final String DEFAULT_NM_LOG_DIRS = "/tmp/logs"; + /** Interval at which the delayed token removal thread runs */ + public static final String RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS = + RM_PREFIX + "delayed.delegation-token.removal-interval-ms"; + public static final long DEFAULT_RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS = + 30000l; + /** Whether to enable log aggregation */ - public static final String NM_LOG_AGGREGATION_ENABLED = NM_PREFIX + public static final String LOG_AGGREGATION_ENABLED = YARN_PREFIX + "log-aggregation-enable"; - public static final boolean DEFAULT_NM_LOG_AGGREGATION_ENABLED = false; + public static final boolean DEFAULT_LOG_AGGREGATION_ENABLED = false; /** * Number of seconds to retain logs on 
the NodeManager. Only applicable if Log diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnRPC.java deleted file mode 100644 index 3ad757da57..0000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/HadoopYarnRPC.java +++ /dev/null @@ -1,80 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop.yarn.ipc; - -import java.io.IOException; -import java.net.InetSocketAddress; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.AvroSpecificRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.security.token.SecretManager; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.yarn.YarnException; - -/** - * This uses Hadoop RPC. Uses a tunnel AvroSpecificRpcEngine over - * Hadoop connection. - * This does not give cross-language wire compatibility, since the Hadoop - * RPC wire format is non-standard, but it does permit use of Avro's protocol - * versioning features for inter-Java RPCs. 
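// Illustrative sketch (not part of this patch): after the change above, the AM
// and NM liveness-monitor expiry keys and the log-aggregation switch hang off
// YARN_PREFIX rather than the RM/NM prefixes, but they are still read the
// usual way through Configuration getters:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    int amExpiryMs = conf.getInt(
        YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,
        YarnConfiguration.DEFAULT_RM_AM_EXPIRY_INTERVAL_MS);
    boolean logAggregation = conf.getBoolean(
        YarnConfiguration.LOG_AGGREGATION_ENABLED,
        YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED);
    System.out.println(amExpiryMs + " " + logAggregation);
  }
}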
- */ -public class HadoopYarnRPC extends YarnRPC { - - private static final Log LOG = LogFactory.getLog(HadoopYarnRPC.class); - - @Override - public Object getProxy(Class protocol, InetSocketAddress addr, - Configuration conf) { - LOG.debug("Creating a HadoopYarnRpc proxy for protocol " + protocol); - RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class); - try { - return RPC.getProxy(protocol, 1, addr, conf); - } catch (IOException e) { - throw new YarnException(e); - } - } - - @Override - public void stopProxy(Object proxy, Configuration conf) { - RPC.stopProxy(proxy); - } - - @Override - public Server getServer(Class protocol, Object instance, - InetSocketAddress addr, Configuration conf, - SecretManager secretManager, - int numHandlers) { - LOG.debug("Creating a HadoopYarnRpc server for protocol " + protocol + - " with " + numHandlers + " handlers"); - RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class); - final RPC.Server hadoopServer; - try { - hadoopServer = RPC.getServer(protocol, instance, addr.getHostName(), - addr.getPort(), numHandlers, false, conf, secretManager); - } catch (IOException e) { - throw new YarnException(e); - } - return hadoopServer; - } - -} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/BadRequestException.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/BadRequestException.java new file mode 100644 index 0000000000..183d06f800 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/BadRequestException.java @@ -0,0 +1,39 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +package org.apache.hadoop.yarn.webapp; + +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response.Status; + +public class BadRequestException extends WebApplicationException { + + private static final long serialVersionUID = 1L; + + public BadRequestException() { + super(Status.BAD_REQUEST); + } + + public BadRequestException(java.lang.Throwable cause) { + super(cause, Status.BAD_REQUEST); + } + + public BadRequestException(String msg) { + super(new Exception(msg), Status.BAD_REQUEST); + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/DefaultWrapperServlet.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/DefaultWrapperServlet.java new file mode 100644 index 0000000000..92f3a6aa31 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/DefaultWrapperServlet.java @@ -0,0 +1,50 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.webapp; + +import java.io.IOException; + +import javax.servlet.RequestDispatcher; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletRequestWrapper; +import javax.servlet.http.HttpServletResponse; + +import com.google.inject.Singleton; + +@Singleton +public class DefaultWrapperServlet extends HttpServlet { + +private static final long serialVersionUID = 1L; + +public void doGet(HttpServletRequest req, HttpServletResponse resp) +throws ServletException, IOException { + RequestDispatcher rd = getServletContext().getNamedDispatcher("default"); + + HttpServletRequest wrapped = new HttpServletRequestWrapper(req) { + public String getServletPath() { + return ""; + } + }; + + rd.forward(wrapped, resp); + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java new file mode 100644 index 0000000000..8fc886536f --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/GenericExceptionHandler.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
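// Illustrative sketch (not part of this patch): BadRequestException above is an
// unchecked WebApplicationException carrying a 400 status, so a resource method
// can throw it on invalid input and let Jersey (or the GenericExceptionHandler
// that follows) render the error. The resource class and its path are made up
// for the example.
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.QueryParam;
import org.apache.hadoop.yarn.webapp.BadRequestException;

@Path("/example")
public class LimitResourceSketch {
  @GET
  public String get(@QueryParam("limit") String limit) {
    int n;
    try {
      n = Integer.parseInt(limit);
    } catch (NumberFormatException e) {
      throw new BadRequestException("limit must be an integer");
    }
    if (n < 0) {
      throw new BadRequestException("limit must be non-negative");
    }
    return "limit=" + n;
  }
}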
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.webapp; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.Map; +import java.util.TreeMap; + +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.ext.ExceptionMapper; +import javax.ws.rs.ext.Provider; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.mortbay.util.ajax.JSON; + +import com.google.inject.Singleton; + +/** + * Handle webservices jersey exceptions and create json response in the format: + * { "RemoteException" : + * { + * "exception" : , + * "javaClassName" : , + * "message" : + * } + * } + */ +@Singleton +@Provider +public class GenericExceptionHandler implements ExceptionMapper { + public static final Log LOG = LogFactory + .getLog(GenericExceptionHandler.class); + + private @Context + HttpServletResponse response; + + @Override + public Response toResponse(Exception e) { + if (LOG.isTraceEnabled()) { + LOG.trace("GOT EXCEPITION", e); + } + // Don't catch this as filter forward on 404 + // (ServletContainer.FEATURE_FILTER_FORWARD_ON_404) + // won't work and the web UI won't work! 
+ if (e instanceof com.sun.jersey.api.NotFoundException) { + return ((com.sun.jersey.api.NotFoundException) e).getResponse(); + } + // clear content type + response.setContentType(null); + + // Convert exception + if (e instanceof RemoteException) { + e = ((RemoteException) e).unwrapRemoteException(); + } + + // Map response status + final Response.Status s; + if (e instanceof SecurityException) { + s = Response.Status.UNAUTHORIZED; + } else if (e instanceof AuthorizationException) { + s = Response.Status.UNAUTHORIZED; + } else if (e instanceof FileNotFoundException) { + s = Response.Status.NOT_FOUND; + } else if (e instanceof NotFoundException) { + s = Response.Status.NOT_FOUND; + } else if (e instanceof IOException) { + s = Response.Status.NOT_FOUND; + } else if (e instanceof UnsupportedOperationException) { + s = Response.Status.BAD_REQUEST; + } else if (e instanceof IllegalArgumentException) { + s = Response.Status.BAD_REQUEST; + } else if (e instanceof NumberFormatException) { + s = Response.Status.BAD_REQUEST; + } else if (e instanceof BadRequestException) { + s = Response.Status.BAD_REQUEST; + } else { + LOG.warn("INTERNAL_SERVER_ERROR", e); + s = Response.Status.INTERNAL_SERVER_ERROR; + } + + // convert to json + final Map m = new TreeMap(); + m.put("exception", e.getClass().getSimpleName()); + m.put("message", e.getMessage()); + m.put("javaClassName", e.getClass().getName()); + final Map m2 = new TreeMap(); + m2.put(RemoteException.class.getSimpleName(), m); + final String js = JSON.toString(m2); + + return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js) + .build(); + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroSpecificRpcEngine.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/NotFoundException.java similarity index 50% rename from hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroSpecificRpcEngine.java rename to hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/NotFoundException.java index 995a13a9c7..d78930e94f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroSpecificRpcEngine.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/NotFoundException.java @@ -16,30 +16,29 @@ * limitations under the License. */ -package org.apache.hadoop.ipc; +package org.apache.hadoop.yarn.webapp; -import java.io.IOException; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response.Status; -import org.apache.avro.ipc.Responder; -import org.apache.avro.ipc.Transceiver; -import org.apache.avro.ipc.specific.SpecificRequestor; -import org.apache.avro.ipc.specific.SpecificResponder; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * AvroRpcEngine which uses Avro's "specific" APIs. The protocols generated - * via Avro IDL needs to use this Engine. +/* + * Created our own NotFoundException because com.sun.jersey.api.NotFoundException + * sets the Response and therefore won't be handled by the GenericExceptionhandler + * to fill in correct response. 
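// Illustrative sketch (not part of this patch): the error body produced by
// GenericExceptionHandler above is just a two-level map rendered with
// org.mortbay.util.ajax.JSON, so the wire format for a missing job looks like
// the output of this standalone snippet (the job id is a placeholder).
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.ipc.RemoteException;
import org.mortbay.util.ajax.JSON;

public class ErrorBodySketch {
  public static void main(String[] args) {
    Exception e = new Exception("job, job_1325026111346_0001, is not found");
    Map<String, Object> m = new TreeMap<String, Object>();
    m.put("exception", e.getClass().getSimpleName());
    m.put("message", e.getMessage());
    m.put("javaClassName", e.getClass().getName());
    Map<String, Object> m2 = new TreeMap<String, Object>();
    m2.put(RemoteException.class.getSimpleName(), m);
    System.out.println(JSON.toString(m2));
    // {"RemoteException":{"exception":"Exception", ... }}
  }
}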
*/ -@InterfaceStability.Evolving -public class AvroSpecificRpcEngine extends AvroRpcEngine { +public class NotFoundException extends WebApplicationException { - protected SpecificRequestor createRequestor(Class protocol, - Transceiver transeiver) throws IOException { - return new SpecificRequestor(protocol, transeiver); + private static final long serialVersionUID = 1L; + + public NotFoundException() { + super(Status.NOT_FOUND); } - protected Responder createResponder(Class iface, Object impl) { - return new SpecificResponder(iface, impl); + public NotFoundException(java.lang.Throwable cause) { + super(cause, Status.NOT_FOUND); } + public NotFoundException(String msg) { + super(new Exception(msg), Status.NOT_FOUND); + } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java index f83843e97e..198ab3ed21 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java @@ -18,23 +18,28 @@ package org.apache.hadoop.yarn.webapp; +import static com.google.common.base.Preconditions.checkNotNull; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.HttpServer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import com.google.common.base.CharMatcher; -import static com.google.common.base.Preconditions.*; import com.google.common.base.Splitter; import com.google.common.collect.Lists; import com.google.inject.Provides; import com.google.inject.servlet.GuiceFilter; import com.google.inject.servlet.ServletModule; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.http.HttpServer; -import org.apache.hadoop.yarn.util.StringHelper; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import com.sun.jersey.api.core.ResourceConfig; +import com.sun.jersey.core.util.FeaturesAndProperties; +import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; +import com.sun.jersey.spi.container.servlet.ServletContainer; /** * @see WebApps for a usage example @@ -45,9 +50,10 @@ public abstract class WebApp extends ServletModule { public enum HTTP { GET, POST, HEAD, PUT, DELETE }; private volatile String name; - private volatile List servePathSpecs = new ArrayList(); + private volatile List servePathSpecs = new ArrayList(); // path to redirect to if user goes to "/" private volatile String redirectPath; + private volatile String wsName; private volatile Configuration conf; private volatile HttpServer httpServer; private volatile GuiceFilter guiceFilter; @@ -104,18 +110,20 @@ public void joinThread() { void addServePathSpec(String path) { this.servePathSpecs.add(path); } - public String[] getServePathSpecs() { + public String[] getServePathSpecs() { return this.servePathSpecs.toArray(new String[this.servePathSpecs.size()]); } /** - * Set a path to redirect the user to if they just go to "/". For - * instance "/" goes to "/yarn/apps". This allows the filters to + * Set a path to redirect the user to if they just go to "/". For + * instance "/" goes to "/yarn/apps". 
This allows the filters to * more easily differentiate the different webapps. * @param path the path to redirect to */ void setRedirectPath(String path) { this.redirectPath = path; } + void setWebServices (String name) { this.wsName = name; } + public String getRedirectPath() { return this.redirectPath; } void setHostClass(Class cls) { @@ -129,10 +137,32 @@ void setGuiceFilter(GuiceFilter instance) { @Override public void configureServlets() { setup(); + serve("/", "/__stop").with(Dispatcher.class); + for (String path : this.servePathSpecs) { serve(path).with(Dispatcher.class); } + + // Add in the web services filters/serves if app has them. + // Using Jersey/guice integration module. If user has web services + // they must have also bound a default one in their webapp code. + if (this.wsName != null) { + // There seems to be an issue with the guice/jersey integration + // where we have to list the stuff we don't want it to serve + // through the guicecontainer. In this case its everything except + // the the web services api prefix. We can't just change the filter + // from /* below - that doesn't work. + String regex = "(?!/" + this.wsName + ")"; + serveRegex(regex).with(DefaultWrapperServlet.class); + + Map params = new HashMap(); + params.put(ResourceConfig.FEATURE_IMPLICIT_VIEWABLES, "true"); + params.put(ServletContainer.FEATURE_FILTER_FORWARD_ON_404, "true"); + params.put(FeaturesAndProperties.FEATURE_XMLROOTELEMENT_PROCESSING, "true"); + filter("/*").through(GuiceContainer.class, params); + } + } /** diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 22387f067d..425d45bc08 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -72,6 +72,7 @@ static class ServletStruct { } final String name; + final String wsName; final Class api; final T application; String bindAddress = "0.0.0.0"; @@ -82,10 +83,15 @@ static class ServletStruct { private final HashSet servlets = new HashSet(); private final HashMap attributes = new HashMap(); - Builder(String name, Class api, T application) { + Builder(String name, Class api, T application, String wsName) { this.name = name; this.api = api; this.application = application; + this.wsName = wsName; + } + + Builder(String name, Class api, T application) { + this(name, api, application, null); } public Builder at(String bindAddress) { @@ -142,6 +148,7 @@ public void setup() { }; } webapp.setName(name); + webapp.setWebServices(wsName); String basePath = "/" + name; webapp.setRedirectPath(basePath); if (basePath.equals("/")) { @@ -150,6 +157,14 @@ public void setup() { webapp.addServePathSpec(basePath); webapp.addServePathSpec(basePath + "/*"); } + if (wsName != null && !wsName.equals(basePath)) { + if (wsName.equals("/")) { + webapp.addServePathSpec("/*"); + } else { + webapp.addServePathSpec("/" + wsName); + webapp.addServePathSpec("/" + wsName + "/*"); + } + } if (conf == null) { conf = new Configuration(); } @@ -231,6 +246,20 @@ private String inferHostClass() { } } + /** + * Create a new webapp builder. 
+ * @see WebApps for a complete example + * @param application (holding the embedded webapp) type + * @param prefix of the webapp + * @param api the api class for the application + * @param app the application instance + * @param wsPrefix the prefix for the webservice api for this app + * @return a webapp builder + */ + public static Builder $for(String prefix, Class api, T app, String wsPrefix) { + return new Builder(prefix, api, app, wsPrefix); + } + /** * Create a new webapp builder. * @see WebApps for a complete example diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java index 5698e04dc8..766bf867b9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java @@ -53,8 +53,8 @@ protected void render(Block html) { logEntity = containerId.toString(); } - if (!conf.getBoolean(YarnConfiguration.NM_LOG_AGGREGATION_ENABLED, - YarnConfiguration.DEFAULT_NM_LOG_AGGREGATION_ENABLED)) { + if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, + YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { html.h1() ._("Aggregation is not enabled. Try the nodemanager at " + nodeId) ._(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index a53175b94d..e007ad6fc6 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -58,16 +58,6 @@ public class TestRPC { private static final String EXCEPTION_CAUSE = "exception cause"; private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); -// @Test -// public void testAvroRPC() throws Exception { -// test(AvroYarnRPC.class.getName()); -// } -// -// @Test -// public void testHadoopNativeRPC() throws Exception { -// test(HadoopYarnRPC.class.getName()); -// } - @Test public void testUnknownCall() { Configuration conf = new Configuration(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/avro/ResourceTracker.genavro b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/avro/ResourceTracker.genavro deleted file mode 100644 index b1da44fc98..0000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/avro/ResourceTracker.genavro +++ /dev/null @@ -1,40 +0,0 @@ -@namespace("org.apache.hadoop.yarn") -protocol ResourceTracker { - - import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro"; - - // ResourceTracker - record NodeID { - int id; - } - - record NodeHealthStatus { - boolean isNodeHealthy; - union {string, null} healthReport; - long lastHealthReportTime; - } - - record NodeStatus { - NodeID nodeId; - int responseId; - long lastSeen; - map> containers; - NodeHealthStatus nodeHealthStatus; - } - - record RegistrationResponse { - NodeID nodeID; - union {bytes, null} secretKey; - } - - record 
HeartbeatResponse { - int responseId; - boolean reboot; - array containersToCleanup; - array appplicationsToCleanup; - } - - RegistrationResponse registerNodeManager(string node, org.apache.hadoop.yarn.Resource resource) throws YarnRemoteException; - HeartbeatResponse nodeHeartbeat(NodeStatus nodeStatus) throws YarnRemoteException; - -} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java index 7822789eb0..25602663c5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java @@ -19,6 +19,7 @@ import java.util.List; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; @@ -33,6 +34,9 @@ public interface NodeStatus { public abstract void setContainersStatuses( List containersStatuses); + public abstract List getKeepAliveApplications(); + public abstract void setKeepAliveApplications(List appIds); + NodeHealthStatus getNodeHealthStatus(); void setNodeHealthStatus(NodeHealthStatus healthStatus); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java index 03d5e8cdbd..8b5ff01a94 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java @@ -23,13 +23,16 @@ import java.util.Iterator; import java.util.List; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.ProtoBase; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.NodeHealthStatusPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeHealthStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; @@ -37,7 +40,9 @@ import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProtoOrBuilder; import org.apache.hadoop.yarn.server.api.records.NodeStatus; -public class NodeStatusPBImpl extends ProtoBase implements NodeStatus { + +public class NodeStatusPBImpl extends ProtoBase implements + 
NodeStatus { NodeStatusProto proto = NodeStatusProto.getDefaultInstance(); NodeStatusProto.Builder builder = null; boolean viaProto = false; @@ -45,6 +50,7 @@ public class NodeStatusPBImpl extends ProtoBase implements Node private NodeId nodeId = null; private List containers = null; private NodeHealthStatus nodeHealthStatus = null; + private List keepAliveApplications = null; public NodeStatusPBImpl() { builder = NodeStatusProto.newBuilder(); @@ -55,15 +61,14 @@ public NodeStatusPBImpl(NodeStatusProto proto) { viaProto = true; } - public NodeStatusProto getProto() { - - mergeLocalToProto(); + public synchronized NodeStatusProto getProto() { + mergeLocalToProto(); proto = viaProto ? proto : builder.build(); viaProto = true; return proto; } - private void mergeLocalToBuilder() { + private synchronized void mergeLocalToBuilder() { if (this.nodeId != null) { builder.setNodeId(convertToProtoFormat(this.nodeId)); } @@ -73,9 +78,12 @@ private void mergeLocalToBuilder() { if (this.nodeHealthStatus != null) { builder.setNodeHealthStatus(convertToProtoFormat(this.nodeHealthStatus)); } + if (this.keepAliveApplications != null) { + addKeepAliveApplicationsToProto(); + } } - private void mergeLocalToProto() { + private synchronized void mergeLocalToProto() { if (viaProto) maybeInitBuilder(); mergeLocalToBuilder(); @@ -84,14 +92,14 @@ private void mergeLocalToProto() { viaProto = true; } - private void maybeInitBuilder() { + private synchronized void maybeInitBuilder() { if (viaProto || builder == null) { builder = NodeStatusProto.newBuilder(proto); } viaProto = false; } - private void addContainersToProto() { + private synchronized void addContainersToProto() { maybeInitBuilder(); builder.clearContainersStatuses(); if (containers == null) @@ -124,19 +132,53 @@ public void remove() { }; builder.addAllContainersStatuses(iterable); } + + private synchronized void addKeepAliveApplicationsToProto() { + maybeInitBuilder(); + builder.clearKeepAliveApplications(); + if (keepAliveApplications == null) + return; + Iterable iterable = new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + + Iterator iter = keepAliveApplications.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public ApplicationIdProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + + } + }; + + } + }; + builder.addAllKeepAliveApplications(iterable); + } @Override - public int getResponseId() { + public synchronized int getResponseId() { NodeStatusProtoOrBuilder p = viaProto ? proto : builder; return p.getResponseId(); } @Override - public void setResponseId(int responseId) { + public synchronized void setResponseId(int responseId) { maybeInitBuilder(); builder.setResponseId(responseId); } @Override - public NodeId getNodeId() { + public synchronized NodeId getNodeId() { NodeStatusProtoOrBuilder p = viaProto ? 
proto : builder; if (this.nodeId != null) { return this.nodeId; @@ -148,8 +190,9 @@ public NodeId getNodeId() { return this.nodeId; } + @Override - public void setNodeId(NodeId nodeId) { + public synchronized void setNodeId(NodeId nodeId) { maybeInitBuilder(); if (nodeId == null) builder.clearNodeId(); @@ -158,20 +201,35 @@ public void setNodeId(NodeId nodeId) { } @Override - public List getContainersStatuses() { + public synchronized List getContainersStatuses() { initContainers(); return this.containers; } @Override - public void setContainersStatuses(List containers) { + public synchronized void setContainersStatuses( + List containers) { if (containers == null) { builder.clearContainersStatuses(); } this.containers = containers; } + + @Override + public synchronized List getKeepAliveApplications() { + initKeepAliveApplications(); + return this.keepAliveApplications; + } + + @Override + public synchronized void setKeepAliveApplications(List appIds) { + if (appIds == null) { + builder.clearKeepAliveApplications(); + } + this.keepAliveApplications = appIds; + } - private void initContainers() { + private synchronized void initContainers() { if (this.containers != null) { return; } @@ -185,8 +243,22 @@ private void initContainers() { } + private synchronized void initKeepAliveApplications() { + if (this.keepAliveApplications != null) { + return; + } + NodeStatusProtoOrBuilder p = viaProto ? proto : builder; + List list = p.getKeepAliveApplicationsList(); + this.keepAliveApplications = new ArrayList(); + + for (ApplicationIdProto c : list) { + this.keepAliveApplications.add(convertFromProtoFormat(c)); + } + + } + @Override - public NodeHealthStatus getNodeHealthStatus() { + public synchronized NodeHealthStatus getNodeHealthStatus() { NodeStatusProtoOrBuilder p = viaProto ? 
proto : builder; if (nodeHealthStatus != null) { return nodeHealthStatus; @@ -199,7 +271,7 @@ public NodeHealthStatus getNodeHealthStatus() { } @Override - public void setNodeHealthStatus(NodeHealthStatus healthStatus) { + public synchronized void setNodeHealthStatus(NodeHealthStatus healthStatus) { maybeInitBuilder(); if (healthStatus == null) { builder.clearNodeHealthStatus(); @@ -231,4 +303,12 @@ private ContainerStatusPBImpl convertFromProtoFormat(ContainerStatusProto c) { private ContainerStatusProto convertToProtoFormat(ContainerStatus c) { return ((ContainerStatusPBImpl)c).getProto(); } -} + + private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto c) { + return new ApplicationIdPBImpl(c); + } + + private ApplicationIdProto convertToProtoFormat(ApplicationId c) { + return ((ApplicationIdPBImpl)c).getProto(); + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto index b2e995f45a..4f5543e5ca 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto @@ -34,6 +34,7 @@ message NodeStatusProto { optional int32 response_id = 2; repeated ContainerStatusProto containersStatuses = 3; optional NodeHealthStatusProto nodeHealthStatus = 4; + repeated ApplicationIdProto keep_alive_applications = 5; } message RegistrationResponseProto { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml index fdb7cb6c5b..25e25cd0a7 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml @@ -72,7 +72,7 @@ The expiry interval for application master reporting. - yarn.resourcemanager.am.liveness-monitor.expiry-interval-ms + yarn.am.liveness-monitor.expiry-interval-ms 600000 @@ -155,7 +155,7 @@ How long to wait until a node manager is considered dead. - yarn.resourcemanager.nm.liveness-monitor.expiry-interval-ms + yarn.nm.liveness-monitor.expiry-interval-ms 600000 @@ -210,6 +210,12 @@ 10000 + + Interval at which the delayed token removal thread runs + yarn.resourcemanager.delayed.delegation-token.removal-interval-ms + 30000 + + address of node manager IPC. 
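[Editor's note, illustrative only and not part of the patch: the NodeStatusPBImpl, NodeStatus, and yarn_server_common_protos.proto changes above add a keep-alive application list to the node heartbeat record. The sketch below shows how the new accessors might be exercised, using the record-factory pattern already used elsewhere in this patch; the ApplicationId setters (setClusterTimestamp, setId) are assumed to exist on this branch and the class name is hypothetical.]

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;

public class NodeStatusKeepAliveSketch {
  public static void main(String[] args) {
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);

    // Build an empty NodeStatus record, as NodeStatusUpdaterImpl does.
    NodeStatus nodeStatus = recordFactory.newRecordInstance(NodeStatus.class);

    // Create a sample application id (setters assumed for illustration).
    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
    appId.setClusterTimestamp(System.currentTimeMillis());
    appId.setId(1);

    // New accessors added by this patch: the heartbeat can now carry the
    // applications whose delegation tokens should be kept alive on the RM.
    List<ApplicationId> keepAlive = new ArrayList<ApplicationId>();
    keepAlive.add(appId);
    nodeStatus.setKeepAliveApplications(keepAlive);

    System.out.println("keep-alive apps: " + nodeStatus.getKeepAliveApplications());
  }
}

[End of editor's note; the patch resumes below.]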
@@ -304,7 +310,7 @@ Whether to enable log aggregation - yarn.nodemanager.log-aggregation-enable + yarn.log-aggregation-enable false diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/avro/LocalizationProtocol.genavro b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/avro/LocalizationProtocol.genavro deleted file mode 100644 index 01e0c079c1..0000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/avro/LocalizationProtocol.genavro +++ /dev/null @@ -1,11 +0,0 @@ -@namespace("org.apache.hadoop.yarn") -protocol LocalizationProtocol { - - import idl "yarn/yarn-api/src/main/avro/yarn-types.genavro"; - - void successfulLocalization(string user, LocalResource resource, URL path) - throws YarnRemoteException; - - void failedLocalization(string user, LocalResource resource, YarnRemoteException path) - throws YarnRemoteException; -} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 6da70f1502..f0007a657a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -20,8 +20,12 @@ import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.Random; import java.util.Map.Entry; import org.apache.avro.AvroRuntimeException; @@ -56,6 +60,7 @@ import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.service.AbstractService; + public class NodeStatusUpdaterImpl extends AbstractService implements NodeStatusUpdater { @@ -76,6 +81,12 @@ public class NodeStatusUpdaterImpl extends AbstractService implements private byte[] secretKeyBytes = new byte[0]; private boolean isStopped; private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); + private boolean tokenKeepAliveEnabled; + private long tokenRemovalDelayMs; + /** Keeps track of when the next keep alive request should be sent for an app*/ + private Map appTokenKeepAliveMap = + new HashMap(); + private Random keepAliveDelayRandom = new Random(); private final NodeHealthCheckerService healthChecker; private final NodeManagerMetrics metrics; @@ -103,6 +114,13 @@ public synchronized void init(Configuration conf) { this.totalResource = recordFactory.newRecordInstance(Resource.class); this.totalResource.setMemory(memoryMb); metrics.addResource(totalResource); + this.tokenKeepAliveEnabled = + conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, + YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED) + && isSecurityEnabled(); + this.tokenRemovalDelayMs = + conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, + YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS); super.init(conf); } @@ -139,6 +157,10 @@ public synchronized void stop() { super.stop(); } + protected boolean 
isSecurityEnabled() { + return UserGroupInformation.isSecurityEnabled(); + } + protected ResourceTracker getRMClient() { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); @@ -188,6 +210,29 @@ public byte[] getRMNMSharedSecret() { return this.secretKeyBytes.clone(); } + private List createKeepAliveApplicationList() { + if (!tokenKeepAliveEnabled) { + return Collections.emptyList(); + } + + List appList = new ArrayList(); + for (Iterator> i = + this.appTokenKeepAliveMap.entrySet().iterator(); i.hasNext();) { + Entry e = i.next(); + ApplicationId appId = e.getKey(); + Long nextKeepAlive = e.getValue(); + if (!this.context.getApplications().containsKey(appId)) { + // Remove if the application has finished. + i.remove(); + } else if (System.currentTimeMillis() > nextKeepAlive) { + // KeepAlive list for the next hearbeat. + appList.add(appId); + trackAppForKeepAlive(appId); + } + } + return appList; + } + private NodeStatus getNodeStatus() { NodeStatus nodeStatus = recordFactory.newRecordInstance(NodeStatus.class); @@ -231,9 +276,29 @@ private NodeStatus getNodeStatus() { } nodeStatus.setNodeHealthStatus(nodeHealthStatus); + List keepAliveAppIds = createKeepAliveApplicationList(); + nodeStatus.setKeepAliveApplications(keepAliveAppIds); + return nodeStatus; } + private void trackAppsForKeepAlive(List appIds) { + if (tokenKeepAliveEnabled && appIds != null && appIds.size() > 0) { + for (ApplicationId appId : appIds) { + trackAppForKeepAlive(appId); + } + } + } + + private void trackAppForKeepAlive(ApplicationId appId) { + // Next keepAlive request for app between 0.7 & 0.9 of when the token will + // likely expire. + long nextTime = System.currentTimeMillis() + + (long) (0.7 * tokenRemovalDelayMs + (0.2 * tokenRemovalDelayMs + * keepAliveDelayRandom.nextInt(100))/100); + appTokenKeepAliveMap.put(appId, nextTime); + } + @Override public void sendOutofBandHeartBeat() { synchronized (this.heartbeatMonitor) { @@ -245,6 +310,7 @@ protected void startStatusUpdater() { new Thread("Node Status Updater") { @Override + @SuppressWarnings("unchecked") public void run() { int lastHeartBeatID = 0; while (!isStopped) { @@ -284,6 +350,8 @@ public void run() { } List appsToCleanup = response.getApplicationsToCleanupList(); + //Only start tracking for keepAlive on FINISH_APP + trackAppsForKeepAlive(appsToCleanup); if (appsToCleanup.size() != 0) { dispatcher.getEventHandler().handle( new CMgrCompletedAppsEvent(appsToCleanup)); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 615b825c4f..3169f2f1b8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -192,8 +192,8 @@ private void addIfService(Object object) { protected LogHandler createLogHandler(Configuration conf, Context context, DeletionService deletionService) { - if (conf.getBoolean(YarnConfiguration.NM_LOG_AGGREGATION_ENABLED, - 
YarnConfiguration.DEFAULT_NM_LOG_AGGREGATION_ENABLED)) { + if (conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, + YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { return new LogAggregationService(this.dispatcher, context, deletionService, dirsHandler); } else { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java index 5cfcc0d2ea..fdd4ecb2f6 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java @@ -170,6 +170,7 @@ public void run() { this.writer.closeWriter(); LOG.info("Finished aggregate log-file for app " + this.applicationId); } + try { userUgi.doAs(new PrivilegedExceptionAction() { @Override diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java index 8d8f09c038..9a227430a9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllApplicationsPage.java @@ -28,9 +28,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; -import org.apache.hadoop.yarn.util.ConverterUtils; -import org.apache.hadoop.yarn.webapp.YarnWebParams; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.webapp.SubView; +import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; @@ -88,13 +88,11 @@ protected void render(Block html) { .tbody(); for (Entry entry : this.nmContext .getApplications().entrySet()) { - ApplicationId appId = entry.getKey(); - Application app = entry.getValue(); - String appIdStr = ConverterUtils.toString(appId); + AppInfo info = new AppInfo(entry.getValue()); tableBody .tr() - .td().a(url("application", appIdStr), appIdStr)._() - .td()._(app.getApplicationState()) + .td().a(url("application", info.getId()), info.getId())._() + .td()._(info.getState()) ._() ._(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java index 13c8951aa6..1bbb945d87 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/AllContainersPage.java @@ -28,9 +28,9 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; -import org.apache.hadoop.yarn.util.ConverterUtils; -import org.apache.hadoop.yarn.webapp.YarnWebParams; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; import org.apache.hadoop.yarn.webapp.SubView; +import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; @@ -83,17 +83,14 @@ protected void render(Block html) { ._().tbody(); for (Entry entry : this.nmContext .getContainers().entrySet()) { - ContainerId containerId = entry.getKey(); - Container container = entry.getValue(); - String containerIdStr = ConverterUtils.toString(containerId); + ContainerInfo info = new ContainerInfo(this.nmContext, entry.getValue()); tableBody .tr() - .td().a(url("container", containerIdStr), containerIdStr) + .td().a(url("container", info.getId()), info.getId()) ._() - .td()._(container.getContainerState())._() + .td()._(info.getState())._() .td() - .a(url("containerlogs", containerIdStr, container.getUser()), - "logs")._() + .a(url(info.getShortLogLink()), "logs")._() ._(); } tableBody._()._()._(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java index fc02120c6e..2562fb6d52 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ApplicationPage.java @@ -23,19 +23,16 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; -import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.util.ConverterUtils; -import 
org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.SubView; +import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; @@ -81,15 +78,14 @@ protected void render(Block html) { ConverterUtils.toApplicationId(this.recordFactory, $(APPLICATION_ID)); Application app = this.nmContext.getApplications().get(applicationID); - Map containers = app.getContainers(); + AppInfo info = new AppInfo(app); info("Application's information") - ._("ApplicationId", ConverterUtils.toString(app.getAppId())) - ._("ApplicationState", app.getApplicationState().toString()) - ._("User", app.getUser()); + ._("ApplicationId", info.getId()) + ._("ApplicationState", info.getState()) + ._("User", info.getUser()); TABLE containersListBody = html._(InfoBlock.class) .table("#containers"); - for (ContainerId containerId : containers.keySet()) { - String containerIdStr = ConverterUtils.toString(containerId); + for (String containerIdStr : info.getContainers()) { containersListBody .tr().td() .a(url("container", containerIdStr), containerIdStr) diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java index 6633aa0349..df06df69c1 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java @@ -18,18 +18,16 @@ package org.apache.hadoop.yarn.server.nodemanager.webapp; -import static org.apache.hadoop.yarn.util.StringHelper.ujoin; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.api.records.ContainerStatus; -import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; import org.apache.hadoop.yarn.util.ConverterUtils; -import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.SubView; +import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; @@ -77,21 +75,16 @@ protected void render(Block html) { + "please go back to the previous page and retry.")._(); return; } - ContainerStatus containerData = container.cloneAndGetContainerStatus(); - int exitCode = containerData.getExitStatus(); - String exiStatus = - (exitCode == YarnConfiguration.INVALID_CONTAINER_EXIT_STATUS) ? 
- "N/A" : String.valueOf(exitCode); + ContainerInfo info = new ContainerInfo(this.nmContext, container); + info("Container information") - ._("ContainerID", $(CONTAINER_ID)) - ._("ContainerState", container.getContainerState()) - ._("ExitStatus", exiStatus) - ._("Diagnostics", containerData.getDiagnostics()) - ._("User", container.getUser()) - ._("TotalMemoryNeeded", - container.getLaunchContext().getResource().getMemory()) - ._("logs", ujoin("containerlogs", $(CONTAINER_ID), container.getUser()), - "Link to logs"); + ._("ContainerID", info.getId()) + ._("ContainerState", info.getState()) + ._("ExitStatus", info.getExitStatus()) + ._("Diagnostics", info.getDiagnostics()) + ._("User", info.getUser()) + ._("TotalMemoryNeeded", info.getMemoryNeeded()) + ._("logs", info.getShortLogLink(), "Link to logs"); html._(InfoBlock.class); } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/JAXBContextResolver.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/JAXBContextResolver.java new file mode 100644 index 0000000000..37423bab97 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/JAXBContextResolver.java @@ -0,0 +1,62 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +package org.apache.hadoop.yarn.server.nodemanager.webapp; + +import java.util.Set; +import java.util.HashSet; +import java.util.Arrays; + +import com.sun.jersey.api.json.JSONConfiguration; +import com.sun.jersey.api.json.JSONJAXBContext; +import com.google.inject.Singleton; + +import javax.ws.rs.ext.ContextResolver; +import javax.ws.rs.ext.Provider; +import javax.xml.bind.JAXBContext; + +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainersInfo; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo; + +@Singleton +@Provider +public class JAXBContextResolver implements ContextResolver { + + private JAXBContext context; + private final Set types; + + // you have to specify all the dao classes here + private final Class[] cTypes = {AppInfo.class, AppsInfo.class, + ContainerInfo.class, ContainersInfo.class, NodeInfo.class}; + + public JAXBContextResolver() throws Exception { + this.types = new HashSet(Arrays.asList(cTypes)); + // sets the json configuration so that the json output looks like + // the xml output + this.context = new JSONJAXBContext(JSONConfiguration.natural(). + rootUnwrapping(false).build(), cTypes); + } + + @Override + public JAXBContext getContext(Class objectType) { + return (types.contains(objectType)) ? context : null; + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java index 033271afde..86e25056bb 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMController.java @@ -88,8 +88,8 @@ public void logs() { containerId.getApplicationAttemptId().getApplicationId(); Application app = nmContext.getApplications().get(appId); if (app == null - && nmConf.getBoolean(YarnConfiguration.NM_LOG_AGGREGATION_ENABLED, - YarnConfiguration.DEFAULT_NM_LOG_AGGREGATION_ENABLED)) { + && nmConf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, + YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) { String logServerUrl = nmConf.get(YarnConfiguration.YARN_LOG_SERVER_URL); String redirectUrl = null; if (logServerUrl == null || logServerUrl.isEmpty()) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java new file mode 100644 index 0000000000..9f7b90189a --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java @@ -0,0 +1,163 @@ +/** * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.webapp; + +import java.util.Map.Entry; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.UriInfo; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.factories.RecordFactory; +import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.ResourceView; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainersInfo; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.webapp.NotFoundException; +import org.apache.hadoop.yarn.webapp.WebApp; + +import com.google.inject.Inject; +import com.google.inject.Singleton; + +@Singleton +@Path("/ws/v1/node") +public class NMWebServices { + private Context nmContext; + private ResourceView rview; + private WebApp webapp; + private static RecordFactory recordFactory = RecordFactoryProvider + .getRecordFactory(null); + + @javax.ws.rs.core.Context + UriInfo uriInfo; + + @Inject + public NMWebServices(final Context nm, final ResourceView view, + final WebApp webapp) { + this.nmContext = nm; + this.rview = view; + this.webapp = webapp; + } + + @GET + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public NodeInfo get() { + return getNodeInfo(); + } + + @GET + @Path("/info") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public NodeInfo getNodeInfo() { + return new NodeInfo(this.nmContext, this.rview); + } + + @GET + @Path("/apps") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public AppsInfo getNodeApps(@QueryParam("state") String stateQuery, + @QueryParam("user") String userQuery) { + AppsInfo allApps = new AppsInfo(); + for (Entry entry : this.nmContext + .getApplications().entrySet()) { + + AppInfo appInfo = new AppInfo(entry.getValue()); + if (stateQuery != null && !stateQuery.isEmpty()) { + ApplicationState state = ApplicationState.valueOf(stateQuery); + if 
(!appInfo.getState().equalsIgnoreCase(stateQuery)) { + continue; + } + } + if (userQuery != null && !userQuery.isEmpty()) { + if (!appInfo.getUser().toString().equals(userQuery)) { + continue; + } + } + allApps.add(appInfo); + } + return allApps; + } + + @GET + @Path("/apps/{appid}") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public AppInfo getNodeApp(@PathParam("appid") String appId) { + ApplicationId id = ConverterUtils.toApplicationId(recordFactory, appId); + if (id == null) { + throw new NotFoundException("app with id " + appId + " not found"); + } + Application app = this.nmContext.getApplications().get(id); + if (app == null) { + throw new NotFoundException("app with id " + appId + " not found"); + } + return new AppInfo(app); + + } + + @GET + @Path("/containers") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public ContainersInfo getNodeContainers() { + ContainersInfo allContainers = new ContainersInfo(); + for (Entry entry : this.nmContext.getContainers() + .entrySet()) { + if (entry.getValue() == null) { + // just skip it + continue; + } + ContainerInfo info = new ContainerInfo(this.nmContext, entry.getValue(), + uriInfo.getBaseUri().toString(), webapp.name()); + allContainers.add(info); + } + return allContainers; + } + + @GET + @Path("/containers/{containerid}") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public ContainerInfo getNodeContainer(@PathParam("containerid") String id) { + ContainerId containerId = null; + containerId = ConverterUtils.toContainerId(id); + if (containerId == null) { + throw new NotFoundException("container with id, " + id + + ", is empty or null"); + } + Container container = nmContext.getContainers().get(containerId); + if (container == null) { + throw new NotFoundException("container with id, " + id + ", not found"); + } + return new ContainerInfo(this.nmContext, container, uriInfo.getBaseUri() + .toString(), webapp.name()); + + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java index 93201237de..9b34e366d1 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NodePage.java @@ -23,10 +23,10 @@ import java.util.Date; -import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.yarn.util.YarnVersionInfo; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.ResourceView; +import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; @@ -36,6 +36,8 @@ public class NodePage extends NMView { + private static final long BYTES_IN_MB = 1024 * 1024; + @Override protected void commonPreHead(HTML<_> html) { super.commonPreHead(html); @@ -60,21 +62,22 @@ public NodeBlock(Context context, ResourceView resourceView) { @Override protected void 
render(Block html) { + NodeInfo info = new NodeInfo(this.context, this.resourceView); info("NodeManager information") ._("Total Vmem allocated for Containers", - this.resourceView.getVmemAllocatedForContainers() + "bytes") + StringUtils.byteDesc(info.getTotalVmemAllocated() * BYTES_IN_MB)) ._("Total Pmem allocated for Container", - this.resourceView.getPmemAllocatedForContainers() + "bytes") + StringUtils.byteDesc(info.getTotalPmemAllocated() * BYTES_IN_MB)) ._("NodeHealthyStatus", - this.context.getNodeHealthStatus().getIsNodeHealthy()) + info.getHealthStatus()) ._("LastNodeHealthTime", new Date( - this.context.getNodeHealthStatus().getLastHealthReportTime())) + info.getLastNodeUpdateTime())) ._("NodeHealthReport", - this.context.getNodeHealthStatus().getHealthReport()) - ._("Node Manager Version:", YarnVersionInfo.getBuildVersion() + - " on " + YarnVersionInfo.getDate()) - ._("Hadoop Version:", VersionInfo.getBuildVersion() + - " on " + VersionInfo.getDate()); + info.getHealthReport()) + ._("Node Manager Version:", info.getNMBuildVersion() + + " on " + info.getNMVersionBuiltOn()) + ._("Hadoop Version:", info.getHadoopBuildVersion() + + " on " + info.getHadoopVersionBuiltOn()); html._(InfoBlock.class); } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java index f0d87414fe..7a799fd25b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java @@ -30,9 +30,10 @@ import org.apache.hadoop.yarn.server.nodemanager.ResourceView; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.service.AbstractService; -import org.apache.hadoop.yarn.webapp.YarnWebParams; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.WebApp; import org.apache.hadoop.yarn.webapp.WebApps; +import org.apache.hadoop.yarn.webapp.YarnWebParams; public class WebServer extends AbstractService { @@ -61,8 +62,9 @@ public synchronized void start() { YarnConfiguration.DEFAULT_NM_WEBAPP_ADDRESS); LOG.info("Instantiating NMWebApp at " + bindAddress); try { - this.webApp = WebApps.$for("node", Context.class, this.nmContext).at( - bindAddress).with(getConfig()).start(this.nmWebApp); + this.webApp = + WebApps.$for("node", Context.class, this.nmContext, "ws") + .at(bindAddress).with(getConfig()).start(this.nmWebApp); } catch (Exception e) { String msg = "NMWebapps failed to start."; LOG.error(msg, e); @@ -95,6 +97,9 @@ public NMWebApp(ResourceView resourceView, @Override public void setup() { + bind(NMWebServices.class); + bind(GenericExceptionHandler.class); + bind(JAXBContextResolver.class); bind(ResourceView.class).toInstance(this.resourceView); bind(ApplicationACLsManager.class).toInstance(this.aclsManager); bind(LocalDirsHandlerService.class).toInstance(dirsHandler); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/AppInfo.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/AppInfo.java new file mode 100644 index 0000000000..95e2a6537b --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/AppInfo.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.nodemanager.webapp.dao; + +import java.util.ArrayList; +import java.util.Map; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.util.ConverterUtils; + +@XmlRootElement(name = "app") +@XmlAccessorType(XmlAccessType.FIELD) +public class AppInfo { + + protected String id; + protected String state; + protected String user; + protected ArrayList containerids; + + public AppInfo() { + } // JAXB needs this + + public AppInfo(final Application app) { + this.id = ConverterUtils.toString(app.getAppId()); + this.state = app.getApplicationState().toString(); + this.user = app.getUser(); + + this.containerids = new ArrayList(); + Map appContainers = app.getContainers(); + for (ContainerId containerId : appContainers.keySet()) { + String containerIdStr = ConverterUtils.toString(containerId); + containerids.add(containerIdStr); + } + } + + public String getId() { + return this.id; + } + + public String getUser() { + return this.user; + } + + public String getState() { + return this.state; + } + + public ArrayList getContainers() { + return this.containerids; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/AppsInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/AppsInfo.java new file mode 100644 index 0000000000..919a618f0c --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/AppsInfo.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.nodemanager.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "apps") +@XmlAccessorType(XmlAccessType.FIELD) +public class AppsInfo { + + protected ArrayList app = new ArrayList(); + + public AppsInfo() { + } // JAXB needs this + + public void add(AppInfo appInfo) { + app.add(appInfo); + } + + public ArrayList getApps() { + return app; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java new file mode 100644 index 0000000000..5c79a7b52c --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.webapp.dao; + +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.util.StringHelper.ujoin; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; + +@XmlRootElement(name = "container") +@XmlAccessorType(XmlAccessType.FIELD) +public class ContainerInfo { + + protected String id; + protected String state; + protected int exitCode; + protected String diagnostics; + protected String user; + protected long totalMemoryNeededMB; + protected String containerLogsLink; + protected String nodeId; + @XmlTransient + protected String containerLogsShortLink; + @XmlTransient + protected String exitStatus; + + public ContainerInfo() { + } // JAXB needs this + + public ContainerInfo(final Context nmContext, final Container container) { + this(nmContext, container, "", ""); + } + + public ContainerInfo(final Context nmContext, final Container container, + final String requestUri, final String pathPrefix) { + + this.id = container.getContainerID().toString(); + this.nodeId = nmContext.getNodeId().toString(); + ContainerStatus containerData = container.cloneAndGetContainerStatus(); + this.exitCode = containerData.getExitStatus(); + this.exitStatus = (this.exitCode == YarnConfiguration.INVALID_CONTAINER_EXIT_STATUS) ? "N/A" + : String.valueOf(exitCode); + this.state = container.getContainerState().toString(); + this.diagnostics = containerData.getDiagnostics(); + if (this.diagnostics == null || this.diagnostics.isEmpty()) { + this.diagnostics = ""; + } + + this.user = container.getUser(); + this.totalMemoryNeededMB = container.getLaunchContext().getResource() + .getMemory(); + this.containerLogsShortLink = ujoin("containerlogs", this.id, + container.getUser()); + this.containerLogsLink = join(requestUri, pathPrefix, + this.containerLogsShortLink); + } + + public String getId() { + return this.id; + } + + public String getNodeId() { + return this.nodeId; + } + + public String getState() { + return this.state; + } + + public int getExitCode() { + return this.exitCode; + } + + public String getExitStatus() { + return this.exitStatus; + } + + public String getDiagnostics() { + return this.diagnostics; + } + + public String getUser() { + return this.user; + } + + public String getShortLogLink() { + return this.containerLogsShortLink; + } + + public String getLogLink() { + return this.containerLogsLink; + } + + public long getMemoryNeeded() { + return this.totalMemoryNeededMB; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainersInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainersInfo.java new file mode 100644 index 0000000000..826fb568b5 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainersInfo.java @@ -0,0 +1,43 @@ +/** + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.nodemanager.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "containers") +@XmlAccessorType(XmlAccessType.FIELD) +public class ContainersInfo { + + protected ArrayList<ContainerInfo> container = new ArrayList<ContainerInfo>(); + + public ContainersInfo() { + } // JAXB needs this + + public void add(ContainerInfo containerInfo) { + container.add(containerInfo); + } + + public ArrayList<ContainerInfo> getContainers() { + return container; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/NodeInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/NodeInfo.java new file mode 100644 index 0000000000..0eb1bef47a --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/NodeInfo.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
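The NodeInfo DAO that follows snapshots the node's health, allocated resources, and version strings at request time. Note that it reports allocated virtual and physical memory in MB: the byte counts returned by ResourceView are divided by BYTES_IN_MB (1024 * 1024) using integer division, so, for example, 2147483648 bytes of allocated vmem is reported as 2048 MB, and anything under 1 MB truncates to 0.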
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.ResourceView; +import org.apache.hadoop.yarn.util.YarnVersionInfo; + +@XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) +public class NodeInfo { + + private static final long BYTES_IN_MB = 1024 * 1024; + + protected String healthReport; + protected long totalVmemAllocatedContainersMB; + protected long totalPmemAllocatedContainersMB; + protected long lastNodeUpdateTime; + protected boolean nodeHealthy; + protected String nodeManagerVersion; + protected String nodeManagerBuildVersion; + protected String nodeManagerVersionBuiltOn; + protected String hadoopVersion; + protected String hadoopBuildVersion; + protected String hadoopVersionBuiltOn; + protected String id; + protected String nodeHostName; + + public NodeInfo() { + } // JAXB needs this + + public NodeInfo(final Context context, final ResourceView resourceView) { + + this.id = context.getNodeId().toString(); + this.nodeHostName = context.getNodeId().getHost(); + this.totalVmemAllocatedContainersMB = resourceView + .getVmemAllocatedForContainers() / BYTES_IN_MB; + this.totalPmemAllocatedContainersMB = resourceView + .getPmemAllocatedForContainers() / BYTES_IN_MB; + this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy(); + this.lastNodeUpdateTime = context.getNodeHealthStatus() + .getLastHealthReportTime(); + + this.healthReport = context.getNodeHealthStatus().getHealthReport(); + + this.nodeManagerVersion = YarnVersionInfo.getVersion(); + this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion(); + this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate(); + this.hadoopVersion = VersionInfo.getVersion(); + this.hadoopBuildVersion = VersionInfo.getBuildVersion(); + this.hadoopVersionBuiltOn = VersionInfo.getDate(); + } + + public String getNodeId() { + return this.id; + } + + public String getNodeHostName() { + return this.nodeHostName; + } + + public String getNMVersion() { + return this.nodeManagerVersion; + } + + public String getNMBuildVersion() { + return this.nodeManagerBuildVersion; + } + + public String getNMVersionBuiltOn() { + return this.nodeManagerVersionBuiltOn; + } + + public String getHadoopVersion() { + return this.hadoopVersion; + } + + public String getHadoopBuildVersion() { + return this.hadoopBuildVersion; + } + + public String getHadoopVersionBuiltOn() { + return this.hadoopVersionBuiltOn; + } + + public boolean getHealthStatus() { + return this.nodeHealthy; + } + + public long getLastNodeUpdateTime() { + return this.lastNodeUpdateTime; + } + + public String getHealthReport() { + return this.healthReport; + } + + public long getTotalVmemAllocated() { + return this.totalVmemAllocatedContainersMB; + } + + public long getTotalPmemAllocated() { + return this.totalPmemAllocatedContainersMB; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index c1462746ff..cfb32679a6 100644 --- 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -22,7 +22,9 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentMap; @@ -56,6 +58,7 @@ import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.api.records.RegistrationResponse; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; @@ -63,10 +66,12 @@ import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; import org.apache.hadoop.yarn.service.Service; import org.apache.hadoop.yarn.service.Service.STATE; +import org.apache.hadoop.yarn.util.BuilderUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import static org.mockito.Mockito.mock; public class TestNodeStatusUpdater { @@ -216,7 +221,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) HeartbeatResponse response = recordFactory .newRecordInstance(HeartbeatResponse.class); response.setResponseId(heartBeatID); - + NodeHeartbeatResponse nhResponse = recordFactory .newRecordInstance(NodeHeartbeatResponse.class); nhResponse.setHeartbeatResponse(response); @@ -241,6 +246,48 @@ protected ResourceTracker getRMClient() { return resourceTracker; } } + + private class MyNodeStatusUpdater3 extends NodeStatusUpdaterImpl { + public ResourceTracker resourceTracker; + private Context context; + + public MyNodeStatusUpdater3(Context context, Dispatcher dispatcher, + NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics, + ContainerTokenSecretManager containerTokenSecretManager) { + super(context, dispatcher, healthChecker, metrics, + containerTokenSecretManager); + this.context = context; + this.resourceTracker = new MyResourceTracker3(this.context); + } + + @Override + protected ResourceTracker getRMClient() { + return resourceTracker; + } + + @Override + protected boolean isSecurityEnabled() { + return true; + } + } + + private class MyNodeManager extends NodeManager { + + private MyNodeStatusUpdater3 nodeStatusUpdater; + @Override + protected NodeStatusUpdater createNodeStatusUpdater(Context context, + Dispatcher dispatcher, NodeHealthCheckerService healthChecker, + ContainerTokenSecretManager containerTokenSecretManager) { + this.nodeStatusUpdater = + new MyNodeStatusUpdater3(context, dispatcher, healthChecker, metrics, + containerTokenSecretManager); + return this.nodeStatusUpdater; + } + + protected MyNodeStatusUpdater3 getNodeStatusUpdater() { + return this.nodeStatusUpdater; + } + } // private class MyResourceTracker2 implements ResourceTracker { @@ -276,6 +323,65 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) } } + private class 
MyResourceTracker3 implements ResourceTracker { + public NodeAction heartBeatNodeAction = NodeAction.NORMAL; + public NodeAction registerNodeAction = NodeAction.NORMAL; + private Map<ApplicationId, List<Long>> keepAliveRequests = + new HashMap<ApplicationId, List<Long>>(); + private ApplicationId appId = BuilderUtils.newApplicationId(1, 1); + private final Context context; + + MyResourceTracker3(Context context) { + this.context = context; + } + + @Override + public RegisterNodeManagerResponse registerNodeManager( + RegisterNodeManagerRequest request) throws YarnRemoteException { + + RegisterNodeManagerResponse response = + recordFactory.newRecordInstance(RegisterNodeManagerResponse.class); + RegistrationResponse regResponse = + recordFactory.newRecordInstance(RegistrationResponse.class); + regResponse.setNodeAction(registerNodeAction); + response.setRegistrationResponse(regResponse); + return response; + } + + @Override + public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) + throws YarnRemoteException { + LOG.info("Got heartBeatId: [" + heartBeatID +"]"); + NodeStatus nodeStatus = request.getNodeStatus(); + nodeStatus.setResponseId(heartBeatID++); + HeartbeatResponse response = + recordFactory.newRecordInstance(HeartbeatResponse.class); + response.setResponseId(heartBeatID); + response.setNodeAction(heartBeatNodeAction); + + if (nodeStatus.getKeepAliveApplications() != null + && nodeStatus.getKeepAliveApplications().size() > 0) { + for (ApplicationId appId : nodeStatus.getKeepAliveApplications()) { + List<Long> list = keepAliveRequests.get(appId); + if (list == null) { + list = new LinkedList<Long>(); + keepAliveRequests.put(appId, list); + } + list.add(System.currentTimeMillis()); + } + } + if (heartBeatID == 2) { + LOG.info("Sending FINISH_APP for application: [" + appId + "]"); + this.context.getApplications().put(appId, mock(Application.class)); + response.addAllApplicationsToCleanup(Collections.singletonList(appId)); + } + NodeHeartbeatResponse nhResponse = + recordFactory.newRecordInstance(NodeHeartbeatResponse.class); + nhResponse.setHeartbeatResponse(response); + return nhResponse; + } + } + @Before public void clearError() { nmStartError = null; @@ -456,6 +562,38 @@ public void start() { verifyNodeStartFailure("Starting of RPC Server failed"); } + @Test + public void testApplicationKeepAlive() throws Exception { + MyNodeManager nm = new MyNodeManager(); + try { + YarnConfiguration conf = createNMConfig(); + conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true); + conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, + 4000l); + nm.init(conf); + nm.start(); + // HB 2 -> app cancelled by RM. 
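The MyResourceTracker3 mock above drives this test: it records a timestamp for every keep-alive notification an application carries in a heartbeat, and at heartbeat 2 it plants a mock Application in the NM context and hands back a FINISH_APP cleanup order for it. A standalone sketch of the same bookkeeping, with a hypothetical KeepAliveLog class that is not part of the patch:

    import java.util.HashMap;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;

    class KeepAliveLog<K> {
      private final Map<K, List<Long>> pings = new HashMap<K, List<Long>>();

      // Record one keep-alive ping for the given key at the current time.
      synchronized void record(K key) {
        List<Long> list = pings.get(key);
        if (list == null) {
          list = new LinkedList<Long>();
          pings.put(key, list);
        }
        list.add(System.currentTimeMillis());
      }

      // Number of pings seen so far for the key.
      synchronized int count(K key) {
        List<Long> list = pings.get(key);
        return list == null ? 0 : list.size();
      }
    }

The assertions below take a first count after the application is removed from the NM context, wait several more heartbeats, and then check the count is unchanged, i.e. the NM stops sending keep-alives once it no longer knows about the application.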
+ while (heartBeatID < 12) { + Thread.sleep(1000l); + } + MyResourceTracker3 rt = + (MyResourceTracker3) nm.getNodeStatusUpdater().getRMClient(); + rt.context.getApplications().remove(rt.appId); + Assert.assertEquals(1, rt.keepAliveRequests.size()); + int numKeepAliveRequests = rt.keepAliveRequests.get(rt.appId).size(); + LOG.info("Number of Keep Alive Requests: [" + numKeepAliveRequests + "]"); + Assert.assertTrue(numKeepAliveRequests == 2 || numKeepAliveRequests == 3); + while (heartBeatID < 20) { + Thread.sleep(1000l); + } + int numKeepAliveRequests2 = rt.keepAliveRequests.get(rt.appId).size(); + Assert.assertEquals(numKeepAliveRequests, numKeepAliveRequests2); + } finally { + if (nm.getServiceState() == STATE.STARTED) + nm.stop(); + } + } + private void verifyNodeStartFailure(String errMessage) { YarnConfiguration conf = createNMConfig(); nm.init(conf); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java index a5e5eb06bc..bbee9c5848 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java @@ -68,7 +68,7 @@ public void testLogDeletion() { + localLogDirs[1].getAbsolutePath(); conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString); - conf.setBoolean(YarnConfiguration.NM_LOG_AGGREGATION_ENABLED, false); + conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false); conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 0l); DrainDispatcher dispatcher = createDispatcher(conf); @@ -142,7 +142,7 @@ public void testDelayedDelete() { + localLogDirs[1].getAbsolutePath(); conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString); - conf.setBoolean(YarnConfiguration.NM_LOG_AGGREGATION_ENABLED, false); + conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false); conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 10800l); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 4c4334c4dc..ad28d6c910 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -173,7 +173,7 @@ protected synchronized void finishApplication(ApplicationId applicationId) { } else { // Inform the DelegationTokenRenewer if (UserGroupInformation.isSecurityEnabled()) { - rmContext.getDelegationTokenRenewer().removeApplication(applicationId); + 
rmContext.getDelegationTokenRenewer().applicationFinished(applicationId); } completedApps.add(applicationId); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 9152317968..157e32be84 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -412,7 +412,7 @@ public void handle(RMNodeEvent event) { protected void startWepApp() { Builder builder = - WebApps.$for("cluster", masterService).at( + WebApps.$for("cluster", ApplicationMasterService.class, masterService, "ws").at( this.conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS, YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS)); if(YarnConfiguration.getRMWebAppHostAndPort(conf). diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index a01a0bf42e..58697486d5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -272,7 +272,8 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) // 4. Send status to RMNode, saving the latest response. 
this.rmContext.getDispatcher().getEventHandler().handle( new RMNodeStatusEvent(nodeId, remoteNodeStatus.getNodeHealthStatus(), - remoteNodeStatus.getContainersStatuses(), latestResponse)); + remoteNodeStatus.getContainersStatuses(), + remoteNodeStatus.getKeepAliveApplications(), latestResponse)); nodeHeartBeatResponse.setHeartbeatResponse(latestResponse); return nodeHeartBeatResponse; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 30109edbc0..2cadd89071 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -262,6 +262,16 @@ public List<ApplicationId> pullAppsToCleanup() { } + @Private + public List<ContainerId> getContainersToCleanUp() { + this.readLock.lock(); + try { + return new ArrayList<ContainerId>(containersToClean); + } finally { + this.readLock.unlock(); + } + } + @Override public List<ContainerId> pullContainersToCleanUp() { @@ -342,7 +352,6 @@ public static class CleanUpContainerTransition implements @Override public void transition(RMNodeImpl rmNode, RMNodeEvent event) { - rmNode.containersToClean.add((( RMNodeCleanContainerEvent) event).getContainerId()); } @@ -396,8 +405,17 @@ public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { List<ContainerStatus> completedContainers = new ArrayList<ContainerStatus>(); for (ContainerStatus remoteContainer : statusEvent.getContainers()) { - // Process running containers ContainerId containerId = remoteContainer.getContainerId(); + + // Don't bother with containers already scheduled for cleanup, + // the scheduler doesn't need to know any more about this container + if (rmNode.containersToClean.contains(containerId)) { + LOG.info("Container " + containerId + " already scheduled for " + + "cleanup, no further processing"); + continue; + } + + // Process running containers if (remoteContainer.getState() == ContainerState.RUNNING) { if (!rmNode.justLaunchedContainers.containsKey(containerId)) { // Just launched container. RM knows about it the first time. 
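The status-update transition above now drops container statuses that the RMNode has already scheduled for cleanup before passing the rest to the scheduler. A minimal sketch of that filter in isolation, with hypothetical names rather than the RMNodeImpl types:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    class CleanupFilterDemo {
      // Keep only the ids the node is not already cleaning up.
      static List<String> keepUnscheduled(Set<String> toClean,
          List<String> reported) {
        List<String> remaining = new ArrayList<String>();
        for (String id : reported) {
          if (toClean.contains(id)) {
            continue; // already scheduled for cleanup, skip further processing
          }
          remaining.add(id);
        }
        return remaining;
      }
    }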
@@ -414,7 +432,9 @@ public RMNodeState transition(RMNodeImpl rmNode, RMNodeEvent event) { rmNode.context.getDispatcher().getEventHandler().handle( new NodeUpdateSchedulerEvent(rmNode, newlyLaunchedContainers, completedContainers)); - + + rmNode.context.getDelegationTokenRenewer().updateKeepAliveApplications( + statusEvent.getKeepAliveAppIds()); return RMNodeState.RUNNING; } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java index e4a2930168..1285c2bed9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java @@ -20,6 +20,7 @@ import java.util.List; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.api.records.NodeId; @@ -28,15 +29,17 @@ public class RMNodeStatusEvent extends RMNodeEvent { private final NodeHealthStatus nodeHealthStatus; - private List containersCollection; + private final List containersCollection; private final HeartbeatResponse latestResponse; + private final List keepAliveAppIds; public RMNodeStatusEvent(NodeId nodeId, NodeHealthStatus nodeHealthStatus, - List collection, + List collection, List keepAliveAppIds, HeartbeatResponse latestResponse) { super(nodeId, RMNodeEventType.STATUS_UPDATE); this.nodeHealthStatus = nodeHealthStatus; this.containersCollection = collection; + this.keepAliveAppIds = keepAliveAppIds; this.latestResponse = latestResponse; } @@ -51,4 +54,8 @@ public List getContainers() { public HeartbeatResponse getLatestResponse() { return this.latestResponse; } -} + + public List getKeepAliveAppIds() { + return this.keepAliveAppIds; + } +} \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index a7d2e4582d..d837f7d759 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -20,14 +20,19 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.HashSet; import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; import java.util.Set; import java.util.Timer; import java.util.TimerTask; +import 
java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.LinkedBlockingQueue; import org.apache.commons.logging.Log; @@ -40,6 +45,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.service.AbstractService; /** @@ -65,7 +71,16 @@ public class DelegationTokenRenewer extends AbstractService { // appId=>List private Set delegationTokens = Collections.synchronizedSet(new HashSet()); + + private final ConcurrentMap delayedRemovalMap = + new ConcurrentHashMap(); + private long tokenRemovalDelayMs; + + private Thread delayedRemovalThread; + + private boolean tokenKeepAliveEnabled; + public DelegationTokenRenewer() { super(DelegationTokenRenewer.class.getName()); } @@ -73,6 +88,12 @@ public DelegationTokenRenewer() { @Override public synchronized void init(Configuration conf) { super.init(conf); + this.tokenKeepAliveEnabled = + conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, + YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED); + this.tokenRemovalDelayMs = + conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, + YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS); } @Override @@ -81,6 +102,12 @@ public synchronized void start() { dtCancelThread.start(); renewalTimer = new Timer(true); + if (tokenKeepAliveEnabled) { + delayedRemovalThread = + new Thread(new DelayedTokenRemovalRunnable(getConfig()), + "DelayedTokenCanceller"); + delayedRemovalThread.start(); + } } @Override @@ -94,6 +121,14 @@ public synchronized void stop() { } catch (InterruptedException e) { e.printStackTrace(); } + if (tokenKeepAliveEnabled && delayedRemovalThread != null) { + delayedRemovalThread.interrupt(); + try { + delayedRemovalThread.join(1000); + } catch (InterruptedException e) { + LOG.info("Interrupted while joining on delayed removal thread.", e); + } + } super.stop(); } @@ -343,12 +378,38 @@ private void removeFailedDelegationToken(DelegationTokenToRenew t) { if(t.timerTask!=null) t.timerTask.cancel(); } - + /** * Removing delegation token for completed applications. * @param applicationId completed application */ - public void removeApplication(ApplicationId applicationId) { + public void applicationFinished(ApplicationId applicationId) { + if (!tokenKeepAliveEnabled) { + removeApplicationFromRenewal(applicationId); + } else { + delayedRemovalMap.put(applicationId, System.currentTimeMillis() + + tokenRemovalDelayMs); + } + } + + /** + * Add a list of applications to the keep alive list. If an appId already + * exists, update it's keep-alive time. + * + * @param appIds + * the list of applicationIds to be kept alive. + * + */ + public void updateKeepAliveApplications(List appIds) { + if (tokenKeepAliveEnabled && appIds != null && appIds.size() > 0) { + for (ApplicationId appId : appIds) { + delayedRemovalMap.put(appId, System.currentTimeMillis() + + tokenRemovalDelayMs); + } + } + } + + private void removeApplicationFromRenewal(ApplicationId applicationId) { synchronized (delegationTokens) { Iterator it = delegationTokens.iterator(); while(it.hasNext()) { @@ -371,4 +432,50 @@ public void removeApplication(ApplicationId applicationId) { } } } + + /** + * Takes care of cancelling app delegation tokens after the configured + * cancellation delay, taking into consideration keep-alive requests. 
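In other words, when log aggregation (and therefore token keep-alive) is enabled, applicationFinished() no longer cancels an application's tokens immediately: it records a deadline of now + RM_NM_EXPIRY_INTERVAL_MS in delayedRemovalMap, every keep-alive heartbeat pushes that deadline forward via updateKeepAliveApplications(), and the runnable below periodically sweeps the map and cancels tokens whose deadline has passed. A compact sketch of that sweep pattern, using a hypothetical DelayedCancelDemo class rather than the renewer itself:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    class DelayedCancelDemo {
      private final ConcurrentMap<String, Long> deadline =
          new ConcurrentHashMap<String, Long>();
      private final long delayMs;

      DelayedCancelDemo(long delayMs) { this.delayMs = delayMs; }

      // Called when an app finishes, and again for every keep-alive ping.
      void touch(String appId) {
        deadline.put(appId, System.currentTimeMillis() + delayMs);
      }

      // One sweep: collect and drop every entry whose deadline has passed.
      List<String> sweep() {
        List<String> expired = new ArrayList<String>();
        long now = System.currentTimeMillis();
        for (Map.Entry<String, Long> e : deadline.entrySet()) {
          if (e.getValue() < now) {
            expired.add(e.getKey());
          }
        }
        for (String appId : expired) {
          deadline.remove(appId); // actual token cancellation would go here
        }
        return expired;
      }
    }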
+ * + */ + private class DelayedTokenRemovalRunnable implements Runnable { + + private long waitTimeMs; + + DelayedTokenRemovalRunnable(Configuration conf) { + waitTimeMs = + conf.getLong( + YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS, + YarnConfiguration.DEFAULT_RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS); + } + + @Override + public void run() { + List toCancel = new ArrayList(); + while (!Thread.currentThread().isInterrupted()) { + Iterator> it = + delayedRemovalMap.entrySet().iterator(); + toCancel.clear(); + while (it.hasNext()) { + Entry e = it.next(); + if (e.getValue() < System.currentTimeMillis()) { + toCancel.add(e.getKey()); + } + } + for (ApplicationId appId : toCancel) { + removeApplicationFromRenewal(appId); + delayedRemovalMap.remove(appId); + } + synchronized (this) { + try { + wait(waitTimeMs); + } catch (InterruptedException e) { + LOG.info("Delayed Deletion Thread Interrupted. Shutting it down"); + return; + } + } + } + } + } + } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java index aaee9b1bf0..6eed4bfa93 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java @@ -18,10 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; import org.apache.hadoop.yarn.util.Times; -import org.apache.hadoop.yarn.util.YarnVersionInfo; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; @@ -30,25 +29,25 @@ public class AboutBlock extends HtmlBlock { final ResourceManager rm; - @Inject + @Inject AboutBlock(ResourceManager rm, ViewContext ctx) { super(ctx); this.rm = rm; } - + @Override protected void render(Block html) { html._(MetricsOverviewTable.class); - long ts = ResourceManager.clusterTimeStamp; ResourceManager rm = getInstance(ResourceManager.class); + ClusterInfo cinfo = new ClusterInfo(rm); info("Cluster overview"). - _("Cluster ID:", ts). - _("ResourceManager state:", rm.getServiceState()). - _("ResourceManager started on:", Times.format(ts)). - _("ResourceManager version:", YarnVersionInfo.getBuildVersion() + - " on " + YarnVersionInfo.getDate()). - _("Hadoop version:", VersionInfo.getBuildVersion() + - " on " + VersionInfo.getDate()); + _("Cluster ID:", cinfo.getClusterId()). + _("ResourceManager state:", cinfo.getState()). + _("ResourceManager started on:", Times.format(cinfo.getStartedOn())). + _("ResourceManager version:", cinfo.getRMBuildVersion() + + " on " + cinfo.getRMVersionBuiltOn()). 
+ _("Hadoop version:", cinfo.getHadoopBuildVersion() + + " on " + cinfo.getHadoopVersionBuiltOn()); html._(InfoBlock.class); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java index 1e9215f0b8..c629718a9b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsBlock.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; @@ -56,23 +57,18 @@ class AppsBlock extends HtmlBlock { tbody(); int i = 0; for (RMApp app : list.apps.values()) { - String appId = app.getApplicationId().toString(); - String trackingUrl = app.getTrackingUrl(); - boolean trackingUrlIsNotReady = trackingUrl == null || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl); - String ui = trackingUrlIsNotReady ? "UNASSIGNED" : - (app.getFinishTime() == 0 ? - "ApplicationMaster" : "History"); - String percent = String.format("%.1f", app.getProgress() * 100); + AppInfo appInfo = new AppInfo(app, true); + String percent = String.format("%.1f", appInfo.getProgress()); tbody. tr(). td(). - br().$title(String.valueOf(app.getApplicationId().getId()))._(). // for sorting - a(url("app", appId), appId)._(). - td(app.getUser().toString()). - td(app.getName().toString()). - td(app.getQueue().toString()). - td(app.getState().toString()). - td(app.getFinalApplicationStatus().toString()). + br().$title(appInfo.getAppIdNum())._(). // for sorting + a(url("app", appInfo.getAppId()), appInfo.getAppId())._(). + td(appInfo.getUser()). + td(appInfo.getName()). + td(appInfo.getQueue()). + td(appInfo.getState()). + td(appInfo.getFinalStatus()). td(). br().$title(percent)._(). // for sorting div(_PROGRESSBAR). @@ -80,9 +76,9 @@ class AppsBlock extends HtmlBlock { div(_PROGRESSBAR_VALUE). $style(join("width:", percent, '%'))._()._()._(). td(). - a(trackingUrlIsNotReady ? - "#" : join("http://", trackingUrl), ui)._(). - td(app.getDiagnostics().toString())._(); + a(!appInfo.isTrackingUrlReady()? + "#" : appInfo.getTrackingUrlPretty(), appInfo.getTrackingUI())._(). 
+ td(appInfo.getNote())._(); if (list.rendering != Render.HTML && ++i >= 20) break; } tbody._()._(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java index 57e695c924..f3378d2747 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppsList.java @@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.webapp.Controller.RequestContext; import org.apache.hadoop.yarn.webapp.ToJSON; import org.apache.hadoop.yarn.webapp.view.JQueryUI.Render; @@ -54,31 +55,27 @@ void toDataTableArrays(PrintWriter out) { out.append('['); boolean first = true; for (RMApp app : apps.values()) { + AppInfo appInfo = new AppInfo(app, false); if (first) { first = false; } else { out.append(",\n"); } - String appID = app.getApplicationId().toString(); - String trackingUrl = app.getTrackingUrl(); - boolean trackingUrlIsNotReady = trackingUrl == null - || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl); - String ui = trackingUrlIsNotReady ? "UNASSIGNED" - : (app.getFinishTime() == 0 ? "ApplicationMaster" : "History"); out.append("[\""); - appendSortable(out, app.getApplicationId().getId()); - appendLink(out, appID, rc.prefix(), "app", appID).append(_SEP). - append(escapeHtml(app.getUser().toString())).append(_SEP). - append(escapeHtml(app.getName().toString())).append(_SEP). - append(escapeHtml(app.getQueue())).append(_SEP). - append(app.getState().toString()).append(_SEP). - append(app.getFinalApplicationStatus().toString()).append(_SEP); - appendProgressBar(out, app.getProgress()).append(_SEP); - appendLink(out, ui, rc.prefix(), - trackingUrlIsNotReady ? - "#" : "http://", trackingUrl). + appendSortable(out, appInfo.getAppIdNum()); + appendLink(out, appInfo.getAppId(), rc.prefix(), "app", + appInfo.getAppId()).append(_SEP). + append(escapeHtml(appInfo.getUser())).append(_SEP). + append(escapeHtml(appInfo.getName())).append(_SEP). + append(escapeHtml(appInfo.getQueue())).append(_SEP). + append(appInfo.getState()).append(_SEP). + append(appInfo.getFinalStatus()).append(_SEP); + appendProgressBar(out, appInfo.getProgress()).append(_SEP); + appendLink(out, appInfo.getTrackingUI(), rc.prefix(), + !appInfo.isTrackingUrlReady() ? + "#" : appInfo.getTrackingUrlPretty()). append(_SEP).append(escapeJavaScript(escapeHtml( - app.getDiagnostics().toString()))). + appInfo.getNote()))). 
append("\"]"); } out.append(']'); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java index a27ba15c45..2230acd82b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java @@ -18,19 +18,23 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import com.google.inject.Inject; -import com.google.inject.servlet.RequestScoped; +import static org.apache.hadoop.yarn.util.StringHelper.join; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; -import static org.apache.hadoop.yarn.util.StringHelper.*; +import com.google.inject.Inject; +import com.google.inject.servlet.RequestScoped; class CapacitySchedulerPage extends RmView { static final String _Q = ".ui-state-default.ui-corner-all"; @@ -47,22 +51,21 @@ static class Parent { public static class QueueBlock extends HtmlBlock { final Parent parent; + final CapacitySchedulerInfo sinfo; @Inject QueueBlock(Parent parent) { this.parent = parent; + sinfo = new CapacitySchedulerInfo(parent.queue); } @Override public void render(Block html) { UL ul = html.ul(); - CSQueue parentQueue = parent.queue; - for (CSQueue queue : parentQueue.getChildQueues()) { - float used = queue.getUsedCapacity(); - float set = queue.getCapacity(); + for (CapacitySchedulerQueueInfo info : sinfo.getSubQueues()) { + float used = info.getUsedCapacity() / 100; + float set = info.getCapacity() / 100; float delta = Math.abs(set - used) + 0.001f; - float max = queue.getMaximumCapacity(); - if (max < EPSILON || max > 1f) max = 1f; - //String absMaxPct = percent(queue.getAbsoluteMaximumCapacity()); + float max = info.getMaxCapacity() / 100; LI> li = ul. li(). a(_Q).$style(width(max * WIDTH_F)). @@ -72,14 +75,16 @@ public void render(Block html) { span().$style(join(width(delta/max), ';', used > set ? OVER : UNDER, ';', used > set ? left(set/max) : left(used/max)))._('.')._(). 
- span(".q", queue.getQueuePath().substring(5))._(); - if (queue instanceof ParentQueue) { - parent.queue = queue; + span(".q", info.getQueuePath().substring(5))._(); + if (info.getQueue() instanceof ParentQueue) { + // this could be optimized better + parent.queue = info.getQueue(); li. _(QueueBlock.class); } li._(); } + ul._(); } } @@ -111,8 +116,9 @@ public void render(Block html) { } else { CSQueue root = cs.getRootQueue(); parent.queue = root; - float used = root.getUsedCapacity(); - float set = root.getCapacity(); + CapacitySchedulerInfo sinfo = new CapacitySchedulerInfo(parent.queue); + float used = sinfo.getUsedCapacity() / 100; + float set = sinfo.getCapacity() / 100; float delta = Math.abs(set - used) + 0.001f; ul. li(). diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java index 8882eea884..1bbb993ac8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java @@ -18,22 +18,20 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; -import com.google.inject.Inject; +import static org.apache.hadoop.yarn.util.StringHelper.join; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; -import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; -import org.apache.hadoop.yarn.api.records.QueueInfo; -import org.apache.hadoop.yarn.api.records.QueueState; -import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.webapp.view.InfoBlock; -import static org.apache.hadoop.yarn.util.StringHelper.*; +import com.google.inject.Inject; class DefaultSchedulerPage extends RmView { static final String _Q = ".ui-state-default.ui-corner-all"; @@ -44,66 +42,35 @@ class DefaultSchedulerPage extends RmView { static final float EPSILON = 1e-8f; static class QueueInfoBlock extends HtmlBlock { - final RMContext rmContext; - final FifoScheduler fs; - final String qName; - final QueueInfo qInfo; + final FifoSchedulerInfo sinfo; @Inject QueueInfoBlock(RMContext context, ViewContext ctx, ResourceManager rm) { super(ctx); - this.rmContext = context; - - fs = (FifoScheduler) rm.getResourceScheduler(); - qName = fs.getQueueInfo("",false,false).getQueueName(); - qInfo = fs.getQueueInfo(qName,true,true); + sinfo = new FifoSchedulerInfo(rm); } @Override public void render(Block html) 
{ - String minmemoryresource = - Integer.toString(fs.getMinimumResourceCapability().getMemory()); - String maxmemoryresource = - Integer.toString(fs.getMaximumResourceCapability().getMemory()); - String qstate = (qInfo.getQueueState() == QueueState.RUNNING) ? - "Running" : - (qInfo.getQueueState() == QueueState.STOPPED) ? - "Stopped" : "Unknown"; - - int usedNodeMem = 0; - int availNodeMem = 0; - int totNodeMem = 0; - int nodeContainers = 0; - - for (RMNode ni : this.rmContext.getRMNodes().values()) { - SchedulerNodeReport report = fs.getNodeReport(ni.getNodeID()); - usedNodeMem += report.getUsedResource().getMemory(); - availNodeMem += report.getAvailableResource().getMemory(); - totNodeMem += ni.getTotalCapability().getMemory(); - nodeContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers(); - } - - info("\'" + qName + "\' Queue Status"). - _("Queue State:" , qstate). - _("Minimum Queue Memory Capacity:" , minmemoryresource). - _("Maximum Queue Memory Capacity:" , maxmemoryresource). - _("Number of Nodes:" , Integer.toString(this.rmContext.getRMNodes().size())). - _("Used Node Capacity:" , Integer.toString(usedNodeMem)). - _("Available Node Capacity:" , Integer.toString(availNodeMem)). - _("Total Node Capacity:" , Integer.toString(totNodeMem)). - _("Number of Node Containers:" , Integer.toString(nodeContainers)); + info("\'" + sinfo.getQueueName() + "\' Queue Status"). + _("Queue State:" , sinfo.getState()). + _("Minimum Queue Memory Capacity:" , Integer.toString(sinfo.getMinQueueMemoryCapacity())). + _("Maximum Queue Memory Capacity:" , Integer.toString(sinfo.getMaxQueueMemoryCapacity())). + _("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())). + _("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())). + _("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())). + _("Total Node Capacity:" , Integer.toString(sinfo.getTotalNodeCapacity())). + _("Number of Node Containers:" , Integer.toString(sinfo.getNumContainers())); html._(InfoBlock.class); } } static class QueuesBlock extends HtmlBlock { + final FifoSchedulerInfo sinfo; final FifoScheduler fs; - final String qName; - final QueueInfo qInfo; @Inject QueuesBlock(ResourceManager rm) { + sinfo = new FifoSchedulerInfo(rm); fs = (FifoScheduler) rm.getResourceScheduler(); - qName = fs.getQueueInfo("",false,false).getQueueName(); - qInfo = fs.getQueueInfo(qName,false,false); } @Override @@ -123,8 +90,8 @@ public void render(Block html) { span().$style(Q_END)._("100% ")._(). span(".q", "default")._()._(); } else { - float used = qInfo.getCurrentCapacity(); - float set = qInfo.getCapacity(); + float used = sinfo.getUsedCapacity(); + float set = sinfo.getCapacity(); float delta = Math.abs(set - used) + 0.001f; ul. li(). @@ -133,7 +100,7 @@ public void render(Block html) { span().$style(Q_END)._("100%")._(). span().$style(join(width(delta), ';', used > set ? OVER : UNDER, ';', used > set ? left(set) : left(used)))._(".")._(). - span(".q", qName)._(). + span(".q", sinfo.getQueueName())._(). 
_(QueueInfoBlock.class)._(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java new file mode 100644 index 0000000000..44e6c8c3ca --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp; + +import com.google.inject.Singleton; +import com.sun.jersey.api.json.JSONConfiguration; +import com.sun.jersey.api.json.JSONJAXBContext; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import javax.ws.rs.ext.ContextResolver; +import javax.ws.rs.ext.Provider; +import javax.xml.bind.JAXBContext; + +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo; + +@Singleton +@Provider +public class JAXBContextResolver implements ContextResolver { + + private JAXBContext context; + private final Set types; + + // you have to specify all the dao classes here + private final Class[] cTypes = { AppInfo.class, ClusterInfo.class, + CapacitySchedulerQueueInfo.class, FifoSchedulerInfo.class, + SchedulerTypeInfo.class, NodeInfo.class, UserMetricsInfo.class, + CapacitySchedulerInfo.class, ClusterMetricsInfo.class, + SchedulerInfo.class, AppsInfo.class, NodesInfo.class }; + + public JAXBContextResolver() throws Exception { + this.types = new HashSet(Arrays.asList(cTypes)); + this.context = new JSONJAXBContext(JSONConfiguration.natural() + 
.rootUnwrapping(false).build(), cTypes); + } + + @Override + public JAXBContext getContext(Class objectType) { + return (types.contains(objectType)) ? context : null; + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java index 3b916a599c..4b3d33c177 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java @@ -19,11 +19,11 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo; + import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; @@ -36,12 +36,12 @@ * current user is using on the cluster. */ public class MetricsOverviewTable extends HtmlBlock { - private static final long BYTES_IN_GB = 1024 * 1024 * 1024; - + private static final long BYTES_IN_MB = 1024 * 1024; + private final RMContext rmContext; private final ResourceManager rm; - @Inject + @Inject MetricsOverviewTable(RMContext context, ResourceManager rm, ViewContext ctx) { super(ctx); this.rmContext = context; @@ -55,22 +55,7 @@ protected void render(Block html) { //CSS in the correct spot html.style(".metrics {margin-bottom:5px}"); - ResourceScheduler rs = rm.getResourceScheduler(); - QueueMetrics metrics = rs.getRootQueueMetrics(); - ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics(); - - int appsSubmitted = metrics.getAppsSubmitted(); - int reservedGB = metrics.getReservedGB(); - int availableGB = metrics.getAvailableGB(); - int allocatedGB = metrics.getAllocatedGB(); - int containersAllocated = metrics.getAllocatedContainers(); - int totalGB = availableGB + reservedGB + allocatedGB; - - int totalNodes = clusterMetrics.getNumNMs(); - int lostNodes = clusterMetrics.getNumLostNMs(); - int unhealthyNodes = clusterMetrics.getUnhealthyNMs(); - int decommissionedNodes = clusterMetrics.getNumDecommisionedNMs(); - int rebootedNodes = clusterMetrics.getNumRebootedNMs(); + ClusterMetricsInfo clusterMetrics = new ClusterMetricsInfo(this.rm, this.rmContext); DIV div = html.div().$class("metrics"); @@ -92,30 +77,23 @@ protected void render(Block html) { _(). tbody().$class("ui-widget-content"). tr(). - td(String.valueOf(appsSubmitted)). - td(String.valueOf(containersAllocated)). - td(StringUtils.byteDesc(allocatedGB * BYTES_IN_GB)). - td(StringUtils.byteDesc(totalGB * BYTES_IN_GB)). 
- td(StringUtils.byteDesc(reservedGB * BYTES_IN_GB)). - td().a(url("nodes"),String.valueOf(totalNodes))._(). - td().a(url("nodes/decommissioned"),String.valueOf(decommissionedNodes))._(). - td().a(url("nodes/lost"),String.valueOf(lostNodes))._(). - td().a(url("nodes/unhealthy"),String.valueOf(unhealthyNodes))._(). - td().a(url("nodes/rebooted"),String.valueOf(rebootedNodes))._(). + td(String.valueOf(clusterMetrics.getAppsSubmitted())). + td(String.valueOf(clusterMetrics.getContainersAllocated())). + td(StringUtils.byteDesc(clusterMetrics.getAllocatedMB() * BYTES_IN_MB)). + td(StringUtils.byteDesc(clusterMetrics.getTotalMB() * BYTES_IN_MB)). + td(StringUtils.byteDesc(clusterMetrics.getReservedMB() * BYTES_IN_MB)). + td().a(url("nodes"),String.valueOf(clusterMetrics.getTotalNodes()))._(). + td().a(url("nodes/decommissioned"),String.valueOf(clusterMetrics.getDecommissionedNodes()))._(). + td().a(url("nodes/lost"),String.valueOf(clusterMetrics.getLostNodes()))._(). + td().a(url("nodes/unhealthy"),String.valueOf(clusterMetrics.getUnhealthyNodes()))._(). + td().a(url("nodes/rebooted"),String.valueOf(clusterMetrics.getRebootedNodes()))._(). _(). _()._(); - + String user = request().getRemoteUser(); if (user != null) { - QueueMetrics userMetrics = metrics.getUserMetrics(user); - if(userMetrics != null) { - int myAppsSubmitted = userMetrics.getAppsSubmitted(); - int myRunningContainers = userMetrics.getAllocatedContainers(); - int myPendingContainers = userMetrics.getPendingContainers(); - int myReservedContainers = userMetrics.getReservedContainers(); - int myReservedGB = userMetrics.getReservedGB(); - int myPendingGB = userMetrics.getPendingGB(); - int myAllocatedGB = userMetrics.getAllocatedGB(); + UserMetricsInfo userMetrics = new UserMetricsInfo(this.rm, this.rmContext, user); + if (userMetrics.metricsAvailable()) { div.table("#usermetricsoverview"). thead().$class("ui-widget-header"). tr(). @@ -130,13 +108,13 @@ protected void render(Block html) { _(). tbody().$class("ui-widget-content"). tr(). - td(String.valueOf(myAppsSubmitted)). - td(String.valueOf(myRunningContainers)). - td(String.valueOf(myPendingContainers)). - td(String.valueOf(myReservedContainers)). - td(StringUtils.byteDesc(myAllocatedGB * BYTES_IN_GB)). - td(StringUtils.byteDesc(myPendingGB * BYTES_IN_GB)). - td(StringUtils.byteDesc(myReservedGB * BYTES_IN_GB)). + td(String.valueOf(userMetrics.getAppsSubmitted())). + td(String.valueOf(userMetrics.getRunningContainers())). + td(String.valueOf(userMetrics.getPendingContainers())). + td(String.valueOf(userMetrics.getReservedContainers())). + td(StringUtils.byteDesc(userMetrics.getAllocatedMB() * BYTES_IN_MB)). + td(StringUtils.byteDesc(userMetrics.getPendingMB() * BYTES_IN_MB)). + td(StringUtils.byteDesc(userMetrics.getReservedMB() * BYTES_IN_MB)). _(). 
_()._(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index ea6f408228..79c371211c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -25,14 +25,12 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.api.records.NodeHealthStatus; -import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; @@ -45,9 +43,9 @@ class NodesPage extends RmView { static class NodesBlock extends HtmlBlock { - private static final long BYTES_IN_MB = 1024 * 1024; final RMContext rmContext; final ResourceManager rm; + private static final long BYTES_IN_MB = 1024 * 1024; @Inject NodesBlock(RMContext context, ResourceManager rm, ViewContext ctx) { @@ -59,7 +57,7 @@ static class NodesBlock extends HtmlBlock { @Override protected void render(Block html) { html._(MetricsOverviewTable.class); - + ResourceScheduler sched = rm.getResourceScheduler(); String type = $(NODE_STATE); TBODY> tbody = html.table("#nodes"). @@ -88,27 +86,18 @@ protected void render(Block html) { continue; } } - NodeId id = ni.getNodeID(); - SchedulerNodeReport report = sched.getNodeReport(id); - int numContainers = 0; - int usedMemory = 0; - int availableMemory = 0; - if(report != null) { - numContainers = report.getNumContainers(); - usedMemory = report.getUsedResource().getMemory(); - availableMemory = report.getAvailableResource().getMemory(); - } - - NodeHealthStatus health = ni.getNodeHealthStatus(); + NodeInfo info = new NodeInfo(ni, sched); + int usedMemory = (int)info.getUsedMemory(); + int availableMemory = (int)info.getAvailableMemory(); tbody.tr(). - td(ni.getRackName()). - td(String.valueOf(ni.getState())). - td(String.valueOf(ni.getNodeID().toString())). - td().a("http://" + ni.getHttpAddress(), ni.getHttpAddress())._(). - td(health.getIsNodeHealthy() ? "Healthy" : "Unhealthy"). - td(Times.format(health.getLastHealthReportTime())). - td(String.valueOf(health.getHealthReport())). - td(String.valueOf(numContainers)). + td(info.getRack()). + td(info.getState()). + td(info.getNodeId()). + td().a("http://" + info.getNodeHTTPAddress(), info.getNodeHTTPAddress())._(). + td(info.getHealthStatus()). + td(Times.format(info.getLastHealthUpdate())). + td(info.getHealthReport()). 
+ td(String.valueOf(info.getNumContainers())). td().br().$title(String.valueOf(usedMemory))._(). _(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._(). td().br().$title(String.valueOf(usedMemory))._(). diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java index 57b9d4ae98..74266a0015 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java @@ -23,6 +23,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.WebApp; /** @@ -41,6 +42,9 @@ public RMWebApp(ResourceManager rm) { @Override public void setup() { + bind(JAXBContextResolver.class); + bind(RMWebServices.class); + bind(GenericExceptionHandler.class); if (rm != null) { bind(ResourceManager.class).toInstance(rm); bind(RMContext.class).toInstance(rm.getRMContext()); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java new file mode 100644 index 0000000000..06551b21a8 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -0,0 +1,333 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp; + +import java.io.IOException; +import java.util.concurrent.ConcurrentMap; + +import javax.servlet.http.HttpServletRequest; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MediaType; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.ApplicationAccessType; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.factories.RecordFactory; +import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; +import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.webapp.BadRequestException; +import org.apache.hadoop.yarn.webapp.NotFoundException; + +import com.google.inject.Inject; +import com.google.inject.Singleton; + +@Singleton +@Path("/ws/v1/cluster") +public class RMWebServices { + private static final Log LOG = LogFactory.getLog(RMWebServices.class); + private final ResourceManager rm; + private static RecordFactory recordFactory = RecordFactoryProvider + .getRecordFactory(null); + private final ApplicationACLsManager aclsManager; + + @Inject + public RMWebServices(final ResourceManager rm, + final ApplicationACLsManager aclsManager) { + this.rm = rm; + this.aclsManager = aclsManager; + } + + protected Boolean hasAccess(RMApp app, HttpServletRequest hsr) { + // Check for the authorization. 
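For orientation: the resource declared above is rooted at /ws/v1/cluster and can answer in JSON or XML depending on the Accept header. A minimal client-side sketch using only the JDK follows; the host name rmhost and port 8088 (the customary ResourceManager web port) are illustrative assumptions, not part of this patch.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class ClusterInfoClient {
  public static void main(String[] args) throws Exception {
    // Ask for the JSON form of the cluster info resource; "application/xml" works too.
    URL url = new URL("http://rmhost:8088/ws/v1/cluster/info");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"));
    for (String line; (line = in.readLine()) != null;) {
      System.out.println(line); // prints the serialized cluster info bean
    }
    in.close();
    conn.disconnect();
  }
}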
+ String remoteUser = hsr.getRemoteUser(); + UserGroupInformation callerUGI = null; + if (remoteUser != null) { + callerUGI = UserGroupInformation.createRemoteUser(remoteUser); + } + if (callerUGI != null + && !this.aclsManager.checkAccess(callerUGI, + ApplicationAccessType.VIEW_APP, app.getUser(), + app.getApplicationId())) { + return false; + } + return true; + } + + @GET + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public ClusterInfo get() { + return getClusterInfo(); + } + + @GET + @Path("/info") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public ClusterInfo getClusterInfo() { + return new ClusterInfo(this.rm); + } + + @GET + @Path("/metrics") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public ClusterMetricsInfo getClusterMetricsInfo() { + return new ClusterMetricsInfo(this.rm, this.rm.getRMContext()); + } + + @GET + @Path("/scheduler") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public SchedulerTypeInfo getSchedulerInfo() { + ResourceScheduler rs = rm.getResourceScheduler(); + SchedulerInfo sinfo; + if (rs instanceof CapacityScheduler) { + CapacityScheduler cs = (CapacityScheduler) rs; + CSQueue root = cs.getRootQueue(); + sinfo = new CapacitySchedulerInfo(root); + } else if (rs instanceof FifoScheduler) { + sinfo = new FifoSchedulerInfo(this.rm); + } else { + throw new NotFoundException("Unknown scheduler configured"); + } + return new SchedulerTypeInfo(sinfo); + } + + @GET + @Path("/nodes") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public NodesInfo getNodes(@QueryParam("state") String filterState, + @QueryParam("healthy") String healthState) { + ResourceScheduler sched = this.rm.getResourceScheduler(); + if (sched == null) { + throw new NotFoundException("Null ResourceScheduler instance"); + } + + NodesInfo allNodes = new NodesInfo(); + for (RMNode ni : this.rm.getRMContext().getRMNodes().values()) { + NodeInfo nodeInfo = new NodeInfo(ni, sched); + if (filterState != null) { + RMNodeState.valueOf(filterState); + if (!(nodeInfo.getState().equalsIgnoreCase(filterState))) { + continue; + } + } + if ((healthState != null) && (!healthState.isEmpty())) { + LOG.info("health state is : " + healthState); + if (!healthState.equalsIgnoreCase("true") + && !healthState.equalsIgnoreCase("false")) { + String msg = "Error: You must specify either true or false to query on health"; + throw new BadRequestException(msg); + } + if (nodeInfo.isHealthy() != Boolean.parseBoolean(healthState)) { + continue; + } + } + allNodes.add(nodeInfo); + } + return allNodes; + } + + @GET + @Path("/nodes/{nodeId}") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public NodeInfo getNode(@PathParam("nodeId") String nodeId) { + if (nodeId == null || nodeId.isEmpty()) { + throw new NotFoundException("nodeId, " + nodeId + ", is empty or null"); + } + ResourceScheduler sched = this.rm.getResourceScheduler(); + if (sched == null) { + throw new NotFoundException("Null ResourceScheduler instance"); + } + NodeId nid = ConverterUtils.toNodeId(nodeId); + RMNode ni = this.rm.getRMContext().getRMNodes().get(nid); + if (ni == null) { + throw new NotFoundException("nodeId, " + nodeId + ", is not found"); + } + return new NodeInfo(ni, sched); + } + + @GET + @Path("/apps") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public AppsInfo getApps(@Context HttpServletRequest hsr, + @QueryParam("state") String stateQuery, + @QueryParam("user")
String userQuery, + @QueryParam("queue") String queueQuery, + @QueryParam("limit") String count, + @QueryParam("startedTimeBegin") String startedBegin, + @QueryParam("startedTimeEnd") String startedEnd, + @QueryParam("finishedTimeBegin") String finishBegin, + @QueryParam("finishedTimeEnd") String finishEnd) { + long num = 0; + boolean checkCount = false; + boolean checkStart = false; + boolean checkEnd = false; + long countNum = 0; + + // set values suitable in case both of begin/end not specified + long sBegin = 0; + long sEnd = Long.MAX_VALUE; + long fBegin = 0; + long fEnd = Long.MAX_VALUE; + + if (count != null && !count.isEmpty()) { + checkCount = true; + countNum = Long.parseLong(count); + if (countNum <= 0) { + throw new BadRequestException("limit value must be greater than 0"); + } + } + + if (startedBegin != null && !startedBegin.isEmpty()) { + checkStart = true; + sBegin = Long.parseLong(startedBegin); + if (sBegin < 0) { + throw new BadRequestException("startedTimeBegin must be greater than 0"); + } + } + if (startedEnd != null && !startedEnd.isEmpty()) { + checkStart = true; + sEnd = Long.parseLong(startedEnd); + if (sEnd < 0) { + throw new BadRequestException("startedTimeEnd must be greater than 0"); + } + } + if (sBegin > sEnd) { + throw new BadRequestException( + "startedTimeEnd must be greater than startedTimeBegin"); + } + + if (finishBegin != null && !finishBegin.isEmpty()) { + checkEnd = true; + fBegin = Long.parseLong(finishBegin); + if (fBegin < 0) { + throw new BadRequestException("finishTimeBegin must be greater than 0"); + } + } + if (finishEnd != null && !finishEnd.isEmpty()) { + checkEnd = true; + fEnd = Long.parseLong(finishEnd); + if (fEnd < 0) { + throw new BadRequestException("finishTimeEnd must be greater than 0"); + } + } + if (fBegin > fEnd) { + throw new BadRequestException( + "finishTimeEnd must be greater than finishTimeBegin"); + } + + final ConcurrentMap<ApplicationId, RMApp> apps = rm.getRMContext() + .getRMApps(); + AppsInfo allApps = new AppsInfo(); + for (RMApp rmapp : apps.values()) { + if (checkCount && num == countNum) { + break; + } + AppInfo app = new AppInfo(rmapp, hasAccess(rmapp, hsr)); + + if (stateQuery != null && !stateQuery.isEmpty()) { + RMAppState.valueOf(stateQuery); + if (!app.getState().equalsIgnoreCase(stateQuery)) { + continue; + } + } + if (userQuery != null && !userQuery.isEmpty()) { + if (!app.getUser().equals(userQuery)) { + continue; + } + } + if (queueQuery != null && !queueQuery.isEmpty()) { + ResourceScheduler rs = rm.getResourceScheduler(); + if (rs instanceof CapacityScheduler) { + CapacityScheduler cs = (CapacityScheduler) rs; + // validate queue exists + try { + cs.getQueueInfo(queueQuery, false, false); + } catch (IOException e) { + throw new BadRequestException(e.getMessage()); + } + } + if (!app.getQueue().equals(queueQuery)) { + continue; + } + } + + if (checkStart + && (app.getStartTime() < sBegin || app.getStartTime() > sEnd)) { + continue; + } + if (checkEnd + && (app.getFinishTime() < fBegin || app.getFinishTime() > fEnd)) { + continue; + } + + allApps.add(app); + num++; + } + return allApps; + } + + @GET + @Path("/apps/{appid}") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public AppInfo getApp(@Context HttpServletRequest hsr, + @PathParam("appid") String appId) { + if (appId == null || appId.isEmpty()) { + throw new NotFoundException("appId, " + appId + ", is empty or null"); + } + ApplicationId id; + id = ConverterUtils.toApplicationId(recordFactory, appId); + if (id == null) { + throw new
NotFoundException("appId is null"); + } + RMApp app = rm.getRMContext().getRMApps().get(id); + if (app == null) { + throw new NotFoundException("app with id: " + appId + " not found"); + } + return new AppInfo(app, hasAccess(app, hsr)); + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java index 49410e500a..bb3ff674ed 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java @@ -26,17 +26,16 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; -import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.util.Apps; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.Controller; import org.apache.hadoop.yarn.webapp.ResponseInfo; @@ -73,13 +72,14 @@ public void app() { } ApplicationId appID = Apps.toAppID(aid); RMContext context = getInstance(RMContext.class); - RMApp app = context.getRMApps().get(appID); - if (app == null) { + RMApp rmApp = context.getRMApps().get(appID); + if (rmApp == null) { // TODO: handle redirect to jobhistory server setStatus(HttpServletResponse.SC_NOT_FOUND); setTitle("Application not found: "+ aid); return; } + AppInfo app = new AppInfo(rmApp, true); // Check for the authorization. String remoteUser = request().getRemoteUser(); @@ -98,32 +98,22 @@ public void app() { } setTitle(join("Application ", aid)); - String trackingUrl = app.getTrackingUrl(); - boolean trackingUrlIsNotReady = trackingUrl == null - || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl); - String ui = trackingUrlIsNotReady ? "UNASSIGNED" : - (app.getFinishTime() == 0 ? "ApplicationMaster" : "History"); ResponseInfo info = info("Application Overview"). _("User:", app.getUser()). _("Name:", app.getName()). - _("State:", app.getState().toString()). - _("FinalStatus:", app.getFinalApplicationStatus().toString()). + _("State:", app.getState()). + _("FinalStatus:", app.getFinalStatus()). _("Started:", Times.format(app.getStartTime())). _("Elapsed:", StringUtils.formatTime( Times.elapsed(app.getStartTime(), app.getFinishTime()))). - _("Tracking URL:", trackingUrlIsNotReady ? - "#" : join("http://", trackingUrl), ui). 
- _("Diagnostics:", app.getDiagnostics()); - Container masterContainer = app.getCurrentAppAttempt() - .getMasterContainer(); - if (masterContainer != null) { - String url = join("http://", masterContainer.getNodeHttpAddress(), - "/node", "/containerlogs/", - ConverterUtils.toString(masterContainer.getId())); - info._("AM container logs:", url, url); + _("Tracking URL:", !app.isTrackingUrlReady() ? + "#" : app.getTrackingUrlPretty(), app.getTrackingUI()). + _("Diagnostics:", app.getNote()); + if (app.amContainerLogsExist()) { + info._("AM container logs:", app.getAMContainerLogs(), app.getAMContainerLogs()); } else { - info._("AM container logs:", "AM not yet registered with RM"); + info._("AM container logs:", ""); } render(AppPage.class); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java new file mode 100644 index 0000000000..b2600ae0ea --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java @@ -0,0 +1,213 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import static org.apache.hadoop.yarn.util.StringHelper.join; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.util.Times; + +@XmlRootElement(name = "app") +@XmlAccessorType(XmlAccessType.FIELD) +public class AppInfo { + + @XmlTransient + protected String appIdNum; + @XmlTransient + protected boolean trackingUrlIsNotReady; + @XmlTransient + protected String trackingUrlPretty; + @XmlTransient + protected boolean amContainerLogsExist = false; + @XmlTransient + protected ApplicationId applicationId; + + // these are ok for any user to see + protected String id; + protected String user; + protected String name; + protected String queue; + protected RMAppState state; + protected FinalApplicationStatus finalStatus; + protected float progress; + protected String trackingUI; + protected String trackingUrl; + protected String diagnostics; + protected long clusterId; + + // these are only allowed if acls allow + protected long startedTime; + protected long finishedTime; + protected long elapsedTime; + protected String amContainerLogs; + protected String amHostHttpAddress; + + public AppInfo() { + } // JAXB needs this + + public AppInfo(RMApp app, Boolean hasAccess, String host) { + this(app, hasAccess); + } + + public AppInfo(RMApp app, Boolean hasAccess) { + + if (app != null) { + String trackingUrl = app.getTrackingUrl(); + this.trackingUrlIsNotReady = trackingUrl == null || trackingUrl.isEmpty() + || "N/A".equalsIgnoreCase(trackingUrl); + this.trackingUI = this.trackingUrlIsNotReady ? "UNASSIGNED" : (app + .getFinishTime() == 0 ? "ApplicationMaster" : "History"); + if (!trackingUrlIsNotReady) { + this.trackingUrl = join("http://", trackingUrl); + } + this.trackingUrlPretty = trackingUrlIsNotReady ? 
"UNASSIGNED" : join( + "http://", trackingUrl); + this.applicationId = app.getApplicationId(); + this.appIdNum = String.valueOf(app.getApplicationId().getId()); + this.id = app.getApplicationId().toString(); + this.user = app.getUser().toString(); + this.name = app.getName().toString(); + this.queue = app.getQueue().toString(); + this.state = app.getState(); + this.progress = app.getProgress() * 100; + this.diagnostics = app.getDiagnostics().toString(); + if (diagnostics == null || diagnostics.isEmpty()) { + this.diagnostics = ""; + } + this.finalStatus = app.getFinalApplicationStatus(); + this.clusterId = ResourceManager.clusterTimeStamp; + + if (hasAccess) { + this.startedTime = app.getStartTime(); + this.finishedTime = app.getFinishTime(); + this.elapsedTime = Times.elapsed(app.getStartTime(), + app.getFinishTime()); + + RMAppAttempt attempt = app.getCurrentAppAttempt(); + if (attempt != null) { + Container masterContainer = attempt.getMasterContainer(); + if (masterContainer != null) { + this.amContainerLogsExist = true; + String url = join("http://", masterContainer.getNodeHttpAddress(), + "/node", "/containerlogs/", + ConverterUtils.toString(masterContainer.getId())); + this.amContainerLogs = url; + this.amHostHttpAddress = masterContainer.getNodeHttpAddress(); + } + } + } + } + } + + public boolean isTrackingUrlReady() { + return !this.trackingUrlIsNotReady; + } + + public ApplicationId getApplicationId() { + return this.applicationId; + } + + public String getAppId() { + return this.id; + } + + public String getAppIdNum() { + return this.appIdNum; + } + + public String getUser() { + return this.user; + } + + public String getQueue() { + return this.queue; + } + + public String getName() { + return this.name; + } + + public String getState() { + return this.state.toString(); + } + + public float getProgress() { + return this.progress; + } + + public String getTrackingUI() { + return this.trackingUI; + } + + public String getNote() { + return this.diagnostics; + } + + public String getFinalStatus() { + return this.finalStatus.toString(); + } + + public String getTrackingUrl() { + return this.trackingUrl; + } + + public String getTrackingUrlPretty() { + return this.trackingUrlPretty; + } + + public long getStartTime() { + return this.startedTime; + } + + public long getFinishTime() { + return this.finishedTime; + } + + public long getElapsedTime() { + return this.elapsedTime; + } + + public String getAMContainerLogs() { + return this.amContainerLogs; + } + + public String getAMHostHttpAddress() { + return this.amHostHttpAddress; + } + + public boolean amContainerLogsExist() { + return this.amContainerLogsExist; + } + + public long getClusterId() { + return this.clusterId; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppsInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppsInfo.java new file mode 100644 index 0000000000..84f68f13bf --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppsInfo.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "apps") +@XmlAccessorType(XmlAccessType.FIELD) +public class AppsInfo { + + protected ArrayList app = new ArrayList(); + + public AppsInfo() { + } // JAXB needs this + + public void add(AppInfo appinfo) { + app.add(appinfo); + } + + public ArrayList getApps() { + return app; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java new file mode 100644 index 0000000000..c66fa93887 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerInfo.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; +import javax.xml.bind.annotation.XmlType; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue; + +@XmlRootElement(name = "capacityScheduler") +@XmlType(name = "capacityScheduler") +@XmlAccessorType(XmlAccessType.FIELD) +public class CapacitySchedulerInfo extends SchedulerInfo { + + protected float capacity; + protected float usedCapacity; + protected float maxCapacity; + protected String queueName; + protected ArrayList queues; + + @XmlTransient + static final float EPSILON = 1e-8f; + + public CapacitySchedulerInfo() { + } // JAXB needs this + + public CapacitySchedulerInfo(CSQueue parent) { + this.queueName = parent.getQueueName(); + this.usedCapacity = parent.getUsedCapacity() * 100; + this.capacity = parent.getCapacity() * 100; + float max = parent.getMaximumCapacity(); + if (max < EPSILON || max > 1f) + max = 1f; + this.maxCapacity = max * 100; + + queues = getQueues(parent); + } + + public float getCapacity() { + return this.capacity; + } + + public float getUsedCapacity() { + return this.usedCapacity; + } + + public float getMaxCapacity() { + return this.maxCapacity; + } + + public String getQueueName() { + return this.queueName; + } + + public ArrayList getSubQueues() { + return this.queues; + } + + protected ArrayList getQueues(CSQueue parent) { + CSQueue parentQueue = parent; + ArrayList queuesInfo = new ArrayList(); + for (CSQueue queue : parentQueue.getChildQueues()) { + float usedCapacity = queue.getUsedCapacity() * 100; + float capacity = queue.getCapacity() * 100; + String queueName = queue.getQueueName(); + String queuePath = queue.getQueuePath(); + float max = queue.getMaximumCapacity(); + if (max < EPSILON || max > 1f) + max = 1f; + float maxCapacity = max * 100; + String state = queue.getState().toString(); + CapacitySchedulerQueueInfo info = new CapacitySchedulerQueueInfo( + capacity, usedCapacity, maxCapacity, queueName, state, queuePath); + + if (queue instanceof ParentQueue) { + info.isParent = true; + info.queue = queue; + info.subQueues = getQueues(queue); + } + queuesInfo.add(info); + } + return queuesInfo; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java new file mode 100644 index 0000000000..f346fb0c87 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
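CapacitySchedulerInfo above reports capacities as percentages and clamps an unset or out-of-range maximum capacity to 100%. A stand-alone sketch of that normalization; the CapacityMath helper is illustrative and not code from this patch.

public class CapacityMath {
  // Anything this close to zero is treated as "not configured".
  static final float EPSILON = 1e-8f;

  /** Convert a 0..1 capacity fraction to a percentage, clamping bad values to 100%. */
  static float toPercent(float fraction) {
    if (fraction < EPSILON || fraction > 1f) {
      fraction = 1f;
    }
    return fraction * 100;
  }

  public static void main(String[] args) {
    System.out.println(toPercent(0.25f)); // 25.0
    System.out.println(toPercent(-1f));   // 100.0 (unset maximum)
    System.out.println(toPercent(1.5f));  // 100.0 (out of range)
  }
}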
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; + +@XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) +public class CapacitySchedulerQueueInfo { + + @XmlTransient + protected String queuePath; + @XmlTransient + protected Boolean isParent = false; + + // bit odd to store this but makes html easier for now + @XmlTransient + protected CSQueue queue; + + protected float capacity; + protected float usedCapacity; + protected float maxCapacity; + protected String queueName; + protected String state; + protected ArrayList subQueues; + + CapacitySchedulerQueueInfo() { + }; + + CapacitySchedulerQueueInfo(float cap, float used, float max, String name, + String state, String path) { + this.capacity = cap; + this.usedCapacity = used; + this.maxCapacity = max; + this.queueName = name; + this.state = state; + this.queuePath = path; + } + + public Boolean isParent() { + return this.isParent; + } + + public CSQueue getQueue() { + return this.queue; + } + + public float getCapacity() { + return this.capacity; + } + + public float getUsedCapacity() { + return this.usedCapacity; + } + + public float getMaxCapacity() { + return this.maxCapacity; + } + + public String getQueueName() { + return this.queueName; + } + + public String getQueueState() { + return this.state; + } + + public String getQueuePath() { + return this.queuePath; + } + + public ArrayList getSubQueues() { + return this.subQueues; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java new file mode 100644 index 0000000000..b4511bc176 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.util.YarnVersionInfo; + +@XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) +public class ClusterInfo { + + protected long id; + protected long startedOn; + protected String state; + protected String resourceManagerVersion; + protected String resourceManagerBuildVersion; + protected String resourceManagerVersionBuiltOn; + protected String hadoopVersion; + protected String hadoopBuildVersion; + protected String hadoopVersionBuiltOn; + + public ClusterInfo() { + } // JAXB needs this + + public ClusterInfo(ResourceManager rm) { + long ts = ResourceManager.clusterTimeStamp; + + this.id = ts; + this.state = rm.getServiceState().toString(); + this.startedOn = ts; + this.resourceManagerVersion = YarnVersionInfo.getVersion(); + this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion(); + this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate(); + this.hadoopVersion = VersionInfo.getVersion(); + this.hadoopBuildVersion = VersionInfo.getBuildVersion(); + this.hadoopVersionBuiltOn = VersionInfo.getDate(); + } + + public String getState() { + return this.state; + } + + public String getRMVersion() { + return this.resourceManagerVersion; + } + + public String getRMBuildVersion() { + return this.resourceManagerBuildVersion; + } + + public String getRMVersionBuiltOn() { + return this.resourceManagerVersionBuiltOn; + } + + public String getHadoopVersion() { + return this.hadoopVersion; + } + + public String getHadoopBuildVersion() { + return this.hadoopBuildVersion; + } + + public String getHadoopVersionBuiltOn() { + return this.hadoopVersionBuiltOn; + } + + public long getClusterId() { + return this.id; + } + + public long getStartedOn() { + return this.startedOn; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java new file mode 100644 index 0000000000..fcf878346c --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
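The metrics dao classes below expose memory in MB (converting from the scheduler's GB-based queue metrics), while the web pages shown earlier multiply by BYTES_IN_MB before formatting with byteDesc. A minimal sketch of that unit chain with illustrative values.

public class MemoryUnitChain {
  static final long MB_IN_GB = 1024;            // GB -> MB, as in the dao classes
  static final long BYTES_IN_MB = 1024 * 1024;  // MB -> bytes, as in the web pages

  public static void main(String[] args) {
    long allocatedGB = 3;                             // what QueueMetrics reports
    long allocatedMB = allocatedGB * MB_IN_GB;        // what the REST layer exposes
    long allocatedBytes = allocatedMB * BYTES_IN_MB;  // what byteDesc() formats
    System.out.println(allocatedMB + " MB = " + allocatedBytes + " bytes");
  }
}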
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; + +@XmlRootElement(name = "clusterMetrics") +@XmlAccessorType(XmlAccessType.FIELD) +public class ClusterMetricsInfo { + + private static final long MB_IN_GB = 1024; + + protected int appsSubmitted; + protected long reservedMB; + protected long availableMB; + protected long allocatedMB; + protected int containersAllocated; + protected long totalMB; + protected int totalNodes; + protected int lostNodes; + protected int unhealthyNodes; + protected int decommissionedNodes; + protected int rebootedNodes; + + public ClusterMetricsInfo() { + } // JAXB needs this + + public ClusterMetricsInfo(final ResourceManager rm, final RMContext rmContext) { + ResourceScheduler rs = rm.getResourceScheduler(); + QueueMetrics metrics = rs.getRootQueueMetrics(); + ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics(); + + this.appsSubmitted = metrics.getAppsSubmitted(); + this.reservedMB = metrics.getReservedGB() * MB_IN_GB; + this.availableMB = metrics.getAvailableGB() * MB_IN_GB; + this.allocatedMB = metrics.getAllocatedGB() * MB_IN_GB; + this.containersAllocated = metrics.getAllocatedContainers(); + this.totalMB = availableMB + reservedMB + allocatedMB; + this.totalNodes = clusterMetrics.getNumNMs(); + this.lostNodes = clusterMetrics.getNumLostNMs(); + this.unhealthyNodes = clusterMetrics.getUnhealthyNMs(); + this.decommissionedNodes = clusterMetrics.getNumDecommisionedNMs(); + this.rebootedNodes = clusterMetrics.getNumRebootedNMs(); + + } + + public int getAppsSubmitted() { + return this.appsSubmitted; + } + + public long getReservedMB() { + return this.reservedMB; + } + + public long getAvailableMB() { + return this.availableMB; + } + + public long getAllocatedMB() { + return this.allocatedMB; + } + + public int getContainersAllocated() { + return this.containersAllocated; + } + + public long getTotalMB() { + return this.totalMB; + } + + public int getTotalNodes() { + return this.totalNodes; + } + + public int getLostNodes() { + return this.lostNodes; + } + + public int getRebootedNodes() { + return this.rebootedNodes; + } + + public int getUnhealthyNodes() { + return this.unhealthyNodes; + } + + public int getDecommissionedNodes() { + return this.decommissionedNodes; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java new file mode 100644 index 0000000000..bd940d1c3c --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; +import javax.xml.bind.annotation.XmlType; + +import org.apache.hadoop.yarn.api.records.QueueInfo; +import org.apache.hadoop.yarn.api.records.QueueState; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; + +@XmlRootElement(name = "fifoScheduler") +@XmlType(name = "fifoScheduler") +@XmlAccessorType(XmlAccessType.FIELD) +public class FifoSchedulerInfo extends SchedulerInfo { + + protected float capacity; + protected float usedCapacity; + protected QueueState qstate; + protected int minQueueMemoryCapacity; + protected int maxQueueMemoryCapacity; + protected int numNodes; + protected int usedNodeCapacity; + protected int availNodeCapacity; + protected int totalNodeCapacity; + protected int numContainers; + + @XmlTransient + protected String qstateFormatted; + + @XmlTransient + protected String qName; + + public FifoSchedulerInfo() { + } // JAXB needs this + + public FifoSchedulerInfo(final ResourceManager rm) { + + RMContext rmContext = rm.getRMContext(); + + FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler(); + qName = fs.getQueueInfo("", false, false).getQueueName(); + QueueInfo qInfo = fs.getQueueInfo(qName, true, true); + + this.usedCapacity = qInfo.getCurrentCapacity(); + this.capacity = qInfo.getCapacity(); + this.minQueueMemoryCapacity = fs.getMinimumResourceCapability().getMemory(); + this.maxQueueMemoryCapacity = fs.getMaximumResourceCapability().getMemory(); + this.qstate = qInfo.getQueueState(); + + this.numNodes = rmContext.getRMNodes().size(); + this.usedNodeCapacity = 0; + this.availNodeCapacity = 0; + this.totalNodeCapacity = 0; + this.numContainers = 0; + + for (RMNode ni : rmContext.getRMNodes().values()) { + SchedulerNodeReport 
report = fs.getNodeReport(ni.getNodeID()); + this.usedNodeCapacity += report.getUsedResource().getMemory(); + this.availNodeCapacity += report.getAvailableResource().getMemory(); + this.totalNodeCapacity += ni.getTotalCapability().getMemory(); + this.numContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers(); + } + } + + public int getNumNodes() { + return this.numNodes; + } + + public int getUsedNodeCapacity() { + return this.usedNodeCapacity; + } + + public int getAvailNodeCapacity() { + return this.availNodeCapacity; + } + + public int getTotalNodeCapacity() { + return this.totalNodeCapacity; + } + + public int getNumContainers() { + return this.numContainers; + } + + public String getState() { + return this.qstate.toString(); + } + + public String getQueueName() { + return this.qName; + } + + public int getMinQueueMemoryCapacity() { + return this.minQueueMemoryCapacity; + } + + public int getMaxQueueMemoryCapacity() { + return this.maxQueueMemoryCapacity; + } + + public float getCapacity() { + return this.capacity; + } + + public float getUsedCapacity() { + return this.usedCapacity; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java new file mode 100644 index 0000000000..bafecbb338 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
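NodeInfo below carries the health flag that the /nodes resource filters on, and that filter accepts only the literal strings true or false. A small stand-alone sketch of the same strict parse; the StrictBoolean helper is illustrative, not code from this patch.

public class StrictBoolean {
  /** Accept only "true" or "false" (case-insensitive); reject everything else. */
  static boolean parseStrict(String value) {
    if (value.equalsIgnoreCase("true")) {
      return true;
    }
    if (value.equalsIgnoreCase("false")) {
      return false;
    }
    throw new IllegalArgumentException(
        "Error: You must specify either true or false to query on health");
  }

  public static void main(String[] args) {
    System.out.println(parseStrict("TRUE"));  // true
    System.out.println(parseStrict("false")); // false
    // parseStrict("yes") would throw IllegalArgumentException
  }
}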
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.yarn.api.records.NodeHealthStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; + +@XmlRootElement(name = "node") +@XmlAccessorType(XmlAccessType.FIELD) +public class NodeInfo { + + protected String rack; + protected RMNodeState state; + protected String id; + protected String nodeHostName; + protected String nodeHTTPAddress; + protected String healthStatus; + protected long lastHealthUpdate; + protected String healthReport; + protected int numContainers; + protected long usedMemoryMB; + protected long availMemoryMB; + + @XmlTransient + protected boolean healthy; + + public NodeInfo() { + } // JAXB needs this + + public NodeInfo(RMNode ni, ResourceScheduler sched) { + NodeId id = ni.getNodeID(); + SchedulerNodeReport report = sched.getNodeReport(id); + NodeHealthStatus health = ni.getNodeHealthStatus(); + this.numContainers = 0; + this.usedMemoryMB = 0; + this.availMemoryMB = 0; + if (report != null) { + this.numContainers = report.getNumContainers(); + this.usedMemoryMB = report.getUsedResource().getMemory(); + this.availMemoryMB = report.getAvailableResource().getMemory(); + } + this.id = id.toString(); + this.rack = ni.getRackName(); + this.nodeHostName = ni.getHostName(); + this.state = ni.getState(); + this.nodeHTTPAddress = ni.getHttpAddress(); + this.healthy = health.getIsNodeHealthy(); + this.healthStatus = health.getIsNodeHealthy() ? 
"Healthy" : "Unhealthy"; + this.lastHealthUpdate = health.getLastHealthReportTime(); + this.healthReport = String.valueOf(health.getHealthReport()); + } + + public boolean isHealthy() { + return this.healthy; + } + + public String getRack() { + return this.rack; + } + + public String getState() { + return String.valueOf(this.state); + } + + public String getNodeId() { + return this.id; + } + + public String getNodeHTTPAddress() { + return this.nodeHTTPAddress; + } + + public String getHealthStatus() { + return this.healthStatus; + } + + public long getLastHealthUpdate() { + return this.lastHealthUpdate; + } + + public String getHealthReport() { + return this.healthReport; + } + + public int getNumContainers() { + return this.numContainers; + } + + public long getUsedMemory() { + return this.usedMemoryMB; + } + + public long getAvailableMemory() { + return this.availMemoryMB; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java new file mode 100644 index 0000000000..7be9a6f743 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.ArrayList; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "nodes") +@XmlAccessorType(XmlAccessType.FIELD) +public class NodesInfo { + + protected ArrayList node = new ArrayList(); + + public NodesInfo() { + } // JAXB needs this + + public void add(NodeInfo nodeinfo) { + node.add(nodeinfo); + } + +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/AvroTestProtocol.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java similarity index 71% rename from hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/AvroTestProtocol.java rename to hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java index d5d73962e3..1e042a37d8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/AvroTestProtocol.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java @@ -16,17 +16,16 @@ * limitations under the License. */ -package org.apache.hadoop.ipc; +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; -import org.apache.avro.AvroRemoteException; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlSeeAlso; + +@XmlRootElement +@XmlSeeAlso({ CapacitySchedulerInfo.class, FifoSchedulerInfo.class }) +public class SchedulerInfo { + + public SchedulerInfo() { + } // JAXB needs this -@SuppressWarnings("serial") -public interface AvroTestProtocol { - public static class Problem extends AvroRemoteException { - public Problem() {} - } - void ping(); - String echo(String value); - int add(int v1, int v2); - int error() throws Problem; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java new file mode 100644 index 0000000000..34078f120d --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "scheduler") +@XmlAccessorType(XmlAccessType.FIELD) +public class SchedulerTypeInfo { + protected SchedulerInfo schedulerInfo; + + public SchedulerTypeInfo() { + } // JAXB needs this + + public SchedulerTypeInfo(final SchedulerInfo scheduler) { + this.schedulerInfo = scheduler; + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java new file mode 100644 index 0000000000..27e6a646d6 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; + +@XmlRootElement(name = "userMetrics") +@XmlAccessorType(XmlAccessType.FIELD) +public class UserMetricsInfo { + + private static final long MB_IN_GB = 1024; + + protected int appsSubmitted; + protected int runningContainers; + protected int pendingContainers; + protected int reservedContainers; + protected long reservedMB; + protected long pendingMB; + protected long allocatedMB; + + @XmlTransient + protected boolean userMetricsAvailable; + + public UserMetricsInfo() { + } // JAXB needs this + + public UserMetricsInfo(final ResourceManager rm, final RMContext rmContext, + final String user) { + ResourceScheduler rs = rm.getResourceScheduler(); + QueueMetrics metrics = rs.getRootQueueMetrics(); + QueueMetrics userMetrics = metrics.getUserMetrics(user); + this.userMetricsAvailable = false; + + if (userMetrics != null) { + this.userMetricsAvailable = true; + this.appsSubmitted = userMetrics.getAppsSubmitted(); + this.runningContainers = userMetrics.getAllocatedContainers(); + this.pendingContainers = userMetrics.getPendingContainers(); + this.reservedContainers = userMetrics.getReservedContainers(); + this.reservedMB = userMetrics.getReservedGB() * MB_IN_GB; + this.pendingMB = userMetrics.getPendingGB() * MB_IN_GB; + this.allocatedMB = userMetrics.getAllocatedGB() * MB_IN_GB; + } + } + + public boolean metricsAvailable() { + return userMetricsAvailable; + } + + public int getAppsSubmitted() { + return this.appsSubmitted; + } + + public long getReservedMB() { + return this.reservedMB; + } + + public long getAllocatedMB() { + return this.allocatedMB; + } + + public long getPendingMB() { + return this.pendingMB; + } + + public int getReservedContainers() { + return this.reservedContainers; + } + + public int getRunningContainers() { + return this.runningContainers; + } + + public int getPendingContainers() { + return this.pendingContainers; + } +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 4a840c36a1..e80e629aa5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -30,7 +30,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.event.Dispatcher; import 
org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory; @@ -40,12 +42,16 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; - public class MockRM extends ResourceManager { public MockRM() { @@ -59,48 +65,50 @@ public MockRM(Configuration conf) { rootLogger.setLevel(Level.DEBUG); } - public void waitForState(ApplicationId appId, RMAppState finalState) + public void waitForState(ApplicationId appId, RMAppState finalState) throws Exception { RMApp app = getRMContext().getRMApps().get(appId); Assert.assertNotNull("app shouldn't be null", app); int timeoutSecs = 0; - while (!finalState.equals(app.getState()) && - timeoutSecs++ < 20) { - System.out.println("App State is : " + app.getState() + - " Waiting for state : " + finalState); + while (!finalState.equals(app.getState()) && timeoutSecs++ < 20) { + System.out.println("App State is : " + app.getState() + + " Waiting for state : " + finalState); Thread.sleep(500); } System.out.println("App State is : " + app.getState()); - Assert.assertEquals("App state is not correct (timedout)", - finalState, app.getState()); - } - - // get new application id - public GetNewApplicationResponse getNewAppId() throws Exception { - ClientRMProtocol client = getClientRMService(); - return client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class)); + Assert.assertEquals("App state is not correct (timedout)", finalState, + app.getState()); } - //client + // get new application id + public GetNewApplicationResponse getNewAppId() throws Exception { + ClientRMProtocol client = getClientRMService(); + return client.getNewApplication(Records + .newRecord(GetNewApplicationRequest.class)); + } + + // client public RMApp submitApp(int masterMemory) throws Exception { ClientRMProtocol client = getClientRMService(); - GetNewApplicationResponse resp = client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class)); + GetNewApplicationResponse resp = client.getNewApplication(Records + .newRecord(GetNewApplicationRequest.class)); ApplicationId appId = resp.getApplicationId(); - - SubmitApplicationRequest req = Records.newRecord(SubmitApplicationRequest.class); - ApplicationSubmissionContext sub = - Records.newRecord(ApplicationSubmissionContext.class); + + SubmitApplicationRequest req = Records + .newRecord(SubmitApplicationRequest.class); + ApplicationSubmissionContext sub = Records + .newRecord(ApplicationSubmissionContext.class); sub.setApplicationId(appId); sub.setApplicationName(""); sub.setUser(""); - ContainerLaunchContext clc = - Records.newRecord(ContainerLaunchContext.class); - Resource capability = 
Records.newRecord(Resource.class); + ContainerLaunchContext clc = Records + .newRecord(ContainerLaunchContext.class); + Resource capability = Records.newRecord(Resource.class); capability.setMemory(masterMemory); clc.setResource(capability); sub.setAMContainerSpec(clc); req.setApplicationSubmissionContext(sub); - + client.submitApplication(req); // make sure app is immediately available after submit waitForState(appId, RMAppState.ACCEPTED); @@ -113,28 +121,54 @@ public MockNM registerNode(String nodeIdStr, int memory) throws Exception { return nm; } + public void sendNodeStarted(MockNM nm) throws Exception { + RMNodeImpl node = (RMNodeImpl) getRMContext().getRMNodes().get( + nm.getNodeId()); + node.handle(new RMNodeEvent(nm.getNodeId(), RMNodeEventType.STARTED)); + } + + public void NMwaitForState(NodeId nodeid, RMNodeState finalState) + throws Exception { + RMNode node = getRMContext().getRMNodes().get(nodeid); + Assert.assertNotNull("node shouldn't be null", node); + int timeoutSecs = 0; + while (!finalState.equals(node.getState()) && timeoutSecs++ < 20) { + System.out.println("Node State is : " + node.getState() + + " Waiting for state : " + finalState); + Thread.sleep(500); + } + System.out.println("Node State is : " + node.getState()); + Assert.assertEquals("Node state is not correct (timedout)", finalState, + node.getState()); + } + public void killApp(ApplicationId appId) throws Exception { ClientRMProtocol client = getClientRMService(); - KillApplicationRequest req = Records.newRecord(KillApplicationRequest.class); + KillApplicationRequest req = Records + .newRecord(KillApplicationRequest.class); req.setApplicationId(appId); client.forceKillApplication(req); } - //from AMLauncher - public MockAM sendAMLaunched(ApplicationAttemptId appAttemptId) throws Exception { + // from AMLauncher + public MockAM sendAMLaunched(ApplicationAttemptId appAttemptId) + throws Exception { MockAM am = new MockAM(getRMContext(), masterService, appAttemptId); am.waitForState(RMAppAttemptState.ALLOCATED); - getRMContext().getDispatcher().getEventHandler().handle( - new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.LAUNCHED)); + getRMContext() + .getDispatcher() + .getEventHandler() + .handle( + new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.LAUNCHED)); return am; } - - public void sendAMLaunchFailed(ApplicationAttemptId appAttemptId) throws Exception { + public void sendAMLaunchFailed(ApplicationAttemptId appAttemptId) + throws Exception { MockAM am = new MockAM(getRMContext(), masterService, appAttemptId); am.waitForState(RMAppAttemptState.ALLOCATED); - getRMContext().getDispatcher().getEventHandler().handle( - new RMAppAttemptLaunchFailedEvent(appAttemptId, "Failed")); + getRMContext().getDispatcher().getEventHandler() + .handle(new RMAppAttemptLaunchFailedEvent(appAttemptId, "Failed")); } @Override @@ -143,8 +177,9 @@ protected ClientRMService createClientRMService() { rmAppManager, applicationACLsManager) { @Override public void start() { - //override to not start rpc handler + // override to not start rpc handler } + @Override public void stop() { // don't do anything @@ -155,11 +190,12 @@ public void stop() { @Override protected ResourceTrackerService createResourceTrackerService() { return new ResourceTrackerService(getRMContext(), nodesListManager, - this.nmLivelinessMonitor, this.containerTokenSecretManager){ + this.nmLivelinessMonitor, this.containerTokenSecretManager) { @Override public void start() { - //override to not start rpc handler + // override to not start rpc 
handler } + @Override public void stop() { // don't do anything @@ -173,8 +209,9 @@ protected ApplicationMasterService createApplicationMasterService() { this.appTokenSecretManager, scheduler) { @Override public void start() { - //override to not start rpc handler + // override to not start rpc handler } + @Override public void stop() { // don't do anything @@ -184,17 +221,18 @@ public void stop() { @Override protected ApplicationMasterLauncher createAMLauncher() { - return new ApplicationMasterLauncher( - this.appTokenSecretManager, this.clientToAMSecretManager, - getRMContext()) { + return new ApplicationMasterLauncher(this.appTokenSecretManager, + this.clientToAMSecretManager, getRMContext()) { @Override public void start() { - //override to not start rpc handler + // override to not start rpc handler } + @Override - public void handle(AMLauncherEvent appEvent) { - //don't do anything + public void handle(AMLauncherEvent appEvent) { + // don't do anything } + @Override public void stop() { // don't do anything @@ -203,31 +241,31 @@ public void stop() { } @Override - protected AdminService createAdminService( - ClientRMService clientRMService, + protected AdminService createAdminService(ClientRMService clientRMService, ApplicationMasterService applicationMasterService, ResourceTrackerService resourceTrackerService) { - return new AdminService( - getConfig(), scheduler, getRMContext(), this.nodesListManager, - clientRMService, applicationMasterService, resourceTrackerService){ + return new AdminService(getConfig(), scheduler, getRMContext(), + this.nodesListManager, clientRMService, applicationMasterService, + resourceTrackerService) { @Override public void start() { - //override to not start rpc handler + // override to not start rpc handler } + @Override public void stop() { // don't do anything } }; } - + public NodesListManager getNodesListManager() { return this.nodesListManager; } @Override protected void startWepApp() { - //override to disable webapp + // override to disable webapp } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java new file mode 100644 index 0000000000..6a717a4daf --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +import java.util.Collections; +import java.util.List; + +import junit.framework.Assert; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NodeHealthStatus; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.server.api.records.HeartbeatResponse; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore; +import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; +import org.apache.hadoop.yarn.util.BuilderUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class TestRMNodeTransitions { + + RMNodeImpl node; + + private RMContext rmContext; + private YarnScheduler scheduler; + + private SchedulerEventType eventType; + private List<ContainerStatus> completedContainers; + + private final class TestSchedulerEventDispatcher implements + EventHandler<SchedulerEvent> { + @Override + public void handle(SchedulerEvent event) { + scheduler.handle(event); + } + } + + @Before + public void setUp() throws Exception { + InlineDispatcher rmDispatcher = new InlineDispatcher(); + + rmContext = + new RMContextImpl(new MemStore(), rmDispatcher, null, null, null); + scheduler = mock(YarnScheduler.class); + doAnswer( + new Answer<Void>() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + final SchedulerEvent event = (SchedulerEvent)(invocation.getArguments()[0]); + eventType = event.getType(); + if (eventType == SchedulerEventType.NODE_UPDATE) { + completedContainers = + ((NodeUpdateSchedulerEvent)event).getCompletedContainers(); + } else { + completedContainers = null; + } + return null; + } + } + ).when(scheduler).handle(any(SchedulerEvent.class)); + + rmDispatcher.register(SchedulerEventType.class, + new TestSchedulerEventDispatcher()); + + + node = new RMNodeImpl(null, rmContext, null, 0, 0, null, null); + + } + + @After + public void tearDown() throws Exception { + } + + private RMNodeStatusEvent getMockRMNodeStatusEvent() { + HeartbeatResponse response = mock(HeartbeatResponse.class); + + NodeHealthStatus healthStatus = mock(NodeHealthStatus.class); + Boolean yes = new Boolean(true); + doReturn(yes).when(healthStatus).getIsNodeHealthy(); + + RMNodeStatusEvent event = mock(RMNodeStatusEvent.class); +
doReturn(healthStatus).when(event).getNodeHealthStatus(); + doReturn(response).when(event).getLatestResponse(); + doReturn(RMNodeEventType.STATUS_UPDATE).when(event).getType(); + return event; + } + + @Test + public void testExpiredContainer() { + // Start the node + node.handle(new RMNodeEvent(null, RMNodeEventType.STARTED)); + verify(scheduler).handle(any(NodeAddedSchedulerEvent.class)); + + // Expire a container + ContainerId completedContainerId = BuilderUtils.newContainerId( + BuilderUtils.newApplicationAttemptId( + BuilderUtils.newApplicationId(0, 0), 0), 0); + node.handle(new RMNodeCleanContainerEvent(null, completedContainerId)); + Assert.assertEquals(1, node.getContainersToCleanUp().size()); + + // Now verify that scheduler isn't notified of an expired container + // by checking number of 'completedContainers' it got in the previous event + RMNodeStatusEvent statusEvent = getMockRMNodeStatusEvent(); + ContainerStatus containerStatus = mock(ContainerStatus.class); + doReturn(completedContainerId).when(containerStatus).getContainerId(); + doReturn(Collections.singletonList(containerStatus)). + when(statusEvent).getContainers(); + node.handle(statusEvent); + Assert.assertEquals(0, completedContainers.size()); + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java index c4ef938f75..659bf55a5e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java @@ -284,6 +284,11 @@ public float getProgress() { public FinalApplicationStatus getFinalApplicationStatus() { return FinalApplicationStatus.UNDEFINED; } + + @Override + public RMAppAttempt getCurrentAppAttempt() { + return null; + } }; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 9448fe0ed4..4184465451 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -20,12 +20,12 @@ import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.util.Collections; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -44,6 +44,7 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import 
org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.BuilderUtils; import org.junit.After; import org.junit.Before; @@ -328,7 +329,7 @@ public void testDTRenewal () throws Exception { ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1); delegationTokenRenewer.addApplication(applicationId_1, ts, true); - delegationTokenRenewer.removeApplication(applicationId_1); + delegationTokenRenewer.applicationFinished(applicationId_1); numberOfExpectedRenewals = Renewer.counter; // number of renewals so far try { @@ -343,7 +344,7 @@ public void testDTRenewal () throws Exception { // also renewing of the cancelled token should fail try { token4.renew(conf); - assertTrue("Renewal of canceled token didn't fail", false); + fail("Renewal of cancelled token should have failed"); } catch (InvalidToken ite) { //expected } @@ -377,7 +378,7 @@ public void testDTRenewalWithNoCancel () throws Exception { ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1); delegationTokenRenewer.addApplication(applicationId_1, ts, false); - delegationTokenRenewer.removeApplication(applicationId_1); + delegationTokenRenewer.applicationFinished(applicationId_1); int numberOfExpectedRenewals = Renewer.counter; // number of renewals so far try { @@ -393,4 +394,123 @@ public void testDTRenewalWithNoCancel () throws Exception { // been canceled token1.renew(conf); } + + /** + * Basic idea of the test: + * 0. Setup token KEEP_ALIVE + * 1. create tokens. + * 2. register them for renewal - to be cancelled on app complete + * 3. Complete app. + * 4. Verify token is alive within the KEEP_ALIVE time + * 5. Verify token has been cancelled after the KEEP_ALIVE_TIME + * @throws IOException + * @throws URISyntaxException + */ + @Test + public void testDTKeepAlive1 () throws Exception { + DelegationTokenRenewer localDtr = new DelegationTokenRenewer(); + Configuration lconf = new Configuration(conf); + lconf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true); + //Keep tokens alive for 6 seconds. + lconf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 6000l); + //Try removing tokens every second. + lconf.setLong( + YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS, + 1000l); + localDtr.init(lconf); + localDtr.start(); + + MyFS dfs = (MyFS)FileSystem.get(lconf); + LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+lconf.hashCode()); + + Credentials ts = new Credentials(); + // get the delegation tokens + MyToken token1 = dfs.getDelegationToken(new Text("user1")); + + String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0"; + ts.addToken(new Text(nn1), token1); + + // register the tokens for renewal + ApplicationId applicationId_0 = BuilderUtils.newApplicationId(0, 0); + localDtr.addApplication(applicationId_0, ts, true); + localDtr.applicationFinished(applicationId_0); + + Thread.sleep(3000l); + + //Token should still be around. Renewal should not fail. + token1.renew(lconf); + + //Allow the keepalive time to run out + Thread.sleep(6000l); + + //The token should have been cancelled at this point. Renewal will fail. + try { + token1.renew(lconf); + fail("Renewal of cancelled token should have failed"); + } catch (InvalidToken ite) {} + } + + /** + * Basic idea of the test: + * 0. Setup token KEEP_ALIVE + * 1. create tokens. + * 2. register them for renewal - to be cancelled on app complete + * 3. Complete app. + * 4. 
Verify token is alive within the KEEP_ALIVE time + * 5. Send an explicity KEEP_ALIVE_REQUEST + * 6. Verify token KEEP_ALIVE time is renewed. + * 7. Verify token has been cancelled after the renewed KEEP_ALIVE_TIME. + * @throws IOException + * @throws URISyntaxException + */ + @Test + public void testDTKeepAlive2() throws Exception { + DelegationTokenRenewer localDtr = new DelegationTokenRenewer(); + Configuration lconf = new Configuration(conf); + lconf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true); + //Keep tokens alive for 6 seconds. + lconf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 6000l); + //Try removing tokens every second. + lconf.setLong( + YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS, + 1000l); + localDtr.init(lconf); + localDtr.start(); + + MyFS dfs = (MyFS)FileSystem.get(lconf); + LOG.info("dfs="+(Object)dfs.hashCode() + ";conf="+lconf.hashCode()); + + Credentials ts = new Credentials(); + // get the delegation tokens + MyToken token1 = dfs.getDelegationToken(new Text("user1")); + + String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0"; + ts.addToken(new Text(nn1), token1); + + // register the tokens for renewal + ApplicationId applicationId_0 = BuilderUtils.newApplicationId(0, 0); + localDtr.addApplication(applicationId_0, ts, true); + localDtr.applicationFinished(applicationId_0); + + Thread.sleep(4000l); + + //Send another keep alive. + localDtr.updateKeepAliveApplications(Collections + .singletonList(applicationId_0)); + //Renewal should not fail. + token1.renew(lconf); + + //Token should be around after this. + Thread.sleep(4500l); + //Renewal should not fail. - ~1.5 seconds for keepalive timeout. + token1.renew(lconf); + + //Allow the keepalive time to run out + Thread.sleep(3000l); + //The token should have been cancelled at this point. Renewal will fail. + try { + token1.renew(lconf); + fail("Renewal of cancelled token should have failed"); + } catch (InvalidToken ite) {} + } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java new file mode 100644 index 0000000000..b19e7b54e5 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java @@ -0,0 +1,1101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.util.ArrayList; + +import javax.ws.rs.core.MediaType; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeHealthStatus; +import org.apache.hadoop.yarn.api.records.QueueState; +import org.apache.hadoop.yarn.server.resourcemanager.MockAM; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; +import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; +import org.codehaus.jettison.json.JSONArray; +import org.codehaus.jettison.json.JSONException; +import org.codehaus.jettison.json.JSONObject; +import org.junit.Before; +import org.junit.Test; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.servlet.GuiceServletContextListener; +import com.google.inject.servlet.ServletModule; +import com.sun.jersey.api.client.ClientResponse; +import com.sun.jersey.api.client.ClientResponse.Status; +import com.sun.jersey.api.client.UniformInterfaceException; +import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; +import com.sun.jersey.test.framework.JerseyTest; +import com.sun.jersey.test.framework.WebAppDescriptor; + +public class TestRMWebServices extends JerseyTest { + + private static MockRM rm; + + private Injector injector = Guice.createInjector(new ServletModule() { + @Override + protected void configureServlets() { + bind(JAXBContextResolver.class); + bind(RMWebServices.class); + bind(GenericExceptionHandler.class); + rm = new MockRM(new Configuration()); + bind(ResourceManager.class).toInstance(rm); + bind(RMContext.class).toInstance(rm.getRMContext()); + bind(ApplicationACLsManager.class).toInstance( + rm.getApplicationACLsManager()); + serve("/*").with(GuiceContainer.class); + } + }); + + public class GuiceServletConfig extends GuiceServletContextListener { + + @Override + protected Injector getInjector() { + return injector; + } + } + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + } + + public TestRMWebServices() { + super(new WebAppDescriptor.Builder( + "org.apache.hadoop.yarn.server.resourcemanager.webapp") + .contextListenerClass(GuiceServletConfig.class) + .filterClass(com.google.inject.servlet.GuiceFilter.class) + .contextPath("jersey-guice-filter").servletPath("/").build()); + } + + @Test + public void testCluster() throws JSONException, Exception { + WebResource r = resource(); + JSONObject json = 
r.path("ws").path("v1").path("cluster") + .accept(MediaType.APPLICATION_JSON).get(JSONObject.class); + verifyClusterInfo(json); + } + + @Test + public void testClusterSlash() throws JSONException, Exception { + WebResource r = resource(); + // test with trailing "/" to make sure acts same as without slash + JSONObject json = r.path("ws").path("v1").path("cluster/") + .accept(MediaType.APPLICATION_JSON).get(JSONObject.class); + verifyClusterInfo(json); + } + + @Test + public void testInfo() throws JSONException, Exception { + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("info") + .accept(MediaType.APPLICATION_JSON).get(JSONObject.class); + verifyClusterInfo(json); + } + + @Test + public void testInfoSlash() throws JSONException, Exception { + WebResource r = resource(); + // test with trailing "/" to make sure acts same as without slash + JSONObject json = r.path("ws").path("v1").path("cluster").path("info/") + .accept(MediaType.APPLICATION_JSON).get(JSONObject.class); + verifyClusterInfo(json); + } + + public void verifyClusterInfo(JSONObject json) throws JSONException, + Exception { + assertEquals("correct number of elements", 1, json.length()); + JSONObject clusterinfo = json.getJSONObject("clusterInfo"); + assertEquals("correct number of elements", 9, clusterinfo.length()); + String clusterid = clusterinfo.get("id").toString(); + assertTrue("clusterId doesn't match: " + clusterid, clusterid.toString() + .matches("^\\d+")); + String startedon = clusterinfo.get("startedOn").toString(); + assertTrue("startedOn doesn't match: " + startedon, + startedon.matches("^\\d+")); + String state = clusterinfo.get("state").toString(); + assertTrue("stated doesn't match: " + state, state.matches("INITED")); + String rmVersion = clusterinfo.get("resourceManagerVersion").toString(); + assertTrue("rm version doesn't match: " + rmVersion, + rmVersion.matches(".*")); + String rmBuildVersion = clusterinfo.get("resourceManagerBuildVersion") + .toString(); + assertTrue("rm Build version doesn't match: " + rmBuildVersion, + rmBuildVersion.matches(".*")); + String rmVersionBuiltOn = clusterinfo.get("resourceManagerVersionBuiltOn") + .toString(); + assertTrue( + "rm version built on doesn't match: " + rmVersionBuiltOn, + rmVersionBuiltOn + .matches("^\\w+\\s+\\w+\\s+\\d+\\s+\\d\\d:\\d\\d:\\d\\d\\s+\\w+\\s+\\d\\d\\d\\d")); + String hadoopVersion = clusterinfo.get("hadoopVersion").toString(); + assertTrue("hadoop version doesn't match: " + hadoopVersion, + hadoopVersion.matches(".*")); + String hadoopBuildVersion = clusterinfo.get("hadoopBuildVersion") + .toString(); + assertTrue("hadoop Build version doesn't match: " + hadoopBuildVersion, + hadoopBuildVersion.matches(".*")); + String hadoopVersionBuiltOn = clusterinfo.get("hadoopVersionBuiltOn") + .toString(); + assertTrue( + "hadoop version built on doesn't match: " + hadoopVersionBuiltOn, + hadoopVersionBuiltOn + .matches("^\\w+\\s+\\w+\\s+\\d+\\s+\\d\\d:\\d\\d:\\d\\d\\s+\\w+\\s+\\d\\d\\d\\d")); + } + + @Test + public void testClusterMetrics() throws JSONException, Exception { + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("metrics") + .accept(MediaType.APPLICATION_JSON).get(JSONObject.class); + verifyClusterMetrics(json); + } + + @Test + public void testClusterMetricsSlash() throws JSONException, Exception { + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("metrics/") + 
.accept(MediaType.APPLICATION_JSON).get(JSONObject.class); + verifyClusterMetrics(json); + } + + public void verifyClusterMetrics(JSONObject json) throws JSONException, + Exception { + assertEquals("correct number of elements", 1, json.length()); + JSONObject clusterinfo = json.getJSONObject("clusterMetrics"); + assertEquals("correct number of elements", 11, clusterinfo.length()); + assertEquals("appsSubmitted doesn't match", 0, + clusterinfo.getInt("appsSubmitted")); + assertEquals("reservedMB doesn't match", 0, + clusterinfo.getInt("reservedMB")); + assertEquals("availableMB doesn't match", 0, + clusterinfo.getInt("availableMB")); + assertEquals("allocatedMB doesn't match", 0, + clusterinfo.getInt("allocatedMB")); + assertEquals("containersAllocated doesn't match", 0, + clusterinfo.getInt("containersAllocated")); + assertEquals("totalMB doesn't match", 0, clusterinfo.getInt("totalMB")); + assertEquals("totalNodes doesn't match", 0, + clusterinfo.getInt("totalNodes")); + assertEquals("lostNodes doesn't match", 0, clusterinfo.getInt("lostNodes")); + assertEquals("unhealthyNodes doesn't match", 0, + clusterinfo.getInt("unhealthyNodes")); + assertEquals("decommissionedNodes doesn't match", 0, + clusterinfo.getInt("decommissionedNodes")); + assertEquals("rebootedNodes doesn't match", 0, + clusterinfo.getInt("rebootedNodes")); + } + + @Test + public void testClusterSchedulerFifo() throws JSONException, Exception { + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("scheduler") + .accept(MediaType.APPLICATION_JSON).get(JSONObject.class); + verifyClusterSchedulerFifo(json); + } + + @Test + public void testClusterSchedulerFifoSlash() throws JSONException, Exception { + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster") + .path("scheduler/").accept(MediaType.APPLICATION_JSON) + .get(JSONObject.class); + verifyClusterSchedulerFifo(json); + } + + public void verifyClusterSchedulerFifo(JSONObject json) throws JSONException, + Exception { + assertEquals("correct number of elements", 1, json.length()); + JSONObject info = json.getJSONObject("scheduler"); + assertEquals("correct number of elements", 1, info.length()); + info = info.getJSONObject("schedulerInfo"); + assertEquals("correct number of elements", 11, info.length()); + assertEquals("type doesn't match", "fifoScheduler", info.getString("type")); + assertEquals("qstate doesn't match", QueueState.RUNNING.toString(), + info.getString("qstate")); + assertEquals("capacity doesn't match", 1.0, info.getDouble("capacity"), 0.0); + assertEquals("usedCapacity doesn't match", Float.NaN, + info.getDouble("usedCapacity"), 0.0); + assertEquals("minQueueMemoryCapacity doesn't match", 1024, + info.getInt("minQueueMemoryCapacity")); + assertEquals("maxQueueMemoryCapacity doesn't match", 10240, + info.getInt("maxQueueMemoryCapacity")); + assertEquals("maxQueueMemoryCapacity doesn't match", 10240, + info.getInt("maxQueueMemoryCapacity")); + + } + + @Test + public void testNodes() throws JSONException, Exception { + testNodesHelper("nodes"); + } + + @Test + public void testNodesSlash() throws JSONException, Exception { + testNodesHelper("nodes/"); + } + + @Test + public void testNodesQueryState() throws JSONException, Exception { + WebResource r = resource(); + MockNM nm1 = rm.registerNode("h1:1234", 5120); + MockNM nm2 = rm.registerNode("h2:1235", 5121); + rm.sendNodeStarted(nm1); + rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), 
RMNodeState.NEW); + + JSONObject json = r.path("ws").path("v1").path("cluster").path("nodes") + .queryParam("state", RMNodeState.RUNNING.toString()) + .accept("application/json").get(JSONObject.class); + + assertEquals("correct number of elements", 1, json.length()); + JSONObject nodes = json.getJSONObject("nodes"); + assertEquals("correct number of elements", 1, nodes.length()); + JSONArray nodeArray = nodes.getJSONArray("node"); + assertEquals("correct number of elements", 1, nodeArray.length()); + JSONObject info = nodeArray.getJSONObject(0); + + verifyNodeInfo(info, nm1, RMNodeState.RUNNING); + } + + @Test + public void testNodesQueryStateNone() throws JSONException, Exception { + WebResource r = resource(); + rm.registerNode("h1:1234", 5120); + rm.registerNode("h2:1235", 5121); + + JSONObject json = r.path("ws").path("v1").path("cluster").path("nodes") + .queryParam("state", RMNodeState.DECOMMISSIONED.toString()) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + assertEquals("nodes is not null", JSONObject.NULL, json.get("nodes")); + } + + @Test + public void testNodesQueryStateInvalid() throws JSONException, Exception { + WebResource r = resource(); + rm.registerNode("h1:1234", 5120); + rm.registerNode("h2:1235", 5121); + + try { + r.path("ws").path("v1").path("cluster").path("nodes") + .queryParam("state", "BOGUSSTATE").accept("application/json") + .get(JSONObject.class); + + fail("should have thrown exception querying invalid state"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + + assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + JSONObject msg = response.getEntity(JSONObject.class); + JSONObject exception = msg.getJSONObject("RemoteException"); + assertEquals("correct number of elements", 3, exception.length()); + String message = exception.getString("message"); + String type = exception.getString("exception"); + String classname = exception.getString("javaClassName"); + checkStringMatch( + "exception message", + "No enum const class org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState.BOGUSSTATE", + message); + checkStringMatch("exception type", "IllegalArgumentException", type); + checkStringMatch("exception classname", + "java.lang.IllegalArgumentException", classname); + + } finally { + rm.stop(); + } + } + + @Test + public void testNodesQueryHealthy() throws JSONException, Exception { + WebResource r = resource(); + MockNM nm1 = rm.registerNode("h1:1234", 5120); + MockNM nm2 = rm.registerNode("h2:1235", 5121); + rm.sendNodeStarted(nm1); + rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); + JSONObject json = r.path("ws").path("v1").path("cluster").path("nodes") + .queryParam("healthy", "true").accept("application/json") + .get(JSONObject.class); + + assertEquals("correct number of elements", 1, json.length()); + JSONObject nodes = json.getJSONObject("nodes"); + assertEquals("correct number of elements", 1, nodes.length()); + JSONArray nodeArray = nodes.getJSONArray("node"); + assertEquals("correct number of elements", 2, nodeArray.length()); + } + + @Test + public void testNodesQueryHealthyCase() throws JSONException, Exception { + WebResource r = resource(); + MockNM nm1 = rm.registerNode("h1:1234", 5120); + MockNM nm2 = rm.registerNode("h2:1235", 5121); + rm.sendNodeStarted(nm1); + rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); + 
rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); + JSONObject json = r.path("ws").path("v1").path("cluster").path("nodes") + .queryParam("healthy", "TRUe").accept("application/json") + .get(JSONObject.class); + + assertEquals("correct number of elements", 1, json.length()); + JSONObject nodes = json.getJSONObject("nodes"); + assertEquals("correct number of elements", 1, nodes.length()); + JSONArray nodeArray = nodes.getJSONArray("node"); + assertEquals("correct number of elements", 2, nodeArray.length()); + + } + + @Test + public void testNodesQueryHealthyAndState() throws JSONException, Exception { + WebResource r = resource(); + MockNM nm1 = rm.registerNode("h1:1234", 5120); + MockNM nm2 = rm.registerNode("h2:1235", 5121); + rm.sendNodeStarted(nm1); + rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); + rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); + RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes() + .get(nm1.getNodeId()); + NodeHealthStatus nodeHealth = node.getNodeHealthStatus(); + nodeHealth.setHealthReport("test health report"); + nodeHealth.setIsNodeHealthy(false); + node.handle(new RMNodeStatusEvent(nm1.getNodeId(), nodeHealth, + new ArrayList(), null, null)); + rm.NMwaitForState(nm1.getNodeId(), RMNodeState.UNHEALTHY); + + JSONObject json = r.path("ws").path("v1").path("cluster").path("nodes") + .queryParam("healthy", "true") + .queryParam("state", RMNodeState.RUNNING.toString()) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + assertEquals("nodes is not null", JSONObject.NULL, json.get("nodes")); + } + + @Test + public void testNodesQueryHealthyFalse() throws JSONException, Exception { + WebResource r = resource(); + MockNM nm1 = rm.registerNode("h1:1234", 5120); + MockNM nm2 = rm.registerNode("h2:1235", 5121); + rm.sendNodeStarted(nm1); + rm.NMwaitForState(nm1.getNodeId(), RMNodeState.RUNNING); + rm.NMwaitForState(nm2.getNodeId(), RMNodeState.NEW); + JSONObject json = r.path("ws").path("v1").path("cluster").path("nodes") + .queryParam("healthy", "false").accept("application/json") + .get(JSONObject.class); + + assertEquals("correct number of elements", 1, json.length()); + assertEquals("nodes is not null", JSONObject.NULL, json.get("nodes")); + } + + @Test + public void testNodesQueryHealthyInvalid() throws JSONException, Exception { + WebResource r = resource(); + rm.registerNode("h1:1234", 5120); + rm.registerNode("h2:1235", 5121); + + try { + r.path("ws").path("v1").path("cluster").path("nodes") + .queryParam("healthy", "tr").accept("application/json") + .get(JSONObject.class); + fail("should have thrown exception querying invalid healthy string"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + JSONObject msg = response.getEntity(JSONObject.class); + JSONObject exception = msg.getJSONObject("RemoteException"); + assertEquals("correct number of elements", 3, exception.length()); + String message = exception.getString("message"); + String type = exception.getString("exception"); + String classname = exception.getString("javaClassName"); + checkStringMatch( + "exception message", + "java.lang.Exception: Error: You must specify either true or false to query on health", + message); + checkStringMatch("exception type", "BadRequestException", type); + checkStringMatch("exception classname", + "org.apache.hadoop.yarn.webapp.BadRequestException", classname); + + } finally 
{ + rm.stop(); + } + } + + public void testNodesHelper(String path) throws JSONException, Exception { + WebResource r = resource(); + MockNM nm1 = rm.registerNode("h1:1234", 5120); + MockNM nm2 = rm.registerNode("h2:1235", 5121); + JSONObject json = r.path("ws").path("v1").path("cluster").path(path) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject nodes = json.getJSONObject("nodes"); + assertEquals("correct number of elements", 1, nodes.length()); + JSONArray nodeArray = nodes.getJSONArray("node"); + assertEquals("correct number of elements", 2, nodeArray.length()); + JSONObject info = nodeArray.getJSONObject(0); + String id = info.get("id").toString(); + + if (id.matches("h1:1234")) { + verifyNodeInfo(info, nm1, RMNodeState.NEW); + verifyNodeInfo(nodeArray.getJSONObject(1), nm2, RMNodeState.NEW); + } else { + verifyNodeInfo(info, nm2, RMNodeState.NEW); + verifyNodeInfo(nodeArray.getJSONObject(1), nm1, RMNodeState.NEW); + } + } + + @Test + public void testSingleNode() throws JSONException, Exception { + rm.registerNode("h1:1234", 5120); + MockNM nm2 = rm.registerNode("h2:1235", 5121); + testSingleNodeHelper("h2:1235", nm2); + } + + @Test + public void testSingleNodeSlash() throws JSONException, Exception { + MockNM nm1 = rm.registerNode("h1:1234", 5120); + rm.registerNode("h2:1235", 5121); + testSingleNodeHelper("h1:1234/", nm1); + } + + public void testSingleNodeHelper(String nodeid, MockNM nm) + throws JSONException, Exception { + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("nodes") + .path(nodeid).accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject info = json.getJSONObject("node"); + verifyNodeInfo(info, nm, RMNodeState.NEW); + } + + @Test + public void testNonexistNode() throws JSONException, Exception { + rm.registerNode("h1:1234", 5120); + rm.registerNode("h2:1235", 5121); + WebResource r = resource(); + try { + r.path("ws").path("v1").path("cluster").path("nodes") + .path("node_invalid:99").accept("application/json") + .get(JSONObject.class); + + fail("should have thrown exception on non-existent nodeid"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); + + JSONObject msg = response.getEntity(JSONObject.class); + JSONObject exception = msg.getJSONObject("RemoteException"); + assertEquals("correct number of elements", 3, exception.length()); + String message = exception.getString("message"); + String type = exception.getString("exception"); + String classname = exception.getString("javaClassName"); + checkStringMatch("exception message", + "java.lang.Exception: nodeId, node_invalid:99, is not found", message); + checkStringMatch("exception type", "NotFoundException", type); + checkStringMatch("exception classname", + "org.apache.hadoop.yarn.webapp.NotFoundException", classname); + + } finally { + rm.stop(); + } + } + + @Test + public void testInvalidNode() throws JSONException, Exception { + rm.registerNode("h1:1234", 5120); + rm.registerNode("h2:1235", 5121); + + WebResource r = resource(); + try { + r.path("ws").path("v1").path("cluster").path("nodes") + .path("node_invalid_foo").accept("application/json") + .get(JSONObject.class); + + fail("should have thrown exception on non-existent nodeid"); + } catch (UniformInterfaceException ue) { + ClientResponse response = 
ue.getResponse(); + + assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + JSONObject msg = response.getEntity(JSONObject.class); + JSONObject exception = msg.getJSONObject("RemoteException"); + assertEquals("correct number of elements", 3, exception.length()); + String message = exception.getString("message"); + String type = exception.getString("exception"); + String classname = exception.getString("javaClassName"); + checkStringMatch("exception message", + "Invalid NodeId \\[node_invalid_foo\\]. Expected host:port", message); + checkStringMatch("exception type", "IllegalArgumentException", type); + checkStringMatch("exception classname", + "java.lang.IllegalArgumentException", classname); + } finally { + rm.stop(); + } + } + + public void verifyNodeInfo(JSONObject nodeInfo, MockNM nm, + RMNodeState expectedState) throws JSONException, Exception { + assertEquals("correct number of elements", 11, nodeInfo.length()); + String state = nodeInfo.get("state").toString(); + assertTrue("stated doesn't match: " + state, + state.matches(expectedState.toString())); + String rack = nodeInfo.get("rack").toString(); + assertTrue("rack doesn't match: " + rack, rack.matches("/default-rack")); + String healthStatus = nodeInfo.get("healthStatus").toString(); + assertTrue("healthStatus doesn't match: " + healthStatus, + healthStatus.matches("Healthy")); + String id = nodeInfo.get("id").toString(); + assertTrue("id doesn't match, got: " + id + " expected: " + + nm.getNodeId().toString(), id.matches(nm.getNodeId().toString())); + String nodeHostName = nodeInfo.get("nodeHostName").toString(); + assertTrue("hostname doesn't match, got: " + nodeHostName + " expected: " + + nm.getNodeId().getHost(), + nodeHostName.matches(nm.getNodeId().getHost())); + + String nodeHTTPAddress = nodeInfo.get("nodeHTTPAddress").toString(); + String expectedHttpAddress = nm.getNodeId().getHost() + ":" + + nm.getHttpPort(); + assertTrue("nodeHTTPAddress doesn't match, got: " + nodeHTTPAddress + + " expected: " + expectedHttpAddress, + nodeHTTPAddress.matches(expectedHttpAddress)); + // could use this for other checks + RMNode node = rm.getRMContext().getRMNodes().get(nm.getNodeId()); + long lastHealthUpdate = nodeInfo.getLong("lastHealthUpdate"); + long expectedHealthUpdate = node.getNodeHealthStatus() + .getLastHealthReportTime(); + assertEquals("lastHealthUpdate doesn't match, got: " + lastHealthUpdate + + " expected: " + expectedHealthUpdate, expectedHealthUpdate, + lastHealthUpdate); + String healthReport = nodeInfo.get("healthReport").toString(); + assertTrue("healthReport doesn't match: " + healthReport, + healthReport.matches("Healthy")); + + int numContainers = nodeInfo.getInt("numContainers"); + assertEquals("numContainers doesn't match: " + numContainers, 0, + numContainers); + + long usedMemoryMB = nodeInfo.getLong("usedMemoryMB"); + assertEquals("usedMemoryMB doesn't match: " + usedMemoryMB, 0, usedMemoryMB); + + long availMemoryMB = nodeInfo.getLong("availMemoryMB"); + assertEquals("availMemoryMB doesn't match: " + availMemoryMB, 0, + availMemoryMB); + } + + @Test + public void testApps() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + RMApp app1 = rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + testAppsHelper("apps", app1); + rm.stop(); + + } + + @Test + public void testAppsSlash() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + RMApp app1 = rm.submitApp(1024); + 
amNodeManager.nodeHeartbeat(true); + testAppsHelper("apps/", app1); + rm.stop(); + + } + + public void testAppsHelper(String path, RMApp app) throws JSONException, + Exception { + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path(path) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 1, array.length()); + verifyAppInfo(array.getJSONObject(0), app); + + } + + @Test + public void testAppsQueryState() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + RMApp app1 = rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("state", RMAppState.ACCEPTED.toString()) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 1, array.length()); + verifyAppInfo(array.getJSONObject(0), app1); + rm.stop(); + } + + @Test + public void testAppsQueryStateNone() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("state", RMAppState.RUNNING.toString()) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + assertEquals("apps is not null", JSONObject.NULL, json.get("apps")); + rm.stop(); + } + + @Test + public void testAppsQueryStateInvalid() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + WebResource r = resource(); + + try { + r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("state", "INVALID_test").accept("application/json") + .get(JSONObject.class); + fail("should have thrown exception on invalid state query"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + + assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + JSONObject msg = response.getEntity(JSONObject.class); + JSONObject exception = msg.getJSONObject("RemoteException"); + assertEquals("correct number of elements", 3, exception.length()); + String message = exception.getString("message"); + String type = exception.getString("exception"); + String classname = exception.getString("javaClassName"); + checkStringMatch( + "exception message", + "No enum const class org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState.INVALID_test", + message); + checkStringMatch("exception type", "IllegalArgumentException", type); + checkStringMatch("exception classname", + "java.lang.IllegalArgumentException", classname); + + } finally { + rm.stop(); + } + } + + @Test + public void testAppsQueryUser() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + rm.submitApp(1024); 
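+ // second submission by the same user; the user-filtered query below expects both apps in the result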
+ rm.submitApp(1024); + + amNodeManager.nodeHeartbeat(true); + WebResource r = resource(); + JSONObject json = r + .path("ws") + .path("v1") + .path("cluster") + .path("apps") + .queryParam("user", + UserGroupInformation.getCurrentUser().getShortUserName()) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 2, array.length()); + rm.stop(); + } + + @Test + public void testAppsQueryQueue() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + rm.submitApp(1024); + rm.submitApp(1024); + + amNodeManager.nodeHeartbeat(true); + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("queue", "default").accept("application/json") + .get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 2, array.length()); + rm.stop(); + } + + @Test + public void testAppsQueryLimit() throws JSONException, Exception { + rm.start(); + rm.registerNode("amNM:1234", 2048); + rm.submitApp(1024); + rm.submitApp(1024); + rm.submitApp(1024); + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("limit", "2").accept("application/json") + .get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 2, array.length()); + rm.stop(); + } + + @Test + public void testAppsQueryStartBegin() throws JSONException, Exception { + rm.start(); + long start = System.currentTimeMillis(); + Thread.sleep(1); + rm.registerNode("amNM:1234", 2048); + rm.submitApp(1024); + rm.submitApp(1024); + rm.submitApp(1024); + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("startedTimeBegin", String.valueOf(start)) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 3, array.length()); + rm.stop(); + } + + @Test + public void testAppsQueryStartBeginSome() throws JSONException, Exception { + rm.start(); + rm.registerNode("amNM:1234", 2048); + rm.submitApp(1024); + rm.submitApp(1024); + long start = System.currentTimeMillis(); + Thread.sleep(1); + rm.submitApp(1024); + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("startedTimeBegin", String.valueOf(start)) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of 
elements", 1, array.length()); + rm.stop(); + } + + @Test + public void testAppsQueryStartEnd() throws JSONException, Exception { + rm.start(); + rm.registerNode("amNM:1234", 2048); + long end = System.currentTimeMillis(); + Thread.sleep(1); + rm.submitApp(1024); + rm.submitApp(1024); + rm.submitApp(1024); + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("startedTimeEnd", String.valueOf(end)) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + assertEquals("apps is not null", JSONObject.NULL, json.get("apps")); + rm.stop(); + } + + @Test + public void testAppsQueryStartBeginEnd() throws JSONException, Exception { + rm.start(); + rm.registerNode("amNM:1234", 2048); + long start = System.currentTimeMillis(); + Thread.sleep(1); + rm.submitApp(1024); + rm.submitApp(1024); + long end = System.currentTimeMillis(); + Thread.sleep(1); + rm.submitApp(1024); + + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("startedTimeBegin", String.valueOf(start)) + .queryParam("startedTimeEnd", String.valueOf(end)) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 2, array.length()); + rm.stop(); + } + + @Test + public void testAppsQueryFinishBegin() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + long start = System.currentTimeMillis(); + Thread.sleep(1); + RMApp app1 = rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + // finish App + MockAM am = rm + .sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId()); + am.registerAppAttempt(); + am.unregisterAppAttempt(); + rm.submitApp(1024); + rm.submitApp(1024); + + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("finishedTimeBegin", String.valueOf(start)) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 1, array.length()); + rm.stop(); + } + + @Test + public void testAppsQueryFinishEnd() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + RMApp app1 = rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + // finish App + MockAM am = rm + .sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId()); + am.registerAppAttempt(); + am.unregisterAppAttempt(); + + rm.submitApp(1024); + rm.submitApp(1024); + long end = System.currentTimeMillis(); + + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("finishedTimeEnd", String.valueOf(end)) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 3, array.length()); + 
rm.stop(); + } + + @Test + public void testAppsQueryFinishBeginEnd() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + long start = System.currentTimeMillis(); + Thread.sleep(1); + RMApp app1 = rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + // finish App + MockAM am = rm + .sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId()); + am.registerAppAttempt(); + am.unregisterAppAttempt(); + + rm.submitApp(1024); + rm.submitApp(1024); + long end = System.currentTimeMillis(); + + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("finishedTimeBegin", String.valueOf(start)) + .queryParam("finishedTimeEnd", String.valueOf(end)) + .accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("correct number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("correct number of elements", 1, array.length()); + rm.stop(); + } + + @Test + public void testSingleApp() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + RMApp app1 = rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + testSingleAppsHelper(app1.getApplicationId().toString(), app1); + rm.stop(); + } + + @Test + public void testSingleAppsSlash() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + RMApp app1 = rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + testSingleAppsHelper(app1.getApplicationId().toString() + "/", app1); + rm.stop(); + } + + @Test + public void testInvalidApp() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + WebResource r = resource(); + + try { + r.path("ws").path("v1").path("cluster").path("apps") + .path("application_invalid_12").accept("application/json") + .get(JSONObject.class); + fail("should have thrown exception on invalid appid"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + + assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + JSONObject msg = response.getEntity(JSONObject.class); + JSONObject exception = msg.getJSONObject("RemoteException"); + assertEquals("correct number of elements", 3, exception.length()); + String message = exception.getString("message"); + String type = exception.getString("exception"); + String classname = exception.getString("javaClassName"); + checkStringMatch("exception message", "For input string: \"invalid\"", + message); + checkStringMatch("exception type", "NumberFormatException", type); + checkStringMatch("exception classname", + "java.lang.NumberFormatException", classname); + + } finally { + rm.stop(); + } + } + + @Test + public void testNonexistApp() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("amNM:1234", 2048); + rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + WebResource r = resource(); + + try { + r.path("ws").path("v1").path("cluster").path("apps") + .path("application_00000_0099").accept("application/json") + .get(JSONObject.class); + fail("should have thrown exception on invalid appid"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + + 
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus()); + JSONObject msg = response.getEntity(JSONObject.class); + JSONObject exception = msg.getJSONObject("RemoteException"); + assertEquals("correct number of elements", 3, exception.length()); + String message = exception.getString("message"); + String type = exception.getString("exception"); + String classname = exception.getString("javaClassName"); + checkStringMatch("exception message", + "java.lang.Exception: app with id: application_00000_0099 not found", + message); + checkStringMatch("exception type", "NotFoundException", type); + checkStringMatch("exception classname", + "org.apache.hadoop.yarn.webapp.NotFoundException", classname); + } finally { + rm.stop(); + } + } + + public void testSingleAppsHelper(String path, RMApp app) + throws JSONException, Exception { + WebResource r = resource(); + JSONObject json = r.path("ws").path("v1").path("cluster").path("apps") + .path(path).accept("application/json").get(JSONObject.class); + assertEquals("correct number of elements", 1, json.length()); + verifyAppInfo(json.getJSONObject("app"), app); + } + + public void verifyAppInfo(JSONObject info, RMApp app) throws JSONException, + Exception { + + // 15 because trackingUrl not assigned yet + assertEquals("correct number of elements", 15, info.length()); + String id = info.getString("id"); + String expectedId = app.getApplicationId().toString(); + checkStringMatch("id", expectedId, id); + + String user = info.getString("user"); + String expectedUser = app.getUser(); + checkStringMatch("user", expectedUser, user); + + checkStringMatch("name", "", info.getString("name")); + checkStringMatch("queue", "default", info.getString("queue")); + checkStringMatch("state", RMAppState.ACCEPTED.toString(), + info.getString("state")); + checkStringMatch("finalStatus", + FinalApplicationStatus.UNDEFINED.toString(), + info.getString("finalStatus")); + assertEquals("progress doesn't match", 0, info.getDouble("progress"), 0.0); + checkStringMatch("trackingUI", "UNASSIGNED", info.getString("trackingUI")); + checkStringMatch("diagnostics", "", info.getString("diagnostics")); + assertEquals("clusterId doesn't match", ResourceManager.clusterTimeStamp, + info.getLong("clusterId")); + assertEquals("startedTime doesn't match", app.getStartTime(), + info.getLong("startedTime")); + assertEquals("finishedTime doesn't match", app.getFinishTime(), + info.getLong("finishedTime")); + assertTrue("elapsed time not greater than 0", + info.getLong("elapsedTime") > 0); + checkStringMatch("amHostHttpAddress", app.getCurrentAppAttempt() + .getMasterContainer().getNodeHttpAddress(), + info.getString("amHostHttpAddress")); + assertTrue("amContainerLogs doesn't match", + info.getString("amContainerLogs").startsWith("http://")); + } + + private void checkStringMatch(String print, String expected, String got) { + assertTrue( + print + " doesn't match, got: " + got + " expected: " + expected, + got.matches(expected)); + } + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm index 98e19b2c51..8fe515babe 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm @@ -193,6 +193,10 @@ Hadoop MapReduce Next Generation - Cluster Setup | | | ACLs are of for . 
| | | | Defaults to special value of <<*>> which means . | | | | Special value of just means no one has access. | +*-------------------------+-------------------------+------------------------+ +| <<>> | | | +| | | | +| | | Configuration to enable or disable log aggregation | *-------------------------+-------------------------+------------------------+ @@ -260,10 +264,6 @@ Hadoop MapReduce Next Generation - Cluster Setup | | are written. | | | | | Multiple paths help spread disk i/o. | *-------------------------+-------------------------+------------------------+ -| <<>> | | | -| | | | -| | | Configuration to enable or disable log aggregation | -*-------------------------+-------------------------+------------------------+ | <<>> | | | | | <10800> | | | | | Default time (in seconds) to retain log files on the NodeManager | diff --git a/hadoop-mapreduce-project/hadoop-yarn/pom.xml b/hadoop-mapreduce-project/hadoop-yarn/pom.xml index 9a1551f7ef..2c9ab3d970 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/pom.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/pom.xml @@ -71,37 +71,6 @@ protobuf-java 2.4.0a - - org.apache.avro - avro - 1.5.3 - - - org.mortbay.jetty - jetty - - - org.apache.ant - ant - - - org.jboss.netty - netty - - - org.apache.velocity - velocity - - - org.slf4j - slf4j-api - - - paranamer-ant - com.thoughtworks.paranamer - - - org.apache.hadoop hadoop-common @@ -168,7 +137,7 @@ com.google.inject.extensions guice-servlet - 2.0 + 3.0 junit @@ -185,38 +154,6 @@ clover 3.0.2 - - - org.apache.avro - avro - 1.5.3 - - - org.mortbay.jetty - jetty - - - org.apache.ant - ant - - - org.jboss.netty - netty - - - org.apache.velocity - velocity - - - org.slf4j - slf4j-api - - - paranamer-ant - com.thoughtworks.paranamer - - - com.google.protobuf protobuf-java @@ -227,10 +164,6 @@ hadoop-common ${project.version} - - org.apache.avro - avro - commons-el commons-el @@ -289,9 +222,30 @@ runtime - com.google.inject.extensions - guice-servlet - 2.0 + com.google.inject + guice + 3.0 + + + com.sun.jersey.jersey-test-framework + jersey-test-framework-core + 1.8 + test + + + com.sun.jersey.jersey-test-framework + jersey-test-framework-grizzly2 + 1.8 + + + com.sun.jersey + jersey-server + 1.8 + + + com.sun.jersey.contribs + jersey-guice + 1.8 org.jboss.netty diff --git a/hadoop-mapreduce-project/ivy.xml b/hadoop-mapreduce-project/ivy.xml index ee57f1af0e..e9b38d077e 100644 --- a/hadoop-mapreduce-project/ivy.xml +++ b/hadoop-mapreduce-project/ivy.xml @@ -99,6 +99,8 @@ rev="${yarn.version}" conf="compile->default"> + diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index 7939789e69..74970dd5ee 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -142,10 +142,25 @@ hadoop-hdfs ${project.version} + + com.google.inject + guice + 3.0 + + + com.sun.jersey + jersey-server + 1.8 + + + com.sun.jersey.contribs + jersey-guice + 1.8 + com.google.inject.extensions guice-servlet - 2.0 + 3.0 junit diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/ivy.xml b/hadoop-mapreduce-project/src/contrib/gridmix/ivy.xml index af4e945143..4ab7b62065 100644 --- a/hadoop-mapreduce-project/src/contrib/gridmix/ivy.xml +++ b/hadoop-mapreduce-project/src/contrib/gridmix/ivy.xml @@ -68,6 +68,8 @@ rev="${yarn.version}" conf="test->default"> + + avro 1.5.3 - - org.apache.avro - avro-ipc - 1.5.3 - net.sf.kosmosfs kfs diff --git a/hadoop-tools/hadoop-archives/pom.xml b/hadoop-tools/hadoop-archives/pom.xml new file mode 100644 index 0000000000..73f5201004 --- 
/dev/null +++ b/hadoop-tools/hadoop-archives/pom.xml @@ -0,0 +1,126 @@ + + + 4.0.0 + + org.apache.hadoop + hadoop-project + 0.24.0-SNAPSHOT + ../../hadoop-project + + org.apache.hadoop + hadoop-archives + 0.24.0-SNAPSHOT + Apache Hadoop Archives + Apache Hadoop Archives + jar + + + ${project.build.directory}/log + + + + + org.apache.hadoop + hadoop-annotations + provided + + + org.apache.hadoop + hadoop-mapreduce-client-hs + test + + + org.apache.hadoop + hadoop-mapreduce-client-core + provided + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + provided + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + test + test-jar + + + org.apache.hadoop + hadoop-common + provided + + + org.apache.hadoop + hadoop-hdfs + provided + + + org.apache.hadoop + hadoop-common + test + test-jar + + + org.apache.hadoop + hadoop-hdfs + test + test-jar + + + org.apache.hadoop + hadoop-yarn-server-tests + test-jar + test + + + + + + + org.apache.maven.plugins + maven-antrun-plugin + + + create-log-dir + process-test-resources + + run + + + + + + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + org.apache.hadoop.tools.HadoopArchives + + + + + + + diff --git a/hadoop-mapreduce-project/src/tools/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java similarity index 98% rename from hadoop-mapreduce-project/src/tools/org/apache/hadoop/tools/HadoopArchives.java rename to hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java index 41d149ea95..cbe8b9cbab 100644 --- a/hadoop-mapreduce-project/src/tools/org/apache/hadoop/tools/HadoopArchives.java +++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java @@ -111,6 +111,14 @@ public void setConf(Configuration conf) { } else { this.conf = new JobConf(conf, HadoopArchives.class); } + + // This is for test purposes: since MR2, unlike Streaming, it is not + // possible to add a JAR to the classpath that the tool will use when + // running the mapreduce job. 
+ String testJar = System.getProperty(TEST_HADOOP_ARCHIVES_JAR_PATH, null); + if (testJar != null) { + ((JobConf)conf).setJar(testJar); + } } public Configuration getConf() { @@ -868,9 +876,12 @@ public int run(String[] args) throws Exception { return 0; } + static final String TEST_HADOOP_ARCHIVES_JAR_PATH = "test.hadoop.archives.jar"; + /** the main functions **/ public static void main(String[] args) { JobConf job = new JobConf(HadoopArchives.class); + HadoopArchives harchives = new HadoopArchives(job); int ret = 0; diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/TestHadoopArchives.java b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java similarity index 96% rename from hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/TestHadoopArchives.java rename to hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java index c999964ee7..a00be5701a 100644 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/TestHadoopArchives.java +++ b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.mapred.MiniMRCluster; +import org.apache.hadoop.util.JarFinder; import org.apache.hadoop.util.ToolRunner; import org.apache.log4j.Level; @@ -46,6 +47,9 @@ * test {@link HadoopArchives} */ public class TestHadoopArchives extends TestCase { + + public static final String HADOOP_ARCHIVES_JAR = JarFinder.getJar(HadoopArchives.class); + { ((Log4JLogger)LogFactory.getLog(org.apache.hadoop.security.Groups.class) ).getLogger().setLevel(Level.OFF); @@ -136,6 +140,7 @@ public void testPathWithSpaces() throws Exception { "*", archivePath.toString() }; + System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH, HADOOP_ARCHIVES_JAR); final HadoopArchives har = new HadoopArchives(mapred.createJobConf()); assertEquals(0, ToolRunner.run(har, args)); diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index 2347ec3e0e..aa4e20fc56 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -29,6 +29,7 @@ hadoop-streaming + hadoop-archives