Merge trunk into HA branch.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1207490 13f79535-47bb-0310-9956-ffa450edef68
commit 73b3de6204
@@ -3,7 +3,12 @@ Hadoop Change Log
 Trunk (unreleased changes)
 
   INCOMPATIBLE CHANGES
 
+  NEW FEATURES
+
+    HADOOP-7777 Implement a base class for DNSToSwitchMapping implementations
+    that can offer extra topology information. (stevel)
+
   IMPROVEMENTS
 
     HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
@@ -118,6 +123,9 @@ Release 0.23.1 - Unreleased
     HADOOP-7802. Hadoop scripts unconditionally source
     "$bin"/../libexec/hadoop-config.sh. (Bruno Mahé via tomwhite)
 
+    HADOOP-7858. Drop some info logging to DEBUG level in IPC,
+    metrics, and HTTP. (todd via eli)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -131,6 +139,8 @@ Release 0.23.1 - Unreleased
     HADOOP-6614. RunJar should provide more diags when it can't create
     a temp file. (Jonathan Hsieh via eli)
 
+    HADOOP-7859. TestViewFsHdfs.testgetFileLinkStatus is failing an assert. (eli)
+
 Release 0.23.0 - 2011-11-01
 
   INCOMPATIBLE CHANGES
@@ -1096,6 +1106,12 @@ Release 0.22.0 - Unreleased
 
     HADOOP-7786. Remove HDFS-specific config keys defined in FsConfig. (eli)
 
+    HADOOP-7358. Improve log levels when exceptions caught in RPC handler
+    (Todd Lipcon via shv)
+
+    HADOOP-7861. changes2html.pl generates links to HADOOP, HDFS, and MAPREDUCE
+    jiras. (shv)
+
   OPTIMIZATIONS
 
     HADOOP-6884. Add LOG.isDebugEnabled() guard for each LOG.debug(..).
@@ -242,7 +242,11 @@
 
     $item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:;        # Separate attribution
     $item =~ s:\n{2,}:\n<p/>\n:g;                     # Keep paragraph breaks
-    $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}   # Link to JIRA
+    $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}   # Link to JIRA Common
+              {<a href="${jira_url_prefix}$1">$1</a>}g;
+    $item =~ s{(?:${jira_url_prefix})?(HDFS-\d+)}     # Link to JIRA Hdfs
+              {<a href="${jira_url_prefix}$1">$1</a>}g;
+    $item =~ s{(?:${jira_url_prefix})?(MAPREDUCE-\d+)} # Link to JIRA MR
               {<a href="${jira_url_prefix}$1">$1</a>}g;
     print "    <li>$item</li>\n";
   }
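Aside (not part of the patch): the Perl substitutions above now link HDFS and MAPREDUCE ids as well as HADOOP ones. A hedged Java sketch of the same linking idea, with an assumed JIRA URL prefix, for readers who want the logic outside the script:

// Illustrative Java equivalent of the Perl substitutions above; class name,
// prefix constant and sample input are assumptions, not part of the commit.
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class JiraLinker {
  private static final String JIRA_URL_PREFIX = "https://issues.apache.org/jira/browse/";
  private static final Pattern JIRA_ID = Pattern.compile(
      "(?:" + Pattern.quote(JIRA_URL_PREFIX) + ")?((?:HADOOP|HDFS|MAPREDUCE)-\\d+)");

  /** Replace every HADOOP/HDFS/MAPREDUCE id in the item with an HTML link. */
  public static String linkJiras(String item) {
    Matcher m = JIRA_ID.matcher(item);
    return m.replaceAll("<a href=\"" + JIRA_URL_PREFIX + "$1\">$1</a>");
  }

  public static void main(String[] args) {
    System.out.println(linkJiras("HADOOP-7861 updates changes2html.pl; see also HDFS-2588."));
  }
}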
@@ -1099,18 +1099,10 @@ public FileStatus next(final AbstractFileSystem fs, final Path p)
      */
     private Path qualifySymlinkTarget(final AbstractFileSystem pathFS,
       Path pathWithLink, Path target) {
-      /* NB: makeQualified uses the target's scheme and authority, if
-       * specified, and the scheme and authority of pathFS, if not. If
-       * the path does have a scheme and authority we assert they match
-       * those of pathFS since resolve updates the file system of a path
-       * that contains links each time a link is encountered.
-       */
+      // NB: makeQualified uses the target's scheme and authority, if
+      // specified, and the scheme and authority of pathFS, if not.
       final String scheme = target.toUri().getScheme();
       final String auth = target.toUri().getAuthority();
-      if (scheme != null && auth != null) {
-        assert scheme.equals(pathFS.getUri().getScheme());
-        assert auth.equals(pathFS.getUri().getAuthority());
-      }
       return (scheme == null && auth == null)
         ? target.makeQualified(pathFS.getUri(), pathWithLink.getParent())
         : target;
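Aside (illustrative only, assuming the two-argument Path.makeQualified(URI, Path) overload): a minimal sketch of the behaviour the shortened comment describes — a target keeps its own scheme and authority when it has them, and inherits the file system's otherwise.

// Hedged sketch, not part of the patch; URIs and paths are invented examples.
import java.net.URI;
import org.apache.hadoop.fs.Path;

public class QualifyDemo {
  public static void main(String[] args) {
    URI fsUri = URI.create("hdfs://nn1:8020");
    Path workingDir = new Path("/user/alice");

    Path relative = new Path("data/part-0");          // no scheme or authority
    Path absolute = new Path("hdfs://nn2:8020/data"); // already qualified

    // Picks up hdfs://nn1:8020 and the working directory.
    System.out.println(relative.makeQualified(fsUri, workingDir));
    // Keeps its own scheme and authority (nn2), as the comment notes.
    System.out.println(absolute.makeQualified(fsUri, workingDir));
  }
}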
@@ -644,12 +644,12 @@ public void start() throws IOException {
       while (true) {
         try {
           port = webServer.getConnectors()[0].getLocalPort();
-          LOG.info("Port returned by webServer.getConnectors()[0]." +
+          LOG.debug("Port returned by webServer.getConnectors()[0]." +
                    "getLocalPort() before open() is "+ port +
                    ". Opening the listener on " + oriPort);
           listener.open();
           port = listener.getLocalPort();
-          LOG.info("listener.getLocalPort() returned " + listener.getLocalPort() +
+          LOG.debug("listener.getLocalPort() returned " + listener.getLocalPort() +
                    " webServer.getConnectors()[0].getLocalPort() returned " +
                    webServer.getConnectors()[0].getLocalPort());
           //Workaround to handle the problem reported in HADOOP-4744
@@ -1498,7 +1498,7 @@ public Handler(int instanceNumber) {
 
     @Override
     public void run() {
-      LOG.info(getName() + ": starting");
+      LOG.debug(getName() + ": starting");
       SERVER.set(Server.this);
       ByteArrayOutputStream buf =
         new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
@@ -1536,7 +1536,16 @@ public Writable run() throws Exception {
               );
           }
         } catch (Throwable e) {
-          LOG.info(getName() + ", call: " + call + ", error: ", e);
+          String logMsg = getName() + ", call " + call + ": error: " + e;
+          if (e instanceof RuntimeException || e instanceof Error) {
+            // These exception types indicate something is probably wrong
+            // on the server side, as opposed to just a normal exceptional
+            // result.
+            LOG.warn(logMsg, e);
+          } else {
+            LOG.info(logMsg, e);
+          }
+
           errorClass = e.getClass().getName();
           error = StringUtils.stringifyException(e);
           // Remove redundant error class name from the beginning of the stack trace
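Aside (not part of the patch): a minimal, hedged sketch of the warn-vs-info split introduced above as a standalone helper; the class and method names here are hypothetical.

// Unchecked problems are treated as the server's fault and logged at WARN;
// everything else is a normal exceptional result and stays at INFO.
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class ThrowableClassifier {
  private static final Log LOG = LogFactory.getLog(ThrowableClassifier.class);

  public static void logCallError(String handlerName, Object call, Throwable e) {
    String logMsg = handlerName + ", call " + call + ": error: " + e;
    if (e instanceof RuntimeException || e instanceof Error) {
      LOG.warn(logMsg, e);   // likely a server-side bug
    } else {
      LOG.info(logMsg, e);   // expected exceptional result
    }
  }
}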
@@ -1571,7 +1580,7 @@ public Writable run() throws Exception {
           LOG.info(getName() + " caught an exception", e);
         }
       }
-      LOG.info(getName() + ": exiting");
+      LOG.debug(getName() + ": exiting");
     }
 
   }
@@ -389,7 +389,7 @@ private void registerProtocolAndImpl(Class<?> protocolClass,
       }
       protocolImplMap.put(new ProtoNameVer(protocolName, version),
           new ProtoClassProtoImpl(protocolClass, protocolImpl));
-      LOG.info("Protocol Name = " + protocolName + " version=" + version +
+      LOG.debug("Protocol Name = " + protocolName + " version=" + version +
           " ProtocolImpl=" + protocolImpl.getClass().getName() +
           " protocolClass=" + protocolClass.getName());
     }
@@ -241,7 +241,7 @@ void registerSource(String name, String desc, MetricsSource source) {
         injectedTags, period, config.subset(SOURCE_KEY));
     sources.put(name, sa);
     sa.start();
-    LOG.info("Registered source "+ name);
+    LOG.debug("Registered source "+ name);
   }
 
   @Override public synchronized <T extends MetricsSink>
@@ -405,8 +405,8 @@ private synchronized void stopTimer() {
   private synchronized void stopSources() {
     for (Entry<String, MetricsSourceAdapter> entry : sources.entrySet()) {
       MetricsSourceAdapter sa = entry.getValue();
-      LOG.info("Stopping metrics source "+ entry.getKey());
-      LOG.debug(sa.source().getClass());
+      LOG.debug("Stopping metrics source "+ entry.getKey() +
+          ": class=" + sa.source().getClass());
       sa.stop();
     }
     sysSource.stop();
@@ -416,8 +416,8 @@ private synchronized void stopSources() {
   private synchronized void stopSinks() {
     for (Entry<String, MetricsSinkAdapter> entry : sinks.entrySet()) {
       MetricsSinkAdapter sa = entry.getValue();
-      LOG.info("Stopping metrics sink "+ entry.getKey());
-      LOG.debug(sa.sink().getClass());
+      LOG.debug("Stopping metrics sink "+ entry.getKey() +
+          ": class=" + sa.sink().getClass());
       sa.stop();
     }
     sinks.clear();
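Aside (illustrative sketch, not from the commit): these hunks demote per-source and per-sink messages to DEBUG; the usual companion pattern, also referenced by HADOOP-6884 in the change log, is to guard message construction with isDebugEnabled() so the string concatenation is skipped at INFO and above.

// Hedged sketch of the guarded-debug pattern; class and method names are invented.
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class GuardedDebugExample {
  private static final Log LOG = LogFactory.getLog(GuardedDebugExample.class);

  void stopSource(String name, Object source) {
    if (LOG.isDebugEnabled()) {
      // Concatenation only happens when DEBUG is actually enabled.
      LOG.debug("Stopping metrics source " + name + ": class=" + source.getClass());
    }
  }
}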
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.net;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * This is a base class for DNS to Switch mappings. <p/> It is not mandatory to
+ * derive {@link DNSToSwitchMapping} implementations from it, but it is strongly
+ * recommended, as it makes it easy for the Hadoop developers to add new methods
+ * to this base class that are automatically picked up by all implementations.
+ * <p/>
+ *
+ * This class does not extend the <code>Configured</code>
+ * base class, and should not be changed to do so, as it causes problems
+ * for subclasses. The constructor of the <code>Configured</code> calls
+ * the {@link #setConf(Configuration)} method, which will call into the
+ * subclasses before they have been fully constructed.
+ *
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class AbstractDNSToSwitchMapping
+    implements DNSToSwitchMapping, Configurable {
+
+  private Configuration conf;
+
+  /**
+   * Create an unconfigured instance
+   */
+  protected AbstractDNSToSwitchMapping() {
+  }
+
+  /**
+   * Create an instance, caching the configuration file.
+   * This constructor does not call {@link #setConf(Configuration)}; if
+   * a subclass extracts information in that method, it must call it explicitly.
+   * @param conf the configuration
+   */
+  protected AbstractDNSToSwitchMapping(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  /**
+   * Predicate that indicates that the switch mapping is known to be
+   * single-switch. The base class returns false: it assumes all mappings are
+   * multi-rack. Subclasses may override this with methods that are more aware
+   * of their topologies.
+   *
+   * <p/>
+   *
+   * This method is used when parts of Hadoop need know whether to apply
+   * single rack vs multi-rack policies, such as during block placement.
+   * Such algorithms behave differently if they are on multi-switch systems.
+   * </p>
+   *
+   * @return true if the mapping thinks that it is on a single switch
+   */
+  public boolean isSingleSwitch() {
+    return false;
+  }
+
+  /**
+   * Query for a {@link DNSToSwitchMapping} instance being on a single
+   * switch.
+   * <p/>
+   * This predicate simply assumes that all mappings not derived from
+   * this class are multi-switch.
+   * @param mapping the mapping to query
+   * @return true if the base class says it is single switch, or the mapping
+   * is not derived from this class.
+   */
+  public static boolean isMappingSingleSwitch(DNSToSwitchMapping mapping) {
+    return mapping instanceof AbstractDNSToSwitchMapping
+        && ((AbstractDNSToSwitchMapping) mapping).isSingleSwitch();
+  }
+
+}
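Aside (hypothetical example, not in the commit): a minimal subclass of the new base class; the class name and the hard-coded rack table are invented for illustration, and a real implementation would consult DNS or a topology service instead.

// Hedged sketch of a DNSToSwitchMapping built on AbstractDNSToSwitchMapping.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.net.AbstractDNSToSwitchMapping;
import org.apache.hadoop.net.NetworkTopology;

public class TableDrivenMapping extends AbstractDNSToSwitchMapping {
  private final Map<String, String> racks = new HashMap<String, String>();

  public TableDrivenMapping() {
    racks.put("host1.example.com", "/rack1");   // invented sample data
    racks.put("host2.example.com", "/rack2");
  }

  @Override
  public List<String> resolve(List<String> names) {
    List<String> result = new ArrayList<String>(names.size());
    for (String name : names) {
      String rack = racks.get(name);
      // Unresolvable names fall back to the default rack, as the interface javadoc now suggests.
      result.add(rack != null ? rack : NetworkTopology.DEFAULT_RACK);
    }
    return result;
  }

  @Override
  public boolean isSingleSwitch() {
    return racks.isEmpty();   // single-switch only while no racks are known
  }
}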
@@ -34,9 +34,13 @@
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class CachedDNSToSwitchMapping implements DNSToSwitchMapping {
+public class CachedDNSToSwitchMapping extends AbstractDNSToSwitchMapping {
   private Map<String, String> cache = new ConcurrentHashMap<String, String>();
-  protected DNSToSwitchMapping rawMapping;
+
+  /**
+   * The uncached mapping
+   */
+  protected final DNSToSwitchMapping rawMapping;
 
   /**
    * cache a raw DNS mapping
@@ -118,4 +122,14 @@ public List<String> resolve(List<String> names) {
     return getCachedHosts(names);
 
   }
+
+  /**
+   * Delegate the switch topology query to the raw mapping, via
+   * {@link AbstractDNSToSwitchMapping#isMappingSingleSwitch(DNSToSwitchMapping)}
+   * @return true iff the raw mapper is considered single-switch.
+   */
+  @Override
+  public boolean isSingleSwitch() {
+    return isMappingSingleSwitch(rawMapping);
+  }
 }
@@ -40,6 +40,12 @@ public interface DNSToSwitchMapping {
    * Note the hostname/ip-address is not part of the returned path.
    * The network topology of the cluster would determine the number of
    * components in the network path.
+   * <p/>
+   *
+   * If a name cannot be resolved to a rack, the implementation
+   * should return {@link NetworkTopology#DEFAULT_RACK}. This
+   * is what the bundled implementations do, though it is not a formal requirement
+   *
    * @param names the list of hosts to resolve (can be empty)
    * @return list of resolved network paths.
    * If <i>names</i> is empty, the returned list is also empty
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* This class implements the {@link DNSToSwitchMapping} interface using a
|
* This class implements the {@link DNSToSwitchMapping} interface using a
|
||||||
* script configured via the {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY}
|
* script configured via the
|
||||||
|
* {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY} option.
|
||||||
|
* <p/>
|
||||||
|
* It contains a static class <code>RawScriptBasedMapping</code> that performs
|
||||||
|
* the work: reading the configuration parameters, executing any defined
|
||||||
|
* script, handling errors and such like. The outer
|
||||||
|
* class extends {@link CachedDNSToSwitchMapping} to cache the delegated
|
||||||
|
* queries.
|
||||||
|
* <p/>
|
||||||
|
* This DNS mapper's {@link #isSingleSwitch()} predicate returns
|
||||||
|
* true if and only if a script is defined.
|
||||||
*/
|
*/
|
||||||
@InterfaceAudience.Public
|
@InterfaceAudience.Public
|
||||||
@InterfaceStability.Evolving
|
@InterfaceStability.Evolving
|
||||||
public final class ScriptBasedMapping extends CachedDNSToSwitchMapping
|
public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
|
||||||
implements Configurable
|
|
||||||
{
|
|
||||||
public ScriptBasedMapping() {
|
|
||||||
super(new RawScriptBasedMapping());
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Minimum number of arguments: {@value}
|
* Minimum number of arguments: {@value}
|
||||||
@@ -65,6 +70,18 @@ public ScriptBasedMapping() {
   static final String SCRIPT_ARG_COUNT_KEY =
                      CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY ;
 
+  /**
+   * Create an instance with the default configuration.
+   * </p>
+   * Calling {@link #setConf(Configuration)} will trigger a
+   * re-evaluation of the configuration settings and so be used to
+   * set up the mapping script.
+   *
+   */
+  public ScriptBasedMapping() {
+    super(new RawScriptBasedMapping());
+  }
+
   /**
    * Create an instance from the given configuration
    * @param conf configuration
@@ -74,14 +91,31 @@ public ScriptBasedMapping(Configuration conf) {
     setConf(conf);
   }
 
+  /**
+   * Get the cached mapping and convert it to its real type
+   * @return the inner raw script mapping.
+   */
+  private RawScriptBasedMapping getRawMapping() {
+    return (RawScriptBasedMapping)rawMapping;
+  }
+
   @Override
   public Configuration getConf() {
-    return ((RawScriptBasedMapping)rawMapping).getConf();
+    return getRawMapping().getConf();
   }
 
+  /**
+   * {@inheritDoc}
+   * <p/>
+   * This will get called in the superclass constructor, so a check is needed
+   * to ensure that the raw mapping is defined before trying to relaying a null
+   * configuration.
+   * @param conf
+   */
   @Override
   public void setConf(Configuration conf) {
-    ((RawScriptBasedMapping)rawMapping).setConf(conf);
+    super.setConf(conf);
+    getRawMapping().setConf(conf);
   }
 
   /**
@@ -89,29 +123,26 @@ public void setConf(Configuration conf) {
    * by the superclass {@link CachedDNSToSwitchMapping}
    */
   private static final class RawScriptBasedMapping
-      implements DNSToSwitchMapping {
+      extends AbstractDNSToSwitchMapping {
     private String scriptName;
-    private Configuration conf;
     private int maxArgs; //max hostnames per call of the script
-    private static Log LOG =
+    private static final Log LOG =
         LogFactory.getLog(ScriptBasedMapping.class);
 
     /**
-     * Set the configuration and
-     * @param conf extract the configuration parameters of interest
+     * Set the configuration and extract the configuration parameters of interest
+     * @param conf the new configuration
      */
+    @Override
     public void setConf (Configuration conf) {
-      this.scriptName = conf.get(SCRIPT_FILENAME_KEY);
-      this.maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
-      this.conf = conf;
-    }
-
-    /**
-     * Get the configuration
-     * @return the configuration
-     */
-    public Configuration getConf () {
-      return conf;
+      super.setConf(conf);
+      if (conf != null) {
+        scriptName = conf.get(SCRIPT_FILENAME_KEY);
+        maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
+      } else {
+        scriptName = null;
+        maxArgs = 0;
+      }
     }
 
     /**
@@ -122,42 +153,42 @@ public RawScriptBasedMapping() {}
 
     @Override
     public List<String> resolve(List<String> names) {
-    List <String> m = new ArrayList<String>(names.size());
-
-    if (names.isEmpty()) {
-      return m;
-    }
-
-    if (scriptName == null) {
-      for (int i = 0; i < names.size(); i++) {
-        m.add(NetworkTopology.DEFAULT_RACK);
-      }
-      return m;
-    }
-
-    String output = runResolveCommand(names);
-    if (output != null) {
-      StringTokenizer allSwitchInfo = new StringTokenizer(output);
-      while (allSwitchInfo.hasMoreTokens()) {
-        String switchInfo = allSwitchInfo.nextToken();
-        m.add(switchInfo);
-      }
-
-      if (m.size() != names.size()) {
-        // invalid number of entries returned by the script
-        LOG.error("Script " + scriptName + " returned "
-            + Integer.toString(m.size()) + " values when "
-            + Integer.toString(names.size()) + " were expected.");
-        return null;
-      }
-    } else {
-      // an error occurred. return null to signify this.
-      // (exn was already logged in runResolveCommand)
-      return null;
-    }
-
-    return m;
-  }
+      List<String> m = new ArrayList<String>(names.size());
+
+      if (names.isEmpty()) {
+        return m;
+      }
+
+      if (scriptName == null) {
+        for (String name : names) {
+          m.add(NetworkTopology.DEFAULT_RACK);
+        }
+        return m;
+      }
+
+      String output = runResolveCommand(names);
+      if (output != null) {
+        StringTokenizer allSwitchInfo = new StringTokenizer(output);
+        while (allSwitchInfo.hasMoreTokens()) {
+          String switchInfo = allSwitchInfo.nextToken();
+          m.add(switchInfo);
+        }
+
+        if (m.size() != names.size()) {
+          // invalid number of entries returned by the script
+          LOG.error("Script " + scriptName + " returned "
+              + Integer.toString(m.size()) + " values when "
+              + Integer.toString(names.size()) + " were expected.");
+          return null;
+        }
+      } else {
+        // an error occurred. return null to signify this.
+        // (exn was already logged in runResolveCommand)
+        return null;
+      }
+
+      return m;
+    }
 
     /**
      * Build and execute the resolution command. The command is
@@ -195,10 +226,10 @@ private String runResolveCommand(List<String> args) {
         dir = new File(userDir);
       }
       ShellCommandExecutor s = new ShellCommandExecutor(
-          cmdList.toArray(new String[0]), dir);
+          cmdList.toArray(new String[cmdList.size()]), dir);
       try {
         s.execute();
-        allOutput.append(s.getOutput() + " ");
+        allOutput.append(s.getOutput()).append(" ");
       } catch (Exception e) {
         LOG.warn("Exception: ", e);
         return null;
@@ -207,5 +238,15 @@ private String runResolveCommand(List<String> args) {
       }
       return allOutput.toString();
     }
+
+    /**
+     * Declare that the mapper is single-switched if a script was not named
+     * in the configuration.
+     * @return true iff there is no script
+     */
+    @Override
+    public boolean isSingleSwitch() {
+      return scriptName == null;
+    }
   }
 }
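Aside (hedged usage sketch, not in the commit): wiring ScriptBasedMapping to a topology script through the public configuration key and resolving a few hosts; the script path and host names are placeholders.

// Illustrative only; assumes a working topology script at the configured path.
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.net.ScriptBasedMapping;

public class ScriptMappingDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Without this key, isSingleSwitch() is true and every host resolves to the default rack.
    conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
        "/etc/hadoop/topology.sh");   // placeholder path

    ScriptBasedMapping mapping = new ScriptBasedMapping(conf);
    System.out.println("single switch? " + mapping.isSingleSwitch());

    List<String> racks = mapping.resolve(Arrays.asList("host1", "host2"));
    System.out.println(racks);   // one rack path per input host, in order
  }
}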
@@ -17,34 +17,80 @@
  */
 package org.apache.hadoop.net;
 
-import java.util.*;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 /**
  * Implements the {@link DNSToSwitchMapping} via static mappings. Used
- * in testcases that simulate racks.
+ * in testcases that simulate racks, and in the
+ * {@link org.apache.hadoop.hdfs.MiniDFSCluster}
  *
+ * A shared, static mapping is used; to reset it call {@link #resetMap()}.
+ *
+ * When an instance of the class has its {@link #setConf(Configuration)}
+ * method called, nodes listed in the configuration will be added to the map.
+ * These do not get removed when the instance is garbage collected.
  */
-public class StaticMapping extends Configured implements DNSToSwitchMapping {
-  public void setconf(Configuration conf) {
-    String[] mappings = conf.getStrings("hadoop.configured.node.mapping");
-    if (mappings != null) {
-      for (int i = 0; i < mappings.length; i++) {
-        String str = mappings[i];
-        String host = str.substring(0, str.indexOf('='));
-        String rack = str.substring(str.indexOf('=') + 1);
-        addNodeToRack(host, rack);
-      }
-    }
-  }
-  /* Only one instance per JVM */
-  private static Map<String, String> nameToRackMap = new HashMap<String, String>();
-
-  static synchronized public void addNodeToRack(String name, String rackId) {
-    nameToRackMap.put(name, rackId);
-  }
-
+public class StaticMapping extends AbstractDNSToSwitchMapping {
+
+  /**
+   * key to define the node mapping as a comma-delimited list of host=rack
+   * mappings, e.g. <code>host1=r1,host2=r1,host3=r2</code>.
+   * </p>
+   * <b>Important: </b>spaces not trimmed and are considered significant.
+   */
+  public static final String KEY_HADOOP_CONFIGURED_NODE_MAPPING =
+      "hadoop.configured.node.mapping";
+
+  /**
+   * Configure the mapping by extracting any mappings defined in the
+   * {@link #KEY_HADOOP_CONFIGURED_NODE_MAPPING} field
+   * @param conf new configuration
+   */
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    if (conf != null) {
+      String[] mappings = conf.getStrings(KEY_HADOOP_CONFIGURED_NODE_MAPPING);
+      if (mappings != null) {
+        for (String str : mappings) {
+          String host = str.substring(0, str.indexOf('='));
+          String rack = str.substring(str.indexOf('=') + 1);
+          addNodeToRack(host, rack);
+        }
+      }
+    }
+  }
+
+  /**
+   * retained lower case setter for compatibility reasons; relays to
+   * {@link #setConf(Configuration)}
+   * @param conf new configuration
+   */
+  public void setconf(Configuration conf) {
+    setConf(conf);
+  }
+
+  /* Only one instance per JVM */
+  private static final Map<String, String> nameToRackMap = new HashMap<String, String>();
+
+  /**
+   * Add a node to the static map. The moment any entry is added to the map,
+   * the map goes multi-rack.
+   * @param name node name
+   * @param rackId rack ID
+   */
+  public static void addNodeToRack(String name, String rackId) {
+    synchronized (nameToRackMap) {
+      nameToRackMap.put(name, rackId);
+    }
+  }
+
+  @Override
   public List<String> resolve(List<String> names) {
     List<String> m = new ArrayList<String>();
     synchronized (nameToRackMap) {
@@ -59,4 +105,24 @@ public List<String> resolve(List<String> names) {
       return m;
     }
   }
+
+  /**
+   * This mapping is only single switch if the map is empty
+   * @return the current switching status
+   */
+  @Override
+  public boolean isSingleSwitch() {
+    synchronized (nameToRackMap) {
+      return nameToRackMap.isEmpty();
+    }
+  }
+
+  /**
+   * Clear the map and revert to being a single switch
+   */
+  public static void resetMap() {
+    synchronized (nameToRackMap) {
+      nameToRackMap.clear();
+    }
+  }
 }
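Aside (hedged usage sketch, not in the commit): populating StaticMapping from the new comma-delimited key and resolving a mix of known and unknown hosts; the host and rack names mirror the tests added later in this commit.

// Illustrative only; StaticMapping is a test-side helper class.
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.StaticMapping;

public class StaticMappingDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, "n1=r1,n2=r2");

    StaticMapping mapping = new StaticMapping();
    mapping.setConf(conf);                        // loads n1 and n2 into the shared map

    List<String> racks = mapping.resolve(Arrays.asList("n1", "unknown"));
    System.out.println(racks.get(0));             // r1
    System.out.println(racks.get(1).equals(NetworkTopology.DEFAULT_RACK)); // true
    System.out.println(mapping.isSingleSwitch()); // false once the map is non-empty
  }
}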
@@ -23,30 +23,59 @@
 import org.apache.hadoop.conf.Configuration;
 
 import junit.framework.TestCase;
+import org.junit.Test;
 
 public class TestScriptBasedMapping extends TestCase {
-  private ScriptBasedMapping mapping;
-  private Configuration conf;
-  private List<String> names;
 
   public TestScriptBasedMapping() {
-    mapping = new ScriptBasedMapping();
-
-    conf = new Configuration();
-    conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
-        ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
-    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
-
-    mapping.setConf(conf);
   }
 
+  @Test
   public void testNoArgsMeansNoResult() {
-    names = new ArrayList<String>();
+    Configuration conf = new Configuration();
+    conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
+        ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
+    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
+    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
+    ScriptBasedMapping mapping = createMapping(conf);
+    List<String> names = new ArrayList<String>();
     names.add("some.machine.name");
     names.add("other.machine.name");
     List<String> result = mapping.resolve(names);
-    assertNull(result);
+    assertNull("Expected an empty list", result);
   }
+
+  @Test
+  public void testNoFilenameMeansSingleSwitch() throws Throwable {
+    Configuration conf = new Configuration();
+    ScriptBasedMapping mapping = createMapping(conf);
+    assertTrue("Expected to be single switch", mapping.isSingleSwitch());
+    assertTrue("Expected to be single switch",
+               AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
+  }
+
+  @Test
+  public void testFilenameMeansMultiSwitch() throws Throwable {
+    Configuration conf = new Configuration();
+    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
+    ScriptBasedMapping mapping = createMapping(conf);
+    assertFalse("Expected to be multi switch", mapping.isSingleSwitch());
+    mapping.setConf(new Configuration());
+    assertTrue("Expected to be single switch", mapping.isSingleSwitch());
+  }
+
+  @Test
+  public void testNullConfig() throws Throwable {
+    ScriptBasedMapping mapping = createMapping(null);
+    assertTrue("Expected to be single switch", mapping.isSingleSwitch());
+
+  }
+  private ScriptBasedMapping createMapping(Configuration conf) {
+    ScriptBasedMapping mapping = new ScriptBasedMapping();
+    mapping.setConf(conf);
+    return mapping;
+  }
 }
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.net;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test the static mapping class.
+ * Because the map is actually static, this map needs to be reset for every test
+ */
+public class TestStaticMapping extends Assert {
+
+  /**
+   * Reset the map then create a new instance of the {@link StaticMapping}
+   * class
+   * @return a new instance
+   */
+  private StaticMapping newInstance() {
+    StaticMapping.resetMap();
+    return new StaticMapping();
+  }
+
+  @Test
+  public void testStaticIsSingleSwitch() throws Throwable {
+    StaticMapping mapping = newInstance();
+    assertTrue("Empty maps are not single switch", mapping.isSingleSwitch());
+  }
+
+
+  @Test
+  public void testCachingRelaysQueries() throws Throwable {
+    StaticMapping staticMapping = newInstance();
+    CachedDNSToSwitchMapping mapping =
+        new CachedDNSToSwitchMapping(staticMapping);
+    assertTrue("Expected single switch", mapping.isSingleSwitch());
+    StaticMapping.addNodeToRack("n1", "r1");
+    assertFalse("Expected to be multi switch",
+                mapping.isSingleSwitch());
+  }
+
+  @Test
+  public void testAddResolveNodes() throws Throwable {
+    StaticMapping mapping = newInstance();
+    StaticMapping.addNodeToRack("n1", "r1");
+    List<String> l1 = new ArrayList<String>(2);
+    l1.add("n1");
+    l1.add("unknown");
+    List<String> mappings = mapping.resolve(l1);
+    assertEquals(2, mappings.size());
+    assertEquals("r1", mappings.get(0));
+    assertEquals(NetworkTopology.DEFAULT_RACK, mappings.get(1));
+    assertFalse("Mapping is still single switch", mapping.isSingleSwitch());
+  }
+
+  @Test
+  public void testReadNodesFromConfig() throws Throwable {
+    StaticMapping mapping = newInstance();
+    Configuration conf = new Configuration();
+    conf.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, "n1=r1,n2=r2");
+    mapping.setConf(conf);
+    List<String> l1 = new ArrayList<String>(3);
+    l1.add("n1");
+    l1.add("unknown");
+    l1.add("n2");
+    List<String> mappings = mapping.resolve(l1);
+    assertEquals(3, mappings.size());
+    assertEquals("r1", mappings.get(0));
+    assertEquals(NetworkTopology.DEFAULT_RACK, mappings.get(1));
+    assertEquals("r2", mappings.get(2));
+    assertFalse("Expected to be multi switch",
+                AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
+  }
+
+  @Test
+  public void testNullConfiguration() throws Throwable {
+    StaticMapping mapping = newInstance();
+    mapping.setConf(null);
+    assertTrue("Null maps is not single switch", mapping.isSingleSwitch());
+    assertTrue("Expected to be single switch",
+               AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
+  }
+}
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.net;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.List;
+
+/**
+ * Test some other details of the switch mapping
+ */
+public class TestSwitchMapping extends Assert {
+
+  @Test
+  public void testStandaloneClassesAssumedMultiswitch() throws Throwable {
+    DNSToSwitchMapping mapping = new StandaloneSwitchMapping();
+    assertFalse("Expected to be multi switch",
+                AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
+  }
+
+
+  @Test
+  public void testCachingRelays() throws Throwable {
+    CachedDNSToSwitchMapping mapping =
+        new CachedDNSToSwitchMapping(new StandaloneSwitchMapping());
+    assertFalse("Expected to be multi switch",
+                mapping.isSingleSwitch());
+  }
+
+  private static class StandaloneSwitchMapping implements DNSToSwitchMapping {
+    @Override
+    public List<String> resolve(List<String> names) {
+      return names;
+    }
+  }
+}
@@ -11,6 +11,8 @@ Trunk (unreleased changes)
 
     HDFS-2520. Add protobuf service for InterDatanodeProtocol. (suresh)
 
+    HDFS-2519. Add protobuf service for DatanodeProtocol. (suresh)
+
   IMPROVEMENTS
 
     HADOOP-7524 Change RPC to allow multiple protocols including multuple
|
|||||||
|
|
||||||
NEW FEATURES
|
NEW FEATURES
|
||||||
|
|
||||||
|
HDFS-2316. [umbrella] webhdfs: a complete FileSystem implementation for
|
||||||
|
accessing HDFS over HTTP (szetszwo)
|
||||||
|
|
||||||
IMPROVEMENTS
|
IMPROVEMENTS
|
||||||
HDFS-2560. Refactor BPOfferService to be a static inner class (todd)
|
HDFS-2560. Refactor BPOfferService to be a static inner class (todd)
|
||||||
|
|
||||||
@ -151,6 +156,10 @@ Release 0.23.1 - UNRELEASED
|
|||||||
|
|
||||||
HDFS-2566. Move BPOfferService to be a non-inner class. (todd)
|
HDFS-2566. Move BPOfferService to be a non-inner class. (todd)
|
||||||
|
|
||||||
|
HDFS-2552. Add Forrest doc for WebHDFS REST API. (szetszwo)
|
||||||
|
|
||||||
|
HDFS-2587. Add apt doc for WebHDFS REST API. (szetszwo)
|
||||||
|
|
||||||
OPTIMIZATIONS
|
OPTIMIZATIONS
|
||||||
|
|
||||||
HDFS-2130. Switch default checksum to CRC32C. (todd)
|
HDFS-2130. Switch default checksum to CRC32C. (todd)
|
||||||
@@ -177,6 +186,10 @@ Release 0.23.1 - UNRELEASED
     trying to browse DFS via web UI. (harsh via eli)
 
     HDFS-2575. DFSTestUtil may create empty files (todd)
+
+    HDFS-2588. hdfs jsp pages missing DOCTYPE. (Dave Vronay via mattf)
+
+    HDFS-2590. Fix the missing links in the WebHDFS forrest doc. (szetszwo)
 
 Release 0.23.0 - 2011-11-01
 
@@ -242,7 +242,11 @@
 
     $item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:;        # Separate attribution
     $item =~ s:\n{2,}:\n<p/>\n:g;                     # Keep paragraph breaks
-    $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}   # Link to JIRA
+    $item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)}   # Link to JIRA Common
+              {<a href="${jira_url_prefix}$1">$1</a>}g;
+    $item =~ s{(?:${jira_url_prefix})?(HDFS-\d+)}     # Link to JIRA Hdfs
+              {<a href="${jira_url_prefix}$1">$1</a>}g;
+    $item =~ s{(?:${jira_url_prefix})?(MAPREDUCE-\d+)} # Link to JIRA MR
               {<a href="${jira_url_prefix}$1">$1</a>}g;
     print "    <li>$item</li>\n";
   }
|
|||||||
<hdfs_SLG label="Synthetic Load Generator" href="SLG_user_guide.html" />
|
<hdfs_SLG label="Synthetic Load Generator" href="SLG_user_guide.html" />
|
||||||
<hdfs_imageviewer label="Offline Image Viewer" href="hdfs_imageviewer.html" />
|
<hdfs_imageviewer label="Offline Image Viewer" href="hdfs_imageviewer.html" />
|
||||||
<hdfs_editsviewer label="Offline Edits Viewer" href="hdfs_editsviewer.html" />
|
<hdfs_editsviewer label="Offline Edits Viewer" href="hdfs_editsviewer.html" />
|
||||||
|
<webhdfs label="WebHDFS REST API" href="webhdfs.html" />
|
||||||
<hftp label="HFTP" href="hftp.html"/>
|
<hftp label="HFTP" href="hftp.html"/>
|
||||||
<faultinject_framework label="Fault Injection" href="faultinject_framework.html" />
|
<faultinject_framework label="Fault Injection" href="faultinject_framework.html" />
|
||||||
<hdfs_libhdfs label="C API libhdfs" href="libhdfs.html" />
|
<hdfs_libhdfs label="C API libhdfs" href="libhdfs.html" />
|
||||||
@@ -119,8 +120,33 @@ See http://forrest.apache.org/docs/linking.html for more info.
       </distributedcache>
     </filecache>
     <fs href="fs/">
-      <filesystem href="FileSystem.html" />
+      <FileStatus href="FileStatus.html" />
+      <Path href="Path.html" />
+
+      <filesystem href="FileSystem.html">
+        <open href="#open(org.apache.hadoop.fs.Path,%20int)" />
+        <getFileStatus href="#getFileStatus(org.apache.hadoop.fs.Path)" />
+        <listStatus href="#listStatus(org.apache.hadoop.fs.Path)" />
+        <getContentSummary href="#getContentSummary(org.apache.hadoop.fs.Path)" />
+        <getFileChecksum href="#getFileChecksum(org.apache.hadoop.fs.Path)" />
+        <getHomeDirectory href="#getHomeDirectory()" />
+        <getDelegationToken href="#getDelegationToken(org.apache.hadoop.io.Text)" />
+
+        <create href="#create(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.permission.FsPermission,%20boolean,%20int,%20short,%20long,%20org.apache.hadoop.util.Progressable)" />
+        <mkdirs href="#mkdirs(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.permission.FsPermission)" />
+        <rename href="#rename(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.Options.Rename...)" />
+        <setReplication href="#setReplication(org.apache.hadoop.fs.Path,%20short)" />
+        <setOwner href="#setOwner(org.apache.hadoop.fs.Path,%20java.lang.String,%20java.lang.String)" />
+        <setPermission href="#setPermission(org.apache.hadoop.fs.Path,%20org.apache.hadoop.fs.permission.FsPermission)" />
+        <setTimes href="#setTimes(org.apache.hadoop.fs.Path,%20long,%20long)" />
+
+        <append href="#append(org.apache.hadoop.fs.Path,%20int,%20org.apache.hadoop.util.Progressable)" />
+        <delete href="#delete(org.apache.hadoop.fs.Path,%20boolean)" />
+      </filesystem>
     </fs>
+
+
     <io href="io/">
       <closeable href="Closeable.html">
         <close href="#close()" />
File diff suppressed because it is too large.
@@ -27,7 +27,7 @@
   //for java.io.Serializable
   private static final long serialVersionUID = 1L;
 %>
-
+<!DOCTYPE html>
 <html>
 <head>
 <%JspHelper.createTitle(out, request, request.getParameter("filename")); %>
@@ -29,7 +29,7 @@
   //for java.io.Serializable
   private static final long serialVersionUID = 1L;
 %>
-
+<!DOCTYPE html>
 <html>
 <head>
 <style type=text/css>
@@ -27,6 +27,7 @@
   //for java.io.Serializable
   private static final long serialVersionUID = 1L;
 %>
+<!DOCTYPE html>
 <html>
 <head>
 <%JspHelper.createTitle(out, request, request.getParameter("filename")); %>
@@ -38,6 +38,7 @@
   int corruptFileCount = corruptFileBlocks.size();
 %>
 
+<!DOCTYPE html>
 <html>
 <link rel="stylesheet" type="text/css" href="/static/hadoop.css">
 <title>Hadoop <%=namenodeRole%> <%=namenodeLabel%></title>
@@ -33,6 +33,7 @@
   String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
 %>
 
+<!DOCTYPE html>
 <html>
 
 <link rel="stylesheet" type="text/css" href="/static/hadoop.css">
@@ -33,6 +33,7 @@ FSNamesystem fsn = nn.getNamesystem();
   String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
 %>
 
+<!DOCTYPE html>
 <html>
 
 <link rel="stylesheet" type="text/css" href="/static/hadoop.css">
@@ -27,6 +27,7 @@
   //for java.io.Serializable
   private static final long serialVersionUID = 1L;
 %>
+<!DOCTYPE html>
 <html>
 
 <title></title>
@@ -27,6 +27,7 @@
   private static final long serialVersionUID = 1L;
 %>
 
+<!DOCTYPE html>
 <html>
 <link rel="stylesheet" type="text/css" href="/static/hadoop.css">
 <title>Hadoop SecondaryNameNode</title>
hadoop-hdfs-project/hadoop-hdfs/src/proto/DatanodeProtocol.proto (new file, 344 lines)
@ -0,0 +1,344 @@
|
|||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// This file contains protocol buffers that are used throughout HDFS -- i.e.
|
||||||
|
// by the client, server, and data transfer protocols.
|
||||||
|
|
||||||
|
option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "DatanodeProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;

import "hdfs.proto";

/**
 * Information to identify a datanode to a namenode
 */
message DatanodeRegistrationProto {
  required DatanodeIDProto datanodeID = 1;    // Datanode information
  required StorageInfoProto storateInfo = 2;  // Node information
  required ExportedBlockKeysProto keys = 3;   // Block keys
}

/**
 * Commands sent from namenode to the datanodes
 */
message DatanodeCommandProto {
  enum Type {
    BalancerBandwidthCommand = 0;
    BlockCommand = 1;
    BlockRecoveryCommand = 2;
    FinalizeCommand = 3;
    KeyUpdateCommand = 4;
    RegisterCommand = 5;
    UpgradeCommand = 6;
  }

  required Type cmdType = 1;    // Type of the command

  // One of the following command is available when the corresponding
  // cmdType is set
  optional BalancerBandwidthCommandProto balancerCmd = 2;
  optional BlockCommandProto blkCmd = 3;
  optional BlockRecoveryCommndProto recoveryCmd = 4;
  optional FinalizeCommandProto finalizeCmd = 5;
  optional KeyUpdateCommandProto keyUpdateCmd = 6;
  optional RegisterCommandProto registerCmd = 7;
  optional UpgradeCommandProto upgradeCmd = 8;
}

/**
 * Command sent from namenode to datanode to set the
 * maximum bandwidth to be used for balancing.
 */
message BalancerBandwidthCommandProto {

  // Maximum bandwidth to be used by datanode for balancing
  required uint64 bandwidth = 1;
}

/**
 * Command to instruct datanodes to perform certain action
 * on the given set of blocks.
 */
message BlockCommandProto {
  enum Action {
    UNKNOWN = 0;    // Unknown action
    TRANSFER = 1;   // Transfer blocks to another datanode
    INVALIDATE = 2; // Invalidate blocks
    SHUTDOWN = 3;   // Shutdown node
  }
  required uint32 action = 1;
  required string blockPoolId = 2;
  repeated BlockProto blocks = 3;
  repeated DatanodeIDsProto targets = 4;
}

/**
 * List of blocks to be recovered by the datanode
 */
message BlockRecoveryCommndProto {
  repeated RecoveringBlockProto blocks = 1;
}

/**
 * Finalize the upgrade at the datanode
 */
message FinalizeCommandProto {
  required string blockPoolId = 1; // Block pool to be finalized
}

/**
 * Update the block keys at the datanode
 */
message KeyUpdateCommandProto {
  required ExportedBlockKeysProto keys = 1;
}

/**
 * Instruct datanode to register with the namenode
 */
message RegisterCommandProto {
  // void
}

/**
 * Generic distributed upgrade Command
 */
message UpgradeCommandProto {
  enum Action {
    UNKNOWN = 0;          // Unknown action
    REPORT_STATUS = 100;  // Report upgrade status
    START_UPGRADE = 101;  // Start upgrade
  }
  required uint32 action = 1;        // Upgrade action
  required uint32 version = 2;       // Version of the upgrade
  required uint32 upgradeStatus = 3; // % completed in range 0 & 100
}

/**
 * registration - Information of the datanode registering with the namenode
 */
message RegisterDatanodeRequestProto {
  required DatanodeRegistrationProto registration = 1; // Datanode info
}

/**
 * registration - Update registration of the datanode that successfully
 *                registered. StorageInfo will be updated to include new
 *                storage ID if the datanode did not have one in the request.
 */
message RegisterDatanodeResponseProto {
  required DatanodeRegistrationProto registration = 1; // Datanode info
}

/**
 * registration - datanode registration information
 * capacity - total storage capacity available at the datanode
 * dfsUsed - storage used by HDFS
 * remaining - remaining storage available for HDFS
 * blockPoolUsed - storage used by the block pool
 * xmitsInProgress - number of transfers from this datanode to others
 * xceiverCount - number of active transceiver threads
 * failedVolumes - number of failed volumes
 */
message HeartbeatRequestProto {
  required DatanodeRegistrationProto registration = 1; // Datanode info
  required uint64 capacity = 2;
  required uint64 dfsUsed = 3;
  required uint64 remaining = 4;
  required uint64 blockPoolUsed = 5;
  required uint32 xmitsInProgress = 6;
  required uint32 xceiverCount = 7;
  required uint32 failedVolumes = 8;
}

/**
 * cmds - Commands from namenode to datanode.
 */
message HeartbeatResponseProto {
  repeated DatanodeCommandProto cmds = 1;
}

/**
 * registration - datanode registration information
 * blockPoolID - block pool ID of the reported blocks
 * blocks - each block is represented as two longs in the array.
 *          first long represents block ID
 *          second long represents length
 */
message BlockReportRequestProto {
  required DatanodeRegistrationProto registration = 1;
  required string blockPoolId = 2;
  repeated uint64 blocks = 3 [packed=true];
}

/**
 * cmd - Command from namenode to the datanode
 */
message BlockReportResponseProto {
  required DatanodeCommandProto cmd = 1;
}

/**
 * Data structure to send received or deleted block information
 * from datanode to namenode.
 *
 * deleteHint set to "-" indicates block deletion.
 * other deleteHint indicates block addition.
 */
message ReceivedDeletedBlockInfoProto {
  required BlockProto block = 1;
  optional string deleteHint = 2;
}

/**
 * registration - datanode registration information
 * blockPoolID - block pool ID of the reported blocks
 * blocks - Received/deleted block list
 */
message BlockReceivedAndDeletedRequestProto {
  required DatanodeRegistrationProto registration = 1;
  required string blockPoolId = 2;
  repeated ReceivedDeletedBlockInfoProto blocks = 3;
}

/**
 * void response
 */
message BlockReceivedAndDeletedResponseProto {
}

/**
 * registartion - Datanode reporting the error
 * errorCode - error code indicating the error
 * msg - Free text description of the error
 */
message ErrorReportRequestProto {
  enum ErrorCode {
    NOTIFY = 0;           // Error report to be logged at the namenode
    DISK_ERROR = 1;       // DN has disk errors but still has valid volumes
    INVALID_BLOCK = 2;    // Command from namenode has invalid block ID
    FATAL_DISK_ERROR = 3; // No valid volumes left on datanode
  }
  required DatanodeRegistrationProto registartion = 1; // Registartion info
  required uint32 errorCode = 2; // Error code
  required string msg = 3;       // Error message
}

/**
 * void response
 */
message ErrorReportResponseProto {
}

/**
 * cmd - Upgrade command sent from datanode to namenode
 */
message ProcessUpgradeRequestProto {
  optional UpgradeCommandProto cmd = 1;
}

/**
 * cmd - Upgrade command sent from namenode to datanode
 */
message ProcessUpgradeResponseProto {
  optional UpgradeCommandProto cmd = 1;
}

/**
 * blocks - list of blocks that are reported as corrupt
 */
message ReportBadBlocksRequestProto {
  repeated LocatedBlockProto blocks = 1;
}

/**
 * void response
 */
message ReportBadBlocksResponseProto {
}

/**
 * Commit block synchronization request during lease recovery
 */
message CommitBlockSynchronizationRequestProto {
  required ExtendedBlockProto block = 1;
  required uint64 newGenStamp = 2;
  required uint64 newLength = 3;
  required bool closeFile = 4;
  required bool deleteBlock = 5;
  repeated DatanodeIDProto newTaragets = 6;
}

/**
 * void response
 */
message CommitBlockSynchronizationResponseProto {
}

/**
 * Protocol used from datanode to the namenode
 * See the request and response for details of rpc call.
 */
service DatanodeProtocolService {
  /**
   * Register a datanode at a namenode
   */
  rpc registerDatanode(RegisterDatanodeRequestProto)
      returns(RegisterDatanodeResponseProto);

  /**
   * Send heartbeat from datanode to namenode
   */
  rpc sendHeartbeat(HeartbeatRequestProto) returns(HeartbeatResponseProto);

  /**
   * Report blocks at a given datanode to the namenode
   */
  rpc blockReport(BlockReportRequestProto) returns(BlockReportResponseProto);

  /**
   * Report from datanode about recently received or deleted block
   */
  rpc blockReceivedAndDeleted(BlockReceivedAndDeletedRequestProto)
      returns(BlockReceivedAndDeletedResponseProto);

  /**
   * Report from a datanode of an error to the active namenode.
   * Used for debugging.
   */
  rpc errorReport(ErrorReportRequestProto) returns(ErrorReportResponseProto);

  /**
   * Generic way to send commands from datanode to namenode during
   * distributed upgrade process.
   */
  rpc processUpgrade(ProcessUpgradeRequestProto) returns(ProcessUpgradeResponseProto);

  /**
   * Report corrupt blocks at the specified location
   */
  rpc reportBadBlocks(ReportBadBlocksRequestProto) returns(ReportBadBlocksResponseProto);

  /**
   * Commit block synchronization during lease recovery.
   */
  rpc commitBlockSynchronization(CommitBlockSynchronizationRequestProto)
      returns(CommitBlockSynchronizationResponseProto);
}
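Illustration (not part of this commit): given the options above, protoc generates a Java outer class org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos containing these messages, and a message such as BalancerBandwidthCommandProto can be built, serialized and re-parsed with the generated builder API. The wrapper class below is hypothetical; only the message and its bandwidth field come from the definition above.

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;

    public class BalancerBandwidthProtoExample {
      public static void main(String[] args) throws Exception {
        // Build the message with the generated builder; bandwidth is its only (required) field.
        BalancerBandwidthCommandProto cmd = BalancerBandwidthCommandProto.newBuilder()
            .setBandwidth(10L * 1024 * 1024)   // e.g. 10 MB/s of balancing bandwidth
            .build();

        // Round-trip through the wire format.
        byte[] wire = cmd.toByteArray();
        BalancerBandwidthCommandProto parsed = BalancerBandwidthCommandProto.parseFrom(wire);
        System.out.println("bandwidth = " + parsed.getBandwidth());
      }
    }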
@@ -41,6 +41,8 @@ Trunk (unreleased changes)
 (tucu)
 
 BUG FIXES
+MAPREDUCE-3412. Fix 'ant docs'. (amarrk)
+
 MAPREDUCE-3346. [Rumen] LoggedTaskAttempt#getHostName() returns null.
 (amarrk)
 
@@ -106,6 +108,17 @@ Release 0.23.1 - Unreleased
 MAPREDUCE-3372. HADOOP_PREFIX cannot be overridden.
 (Bruno Mahé via tomwhite)
 
+MAPREDUCE-3411. Performance Upgrade for jQuery (Jonathan Eagles via
+mahadev)
+
+MAPREDUCE-3371. Review and improve the yarn-api javadocs. (Ravi Prakash
+via mahadev)
+
+MAPREDUCE-3238. Small cleanup in SchedulerApp. (Todd Lipcon via mahadev)
+
+MAPREDUCE-3413. RM web ui applications not sorted in any order by default.
+(Jonathan Eagles via mahadev)
+
 OPTIMIZATIONS
 
 BUG FIXES
@@ -159,6 +172,20 @@ Release 0.23.1 - Unreleased
 
 MAPREDUCE-3444. trunk/0.23 builds broken (Hitesh Shah via mahadev)
 
+MAPREDUCE-3454. [Gridmix] TestDistCacheEmulation is broken (Hitesh Shah
+via mahadev)
+
+MAPREDUCE-3408. yarn-daemon.sh unconditionnaly sets yarn.root.logger
+(Bruno Mahe via mahadev)
+
+MAPREDUCE-3329. Fixed CapacityScheduler to ensure maximum-capacity cannot
+be lesser than capacity for any queue. (acmurthy)
+
+MAPREDUCE-3464. mapreduce jsp pages missing DOCTYPE. (Dave Vronay via mattf)
+
+MAPREDUCE-3265. Removed debug logs during job submission to LOG.debug to
+cut down noise. (acmurthy)
+
 Release 0.23.0 - 2011-11-01
 
 INCOMPATIBLE CHANGES
@@ -932,10 +932,6 @@
 <style basedir="${mapred.src.dir}" destdir="${build.docs}"
 includes="mapred-default.xml" style="conf/configuration.xsl"/>
 <antcall target="changes-to-html"/>
-<subant target="docs">
-<property name="build.docs" value="${build.docs}"/>
-<fileset file="${contrib.dir}/build.xml"/>
-</subant>
 </target>
 
 <target name="javadoc-dev" depends="compile, ivy-retrieve-javadoc" description="Generate javadoc for hadoop developers">
@@ -51,6 +51,8 @@ protected Class<? extends SubView> content() {
 
 private String jobsTableInit() {
 return tableInit().
+// Sort by id upon page load
+append(", aaSorting: [[0, 'asc']]").
 append(",aoColumns:[{sType:'title-numeric'},").
 append("null,null,{sType:'title-numeric', bSearchable:false},null,").
 append("null,{sType:'title-numeric',bSearchable:false}, null, null]}").
@@ -119,6 +119,9 @@ protected Collection<TaskAttempt> getTaskAttempts() {
 }
 
 private String attemptsTableInit() {
-return tableInit().append("}").toString();
+return tableInit().
+// Sort by id upon page load
+append(", aaSorting: [[0, 'asc']]").
+append("}").toString();
 }
 }
@@ -38,6 +38,8 @@ public class TasksPage extends AppView {
 
 private String tasksTableInit() {
 return tableInit().
+// Sort by id upon page load
+append(", aaSorting: [[0, 'asc']]").
 append(",aoColumns:[{sType:'title-numeric'},{sType:'title-numeric',").
 append("bSearchable:false},null,{sType:'title-numeric'},").
 append("{sType:'title-numeric'},{sType:'title-numeric'}]}").toString();
@@ -72,7 +72,7 @@ public class TaskLog {
 if (!LOG_DIR.exists()) {
 boolean b = LOG_DIR.mkdirs();
 if (!b) {
-LOG.warn("mkdirs failed. Ignoring.");
+LOG.debug("mkdirs failed. Ignoring.");
 }
 }
 }
@@ -108,7 +108,7 @@ private void initialize(InetSocketAddress jobTrackAddr, Configuration conf)
 break;
 }
 else {
-LOG.info("Cannot pick " + provider.getClass().getName()
+LOG.debug("Cannot pick " + provider.getClass().getName()
 + " as the ClientProtocolProvider - returned null protocol");
 }
 }
@@ -296,8 +296,12 @@ private String attemptsTableInit() {
 } else { //MAP
 b.append(", 5");
 }
-b.append(" ] }");
-b.append("]}");
+b.append(" ] }]");
+
+// Sort by id upon page load
+b.append(", aaSorting: [[0, 'asc']]");
+
+b.append("}");
 return b.toString();
 }
 
@@ -74,8 +74,12 @@ private String tasksTableInit() {
 } else { //MAP
 b.append(", 7");
 }
-b.append(" ] }");
-b.append("]}");
+b.append(" ] }]");
+
+// Sort by id upon page load
+b.append(", aaSorting: [[0, 'asc']]");
+
+b.append("}");
 return b.toString();
 }
 
@@ -84,11 +84,17 @@ protected Class<? extends SubView> content() {
 */
 private String jobsTableInit() {
 return tableInit().
-append(",aoColumnDefs:[").
-append("{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 6 ] }").
-append(",{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 7 ] }").
+// Sort by id upon page load
+append(", aaSorting: [[2, 'asc']]").
+append(", aoColumnDefs:[").
+// Maps Total
+append("{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 7 ] }").
+// Maps Completed
 append(",{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 8 ] }").
+// Reduces Total
 append(",{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 9 ] }").
+// Reduces Completed
+append(",{'sType':'numeric', 'bSearchable': false, 'aTargets': [ 10 ] }").
 append("]}").
 toString();
 }
@@ -30,12 +30,9 @@
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
-import org.apache.hadoop.mapreduce.v2.security.client.ClientHSSecurityInfo;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityInfo;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.YarnException;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 
 public class ClientCache {
@@ -79,9 +76,9 @@ private MRClientProtocol instantiateHistoryProxy()
 if (StringUtils.isEmpty(serviceAddr)) {
 return null;
 }
-LOG.info("Connecting to HistoryServer at: " + serviceAddr);
+LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
 final YarnRPC rpc = YarnRPC.create(conf);
-LOG.info("Connected to HistoryServer at: " + serviceAddr);
+LOG.debug("Connected to HistoryServer at: " + serviceAddr);
 UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
 return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
 @Override
@@ -143,7 +143,7 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
 || YarnApplicationState.RUNNING == application
 .getYarnApplicationState()) {
 if (application == null) {
-LOG.info("Could not get Job info from RM for job " + jobId
+LOG.debug("Could not get Job info from RM for job " + jobId
 + ". Redirecting to job history server.");
 return checkAndGetHSProxy(null, JobState.NEW);
 }
@@ -169,8 +169,8 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
 + ":" + addr.getPort()));
 UserGroupInformation.getCurrentUser().addToken(clientToken);
 }
-LOG.info("Tracking Url of JOB is " + application.getTrackingUrl());
-LOG.info("Connecting to " + serviceAddr);
+LOG.info("The url to track the job: " + application.getTrackingUrl());
+LOG.debug("Connecting to " + serviceAddr);
 realProxy = instantiateAMProxy(serviceAddr);
 return realProxy;
 } catch (IOException e) {
@@ -187,7 +187,7 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
 }
 application = rm.getApplicationReport(appId);
 if (application == null) {
-LOG.info("Could not get Job info from RM for job " + jobId
+LOG.debug("Could not get Job info from RM for job " + jobId
 + ". Redirecting to job history server.");
 return checkAndGetHSProxy(null, JobState.RUNNING);
 }
@@ -281,16 +281,13 @@ private synchronized Object invoke(String method, Class argClass,
 LOG.debug("Tracing remote error ", e.getTargetException());
 throw (YarnRemoteException) e.getTargetException();
 }
-LOG.info("Failed to contact AM/History for job " + jobId +
-" retrying..");
-LOG.debug("Failed exception on AM/History contact",
-e.getTargetException());
+LOG.debug("Failed to contact AM/History for job " + jobId +
+" retrying..", e.getTargetException());
 // Force reconnection by setting the proxy to null.
 realProxy = null;
 } catch (Exception e) {
-LOG.info("Failed to contact AM/History for job " + jobId
-+ " Will retry..");
-LOG.debug("Failing to contact application master", e);
+LOG.debug("Failed to contact AM/History for job " + jobId
++ " Will retry..", e);
 // Force reconnection by setting the proxy to null.
 realProxy = null;
 }
@@ -25,7 +25,6 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
@@ -40,11 +39,9 @@
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityInfo;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.ClientRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
@@ -56,6 +53,7 @@
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -67,13 +65,13 @@
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo;
 
 
 // TODO: This should be part of something like yarn-client.
 public class ResourceMgrDelegate {
 private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class);
 
+private final String rmAddress;
 private YarnConfiguration conf;
 ClientRMProtocol applicationsManager;
 private ApplicationId applicationId;
@@ -92,21 +90,25 @@ public ResourceMgrDelegate(YarnConfiguration conf) {
 YarnConfiguration.DEFAULT_RM_ADDRESS),
 YarnConfiguration.DEFAULT_RM_PORT,
 YarnConfiguration.RM_ADDRESS);
-LOG.info("Connecting to ResourceManager at " + rmAddress);
+this.rmAddress = rmAddress.toString();
+LOG.debug("Connecting to ResourceManager at " + rmAddress);
 applicationsManager =
 (ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class,
 rmAddress, this.conf);
-LOG.info("Connected to ResourceManager at " + rmAddress);
+LOG.debug("Connected to ResourceManager at " + rmAddress);
 }
 
 /**
 * Used for injecting applicationsManager, mostly for testing.
 * @param conf the configuration object
-* @param applicationsManager the handle to talk the resource managers {@link ClientRMProtocol}.
+* @param applicationsManager the handle to talk the resource managers
+* {@link ClientRMProtocol}.
 */
-public ResourceMgrDelegate(YarnConfiguration conf, ClientRMProtocol applicationsManager) {
+public ResourceMgrDelegate(YarnConfiguration conf,
+ClientRMProtocol applicationsManager) {
 this.conf = conf;
 this.applicationsManager = applicationsManager;
+this.rmAddress = applicationsManager.toString();
 }
 
 public void cancelDelegationToken(Token<DelegationTokenIdentifier> arg0)
@@ -295,18 +297,22 @@ public long renewDelegationToken(Token<DelegationTokenIdentifier> arg0)
 }
 
 
-public ApplicationId submitApplication(ApplicationSubmissionContext appContext)
+public ApplicationId submitApplication(
+ApplicationSubmissionContext appContext)
 throws IOException {
 appContext.setApplicationId(applicationId);
-SubmitApplicationRequest request = recordFactory.newRecordInstance(SubmitApplicationRequest.class);
+SubmitApplicationRequest request =
+recordFactory.newRecordInstance(SubmitApplicationRequest.class);
 request.setApplicationSubmissionContext(appContext);
 applicationsManager.submitApplication(request);
-LOG.info("Submitted application " + applicationId + " to ResourceManager");
+LOG.info("Submitted application " + applicationId + " to ResourceManager" +
+" at " + rmAddress);
 return applicationId;
 }
 
 public void killApplication(ApplicationId applicationId) throws IOException {
-KillApplicationRequest request = recordFactory.newRecordInstance(KillApplicationRequest.class);
+KillApplicationRequest request =
+recordFactory.newRecordInstance(KillApplicationRequest.class);
 request.setApplicationId(applicationId);
 applicationsManager.forceKillApplication(request);
 LOG.info("Killing application " + applicationId);
@@ -276,7 +276,7 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
 Resource capability = recordFactory.newRecordInstance(Resource.class);
 capability.setMemory(conf.getInt(MRJobConfig.MR_AM_VMEM_MB,
 MRJobConfig.DEFAULT_MR_AM_VMEM_MB));
-LOG.info("AppMaster capability = " + capability);
+LOG.debug("AppMaster capability = " + capability);
 
 // Setup LocalResources
 Map<String, LocalResource> localResources =
@@ -352,7 +352,7 @@ public ApplicationSubmissionContext createApplicationSubmissionContext(
 }
 vargsFinal.add(mergedCommand.toString());
 
-LOG.info("Command to launch container for ApplicationMaster is : "
+LOG.debug("Command to launch container for ApplicationMaster is : "
 + mergedCommand);
 
 // Setup the CLASSPATH in environment
@@ -87,8 +87,8 @@ fi
 
 # some variables
 export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
-export YARN_ROOT_LOGGER="INFO,DRFA"
-export YARN_JHS_LOGGER="INFO,JSA"
+export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA}
+export YARN_JHS_LOGGER=${YARN_JHS_LOGGER:-INFO,JSA}
 log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
 pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
 
@@ -91,8 +91,8 @@ public FinishApplicationMasterResponse finishApplicationMaster(
 *
 * <p>This also doubles up as a <em>heartbeat</em> to let the
 * <code>ResourceManager</code> know that the <code>ApplicationMaster</code>
-* is alive. Thus, applications should use periodically make this call to
-* be kept alive.</p>
+* is alive. Thus, applications should periodically make this call to be kept
+* alive. The frequency depends on ??</p>
 *
 * <p>The <code>ResourceManager</code> responds with list of allocated
 * {@link Container}, status of completed containers and headroom information
|
|||||||
* {@link GetNewApplicationResponse}.</p>
|
* {@link GetNewApplicationResponse}.</p>
|
||||||
*
|
*
|
||||||
* @param request request to get a new <code>ApplicationId</code>
|
* @param request request to get a new <code>ApplicationId</code>
|
||||||
* @return new <code>ApplicationId</code> to be used to submit an application
|
* @return response containing the new <code>ApplicationId</code> to be used
|
||||||
|
* to submit an application
|
||||||
* @throws YarnRemoteException
|
* @throws YarnRemoteException
|
||||||
* @see #submitApplication(SubmitApplicationRequest)
|
* @see #submitApplication(SubmitApplicationRequest)
|
||||||
*/
|
*/
|
||||||
@ -216,7 +217,7 @@ public GetQueueInfoResponse getQueueInfo(
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* <p>The interface used by clients to get information about <em>queue
|
* <p>The interface used by clients to get information about <em>queue
|
||||||
* acls</em> for <em>current users</em> from the <code>ResourceManager</code>.
|
* acls</em> for <em>current user</em> from the <code>ResourceManager</code>.
|
||||||
* </p>
|
* </p>
|
||||||
*
|
*
|
||||||
* <p>The <code>ResourceManager</code> responds with queue acls for all
|
* <p>The <code>ResourceManager</code> responds with queue acls for all
|
||||||
|
@ -79,7 +79,7 @@ StartContainerResponse startContainer(StartContainerRequest request)
|
|||||||
* to <em>stop</em> a {@link Container} allocated to it using this interface.
|
* to <em>stop</em> a {@link Container} allocated to it using this interface.
|
||||||
* </p>
|
* </p>
|
||||||
*
|
*
|
||||||
* <p>The <code>ApplicationMaster</code></p> sends a
|
* <p>The <code>ApplicationMaster</code> sends a
|
||||||
* {@link StopContainerRequest} which includes the {@link ContainerId} of the
|
* {@link StopContainerRequest} which includes the {@link ContainerId} of the
|
||||||
* container to be stopped.</p>
|
* container to be stopped.</p>
|
||||||
*
|
*
|
||||||
@ -105,8 +105,8 @@ StopContainerResponse stopContainer(StopContainerRequest request)
|
|||||||
* current status of a <code>Container</code> from the
|
* current status of a <code>Container</code> from the
|
||||||
* <code>NodeManager</code>.</p>
|
* <code>NodeManager</code>.</p>
|
||||||
*
|
*
|
||||||
* <p>The <code>ApplicationMaster</code></p> sends a
|
* <p>The <code>ApplicationMaster</code> sends a
|
||||||
* {@link GetContainerStatusRequest} which includes the {@link ContainerId} of
|
* {@link GetContainerStatusRequest} which includes the {@link ContainerId} of
|
||||||
* the container whose status is needed.</p>
|
* the container whose status is needed.</p>
|
||||||
*
|
*
|
||||||
*<p>The <code>NodeManager</code> responds with
|
*<p>The <code>NodeManager</code> responds with
|
||||||
@ -115,7 +115,8 @@ StopContainerResponse stopContainer(StopContainerRequest request)
|
|||||||
*
|
*
|
||||||
* @param request request to get <code>ContainerStatus</code> of a container
|
* @param request request to get <code>ContainerStatus</code> of a container
|
||||||
* with the specified <code>ContainerId</code>
|
* with the specified <code>ContainerId</code>
|
||||||
* @return <code>ContainerStatus</code> of the container
|
* @return response containing the <code>ContainerStatus</code> of the
|
||||||
|
* container
|
||||||
* @throws YarnRemoteException
|
* @throws YarnRemoteException
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
|
@ -50,7 +50,6 @@
|
|||||||
* <li>
|
* <li>
|
||||||
* A list of unused {@link Container} which are being returned.
|
* A list of unused {@link Container} which are being returned.
|
||||||
* </li>
|
* </li>
|
||||||
* <li></li>
|
|
||||||
* </ul>
|
* </ul>
|
||||||
* </p>
|
* </p>
|
||||||
*
|
*
|
||||||
@ -81,7 +80,7 @@ public interface AllocateRequest {
|
|||||||
void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId);
|
void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get the <em>response id</em>.
|
* Get the <em>response id</em> used to track duplicate responses.
|
||||||
* @return <em>response id</em>
|
* @return <em>response id</em>
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
@ -89,7 +88,7 @@ public interface AllocateRequest {
|
|||||||
int getResponseId();
|
int getResponseId();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Set the <em>response id</em>
|
* Set the <em>response id</em> used to track duplicate responses.
|
||||||
* @param id <em>response id</em>
|
* @param id <em>response id</em>
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
@ -113,7 +112,7 @@ public interface AllocateRequest {
|
|||||||
void setProgress(float progress);
|
void setProgress(float progress);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get the list of <code>ResourceRequest</code> to upate the
|
* Get the list of <code>ResourceRequest</code> to update the
|
||||||
* <code>ResourceManager</code> about the application's resource requirements.
|
* <code>ResourceManager</code> about the application's resource requirements.
|
||||||
* @return the list of <code>ResourceRequest</code>
|
* @return the list of <code>ResourceRequest</code>
|
||||||
*/
|
*/
|
||||||
@ -130,9 +129,9 @@ public interface AllocateRequest {
|
|||||||
int getAskCount();
|
int getAskCount();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Add list of <code>ResourceRequest</code> to upate the
|
* Add list of <code>ResourceRequest</code> to update the
|
||||||
* <code>ResourceManager</code> about the application's resource requirements.
|
* <code>ResourceManager</code> about the application's resource requirements.
|
||||||
* @param resourceRequest list of <code>ResourceRequest</code> to upate the
|
* @param resourceRequest list of <code>ResourceRequest</code> to update the
|
||||||
* <code>ResourceManager</code> about the application's
|
* <code>ResourceManager</code> about the application's
|
||||||
* resource requirements
|
* resource requirements
|
||||||
*/
|
*/
|
||||||
|
@ -34,7 +34,7 @@
|
|||||||
* <ul>
|
* <ul>
|
||||||
* <li>Response ID to track duplicate responses.</li>
|
* <li>Response ID to track duplicate responses.</li>
|
||||||
* <li>
|
* <li>
|
||||||
* A reboot flag to let the <code>ApplicationMaster</code> that its
|
* A reboot flag to let the <code>ApplicationMaster</code> know that its
|
||||||
* horribly out of sync and needs to reboot.</li>
|
* horribly out of sync and needs to reboot.</li>
|
||||||
* <li>A list of newly allocated {@link Container}.</li>
|
* <li>A list of newly allocated {@link Container}.</li>
|
||||||
* <li>A list of completed {@link Container}.</li>
|
* <li>A list of completed {@link Container}.</li>
|
||||||
|
@ -26,6 +26,8 @@
|
|||||||
* <p>The request from clients to get a report of all Applications
|
* <p>The request from clients to get a report of all Applications
|
||||||
* in the cluster from the <code>ResourceManager</code>.</p>
|
* in the cluster from the <code>ResourceManager</code>.</p>
|
||||||
*
|
*
|
||||||
|
* <p>Currently, this is empty.</p>
|
||||||
|
*
|
||||||
* @see ClientRMProtocol#getAllApplications(GetAllApplicationsRequest)
|
* @see ClientRMProtocol#getAllApplications(GetAllApplicationsRequest)
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
|
@ -25,6 +25,8 @@
|
|||||||
/**
|
/**
|
||||||
* <p>The request sent by clients to get cluster metrics from the
|
* <p>The request sent by clients to get cluster metrics from the
|
||||||
* <code>ResourceManager</code>.</p>
|
* <code>ResourceManager</code>.</p>
|
||||||
|
*
|
||||||
|
* <p>Currently, this is empty.</p>
|
||||||
*
|
*
|
||||||
* @see ClientRMProtocol#getClusterMetrics(GetClusterMetricsRequest)
|
* @see ClientRMProtocol#getClusterMetrics(GetClusterMetricsRequest)
|
||||||
*/
|
*/
|
||||||
|
@ -26,6 +26,8 @@
|
|||||||
* <p>The request from clients to get a report of all nodes
|
* <p>The request from clients to get a report of all nodes
|
||||||
* in the cluster from the <code>ResourceManager</code>.</p>
|
* in the cluster from the <code>ResourceManager</code>.</p>
|
||||||
*
|
*
|
||||||
|
* <p>Currently, this is empty.</p>
|
||||||
|
*
|
||||||
* @see ClientRMProtocol#getClusterNodes(GetClusterNodesRequest)
|
* @see ClientRMProtocol#getClusterNodes(GetClusterNodesRequest)
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
|
@ -29,7 +29,7 @@
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* <p>The response sent by the <code>ResourceManager</code> to a client
|
* <p>The response sent by the <code>ResourceManager</code> to a client
|
||||||
* requesting an {@link NodeReport} for all nodes.</p>
|
* requesting a {@link NodeReport} for all nodes.</p>
|
||||||
*
|
*
|
||||||
* <p>The <code>NodeReport</code> contains per-node information such as
|
* <p>The <code>NodeReport</code> contains per-node information such as
|
||||||
* available resources, number of containers, tracking url, rack name, health
|
* available resources, number of containers, tracking url, rack name, health
|
||||||
|
@ -27,7 +27,7 @@
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* <p>The response sent by the <code>NodeManager</code> to the
|
* <p>The response sent by the <code>NodeManager</code> to the
|
||||||
* <code>ApplicationMaster</code> when asked to obtainer <em>status</em>
|
* <code>ApplicationMaster</code> when asked to obtain the <em>status</em>
|
||||||
* of a container.</p>
|
* of a container.</p>
|
||||||
*
|
*
|
||||||
* @see ContainerManager#getContainerStatus(GetContainerStatusRequest)
|
* @see ContainerManager#getContainerStatus(GetContainerStatusRequest)
|
||||||
|
@ -27,6 +27,8 @@
|
|||||||
* <p>The request sent by clients to get a new {@link ApplicationId} for
|
* <p>The request sent by clients to get a new {@link ApplicationId} for
|
||||||
* submitting an application.</p>
|
* submitting an application.</p>
|
||||||
*
|
*
|
||||||
|
* <p>Currently, this is empty.</p>
|
||||||
|
*
|
||||||
* @see ClientRMProtocol#getNewApplication(GetNewApplicationRequest)
|
* @see ClientRMProtocol#getNewApplication(GetNewApplicationRequest)
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
|
@ -28,7 +28,7 @@
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* <p>The response sent by the <code>ResourceManager</code> to the client for
|
* <p>The response sent by the <code>ResourceManager</code> to the client for
|
||||||
* a request to a new {@link ApplicationId} for submitting applications.</p>
|
* a request to get a new {@link ApplicationId} for submitting applications.</p>
|
||||||
*
|
*
|
||||||
* @see ClientRMProtocol#getNewApplication(GetNewApplicationRequest)
|
* @see ClientRMProtocol#getNewApplication(GetNewApplicationRequest)
|
||||||
*/
|
*/
|
||||||
|
@ -26,6 +26,8 @@
|
|||||||
* <p>The request sent by clients to the <code>ResourceManager</code> to
|
* <p>The request sent by clients to the <code>ResourceManager</code> to
|
||||||
* get queue acls for the <em>current user</em>.</p>
|
* get queue acls for the <em>current user</em>.</p>
|
||||||
*
|
*
|
||||||
|
* <p>Currently, this is empty.</p>
|
||||||
|
*
|
||||||
* @see ClientRMProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest)
|
* @see ClientRMProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest)
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
|
@@ -35,11 +35,53 @@
 @Public
 @Stable
 public interface StartContainerResponse {
+/**
+* <p>Get the responses from all auxiliary services running on the
+* <code>NodeManager</code>.</p>
+* <p>The responses are returned as a Map between the auxiliary service names
+* and their corresponding opaque blob <code>ByteBuffer</code>s</p>
+* @return a Map between the auxiliary service names and their outputs
+*/
 Map<String, ByteBuffer> getAllServiceResponse();
 
+/**
+* Get the response from a single auxiliary service running on the
+* <code>NodeManager</code>
+*
+* @param key The auxiliary service name whose response is desired.
+* @return The opaque blob <code>ByteBuffer</code> returned by the auxiliary
+* service.
+*/
 ByteBuffer getServiceResponse(String key);
 
+/**
+* Add to the list of auxiliary services which have been started on the
+* <code>NodeManager</code>. This is done only once when the
+* <code>NodeManager</code> starts up
+* @param serviceResponse A map from auxiliary service names to the opaque
+* blob <code>ByteBuffer</code>s for that auxiliary service
+*/
 void addAllServiceResponse(Map<String, ByteBuffer> serviceResponse);
 
+/**
+* Add to the list of auxiliary services which have been started on the
+* <code>NodeManager</code>. This is done only once when the
+* <code>NodeManager</code> starts up
+*
+* @param key The auxiliary service name
+* @param value The opaque blob <code>ByteBuffer</code> managed by the
+* auxiliary service
+*/
 void setServiceResponse(String key, ByteBuffer value);
 
+/**
+* Remove a single auxiliary service from the StartContainerResponse object
+* @param key The auxiliary service to remove
+*/
 void removeServiceResponse(String key);
 
+/**
+* Remove all the auxiliary services from the StartContainerResponse object
+*/
 void clearServiceResponse();
 }
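Not part of this patch: a hedged sketch of how a caller of startContainer() might read the per-auxiliary-service blobs described by the interface above. The helper class is hypothetical; only getAllServiceResponse() and its Map<String, ByteBuffer> shape come from the interface.

    import java.nio.ByteBuffer;
    import java.util.Map;
    import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;

    public class AuxServiceResponseSketch {
      public static void printServiceResponses(StartContainerResponse response) {
        // Each auxiliary service contributes one opaque blob keyed by its name.
        for (Map.Entry<String, ByteBuffer> entry : response.getAllServiceResponse().entrySet()) {
          System.out.println("Auxiliary service " + entry.getKey() + " returned "
              + entry.getValue().remaining() + " bytes");
        }
      }
    }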
@ -27,6 +27,8 @@
|
|||||||
* <code>ApplicationMaster</code> when asked to <em>stop</em> an
|
* <code>ApplicationMaster</code> when asked to <em>stop</em> an
|
||||||
* allocated container.</p>
|
* allocated container.</p>
|
||||||
*
|
*
|
||||||
|
* <p>Currently, this is empty.</p>
|
||||||
|
*
|
||||||
* @see ContainerManager#stopContainer(StopContainerRequest)
|
* @see ContainerManager#stopContainer(StopContainerRequest)
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
|
@ -26,6 +26,8 @@
|
|||||||
* <p>The response sent by the <code>ResourceManager</code> to a client on
|
* <p>The response sent by the <code>ResourceManager</code> to a client on
|
||||||
* application submission.</p>
|
* application submission.</p>
|
||||||
*
|
*
|
||||||
|
* <p>Currently, this is empty.</p>
|
||||||
|
*
|
||||||
* @see ClientRMProtocol#submitApplication(SubmitApplicationRequest)
|
* @see ClientRMProtocol#submitApplication(SubmitApplicationRequest)
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
|
@ -35,7 +35,7 @@
|
|||||||
* <ul>
|
* <ul>
|
||||||
* <li>Response ID to track duplicate responses.</li>
|
* <li>Response ID to track duplicate responses.</li>
|
||||||
* <li>
|
* <li>
|
||||||
* A reboot flag to let the <code>ApplicationMaster</code> that its
|
* A reboot flag to let the <code>ApplicationMaster</code> know that its
|
||||||
* horribly out of sync and needs to reboot.</li>
|
* horribly out of sync and needs to reboot.</li>
|
||||||
* <li>A list of newly allocated {@link Container}.</li>
|
* <li>A list of newly allocated {@link Container}.</li>
|
||||||
* <li>A list of completed {@link Container}.</li>
|
* <li>A list of completed {@link Container}.</li>
|
||||||
@ -100,7 +100,7 @@ public interface AMResponse {
|
|||||||
/**
|
/**
|
||||||
* Get the <em>available headroom</em> for resources in the cluster for the
|
* Get the <em>available headroom</em> for resources in the cluster for the
|
||||||
* application.
|
* application.
|
||||||
* @return limit available headroom for resources in the cluster for the
|
* @return limit of available headroom for resources in the cluster for the
|
||||||
* application
|
* application
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
|
@ -33,7 +33,7 @@
|
|||||||
* <li>Applications user.</li>
|
* <li>Applications user.</li>
|
||||||
* <li>Application queue.</li>
|
* <li>Application queue.</li>
|
||||||
* <li>Application name.</li>
|
* <li>Application name.</li>
|
||||||
* <li>Host on which the <code>ApplicationMaster</code>is running.</li>
|
* <li>Host on which the <code>ApplicationMaster</code> is running.</li>
|
||||||
* <li>RPC port of the <code>ApplicationMaster</code>.</li>
|
* <li>RPC port of the <code>ApplicationMaster</code>.</li>
|
||||||
* <li>Tracking URL.</li>
|
* <li>Tracking URL.</li>
|
||||||
* <li>{@link YarnApplicationState} of the application.</li>
|
* <li>{@link YarnApplicationState} of the application.</li>
|
||||||
@ -215,6 +215,7 @@ public interface ApplicationReport {
|
|||||||
|
|
||||||
/**
|
/**
|
||||||
* Get the <em>final finish status</em> of the application.
|
* Get the <em>final finish status</em> of the application.
|
||||||
|
* @return <em>final finish status</em> of the application
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
@Stable
|
@Stable
|
||||||
|
@ -33,22 +33,43 @@
|
|||||||
@Public
|
@Public
|
||||||
@Stable
|
@Stable
|
||||||
public interface ApplicationResourceUsageReport {
|
public interface ApplicationResourceUsageReport {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the number of used containers
|
||||||
|
* @return the number of used containers
|
||||||
|
*/
|
||||||
@Public
|
@Public
|
||||||
@Stable
|
@Stable
|
||||||
int getNumUsedContainers();
|
int getNumUsedContainers();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set the number of used containers
|
||||||
|
* @param num_containers the number of used containers
|
||||||
|
*/
|
||||||
@Private
|
@Private
|
||||||
@Unstable
|
@Unstable
|
||||||
void setNumUsedContainers(int num_containers);
|
void setNumUsedContainers(int num_containers);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the number of reserved containers
|
||||||
|
* @return the number of reserved containers
|
||||||
|
*/
|
||||||
@Public
|
@Public
|
||||||
@Stable
|
@Stable
|
||||||
int getNumReservedContainers();
|
int getNumReservedContainers();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set the number of reserved containers
|
||||||
|
* @param num_reserved_containers the number of reserved containers
|
||||||
|
*/
|
||||||
@Private
|
@Private
|
||||||
@Unstable
|
@Unstable
|
||||||
void setNumReservedContainers(int num_reserved_containers);
|
void setNumReservedContainers(int num_reserved_containers);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the used <code>Resource</code>
|
||||||
|
* @return the used <code>Resource</code>
|
||||||
|
*/
|
||||||
@Public
|
@Public
|
||||||
@Stable
|
@Stable
|
||||||
Resource getUsedResources();
|
Resource getUsedResources();
|
||||||
@ -57,6 +78,10 @@ public interface ApplicationResourceUsageReport {
|
|||||||
@Unstable
|
@Unstable
|
||||||
void setUsedResources(Resource resources);
|
void setUsedResources(Resource resources);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the reserved <code>Resource</code>
|
||||||
|
* @return the reserved <code>Resource</code>
|
||||||
|
*/
|
||||||
@Public
|
@Public
|
||||||
@Stable
|
@Stable
|
||||||
Resource getReservedResources();
|
Resource getReservedResources();
|
||||||
@ -65,6 +90,10 @@ public interface ApplicationResourceUsageReport {
|
|||||||
@Unstable
|
@Unstable
|
||||||
void setReservedResources(Resource reserved_resources);
|
void setReservedResources(Resource reserved_resources);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the needed <code>Resource</code>
|
||||||
|
* @return the needed <code>Resource</code>
|
||||||
|
*/
|
||||||
@Public
|
@Public
|
||||||
@Stable
|
@Stable
|
||||||
Resource getNeededResources();
|
Resource getNeededResources();
|
||||||
|
@ -26,7 +26,7 @@
|
|||||||
import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* <p><code>ApplicationSubmissionContext</code> represents the all of the
|
* <p><code>ApplicationSubmissionContext</code> represents all of the
|
||||||
* information needed by the <code>ResourceManager</code> to launch
|
* information needed by the <code>ResourceManager</code> to launch
|
||||||
* the <code>ApplicationMaster</code> for an application.</p>
|
* the <code>ApplicationMaster</code> for an application.</p>
|
||||||
*
|
*
|
||||||
|
@ -38,8 +38,7 @@
|
|||||||
* <ul>
|
* <ul>
|
||||||
* <li>{@link ContainerId} for the container, which is globally unique.</li>
|
* <li>{@link ContainerId} for the container, which is globally unique.</li>
|
||||||
* <li>
|
* <li>
|
||||||
* {@link NodeId} of the node on which identifies the node on which it
|
* {@link NodeId} of the node on which it is allocated.
|
||||||
* is allocated.
|
|
||||||
* </li>
|
* </li>
|
||||||
* <li>HTTP uri of the node.</li>
|
* <li>HTTP uri of the node.</li>
|
||||||
* <li>{@link Resource} allocated to the container.</li>
|
* <li>{@link Resource} allocated to the container.</li>
|
||||||
|
@ -27,7 +27,7 @@
|
|||||||
import org.apache.hadoop.yarn.api.ContainerManager;
|
import org.apache.hadoop.yarn.api.ContainerManager;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* <p><code>ContainerLaunchContext</code> represents the all of the information
|
* <p><code>ContainerLaunchContext</code> represents all of the information
|
||||||
* needed by the <code>NodeManager</code> to launch a container.</p>
|
* needed by the <code>NodeManager</code> to launch a container.</p>
|
||||||
*
|
*
|
||||||
* <p>It includes details such as:
|
* <p>It includes details such as:
|
||||||
@ -127,7 +127,8 @@ public interface ContainerLaunchContext {
|
|||||||
Map<String, LocalResource> getLocalResources();
|
Map<String, LocalResource> getLocalResources();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Set <code>LocalResource</code> required by the container.
|
* Set <code>LocalResource</code> required by the container. All pre-existing
|
||||||
|
* Map entries are cleared before adding the new Map
|
||||||
* @param localResources <code>LocalResource</code> required by the container
|
* @param localResources <code>LocalResource</code> required by the container
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
@ -143,7 +144,8 @@ public interface ContainerLaunchContext {
|
|||||||
Map<String, ByteBuffer> getServiceData();
|
Map<String, ByteBuffer> getServiceData();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Set application-specific binary <em>service data</em>.
|
* Set application-specific binary <em>service data</em>. All pre-existing Map
|
||||||
|
* entries are preserved.
|
||||||
* @param serviceData application-specific binary <em>service data</em>
|
* @param serviceData application-specific binary <em>service data</em>
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
@ -159,7 +161,8 @@ public interface ContainerLaunchContext {
|
|||||||
Map<String, String> getEnvironment();
|
Map<String, String> getEnvironment();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Add <em>environment variables</em> for the container.
|
* Add <em>environment variables</em> for the container. All pre-existing Map
|
||||||
|
* entries are cleared before adding the new Map
|
||||||
* @param environment <em>environment variables</em> for the container
|
* @param environment <em>environment variables</em> for the container
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
@ -175,7 +178,8 @@ public interface ContainerLaunchContext {
|
|||||||
List<String> getCommands();
|
List<String> getCommands();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Add the list of <em>commands</em> for launching the container.
|
* Add the list of <em>commands</em> for launching the container. All
|
||||||
|
* pre-existing List entries are cleared before adding the new List
|
||||||
* @param commands the list of <em>commands</em> for launching the container
|
* @param commands the list of <em>commands</em> for launching the container
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
@ -191,8 +195,9 @@ public interface ContainerLaunchContext {
|
|||||||
public Map<ApplicationAccessType, String> getApplicationACLs();
|
public Map<ApplicationAccessType, String> getApplicationACLs();
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Set the <code>ApplicationACL</code>s for the application.
|
* Set the <code>ApplicationACL</code>s for the application. All pre-existing
|
||||||
* @param acls
|
* Map entries are cleared before adding the new Map
|
||||||
|
* @param acls <code>ApplicationACL</code>s for the application
|
||||||
*/
|
*/
|
||||||
@Public
|
@Public
|
||||||
@Stable
|
@Stable
|
||||||
|
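The revised javadoc above pins down replace-not-merge semantics for the ContainerLaunchContext setters: each set* call clears whatever collection was stored before copying in the new one (setServiceData is the documented exception, where pre-existing entries are preserved). A minimal sketch of what that means for callers, assuming the Records helper from hadoop-yarn-common; names and values are illustrative only:

  import java.util.HashMap;
  import java.util.Map;
  import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
  import org.apache.hadoop.yarn.util.Records;

  public class LaunchContextSketch {
    public static void main(String[] args) {
      ContainerLaunchContext ctx =
          Records.newRecord(ContainerLaunchContext.class);

      Map<String, String> env = new HashMap<String, String>();
      env.put("CLASSPATH", "./*");
      ctx.setEnvironment(env);

      // A second call does not merge: the CLASSPATH entry above is dropped
      // and only JAVA_HOME remains in the stored environment.
      Map<String, String> env2 = new HashMap<String, String>();
      env2.put("JAVA_HOME", "/usr/lib/jvm/default");
      ctx.setEnvironment(env2);
    }
  }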
@ -35,8 +35,6 @@
|
|||||||
* </li>
|
* </li>
|
||||||
* <li>The previous time at which the health status was reported.</li>
|
* <li>The previous time at which the health status was reported.</li>
|
||||||
* <li>A diagnostic report on the health status.</li>
|
* <li>A diagnostic report on the health status.</li>
|
||||||
* <li></li>
|
|
||||||
* <li></li>
|
|
||||||
* </ul>
|
* </ul>
|
||||||
* </p>
|
* </p>
|
||||||
*
|
*
|
||||||
|
@ -18,10 +18,23 @@
|
|||||||
|
|
||||||
package org.apache.hadoop.yarn.api.records;
|
package org.apache.hadoop.yarn.api.records;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The priority assigned to a ResourceRequest or Application or Container
|
||||||
|
* allocation
|
||||||
|
*
|
||||||
|
*/
|
||||||
public interface Priority extends Comparable<Priority> {
|
public interface Priority extends Comparable<Priority> {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the assigned priority
|
||||||
|
* @return the assigned priority
|
||||||
|
*/
|
||||||
public abstract int getPriority();
|
public abstract int getPriority();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set the assigned priority
|
||||||
|
* @param priority the assigned priority
|
||||||
|
*/
|
||||||
public abstract void setPriority(int priority);
|
public abstract void setPriority(int priority);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -27,7 +27,7 @@
|
|||||||
import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* <p>QueueInfo</p> is a report of the runtime information of the queue.</p>
|
* <p>QueueInfo is a report of the runtime information of the queue.</p>
|
||||||
*
|
*
|
||||||
* <p>It includes information such as:
|
* <p>It includes information such as:
|
||||||
* <ul>
|
* <ul>
|
||||||
|
@ -93,7 +93,7 @@
|
|||||||
* to inform the <code>ResourceManager</code> that it is up and alive. The {@link AMRMProtocol#allocate} to the
|
* to inform the <code>ResourceManager</code> that it is up and alive. The {@link AMRMProtocol#allocate} to the
|
||||||
* <code>ResourceManager</code> from the <code>ApplicationMaster</code> acts as a heartbeat.
|
* <code>ResourceManager</code> from the <code>ApplicationMaster</code> acts as a heartbeat.
|
||||||
*
|
*
|
||||||
* <p> For the actual handling of the job, the <code>ApplicationMaster</code> has to request for the
|
* <p> For the actual handling of the job, the <code>ApplicationMaster</code> has to request the
|
||||||
* <code>ResourceManager</code> via {@link AllocateRequest} for the required no. of containers using {@link ResourceRequest}
|
* <code>ResourceManager</code> via {@link AllocateRequest} for the required no. of containers using {@link ResourceRequest}
|
||||||
* with the necessary resource specifications such as node location, computational (memory/disk/cpu) resource requirements.
|
* with the necessary resource specifications such as node location, computational (memory/disk/cpu) resource requirements.
|
||||||
* The <code>ResourceManager</code> responds with an {@link AllocateResponse} that informs the <code>ApplicationMaster</code>
|
* The <code>ResourceManager</code> responds with an {@link AllocateResponse} that informs the <code>ApplicationMaster</code>
|
||||||
|
@ -37,12 +37,12 @@
|
|||||||
*/
|
*/
|
||||||
public class HadoopYarnProtoRPC extends YarnRPC {
|
public class HadoopYarnProtoRPC extends YarnRPC {
|
||||||
|
|
||||||
private static final Log LOG = LogFactory.getLog(HadoopYarnRPC.class);
|
private static final Log LOG = LogFactory.getLog(HadoopYarnProtoRPC.class);
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Object getProxy(Class protocol, InetSocketAddress addr,
|
public Object getProxy(Class protocol, InetSocketAddress addr,
|
||||||
Configuration conf) {
|
Configuration conf) {
|
||||||
LOG.info("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
|
LOG.debug("Creating a HadoopYarnProtoRpc proxy for protocol " + protocol);
|
||||||
return RpcFactoryProvider.getClientFactory(conf).getClient(protocol, 1,
|
return RpcFactoryProvider.getClientFactory(conf).getClient(protocol, 1,
|
||||||
addr, conf);
|
addr, conf);
|
||||||
}
|
}
|
||||||
@ -57,11 +57,11 @@ public Server getServer(Class protocol, Object instance,
|
|||||||
InetSocketAddress addr, Configuration conf,
|
InetSocketAddress addr, Configuration conf,
|
||||||
SecretManager<? extends TokenIdentifier> secretManager,
|
SecretManager<? extends TokenIdentifier> secretManager,
|
||||||
int numHandlers) {
|
int numHandlers) {
|
||||||
LOG.info("Creating a HadoopYarnProtoRpc server for protocol " + protocol +
|
LOG.debug("Creating a HadoopYarnProtoRpc server for protocol " + protocol +
|
||||||
" with " + numHandlers + " handlers");
|
" with " + numHandlers + " handlers");
|
||||||
|
|
||||||
return RpcFactoryProvider.getServerFactory(conf).getServer(protocol, instance,
|
return RpcFactoryProvider.getServerFactory(conf).getServer(protocol,
|
||||||
addr, conf, secretManager, numHandlers);
|
instance, addr, conf, secretManager, numHandlers);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -45,7 +45,7 @@ public class HadoopYarnRPC extends YarnRPC {
|
|||||||
@Override
|
@Override
|
||||||
public Object getProxy(Class protocol, InetSocketAddress addr,
|
public Object getProxy(Class protocol, InetSocketAddress addr,
|
||||||
Configuration conf) {
|
Configuration conf) {
|
||||||
LOG.info("Creating a HadoopYarnRpc proxy for protocol " + protocol);
|
LOG.debug("Creating a HadoopYarnRpc proxy for protocol " + protocol);
|
||||||
RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
|
RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
|
||||||
try {
|
try {
|
||||||
return RPC.getProxy(protocol, 1, addr, conf);
|
return RPC.getProxy(protocol, 1, addr, conf);
|
||||||
@ -64,7 +64,7 @@ public Server getServer(Class protocol, Object instance,
|
|||||||
InetSocketAddress addr, Configuration conf,
|
InetSocketAddress addr, Configuration conf,
|
||||||
SecretManager<? extends TokenIdentifier> secretManager,
|
SecretManager<? extends TokenIdentifier> secretManager,
|
||||||
int numHandlers) {
|
int numHandlers) {
|
||||||
LOG.info("Creating a HadoopYarnRpc server for protocol " + protocol +
|
LOG.debug("Creating a HadoopYarnRpc server for protocol " + protocol +
|
||||||
" with " + numHandlers + " handlers");
|
" with " + numHandlers + " handlers");
|
||||||
RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
|
RPC.setProtocolEngine(conf, protocol, AvroSpecificRpcEngine.class);
|
||||||
final RPC.Server hadoopServer;
|
final RPC.Server hadoopServer;
|
||||||
|
@ -46,7 +46,8 @@ public abstract Server getServer(Class protocol, Object instance,
|
|||||||
int numHandlers);
|
int numHandlers);
|
||||||
|
|
||||||
public static YarnRPC create(Configuration conf) {
|
public static YarnRPC create(Configuration conf) {
|
||||||
LOG.info("Creating YarnRPC for " + conf.get(YarnConfiguration.IPC_RPC_IMPL));
|
LOG.debug("Creating YarnRPC for " +
|
||||||
|
conf.get(YarnConfiguration.IPC_RPC_IMPL));
|
||||||
String clazzName = conf.get(YarnConfiguration.IPC_RPC_IMPL);
|
String clazzName = conf.get(YarnConfiguration.IPC_RPC_IMPL);
|
||||||
if (clazzName == null) {
|
if (clazzName == null) {
|
||||||
clazzName = YarnConfiguration.DEFAULT_IPC_RPC_IMPL;
|
clazzName = YarnConfiguration.DEFAULT_IPC_RPC_IMPL;
|
||||||
|
@ -39,9 +39,9 @@ public Token<ApplicationTokenIdentifier> selectToken(Text service,
|
|||||||
if (service == null) {
|
if (service == null) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
LOG.info("Looking for a token with service " + service.toString());
|
LOG.debug("Looking for a token with service " + service.toString());
|
||||||
for (Token<? extends TokenIdentifier> token : tokens) {
|
for (Token<? extends TokenIdentifier> token : tokens) {
|
||||||
LOG.info("Token kind is " + token.getKind().toString()
|
LOG.debug("Token kind is " + token.getKind().toString()
|
||||||
+ " and the token's service name is " + token.getService());
|
+ " and the token's service name is " + token.getService());
|
||||||
if (ApplicationTokenIdentifier.KIND_NAME.equals(token.getKind())
|
if (ApplicationTokenIdentifier.KIND_NAME.equals(token.getKind())
|
||||||
&& service.equals(token.getService())) {
|
&& service.equals(token.getService())) {
|
||||||
|
@ -39,9 +39,9 @@ public Token<ClientTokenIdentifier> selectToken(Text service,
|
|||||||
if (service == null) {
|
if (service == null) {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
LOG.info("Looking for a token with service " + service.toString());
|
LOG.debug("Looking for a token with service " + service.toString());
|
||||||
for (Token<? extends TokenIdentifier> token : tokens) {
|
for (Token<? extends TokenIdentifier> token : tokens) {
|
||||||
LOG.info("Token kind is " + token.getKind().toString()
|
LOG.debug("Token kind is " + token.getKind().toString()
|
||||||
+ " and the token's service name is " + token.getService());
|
+ " and the token's service name is " + token.getService());
|
||||||
if (ClientTokenIdentifier.KIND_NAME.equals(token.getKind())
|
if (ClientTokenIdentifier.KIND_NAME.equals(token.getKind())
|
||||||
&& service.equals(token.getService())) {
|
&& service.equals(token.getService())) {
|
||||||
|
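The hunks above drop per-proxy, per-server, and per-token messages from INFO to DEBUG. Independently of this patch, such messages are often also wrapped in an isDebugEnabled() check so the string concatenation itself is skipped when DEBUG logging is off; a small illustrative sketch, not taken from the patch:

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;

  public class DebugLogSketch {
    private static final Log LOG = LogFactory.getLog(DebugLogSketch.class);

    void logProxyCreation(Class<?> protocol) {
      // Guarding avoids building the message string when DEBUG is disabled.
      if (LOG.isDebugEnabled()) {
        LOG.debug("Creating a proxy for protocol " + protocol);
      }
    }
  }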
@ -79,11 +79,11 @@ public enum Render {
|
|||||||
@Override
|
@Override
|
||||||
protected void render(Block html) {
|
protected void render(Block html) {
|
||||||
html.
|
html.
|
||||||
link(join("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.9/themes/",
|
link(join("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.16/themes/",
|
||||||
getTheme(), "/jquery-ui.css")).
|
getTheme(), "/jquery-ui.css")).
|
||||||
link(root_url("static/dt-1.7.5/css/jui-dt.css")).
|
link(root_url("static/dt-1.7.5/css/jui-dt.css")).
|
||||||
script("https://ajax.googleapis.com/ajax/libs/jquery/1.4.4/jquery.min.js").
|
script("https://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js").
|
||||||
script("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.9/jquery-ui.min.js").
|
script("https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.16/jquery-ui.min.js").
|
||||||
script(root_url("static/dt-1.7.5/js/jquery.dataTables.min.js")).
|
script(root_url("static/dt-1.7.5/js/jquery.dataTables.min.js")).
|
||||||
script(root_url("static/yarn.dt.plugins.js")).
|
script(root_url("static/yarn.dt.plugins.js")).
|
||||||
script(root_url("static/themeswitcher.js")).
|
script(root_url("static/themeswitcher.js")).
|
||||||
@ -224,7 +224,7 @@ public static String initSelector(String name) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public static StringBuilder tableInit() {
|
public static StringBuilder tableInit() {
|
||||||
return new StringBuilder("{bJQueryUI:true, aaSorting:[], ").
|
return new StringBuilder("{bJQueryUI:true, ").
|
||||||
append("sPaginationType: 'full_numbers', iDisplayLength:20, ").
|
append("sPaginationType: 'full_numbers', iDisplayLength:20, ").
|
||||||
append("aLengthMenu:[20, 40, 60, 80, 100]");
|
append("aLengthMenu:[20, 40, 60, 80, 100]");
|
||||||
}
|
}
|
||||||
|
@ -68,7 +68,7 @@ Collection<AuxiliaryService> getServices() {
|
|||||||
/**
|
/**
|
||||||
* @return the meta data for all registered services, that have been started.
|
* @return the meta data for all registered services, that have been started.
|
||||||
* If a service has not been started no metadata will be available. The key
|
* If a service has not been started no metadata will be available. The key
|
||||||
* the the name of the service as defined in the configuration.
|
* is the name of the service as defined in the configuration.
|
||||||
*/
|
*/
|
||||||
public Map<String, ByteBuffer> getMeta() {
|
public Map<String, ByteBuffer> getMeta() {
|
||||||
Map<String, ByteBuffer> metaClone = new HashMap<String, ByteBuffer>(
|
Map<String, ByteBuffer> metaClone = new HashMap<String, ByteBuffer>(
|
||||||
|
@ -51,6 +51,8 @@ public class AllApplicationsPage extends NMView {
|
|||||||
|
|
||||||
private String appsTableInit() {
|
private String appsTableInit() {
|
||||||
return tableInit().
|
return tableInit().
|
||||||
|
// Sort by id upon page load
|
||||||
|
append(", aaSorting: [[0, 'asc']]").
|
||||||
// applicationid, applicationstate
|
// applicationid, applicationstate
|
||||||
append(", aoColumns:[null, null]} ").toString();
|
append(", aoColumns:[null, null]} ").toString();
|
||||||
}
|
}
|
||||||
|
@ -53,6 +53,14 @@
|
|||||||
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReservedEvent;
|
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerReservedEvent;
|
||||||
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
|
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
|
||||||
|
|
||||||
|
import com.google.common.collect.HashMultiset;
|
||||||
|
import com.google.common.collect.Multiset;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Represents an Application from the viewpoint of the scheduler.
|
||||||
|
* Each running Application in the RM corresponds to one instance
|
||||||
|
* of this class.
|
||||||
|
*/
|
||||||
public class SchedulerApp {
|
public class SchedulerApp {
|
||||||
|
|
||||||
private static final Log LOG = LogFactory.getLog(SchedulerApp.class);
|
private static final Log LOG = LogFactory.getLog(SchedulerApp.class);
|
||||||
@ -76,11 +84,16 @@ public class SchedulerApp {
|
|||||||
final Map<Priority, Map<NodeId, RMContainer>> reservedContainers =
|
final Map<Priority, Map<NodeId, RMContainer>> reservedContainers =
|
||||||
new HashMap<Priority, Map<NodeId, RMContainer>>();
|
new HashMap<Priority, Map<NodeId, RMContainer>>();
|
||||||
|
|
||||||
Map<Priority, Integer> schedulingOpportunities =
|
/**
|
||||||
new HashMap<Priority, Integer>();
|
* Count how many times the application has been given an opportunity
|
||||||
|
* to schedule a task at each priority. Each time the scheduler
|
||||||
|
* asks the application for a task at this priority, it is incremented,
|
||||||
|
* and each time the application successfully schedules a task, it
|
||||||
|
* is reset to 0.
|
||||||
|
*/
|
||||||
|
Multiset<Priority> schedulingOpportunities = HashMultiset.create();
|
||||||
|
|
||||||
Map<Priority, Integer> reReservations =
|
Multiset<Priority> reReservations = HashMultiset.create();
|
||||||
new HashMap<Priority, Integer>();
|
|
||||||
|
|
||||||
Resource currentReservation = recordFactory
|
Resource currentReservation = recordFactory
|
||||||
.newRecordInstance(Resource.class);
|
.newRecordInstance(Resource.class);
|
||||||
@ -282,49 +295,33 @@ public synchronized RMContainer getRMContainer(ContainerId id) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
synchronized public void resetSchedulingOpportunities(Priority priority) {
|
synchronized public void resetSchedulingOpportunities(Priority priority) {
|
||||||
this.schedulingOpportunities.put(priority, Integer.valueOf(0));
|
this.schedulingOpportunities.setCount(priority, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
synchronized public void addSchedulingOpportunity(Priority priority) {
|
synchronized public void addSchedulingOpportunity(Priority priority) {
|
||||||
Integer schedulingOpportunities =
|
this.schedulingOpportunities.setCount(priority,
|
||||||
this.schedulingOpportunities.get(priority);
|
schedulingOpportunities.count(priority) + 1);
|
||||||
if (schedulingOpportunities == null) {
|
|
||||||
schedulingOpportunities = 0;
|
|
||||||
}
|
|
||||||
++schedulingOpportunities;
|
|
||||||
this.schedulingOpportunities.put(priority, schedulingOpportunities);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return the number of times the application has been given an opportunity
|
||||||
|
* to schedule a task at the given priority since the last time it
|
||||||
|
* successfully did so.
|
||||||
|
*/
|
||||||
synchronized public int getSchedulingOpportunities(Priority priority) {
|
synchronized public int getSchedulingOpportunities(Priority priority) {
|
||||||
Integer schedulingOpportunities =
|
return this.schedulingOpportunities.count(priority);
|
||||||
this.schedulingOpportunities.get(priority);
|
|
||||||
if (schedulingOpportunities == null) {
|
|
||||||
schedulingOpportunities = 0;
|
|
||||||
this.schedulingOpportunities.put(priority, schedulingOpportunities);
|
|
||||||
}
|
|
||||||
return schedulingOpportunities;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
synchronized void resetReReservations(Priority priority) {
|
synchronized void resetReReservations(Priority priority) {
|
||||||
this.reReservations.put(priority, Integer.valueOf(0));
|
this.reReservations.setCount(priority, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
synchronized void addReReservation(Priority priority) {
|
synchronized void addReReservation(Priority priority) {
|
||||||
Integer reReservations = this.reReservations.get(priority);
|
this.reReservations.add(priority);
|
||||||
if (reReservations == null) {
|
|
||||||
reReservations = 0;
|
|
||||||
}
|
|
||||||
++reReservations;
|
|
||||||
this.reReservations.put(priority, reReservations);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
synchronized public int getReReservations(Priority priority) {
|
synchronized public int getReReservations(Priority priority) {
|
||||||
Integer reReservations = this.reReservations.get(priority);
|
return this.reReservations.count(priority);
|
||||||
if (reReservations == null) {
|
|
||||||
reReservations = 0;
|
|
||||||
this.reReservations.put(priority, reReservations);
|
|
||||||
}
|
|
||||||
return reReservations;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized int getNumReservedContainers(Priority priority) {
|
public synchronized int getNumReservedContainers(Priority priority) {
|
||||||
|
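SchedulerApp now tracks scheduling opportunities and re-reservations with Guava's Multiset instead of a hand-rolled Map<Priority, Integer>, which removes the null checks in the old code. A short sketch of the Multiset calls the new code relies on, using String elements purely for illustration:

  import com.google.common.collect.HashMultiset;
  import com.google.common.collect.Multiset;

  public class MultisetSketch {
    public static void main(String[] args) {
      Multiset<String> counts = HashMultiset.create();

      counts.add("p1");                               // count("p1") == 1
      counts.setCount("p1", counts.count("p1") + 1);  // increment, as addSchedulingOpportunity does
      System.out.println(counts.count("p1"));         // 2; absent elements report 0, never null
      counts.setCount("p1", 0);                       // reset, as resetSchedulingOpportunities does
    }
  }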
@ -0,0 +1,34 @@
|
|||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
|
||||||
|
|
||||||
|
class CSQueueUtils {
|
||||||
|
|
||||||
|
public static void checkMaxCapacity(String queueName,
|
||||||
|
float capacity, float maximumCapacity) {
|
||||||
|
if (maximumCapacity != CapacitySchedulerConfiguration.UNDEFINED &&
|
||||||
|
maximumCapacity < capacity) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
"Illegal call to setMaxCapacity. " +
|
||||||
|
"Queue '" + queueName + "' has " +
|
||||||
|
"capacity (" + capacity + ") greater than " +
|
||||||
|
"maximumCapacity (" + maximumCapacity + ")" );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
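The new CSQueueUtils.checkMaxCapacity rejects a maximum capacity that is defined but smaller than the queue's configured capacity. Its contract, sketched as calls from within the same package since the class is package-private (queue names and numbers here are illustrative):

  package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;

  class CSQueueUtilsSketch {
    static void demo() {
      // Accepted: maximum capacity above capacity, or left undefined.
      CSQueueUtils.checkMaxCapacity("a", 50.0f, 60.0f);
      CSQueueUtils.checkMaxCapacity("a", 50.0f,
          CapacitySchedulerConfiguration.UNDEFINED);

      // Throws IllegalArgumentException: 45 is less than the capacity of 50.
      CSQueueUtils.checkMaxCapacity("b", 50.0f, 45.0f);
    }
  }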
@ -160,6 +160,10 @@ public int getMaximumCapacity(String queue) {
|
|||||||
return maxCapacity;
|
return maxCapacity;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void setMaximumCapacity(String queue, int maxCapacity) {
|
||||||
|
setInt(getQueuePrefix(queue) + MAXIMUM_CAPACITY, maxCapacity);
|
||||||
|
}
|
||||||
|
|
||||||
public int getUserLimit(String queue) {
|
public int getUserLimit(String queue) {
|
||||||
int userLimit =
|
int userLimit =
|
||||||
getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT);
|
getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT);
|
||||||
|
@ -211,16 +211,19 @@ private int computeMaxActiveApplicationsPerUser(int maxActiveApplications,
|
|||||||
|
|
||||||
private synchronized void setupQueueConfigs(
|
private synchronized void setupQueueConfigs(
|
||||||
float capacity, float absoluteCapacity,
|
float capacity, float absoluteCapacity,
|
||||||
float maxCapacity, float absoluteMaxCapacity,
|
float maximumCapacity, float absoluteMaxCapacity,
|
||||||
int userLimit, float userLimitFactor,
|
int userLimit, float userLimitFactor,
|
||||||
int maxApplications, int maxApplicationsPerUser,
|
int maxApplications, int maxApplicationsPerUser,
|
||||||
int maxActiveApplications, int maxActiveApplicationsPerUser,
|
int maxActiveApplications, int maxActiveApplicationsPerUser,
|
||||||
QueueState state, Map<QueueACL, AccessControlList> acls)
|
QueueState state, Map<QueueACL, AccessControlList> acls)
|
||||||
{
|
{
|
||||||
|
// Sanity check
|
||||||
|
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||||
|
|
||||||
this.capacity = capacity;
|
this.capacity = capacity;
|
||||||
this.absoluteCapacity = parent.getAbsoluteCapacity() * capacity;
|
this.absoluteCapacity = parent.getAbsoluteCapacity() * capacity;
|
||||||
|
|
||||||
this.maximumCapacity = maxCapacity;
|
this.maximumCapacity = maximumCapacity;
|
||||||
this.absoluteMaxCapacity = absoluteMaxCapacity;
|
this.absoluteMaxCapacity = absoluteMaxCapacity;
|
||||||
|
|
||||||
this.userLimit = userLimit;
|
this.userLimit = userLimit;
|
||||||
@ -236,9 +239,9 @@ private synchronized void setupQueueConfigs(
|
|||||||
|
|
||||||
this.acls = acls;
|
this.acls = acls;
|
||||||
|
|
||||||
this.queueInfo.setCapacity(capacity);
|
this.queueInfo.setCapacity(this.capacity);
|
||||||
this.queueInfo.setMaximumCapacity(maximumCapacity);
|
this.queueInfo.setMaximumCapacity(this.maximumCapacity);
|
||||||
this.queueInfo.setQueueState(state);
|
this.queueInfo.setQueueState(this.state);
|
||||||
|
|
||||||
StringBuilder aclsString = new StringBuilder();
|
StringBuilder aclsString = new StringBuilder();
|
||||||
for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
|
for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
|
||||||
@ -250,7 +253,7 @@ private synchronized void setupQueueConfigs(
|
|||||||
" [= (float) configuredCapacity / 100 ]" + "\n" +
|
" [= (float) configuredCapacity / 100 ]" + "\n" +
|
||||||
"asboluteCapacity = " + absoluteCapacity +
|
"asboluteCapacity = " + absoluteCapacity +
|
||||||
" [= parentAbsoluteCapacity * capacity ]" + "\n" +
|
" [= parentAbsoluteCapacity * capacity ]" + "\n" +
|
||||||
"maxCapacity = " + maxCapacity +
|
"maxCapacity = " + maximumCapacity +
|
||||||
" [= configuredMaxCapacity ]" + "\n" +
|
" [= configuredMaxCapacity ]" + "\n" +
|
||||||
"absoluteMaxCapacity = " + absoluteMaxCapacity +
|
"absoluteMaxCapacity = " + absoluteMaxCapacity +
|
||||||
" [= Float.MAX_VALUE if maximumCapacity undefined, " +
|
" [= Float.MAX_VALUE if maximumCapacity undefined, " +
|
||||||
@ -394,6 +397,9 @@ synchronized void setUsedCapacity(float usedCapacity) {
|
|||||||
* @param maximumCapacity new max capacity
|
* @param maximumCapacity new max capacity
|
||||||
*/
|
*/
|
||||||
synchronized void setMaxCapacity(float maximumCapacity) {
|
synchronized void setMaxCapacity(float maximumCapacity) {
|
||||||
|
// Sanity check
|
||||||
|
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||||
|
|
||||||
this.maximumCapacity = maximumCapacity;
|
this.maximumCapacity = maximumCapacity;
|
||||||
this.absoluteMaxCapacity =
|
this.absoluteMaxCapacity =
|
||||||
(maximumCapacity == CapacitySchedulerConfiguration.UNDEFINED) ?
|
(maximumCapacity == CapacitySchedulerConfiguration.UNDEFINED) ?
|
||||||
|
@ -153,6 +153,9 @@ private synchronized void setupQueueConfigs(
|
|||||||
float maximumCapacity, float absoluteMaxCapacity,
|
float maximumCapacity, float absoluteMaxCapacity,
|
||||||
QueueState state, Map<QueueACL, AccessControlList> acls
|
QueueState state, Map<QueueACL, AccessControlList> acls
|
||||||
) {
|
) {
|
||||||
|
// Sanity check
|
||||||
|
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||||
|
|
||||||
this.capacity = capacity;
|
this.capacity = capacity;
|
||||||
this.absoluteCapacity = absoluteCapacity;
|
this.absoluteCapacity = absoluteCapacity;
|
||||||
this.maximumCapacity = maximumCapacity;
|
this.maximumCapacity = maximumCapacity;
|
||||||
@ -162,9 +165,9 @@ private synchronized void setupQueueConfigs(
|
|||||||
|
|
||||||
this.acls = acls;
|
this.acls = acls;
|
||||||
|
|
||||||
this.queueInfo.setCapacity(capacity);
|
this.queueInfo.setCapacity(this.capacity);
|
||||||
this.queueInfo.setMaximumCapacity(maximumCapacity);
|
this.queueInfo.setMaximumCapacity(this.maximumCapacity);
|
||||||
this.queueInfo.setQueueState(state);
|
this.queueInfo.setQueueState(this.state);
|
||||||
|
|
||||||
StringBuilder aclsString = new StringBuilder();
|
StringBuilder aclsString = new StringBuilder();
|
||||||
for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
|
for (Map.Entry<QueueACL, AccessControlList> e : acls.entrySet()) {
|
||||||
@ -484,6 +487,9 @@ synchronized void setUtilization(float utilization) {
|
|||||||
* @param maximumCapacity new max capacity
|
* @param maximumCapacity new max capacity
|
||||||
*/
|
*/
|
||||||
synchronized void setMaxCapacity(float maximumCapacity) {
|
synchronized void setMaxCapacity(float maximumCapacity) {
|
||||||
|
// Sanity check
|
||||||
|
CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity);
|
||||||
|
|
||||||
this.maximumCapacity = maximumCapacity;
|
this.maximumCapacity = maximumCapacity;
|
||||||
float parentAbsoluteCapacity =
|
float parentAbsoluteCapacity =
|
||||||
(rootQueue) ? 100.0f : parent.getAbsoluteCapacity();
|
(rootQueue) ? 100.0f : parent.getAbsoluteCapacity();
|
||||||
|
@ -61,6 +61,10 @@ private String appsTableInit() {
|
|||||||
StringBuilder init = tableInit().
|
StringBuilder init = tableInit().
|
||||||
append(", aoColumns:[{sType:'title-numeric'}, null, null, null, null,").
|
append(", aoColumns:[{sType:'title-numeric'}, null, null, null, null,").
|
||||||
append("null,{sType:'title-numeric', bSearchable:false}, null, null]");
|
append("null,{sType:'title-numeric', bSearchable:false}, null, null]");
|
||||||
|
|
||||||
|
// Sort by id upon page load
|
||||||
|
init.append(", aaSorting: [[0, 'asc']]");
|
||||||
|
|
||||||
String rows = $("rowlimit");
|
String rows = $("rowlimit");
|
||||||
int rowLimit = rows.isEmpty() ? MAX_DISPLAY_ROWS : Integer.parseInt(rows);
|
int rowLimit = rows.isEmpty() ? MAX_DISPLAY_ROWS : Integer.parseInt(rows);
|
||||||
if (list.apps.size() < rowLimit) {
|
if (list.apps.size() < rowLimit) {
|
||||||
|
@ -18,6 +18,8 @@
|
|||||||
|
|
||||||
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
|
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
|
||||||
|
|
||||||
|
import junit.framework.Assert;
|
||||||
|
|
||||||
import org.apache.commons.logging.Log;
|
import org.apache.commons.logging.Log;
|
||||||
import org.apache.commons.logging.LogFactory;
|
import org.apache.commons.logging.LogFactory;
|
||||||
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
|
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
|
||||||
@ -35,7 +37,6 @@ public void testQueueParsing() throws Exception {
|
|||||||
|
|
||||||
CapacityScheduler capacityScheduler = new CapacityScheduler();
|
CapacityScheduler capacityScheduler = new CapacityScheduler();
|
||||||
capacityScheduler.reinitialize(conf, null, null);
|
capacityScheduler.reinitialize(conf, null, null);
|
||||||
//capacityScheduler.g
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
|
private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
|
||||||
@ -104,4 +105,48 @@ public void testRootQueueParsing() throws Exception {
|
|||||||
CapacityScheduler capacityScheduler = new CapacityScheduler();
|
CapacityScheduler capacityScheduler = new CapacityScheduler();
|
||||||
capacityScheduler.reinitialize(conf, null, null);
|
capacityScheduler.reinitialize(conf, null, null);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testMaxCapacity() throws Exception {
|
||||||
|
CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
|
||||||
|
|
||||||
|
conf.setQueues(CapacityScheduler.ROOT, new String[] {"a", "b", "c"});
|
||||||
|
conf.setCapacity(CapacityScheduler.ROOT, 100);
|
||||||
|
|
||||||
|
final String A = CapacityScheduler.ROOT + ".a";
|
||||||
|
conf.setCapacity(A, 50);
|
||||||
|
conf.setMaximumCapacity(A, 60);
|
||||||
|
|
||||||
|
final String B = CapacityScheduler.ROOT + ".b";
|
||||||
|
conf.setCapacity(B, 50);
|
||||||
|
conf.setMaximumCapacity(B, 45); // Should throw an exception
|
||||||
|
|
||||||
|
|
||||||
|
boolean fail = false;
|
||||||
|
CapacityScheduler capacityScheduler;
|
||||||
|
try {
|
||||||
|
capacityScheduler = new CapacityScheduler();
|
||||||
|
capacityScheduler.reinitialize(conf, null, null);
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
fail = true;
|
||||||
|
}
|
||||||
|
Assert.assertTrue("Didn't throw IllegalArgumentException for wrong maxCap",
|
||||||
|
fail);
|
||||||
|
|
||||||
|
conf.setMaximumCapacity(B, 60);
|
||||||
|
|
||||||
|
// Now this should work
|
||||||
|
capacityScheduler = new CapacityScheduler();
|
||||||
|
capacityScheduler.reinitialize(conf, null, null);
|
||||||
|
|
||||||
|
fail = false;
|
||||||
|
try {
|
||||||
|
LeafQueue a = (LeafQueue)capacityScheduler.getQueue(A);
|
||||||
|
a.setMaxCapacity(45);
|
||||||
|
} catch (IllegalArgumentException iae) {
|
||||||
|
fail = true;
|
||||||
|
}
|
||||||
|
Assert.assertTrue("Didn't throw IllegalArgumentException for wrong " +
|
||||||
|
"setMaxCap", fail);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -12,7 +12,7 @@
|
|||||||
~~ limitations under the License. See accompanying LICENSE file.
|
~~ limitations under the License. See accompanying LICENSE file.
|
||||||
|
|
||||||
---
|
---
|
||||||
Hadoop Map Reduce Next Generation-${project.version} - Cluster Setup
|
Hadoop Distributed File System-${project.version} - Federation
|
||||||
---
|
---
|
||||||
---
|
---
|
||||||
${maven.build.timestamp}
|
${maven.build.timestamp}
|
||||||
@ -57,12 +57,12 @@ HDFS Federation
|
|||||||
* Storage - is provided by datanodes by storing blocks on the local file
|
* Storage - is provided by datanodes by storing blocks on the local file
|
||||||
system and allows read/write access.
|
system and allows read/write access.
|
||||||
|
|
||||||
The current HDFS architecture allows only a single namespace for the
|
The prior HDFS architecture allows only a single namespace for the
|
||||||
entire cluster. A single Namenode manages this namespace. HDFS
|
entire cluster. A single Namenode manages this namespace. HDFS
|
||||||
Federation addresses limitation of current architecture by adding
|
Federation addresses limitation of the prior architecture by adding
|
||||||
support multiple Namenodes/namespaces to HDFS file system.
|
support multiple Namenodes/namespaces to HDFS file system.
|
||||||
|
|
||||||
* {HDFS Federation}
|
* {Multiple Namenodes/Namespaces}
|
||||||
|
|
||||||
In order to scale the name service horizontally, federation uses multiple
|
In order to scale the name service horizontally, federation uses multiple
|
||||||
independent Namenodes/namespaces. The Namenodes are federated, that is, the
|
independent Namenodes/namespaces. The Namenodes are federated, that is, the
|
||||||
@ -103,9 +103,9 @@ HDFS Federation
|
|||||||
of small files benefit from scaling the namespace by adding more
|
of small files benefit from scaling the namespace by adding more
|
||||||
Namenodes to the cluster
|
Namenodes to the cluster
|
||||||
|
|
||||||
* Performance - File system operation throughput is currently limited
|
* Performance - File system operation throughput is limited by a single
|
||||||
by a single Namenode. Adding more Namenodes to the cluster scales the
|
Namenode in the prior architecture. Adding more Namenodes to the cluster
|
||||||
file system read/write operations throughput.
|
scales the file system read/write operations throughput.
|
||||||
|
|
||||||
* Isolation - A single Namenode offers no isolation in multi user
|
* Isolation - A single Namenode offers no isolation in multi user
|
||||||
environment. An experimental application can overload the Namenode
|
environment. An experimental application can overload the Namenode
|
||||||
@ -265,7 +265,7 @@ HDFS Federation
|
|||||||
> $HADOOP_PREFIX_HOME/bin/start-dfs.sh
|
> $HADOOP_PREFIX_HOME/bin/start-dfs.sh
|
||||||
----
|
----
|
||||||
|
|
||||||
To start the cluster run the following command:
|
To stop the cluster run the following command:
|
||||||
|
|
||||||
----
|
----
|
||||||
> $HADOOP_PREFIX_HOME/bin/stop-dfs.sh
|
> $HADOOP_PREFIX_HOME/bin/stop-dfs.sh
|
||||||
@ -300,7 +300,7 @@ HDFS Federation
|
|||||||
** Decommissioning
|
** Decommissioning
|
||||||
|
|
||||||
Decommissioning is similar to prior releases. The nodes that need to be
|
Decommissioning is similar to prior releases. The nodes that need to be
|
||||||
decommissioning are added to the exclude file at all the Namenode. Each
|
decommissioned are added to the exclude file at all the Namenodes. Each
|
||||||
Namenode decommissions its Block Pool. When all the Namenodes finish
|
Namenode decommissions its Block Pool. When all the Namenodes finish
|
||||||
decommissioning a datanode, the datanode is considered to be decommissioned.
|
decommissioning a datanode, the datanode is considered to be decommissioned.
|
||||||
|
|
||||||
|
File diff suppressed because it is too large
@ -64,6 +64,10 @@
|
|||||||
rev="${yarn.version}" conf="common->default"/>
|
rev="${yarn.version}" conf="common->default"/>
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-yarn-common"
|
<dependency org="org.apache.hadoop" name="hadoop-yarn-common"
|
||||||
rev="${yarn.version}" conf="common->default"/>
|
rev="${yarn.version}" conf="common->default"/>
|
||||||
|
<dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-jobclient"
|
||||||
|
rev="${yarn.version}" conf="test->default">
|
||||||
|
<artifact name="hadoop-mapreduce-client-jobclient" type="tests" ext="jar" m:classifier="tests"/>
|
||||||
|
</dependency>
|
||||||
<dependency org="commons-logging"
|
<dependency org="commons-logging"
|
||||||
name="commons-logging"
|
name="commons-logging"
|
||||||
rev="${commons-logging.version}"
|
rev="${commons-logging.version}"
|
||||||
|
@ -1,52 +0,0 @@
|
|||||||
<?xml version="1.0"?>
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
contributor license agreements. See the NOTICE file distributed with
|
|
||||||
this work for additional information regarding copyright ownership.
|
|
||||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
(the "License"); you may not use this file except in compliance with
|
|
||||||
the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Before you can run these subtargets directly, you need
|
|
||||||
to call at top-level: ant deploy-contrib compile-core-test
|
|
||||||
-->
|
|
||||||
<project name="streaming" default="jar">
|
|
||||||
|
|
||||||
<import file="../build-contrib.xml"/>
|
|
||||||
|
|
||||||
<!-- Override jar target to specify main class -->
|
|
||||||
<target name="jar" depends="compile">
|
|
||||||
<jar
|
|
||||||
jarfile="${build.dir}/hadoop-${version}-${name}.jar"
|
|
||||||
basedir="${build.classes}"
|
|
||||||
>
|
|
||||||
<manifest>
|
|
||||||
<attribute name="Main-Class" value="org.apache.hadoop.streaming.HadoopStreaming"/>
|
|
||||||
</manifest>
|
|
||||||
</jar>
|
|
||||||
</target>
|
|
||||||
|
|
||||||
<!-- Run all unit tests. superdottest -->
|
|
||||||
<target name="test">
|
|
||||||
<antcall target="hadoopbuildcontrib.test">
|
|
||||||
</antcall>
|
|
||||||
</target>
|
|
||||||
|
|
||||||
<!--Run all system tests.-->
|
|
||||||
<target name="test-system">
|
|
||||||
<antcall target="hadoopbuildcontrib.test-system">
|
|
||||||
</antcall>
|
|
||||||
</target>
|
|
||||||
|
|
||||||
</project>
|
|
@ -1,98 +0,0 @@
|
|||||||
<?xml version="1.0" ?>
|
|
||||||
<!--
|
|
||||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
contributor license agreements. See the NOTICE file distributed with
|
|
||||||
this work for additional information regarding copyright ownership.
|
|
||||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
(the "License"); you may not use this file except in compliance with
|
|
||||||
the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
<ivy-module version="1.0" xmlns:m="http://ant.apache.org/ivy/maven">
|
|
||||||
<info organisation="org.apache.hadoop" module="${ant.project.name}">
|
|
||||||
<license name="Apache 2.0"/>
|
|
||||||
<ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
|
|
||||||
<description>
|
|
||||||
Apache Hadoop
|
|
||||||
</description>
|
|
||||||
</info>
|
|
||||||
<configurations defaultconfmapping="default">
|
|
||||||
<!--these match the Maven configurations-->
|
|
||||||
<conf name="default" extends="master,runtime"/>
|
|
||||||
<conf name="master" description="contains the artifact but no dependencies"/>
|
|
||||||
<conf name="runtime" description="runtime but not the artifact" />
|
|
||||||
|
|
||||||
<conf name="common" visibility="private"
|
|
||||||
extends="runtime"
|
|
||||||
description="artifacts needed to compile/test the application"/>
|
|
||||||
<conf name="test" visibility="private" extends="runtime"/>
|
|
||||||
</configurations>
|
|
||||||
|
|
||||||
<publications>
|
|
||||||
<!--get the artifact from our module name-->
|
|
||||||
<artifact conf="master"/>
|
|
||||||
</publications>
|
|
||||||
<dependencies>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="common->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-common"
|
|
||||||
rev="${hadoop-common.version}" conf="common->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-common"
|
|
||||||
rev="${hadoop-common.version}" conf="test->default">
|
|
||||||
<artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests"/>
|
|
||||||
</dependency>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-hdfs"
|
|
||||||
rev="${hadoop-hdfs.version}" conf="common->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-hdfs"
|
|
||||||
rev="${hadoop-hdfs.version}" conf="test->default">
|
|
||||||
<artifact name="hadoop-hdfs" type="tests" ext="jar" m:classifier="tests"/>
|
|
||||||
</dependency>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
|
|
||||||
rev="${yarn.version}" conf="common->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-yarn-common"
|
|
||||||
rev="${yarn.version}" conf="common->default"/>
|
|
||||||
|
|
||||||
<dependency org="commons-cli" name="commons-cli"
|
|
||||||
rev="${commons-cli.version}" conf="common->default"/>
|
|
||||||
<dependency org="commons-logging" name="commons-logging"
|
|
||||||
rev="${commons-logging.version}" conf="common->default"/>
|
|
||||||
<dependency org="junit" name="junit"
|
|
||||||
rev="${junit.version}" conf="common->default"/>
|
|
||||||
<dependency org="org.mortbay.jetty" name="jetty-util"
|
|
||||||
rev="${jetty-util.version}" conf="common->master"/>
|
|
||||||
<dependency org="org.mortbay.jetty" name="jetty"
|
|
||||||
rev="${jetty.version}" conf="common->master"/>
|
|
||||||
<dependency org="org.mortbay.jetty" name="jsp-api-2.1"
|
|
||||||
rev="${jetty.version}" conf="common->master"/>
|
|
||||||
<dependency org="org.mortbay.jetty" name="jsp-2.1"
|
|
||||||
rev="${jetty.version}" conf="common->master"/>
|
|
||||||
<dependency org="org.mortbay.jetty" name="servlet-api-2.5"
|
|
||||||
rev="${servlet-api-2.5.version}" conf="common->master"/>
|
|
||||||
<dependency org="commons-httpclient" name="commons-httpclient"
|
|
||||||
rev="${commons-httpclient.version}" conf="common->default"/>
|
|
||||||
<dependency org="log4j" name="log4j"
|
|
||||||
rev="${log4j.version}" conf="common->master"/>
|
|
||||||
<dependency org="org.apache.avro" name="avro"
|
|
||||||
rev="${avro.version}" conf="common->default">
|
|
||||||
<exclude module="ant"/>
|
|
||||||
<exclude module="jetty"/>
|
|
||||||
<exclude module="slf4j-simple"/>
|
|
||||||
</dependency>
|
|
||||||
<dependency org="org.slf4j" name="slf4j-api"
|
|
||||||
rev="${slf4j-api.version}" conf="common->master"/>
|
|
||||||
|
|
||||||
<!-- Exclusions for transitive dependencies pulled in by log4j -->
|
|
||||||
<exclude org="com.sun.jdmk"/>
|
|
||||||
<exclude org="com.sun.jmx"/>
|
|
||||||
<exclude org="javax.jms"/>
|
|
||||||
<exclude org="javax.mail"/>
|
|
||||||
|
|
||||||
</dependencies>
|
|
||||||
</ivy-module>
|
|
@ -1,17 +0,0 @@
|
|||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
#This properties file lists the versions of the various artifacts used by streaming.
|
|
||||||
#It drives ivy and the generation of a maven POM
|
|
||||||
|
|
||||||
#Please list the dependencies name with version if they are different from the ones
|
|
||||||
#listed in the global libraries.properties file (in alphabetical order)
|
|
@ -242,7 +242,11 @@
|
|||||||
|
|
||||||
$item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:; # Separate attribution
|
$item =~ s:\s*(\([^)"]+?\))\s*$:<br />$1:; # Separate attribution
|
||||||
$item =~ s:\n{2,}:\n<p/>\n:g; # Keep paragraph breaks
|
$item =~ s:\n{2,}:\n<p/>\n:g; # Keep paragraph breaks
|
||||||
$item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)} # Link to JIRA
|
$item =~ s{(?:${jira_url_prefix})?(HADOOP-\d+)} # Link to JIRA Common
|
||||||
|
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
||||||
|
$item =~ s{(?:${jira_url_prefix})?(HDFS-\d+)} # Link to JIRA Hdfs
|
||||||
|
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
||||||
|
$item =~ s{(?:${jira_url_prefix})?(MAPREDUCE-\d+)} # Link to JIRA MR
|
||||||
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
{<a href="${jira_url_prefix}$1">$1</a>}g;
|
||||||
print " <li>$item</li>\n";
|
print " <li>$item</li>\n";
|
||||||
}
|
}
|
||||||
|
@ -2060,9 +2060,7 @@
|
|||||||
<p>Hadoop comes configured with a single mandatory queue, called
|
<p>Hadoop comes configured with a single mandatory queue, called
|
||||||
'default'. Queue names are defined in the
|
'default'. Queue names are defined in the
|
||||||
<code>mapred.queue.names</code> property of the Hadoop site
|
<code>mapred.queue.names</code> property of the Hadoop site
|
||||||
configuration. Some job schedulers, such as the
|
configuration.</p>
|
||||||
<a href="capacity_scheduler.html">Capacity Scheduler</a>,
|
|
||||||
support multiple queues.</p>
|
|
||||||
|
|
||||||
<p>A job defines the queue it needs to be submitted to through the
|
<p>A job defines the queue it needs to be submitted to through the
|
||||||
<code>mapreduce.job.queuename</code> property.
|
<code>mapreduce.job.queuename</code> property.
|
||||||
|
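As the revised text above notes, a job selects its target queue through the mapreduce.job.queuename property. A minimal, hypothetical client-side sketch, using the mandatory 'default' queue:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;

  public class QueueSelectionSketch {
    public static Job newJob() throws IOException {
      Configuration conf = new Configuration();
      // Target queue for the submitted job; 'default' always exists.
      conf.set("mapreduce.job.queuename", "default");
      return new Job(conf);
    }
  }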
@ -35,6 +35,7 @@
|
|||||||
%>
|
%>
|
||||||
<%! private static final long serialVersionUID = 1L;
|
<%! private static final long serialVersionUID = 1L;
|
||||||
%>
|
%>
|
||||||
|
<!DOCTYPE html>
|
||||||
<html><body>
|
<html><body>
|
||||||
<%
|
<%
|
||||||
String logFile = request.getParameter("logFile");
|
String logFile = request.getParameter("logFile");
|
||||||
|
@ -28,6 +28,7 @@
|
|||||||
<%! private static final long serialVersionUID = 1L;
|
<%! private static final long serialVersionUID = 1L;
|
||||||
%>
|
%>
|
||||||
|
|
||||||
|
<!DOCTYPE html>
|
||||||
<html>
|
<html>
|
||||||
<head>
|
<head>
|
||||||
<title>Error: User cannot access this Job</title>
|
<title>Error: User cannot access this Job</title>
|
||||||
|
@ -74,6 +74,7 @@
|
|||||||
}
|
}
|
||||||
%>
|
%>
|
||||||
|
|
||||||
|
<!DOCTYPE html>
|
||||||
<html>
|
<html>
|
||||||
<title>Hadoop <%=jobId%>'s black-listed tasktrackers</title>
|
<title>Hadoop <%=jobId%>'s black-listed tasktrackers</title>
|
||||||
<body>
|
<body>
|
||||||
|
@ -39,7 +39,7 @@
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
%>
|
%>
|
||||||
|
<!DOCTYPE html>
|
||||||
<html>
|
<html>
|
||||||
|
|
||||||
<title>Job Configuration: JobId - <%= jobId %></title>
|
<title>Job Configuration: JobId - <%= jobId %></title>
|
||||||
|
@ -49,6 +49,7 @@
|
|||||||
|
|
||||||
%>
|
%>
|
||||||
|
|
||||||
|
<!DOCTYPE html>
|
||||||
<html>
|
<html>
|
||||||
|
|
||||||
<title>Job Configuration: JobId - <%= jobId %></title>
|
<title>Job Configuration: JobId - <%= jobId %></title>
|
||||||
|
@ -267,6 +267,7 @@
|
|||||||
%>
|
%>
|
||||||
|
|
||||||
<%@page import="org.apache.hadoop.mapred.TaskGraphServlet"%>
|
<%@page import="org.apache.hadoop.mapred.TaskGraphServlet"%>
|
||||||
|
<!DOCTYPE html>
|
||||||
<html>
|
<html>
|
||||||
<head>
|
<head>
|
||||||
<%
|
<%
|
||||||
|
@ -60,6 +60,7 @@
|
|||||||
reasonforFailure = job.getErrorInfo();
|
reasonforFailure = job.getErrorInfo();
|
||||||
%>
|
%>
|
||||||
|
|
||||||
|
<!DOCTYPE html>
|
||||||
<html>
|
<html>
|
||||||
<head>
|
<head>
|
||||||
<title>Hadoop Job <%=jobid%> on History Viewer</title>
|
<title>Hadoop Job <%=jobid%> on History Viewer</title>
|
||||||
|
Some files were not shown because too many files have changed in this diff