diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 1dfd089829..d32dc611f9 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -231,10 +231,20 @@ Release 2.0.0 - UNRELEASED HADOOP-8216. Address log4j.properties inconsistencies btw main and template dirs. (Patrick Hunt via eli) + HADOOP-8149. Cap space usage of default log4j rolling policy. + (Patrick Hunt via eli) + + HADOOP-8211. Update commons-net version to 3.1. (eli) + + HADOOP-8236. haadmin should have configurable timeouts for failover + commands. (todd) + OPTIMIZATIONS BUG FIXES + HADOOP-8199. Fix issues in start-all.sh and stop-all.sh (Devaraj K via umamahesh) + HADOOP-7635. RetryInvocationHandler should release underlying resources on close. (atm) @@ -295,6 +305,9 @@ Release 2.0.0 - UNRELEASED HADOOP-8218. RPC.closeProxy shouldn't throw error when closing a mock (todd) + HADOOP-8238. NetUtils#getHostNameOfIP blows up if given ip:port + string w/o port. (eli) + BREAKDOWN OF HADOOP-7454 SUBTASKS HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh) @@ -445,6 +458,8 @@ Release 0.23.2 - UNRELEASED HADOOP-8088. User-group mapping cache incorrectly does negative caching on transient failures (Khiwal Lee via bobby) + HADOOP-8208. Disallow self failover. (eli) + Release 0.23.1 - 2012-02-17 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh index abd4dfd478..42d32cf9a0 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh @@ -107,8 +107,8 @@ fi # some variables export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log -export HADOOP_ROOT_LOGGER="INFO,DRFA" -export HADOOP_SECURITY_LOGGER="INFO,DRFAS" +export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"} +export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"} log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid diff --git a/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh b/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh index f91d9afef0..9d579b29af 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/start-all.sh @@ -18,7 +18,7 @@ # Start all hadoop daemons. Run this on master node. -echo "This script is Deprecated. Instead use start-dfs.sh and start-mapred.sh" +echo "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh" bin=`dirname "${BASH_SOURCE-$0}"` bin=`cd "$bin"; pwd` @@ -28,6 +28,11 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} . 
$HADOOP_LIBEXEC_DIR/hadoop-config.sh # start hdfs daemons if hdfs is present -if [ -f "${HADOOP_HDFS_HOME}"/bin/start-dfs.sh ]; then - "${HADOOP_HDFS_HOME}"/bin/start-dfs.sh --config $HADOOP_CONF_DIR +if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then + "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR +fi + +# start yarn daemons if yarn is present +if [ -f "${YARN_HOME}"/sbin/start-yarn.sh ]; then + "${YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR fi diff --git a/hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh b/hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh index 7d8bd591ea..9a2fe98fc0 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh @@ -18,7 +18,7 @@ # Stop all hadoop daemons. Run this on master node. -echo "This script is Deprecated. Instead use stop-dfs.sh and stop-mapred.sh" +echo "This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh" bin=`dirname "${BASH_SOURCE-$0}"` bin=`cd "$bin"; pwd` @@ -28,6 +28,11 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR} . $HADOOP_LIBEXEC_DIR/hadoop-config.sh # stop hdfs daemons if hdfs is present -if [ -f "${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh ]; then - "${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh --config $HADOOP_CONF_DIR +if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh ]; then + "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh --config $HADOOP_CONF_DIR +fi + +# stop yarn daemons if yarn is present +if [ -f "${YARN_HOME}"/sbin/stop-yarn.sh ]; then + "${YARN_HOME}"/sbin/stop-yarn.sh --config $HADOOP_CONF_DIR fi diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index cbc31ad5e5..3470b3ef1b 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -21,7 +21,6 @@ hadoop.root.logger=INFO,console hadoop.log.dir=. hadoop.log.file=hadoop.log - # Define the root logger to the system property "hadoop.root.logger". log4j.rootLogger=${hadoop.root.logger}, EventCounter @@ -31,6 +30,25 @@ log4j.threshold=ALL # Null Appender log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender +# +# Rolling File Appender - cap space usage at 5gb. 
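The "5gb" cap follows from the two knobs defined just below: the appender keeps the live log of at most hadoop.log.maxfilesize=256MB plus hadoop.log.maxbackupindex=20 rotated copies, i.e. roughly 256 MB x 21, about 5.25 GB per log file. A deployment that wants a different cap only needs to override those two properties; the values in this sketch are hypothetical and not part of the patch:

# Hypothetical site-specific override: 64 MB x (15 backups + 1 live file)
# caps each log at roughly 1 GB.
hadoop.log.maxfilesize=64MB
hadoop.log.maxbackupindex=15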
+# +hadoop.log.maxfilesize=256MB +hadoop.log.maxbackupindex=20 +log4j.appender.RFA=org.apache.log4j.RollingFileAppender +log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} + +log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize} +log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex} + +log4j.appender.RFA.layout=org.apache.log4j.PatternLayout + +# Pattern format: Date LogLevel LoggerName LogMessage +log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n +# Debugging Pattern format +#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n + + # # Daily Rolling File Appender # @@ -85,54 +103,55 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n #Security appender # hadoop.security.logger=INFO,console +hadoop.security.log.maxfilesize=256MB +hadoop.security.log.maxbackupindex=20 log4j.category.SecurityLogger=${hadoop.security.logger} hadoop.security.log.file=SecurityAuth.audit +log4j.appender.RFAS=org.apache.log4j.RollingFileAppender +log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} +log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout +log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n +log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize} +log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex} + +# +# Daily Rolling Security appender +# log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd - # # hdfs audit logging # hdfs.audit.logger=INFO,console +hdfs.audit.log.maxfilesize=256MB +hdfs.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false -log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender -log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log -log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout -log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n -log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd +log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender +log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log +log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout +log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n +log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize} +log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex} # # mapred audit logging # mapred.audit.logger=INFO,console +mapred.audit.log.maxfilesize=256MB +mapred.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger} log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false -log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender +log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n -log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd - -# -# Rolling File Appender -# - -#log4j.appender.RFA=org.apache.log4j.RollingFileAppender -#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} - -# 
Logfile size and and 30-day backups -#log4j.appender.RFA.MaxFileSize=1MB -#log4j.appender.RFA.MaxBackupIndex=30 - -#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout -#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n -#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - +log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize} +log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex} # Custom Logging levels @@ -153,16 +172,19 @@ log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter # Job Summary Appender # # Use following logger to send summary to separate file defined by -# hadoop.mapreduce.jobsummary.log.file rolled daily: +# hadoop.mapreduce.jobsummary.log.file : # hadoop.mapreduce.jobsummary.logger=INFO,JSA # hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger} hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log -log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender +hadoop.mapreduce.jobsummary.log.maxfilesize=256MB +hadoop.mapreduce.jobsummary.log.maxbackupindex=20 +log4j.appender.JSA=org.apache.log4j.RollingFileAppender log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file} +log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize} +log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex} log4j.appender.JSA.layout=org.apache.log4j.PatternLayout log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n -log4j.appender.JSA.DatePattern=.yyyy-MM-dd log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger} log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false @@ -174,7 +196,7 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false # Set the ResourceManager summary log level and appender #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -# Appender for ResourceManager Application Summary Log - rolled daily +# Appender for ResourceManager Application Summary Log # Requires the following properties to be set # - hadoop.log.dir (Hadoop Log directory) # - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename) @@ -182,8 +204,9 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false #log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger} #log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false -#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender +#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender #log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file} +#log4j.appender.RMSUMMARY.MaxFileSize=256MB +#log4j.appender.RMSUMMARY.MaxBackupIndex=20 #log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout #log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n -#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index 54478c87f8..52cb1f3c9a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -145,5 +145,21 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { public static final String HA_HM_RPC_TIMEOUT_KEY = "ha.health-monitor.rpc-timeout.ms"; public static final int HA_HM_RPC_TIMEOUT_DEFAULT = 45000; + + /* Timeout that the FC waits for the new active to become active */ + public static final String HA_FC_NEW_ACTIVE_TIMEOUT_KEY = + "ha.failover-controller.new-active.rpc-timeout.ms"; + public static final int HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT = 60000; + + /* Timeout that the FC waits for the old active to go to standby */ + public static final String HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY = + "ha.failover-controller.graceful-fence.rpc-timeout.ms"; + public static final int HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT = 5000; + + /* Timeout that the CLI (manual) FC waits for monitorHealth, getServiceState */ + public static final String HA_FC_CLI_CHECK_TIMEOUT_KEY = + "ha.failover-controller.cli-check.rpc-timeout.ms"; + public static final int HA_FC_CLI_CHECK_TIMEOUT_DEFAULT = 20000; + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java index cf3c90e542..d1dad18954 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ipc.RPC; @@ -42,7 +43,22 @@ public class FailoverController { private static final Log LOG = LogFactory.getLog(FailoverController.class); - private static final int GRACEFUL_FENCE_TIMEOUT = 5000; + private final int gracefulFenceTimeout; + private final int rpcTimeoutToNewActive; + + private final Configuration conf; + + + public FailoverController(Configuration conf) { + this.conf = conf; + + this.gracefulFenceTimeout = conf.getInt( + CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY, + CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT); + this.rpcTimeoutToNewActive = conf.getInt( + CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_KEY, + CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT); + } /** * Perform pre-failover checks on the given service we plan to @@ -54,18 +70,25 @@ public class FailoverController { * allow it to become active, eg because it triggers a log roll * so the standby can learn about new blocks and leave safemode. 
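The keys above are the heart of HADOOP-8236: FailoverController is now constructed with a Configuration and reads its graceful-fence and new-active RPC timeouts from these settings instead of a hard-coded constant. A minimal sketch of a caller overriding the defaults, assuming only the HA classes shown in this patch (the 8000/30000 values are purely illustrative; HAAdmin later in the patch does the same thing with the stock defaults):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.FailoverController;
import org.apache.hadoop.ha.FailoverFailedException;
import org.apache.hadoop.ha.HAServiceTarget;

/** Sketch only: run a failover with non-default failover-controller timeouts. */
class FailoverTimeoutSketch {
  static void doFailover(HAServiceTarget from, HAServiceTarget to)
      throws FailoverFailedException {
    Configuration conf = new Configuration();
    // Patch defaults: 5000 ms for the graceful-fence RPC, 60000 ms waiting on the new active.
    conf.setInt(CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY, 8000);
    conf.setInt(CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_KEY, 30000);
    // The controller is per-invocation state now, no longer a static utility.
    new FailoverController(conf).failover(from, to, false, false);
  }
}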
* + * @param from currently active service * @param target service to make active * @param forceActive ignore toSvc if it reports that it is not ready * @throws FailoverFailedException if we should avoid failover */ - private static void preFailoverChecks(HAServiceTarget target, - boolean forceActive) + private void preFailoverChecks(HAServiceTarget from, + HAServiceTarget target, + boolean forceActive) throws FailoverFailedException { HAServiceStatus toSvcStatus; HAServiceProtocol toSvc; + if (from.getAddress().equals(target.getAddress())) { + throw new FailoverFailedException( + "Can't failover a service to itself"); + } + try { - toSvc = target.getProxy(); + toSvc = target.getProxy(conf, rpcTimeoutToNewActive); toSvcStatus = toSvc.getServiceStatus(); } catch (IOException e) { String msg = "Unable to get service state for " + target; @@ -108,11 +131,10 @@ private static void preFailoverChecks(HAServiceTarget target, * and no retries. Its only purpose is to avoid fencing a node that * has already restarted. */ - static boolean tryGracefulFence(Configuration conf, - HAServiceTarget svc) { + boolean tryGracefulFence(HAServiceTarget svc) { HAServiceProtocol proxy = null; try { - proxy = svc.getProxy(conf, GRACEFUL_FENCE_TIMEOUT); + proxy = svc.getProxy(conf, gracefulFenceTimeout); proxy.transitionToStandby(); return true; } catch (ServiceFailedException sfe) { @@ -139,19 +161,19 @@ static boolean tryGracefulFence(Configuration conf, * @param forceActive try to make toSvc active even if it is not ready * @throws FailoverFailedException if the failover fails */ - public static void failover(HAServiceTarget fromSvc, - HAServiceTarget toSvc, - boolean forceFence, - boolean forceActive) + public void failover(HAServiceTarget fromSvc, + HAServiceTarget toSvc, + boolean forceFence, + boolean forceActive) throws FailoverFailedException { Preconditions.checkArgument(fromSvc.getFencer() != null, "failover requires a fencer"); - preFailoverChecks(toSvc, forceActive); + preFailoverChecks(fromSvc, toSvc, forceActive); // Try to make fromSvc standby boolean tryFence = true; - if (tryGracefulFence(new Configuration(), fromSvc)) { + if (tryGracefulFence(fromSvc)) { tryFence = forceFence; } @@ -167,7 +189,8 @@ public static void failover(HAServiceTarget fromSvc, boolean failed = false; Throwable cause = null; try { - HAServiceProtocolHelper.transitionToActive(toSvc.getProxy()); + HAServiceProtocolHelper.transitionToActive( + toSvc.getProxy(conf, rpcTimeoutToNewActive)); } catch (ServiceFailedException sfe) { LOG.error("Unable to make " + toSvc + " active (" + sfe.getMessage() + "). 
Failing back."); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java index e5048e04cc..75c0c11eee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java @@ -30,7 +30,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -49,6 +51,8 @@ public abstract class HAAdmin extends Configured implements Tool { private static final String FORCEACTIVE = "forceactive"; private static final Log LOG = LogFactory.getLog(HAAdmin.class); + private int rpcTimeoutForChecks = -1; + private static Map USAGE = ImmutableMap.builder() .put("-transitionToActive", @@ -165,9 +169,10 @@ private int failover(final String[] argv) HAServiceTarget fromNode = resolveTarget(args[0]); HAServiceTarget toNode = resolveTarget(args[1]); + FailoverController fc = new FailoverController(getConf()); + try { - FailoverController.failover(fromNode, toNode, - forceFence, forceActive); + fc.failover(fromNode, toNode, forceFence, forceActive); out.println("Failover from "+args[0]+" to "+args[1]+" successful"); } catch (FailoverFailedException ffe) { errOut.println("Failover failed: " + ffe.getLocalizedMessage()); @@ -184,7 +189,8 @@ private int checkHealth(final String[] argv) return -1; } - HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(); + HAServiceProtocol proto = resolveTarget(argv[1]).getProxy( + getConf(), rpcTimeoutForChecks); try { HAServiceProtocolHelper.monitorHealth(proto); } catch (HealthCheckFailedException e) { @@ -202,7 +208,8 @@ private int getServiceState(final String[] argv) return -1; } - HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(); + HAServiceProtocol proto = resolveTarget(argv[1]).getProxy( + getConf(), rpcTimeoutForChecks); out.println(proto.getServiceStatus().getState()); return 0; } @@ -215,6 +222,16 @@ protected String getServiceAddr(String serviceId) { return serviceId; } + @Override + public void setConf(Configuration conf) { + super.setConf(conf); + if (conf != null) { + rpcTimeoutForChecks = conf.getInt( + CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY, + CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT); + } + } + @Override public int run(String[] argv) throws Exception { try { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java index 917c9d32b4..2e6dd48b21 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java @@ -330,8 +330,8 @@ public void fenceOldActive(byte[] data) { HAServiceTarget target = dataToTarget(data); LOG.info("Should fence: " + target); - boolean gracefulWorked = - FailoverController.tryGracefulFence(conf, target); + boolean gracefulWorked = new FailoverController(conf) + .tryGracefulFence(target); if (gracefulWorked) { // It's possible that it's in standby but just about to go into active, // no? 
Is there some race here? diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index 0bfeaa7cfc..90c5f169c9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -570,31 +570,29 @@ public static void verifyHostnames(String[] names) throws UnknownHostException { } } - private static final Pattern ipPattern = // Pattern for matching hostname to ip:port - Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:?\\d*"); + private static final Pattern ipPortPattern = // Pattern for matching ip[:port] + Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d+)?"); /** - * Attempt to obtain the host name of a name specified by ip address. - * Check that the node name is an ip addr and if so, attempt to determine - * its host name. If the name is not an IP addr, or the actual name cannot - * be determined, return null. + * Attempt to obtain the host name of the given string which contains + * an IP address and an optional port. * - * @return Host name or null + * @param ipPort string of form ip[:port] + * @return Host name or null if the name can not be determined */ - public static String getHostNameOfIP(String ip) { - // If name is not an ip addr, don't bother looking it up - if(!ipPattern.matcher(ip).matches()) - return null; - - String hostname = ""; - try { - String n = ip.substring(0, ip.indexOf(':')); - hostname = InetAddress.getByName(n).getHostName(); - } catch (UnknownHostException e) { + public static String getHostNameOfIP(String ipPort) { + if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) { return null; } - return hostname; + try { + int colonIdx = ipPort.indexOf(':'); + String ip = (-1 == colonIdx) ? 
ipPort + : ipPort.substring(0, ipPort.indexOf(':')); + return InetAddress.getByName(ip).getHostName(); + } catch (UnknownHostException e) { + return null; + } } /** diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh index 84220dab49..e7f16fee7e 100644 --- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh @@ -48,10 +48,10 @@ done export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS" # Command specific options appended to HADOOP_OPTS when specified -export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS" -HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS" +export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS" +export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS" -export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS" +export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS" # The following applies to multiple commands (fs, dfs, fsck, distcp etc) export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS" diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties index cbc31ad5e5..3470b3ef1b 100644 --- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties @@ -21,7 +21,6 @@ hadoop.root.logger=INFO,console hadoop.log.dir=. hadoop.log.file=hadoop.log - # Define the root logger to the system property "hadoop.root.logger". log4j.rootLogger=${hadoop.root.logger}, EventCounter @@ -31,6 +30,25 @@ log4j.threshold=ALL # Null Appender log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender +# +# Rolling File Appender - cap space usage at 5gb. 
+# +hadoop.log.maxfilesize=256MB +hadoop.log.maxbackupindex=20 +log4j.appender.RFA=org.apache.log4j.RollingFileAppender +log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} + +log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize} +log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex} + +log4j.appender.RFA.layout=org.apache.log4j.PatternLayout + +# Pattern format: Date LogLevel LoggerName LogMessage +log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n +# Debugging Pattern format +#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n + + # # Daily Rolling File Appender # @@ -85,54 +103,55 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n #Security appender # hadoop.security.logger=INFO,console +hadoop.security.log.maxfilesize=256MB +hadoop.security.log.maxbackupindex=20 log4j.category.SecurityLogger=${hadoop.security.logger} hadoop.security.log.file=SecurityAuth.audit +log4j.appender.RFAS=org.apache.log4j.RollingFileAppender +log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} +log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout +log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n +log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize} +log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex} + +# +# Daily Rolling Security appender +# log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd - # # hdfs audit logging # hdfs.audit.logger=INFO,console +hdfs.audit.log.maxfilesize=256MB +hdfs.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger} log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false -log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender -log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log -log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout -log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n -log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd +log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender +log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log +log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout +log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n +log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize} +log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex} # # mapred audit logging # mapred.audit.logger=INFO,console +mapred.audit.log.maxfilesize=256MB +mapred.audit.log.maxbackupindex=20 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger} log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false -log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender +log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n -log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd - -# -# Rolling File Appender -# - -#log4j.appender.RFA=org.apache.log4j.RollingFileAppender -#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} - -# 
Logfile size and and 30-day backups -#log4j.appender.RFA.MaxFileSize=1MB -#log4j.appender.RFA.MaxBackupIndex=30 - -#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout -#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n -#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - +log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize} +log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex} # Custom Logging levels @@ -153,16 +172,19 @@ log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter # Job Summary Appender # # Use following logger to send summary to separate file defined by -# hadoop.mapreduce.jobsummary.log.file rolled daily: +# hadoop.mapreduce.jobsummary.log.file : # hadoop.mapreduce.jobsummary.logger=INFO,JSA # hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger} hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log -log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender +hadoop.mapreduce.jobsummary.log.maxfilesize=256MB +hadoop.mapreduce.jobsummary.log.maxbackupindex=20 +log4j.appender.JSA=org.apache.log4j.RollingFileAppender log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file} +log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize} +log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex} log4j.appender.JSA.layout=org.apache.log4j.PatternLayout log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n -log4j.appender.JSA.DatePattern=.yyyy-MM-dd log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger} log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false @@ -174,7 +196,7 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false # Set the ResourceManager summary log level and appender #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -# Appender for ResourceManager Application Summary Log - rolled daily +# Appender for ResourceManager Application Summary Log # Requires the following properties to be set # - hadoop.log.dir (Hadoop Log directory) # - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename) @@ -182,8 +204,9 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false #log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger} #log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false -#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender +#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender #log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file} +#log4j.appender.RMSUMMARY.MaxFileSize=256MB +#log4j.appender.RMSUMMARY.MaxBackupIndex=20 #log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout #log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n -#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd diff --git a/hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm index 724c970aaa..05b160fbe9 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm @@ -86,8 +86,6 @@ 
Deprecated Properties *---+---+ |dfs.socket.timeout | dfs.client.socket-timeout *---+---+ -|dfs.upgrade.permission | dfs.namenode.upgrade.permission -*---+---+ |dfs.write.packet.size | dfs.client-write-packet-size *---+---+ |fs.checkpoint.dir | dfs.namenode.checkpoint.dir diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java index 292141dabe..7d30bdfdb5 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java @@ -25,11 +25,13 @@ import static org.mockito.Mockito.verify; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer; import org.apache.hadoop.ha.TestNodeFencer.AlwaysFailFencer; import static org.apache.hadoop.ha.TestNodeFencer.setupFencer; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.test.MockitoUtil; import org.junit.Test; import org.mockito.Mockito; @@ -40,6 +42,8 @@ public class TestFailoverController { private InetSocketAddress svc1Addr = new InetSocketAddress("svc1", 1234); private InetSocketAddress svc2Addr = new InetSocketAddress("svc2", 5678); + + private Configuration conf = new Configuration(); HAServiceStatus STATE_NOT_READY = new HAServiceStatus(HAServiceState.STANDBY) .setNotReadyToBecomeActive("injected not ready"); @@ -51,13 +55,13 @@ public void testFailoverAndFailback() throws Exception { svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); AlwaysSucceedFencer.fenceCalled = 0; - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled); assertEquals(HAServiceState.STANDBY, svc1.state); assertEquals(HAServiceState.ACTIVE, svc2.state); AlwaysSucceedFencer.fenceCalled = 0; - FailoverController.failover(svc2, svc1, false, false); + doFailover(svc2, svc1, false, false); assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled); assertEquals(HAServiceState.ACTIVE, svc1.state); assertEquals(HAServiceState.STANDBY, svc2.state); @@ -69,7 +73,7 @@ public void testFailoverFromStandbyToStandby() throws Exception { DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr); svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); assertEquals(HAServiceState.STANDBY, svc1.state); assertEquals(HAServiceState.ACTIVE, svc2.state); } @@ -81,7 +85,7 @@ public void testFailoverFromActiveToActive() throws Exception { svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Can't failover to an already active service"); } catch (FailoverFailedException ffe) { // Expected @@ -102,7 +106,7 @@ public void testFailoverWithoutPermission() throws Exception { svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Can't failover when access is denied"); } catch 
(FailoverFailedException ffe) { assertTrue(ffe.getCause().getMessage().contains("Access denied")); @@ -118,7 +122,7 @@ public void testFailoverToUnreadyService() throws Exception { svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Can't failover to a service that's not ready"); } catch (FailoverFailedException ffe) { // Expected @@ -131,7 +135,7 @@ public void testFailoverToUnreadyService() throws Exception { assertEquals(HAServiceState.STANDBY, svc2.state); // Forcing it means we ignore readyToBecomeActive - FailoverController.failover(svc1, svc2, false, true); + doFailover(svc1, svc2, false, true); assertEquals(HAServiceState.STANDBY, svc1.state); assertEquals(HAServiceState.ACTIVE, svc2.state); } @@ -145,7 +149,7 @@ public void testFailoverToUnhealthyServiceFailsAndFailsback() throws Exception { svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Failover to unhealthy service"); } catch (FailoverFailedException ffe) { // Expected @@ -165,7 +169,7 @@ public void testFailoverFromFaultyServiceSucceeds() throws Exception { AlwaysSucceedFencer.fenceCalled = 0; try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); } catch (FailoverFailedException ffe) { fail("Faulty active prevented failover"); } @@ -188,7 +192,7 @@ public void testFailoverFromFaultyServiceFencingFailure() throws Exception { AlwaysFailFencer.fenceCalled = 0; try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Failed over even though fencing failed"); } catch (FailoverFailedException ffe) { // Expected @@ -208,7 +212,7 @@ public void testFencingFailureDuringFailover() throws Exception { AlwaysFailFencer.fenceCalled = 0; try { - FailoverController.failover(svc1, svc2, true, false); + doFailover(svc1, svc2, true, false); fail("Failed over even though fencing requested and failed"); } catch (FailoverFailedException ffe) { // Expected @@ -232,16 +236,26 @@ public void testFailoverFromNonExistantServiceWithFencer() throws Exception { .defaultAnswer(new ThrowsException( new IOException("Could not connect to host"))) .extraInterfaces(Closeable.class)); - Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy(); + Mockito.doNothing().when((Closeable)errorThrowingProxy).close(); + + Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy( + Mockito.any(), + Mockito.anyInt()); DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr); svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); } catch (FailoverFailedException ffe) { fail("Non-existant active prevented failover"); } - + // Verify that the proxy created to try to make it go to standby + // gracefully used the right rpc timeout + Mockito.verify(svc1).getProxy( + Mockito.any(), + Mockito.eq( + CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT)); + // Don't check svc1 because we can't reach it, but that's OK, it's been fenced. 
assertEquals(HAServiceState.ACTIVE, svc2.state); } @@ -256,7 +270,7 @@ public void testFailoverToNonExistantServiceFails() throws Exception { svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Failed over to a non-existant standby"); } catch (FailoverFailedException ffe) { // Expected @@ -274,7 +288,7 @@ public void testFailoverToFaultyServiceFailsbackOK() throws Exception { svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Failover to already active service"); } catch (FailoverFailedException ffe) { // Expected @@ -296,7 +310,7 @@ public void testWeDontFailbackIfActiveWasFenced() throws Exception { svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); try { - FailoverController.failover(svc1, svc2, true, false); + doFailover(svc1, svc2, true, false); fail("Failed over to service that won't transition to active"); } catch (FailoverFailedException ffe) { // Expected @@ -318,7 +332,7 @@ public void testWeFenceOnFailbackIfTransitionToActiveFails() throws Exception { AlwaysSucceedFencer.fenceCalled = 0; try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Failed over to service that won't transition to active"); } catch (FailoverFailedException ffe) { // Expected @@ -342,7 +356,7 @@ public void testFailureToFenceOnFailbackFailsTheFailback() throws Exception { AlwaysFailFencer.fenceCalled = 0; try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Failed over to service that won't transition to active"); } catch (FailoverFailedException ffe) { // Expected @@ -368,7 +382,7 @@ public void testFailbackToFaultyServiceFails() throws Exception { svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); try { - FailoverController.failover(svc1, svc2, false, false); + doFailover(svc1, svc2, false, false); fail("Failover to already active service"); } catch (FailoverFailedException ffe) { // Expected @@ -377,4 +391,37 @@ public void testFailbackToFaultyServiceFails() throws Exception { assertEquals(HAServiceState.STANDBY, svc1.state); assertEquals(HAServiceState.STANDBY, svc2.state); } + + @Test + public void testSelfFailoverFails() throws Exception { + DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr); + DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr); + svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName()); + AlwaysSucceedFencer.fenceCalled = 0; + + try { + doFailover(svc1, svc1, false, false); + fail("Can't failover to yourself"); + } catch (FailoverFailedException ffe) { + // Expected + } + assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled); + assertEquals(HAServiceState.ACTIVE, svc1.state); + + try { + doFailover(svc2, svc2, false, false); + fail("Can't failover to yourself"); + } catch (FailoverFailedException ffe) { + // Expected + } + assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled); + assertEquals(HAServiceState.STANDBY, svc2.state); + } + + private void doFailover(HAServiceTarget tgt1, HAServiceTarget tgt2, + boolean forceFence, boolean forceActive) throws FailoverFailedException { + FailoverController fc = new FailoverController(conf); + fc.failover(tgt1, tgt2, forceFence, 
forceActive); + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java index 557cdc8c77..e67dad93d3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java @@ -499,6 +499,18 @@ public void testCanonicalUriWithNoPortNoDefaultPort() { assertEquals("scheme://host.a.b/path", uri.toString()); } + @Test + public void testGetHostNameOfIP() { + assertNull(NetUtils.getHostNameOfIP(null)); + assertNull(NetUtils.getHostNameOfIP("")); + assertNull(NetUtils.getHostNameOfIP("crazytown")); + assertNull(NetUtils.getHostNameOfIP("127.0.0.1:")); // no port + assertNull(NetUtils.getHostNameOfIP("127.0.0.1:-1")); // bogus port + assertNull(NetUtils.getHostNameOfIP("127.0.0.1:A")); // bogus port + assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1")); + assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1:1")); + } + private void assertBetterArrayEquals(T[] expect, T[]got) { String expectStr = StringUtils.join(expect, ", "); String gotStr = StringUtils.join(got, ", "); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh index cefe11400d..c83a14363a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh @@ -55,8 +55,8 @@ if [ "${1}" = "stop" ]; then fi if [ "${HTTPFS_SILENT}" != "true" ]; then - ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" + exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" else - ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null + exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null fi diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7a2dea9583..89dfe3a929 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -117,6 +117,12 @@ Release 2.0.0 - UNRELEASED HDFS-2303. Unbundle jsvc. (Roman Shaposhnik and Mingjie Lai via eli) + HDFS-3137. Bump LAST_UPGRADABLE_LAYOUT_VERSION to -16. (eli) + + HDFS-3138. Move DatanodeInfo#ipcPort to DatanodeID. (eli) + + HDFS-3164. Move DatanodeInfo#hostName to DatanodeID. (eli) + NEW FEATURES HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm) @@ -171,6 +177,8 @@ Release 2.0.0 - UNRELEASED DistributedFileSystem to @InterfaceAudience.LimitedPrivate. (harsh via szetszwo) + HDFS-3167. CLI-based driver for MiniDFSCluster. (Henry Robinson via atm) + IMPROVEMENTS HDFS-2018. Move all journal stream management code into one place. @@ -279,6 +287,15 @@ Release 2.0.0 - UNRELEASED HDFS-3155. Clean up FSDataset implemenation related code. (szetszwo) + HDFS-3158. LiveNodes member of NameNodeMXBean should list non-DFS used + space and capacity per DN. (atm) + + HDFS-3172. dfs.upgrade.permission is dead code. (eli) + + HDFS-3171. The DatanodeID "name" field is overloaded. (eli) + + HDFS-3144. Refactor DatanodeID#getName by use. (eli) + OPTIMIZATIONS HDFS-3024. Improve performance of stringification in addStoredBlock (todd) @@ -366,6 +383,15 @@ Release 2.0.0 - UNRELEASED HDFS-3143. TestGetBlocks.testGetBlocks is failing. (Arpit Gupta via atm) + HDFS-3142. TestHDFSCLI.testAll is failing. (Brandon Li via atm) + + HDFS-3070. 
HDFS balancer doesn't ensure that hdfs-site.xml is loaded. (atm) + + HDFS-2995. start-dfs.sh should only start the 2NN for namenodes + with dfs.namenode.secondary.http-address configured. (eli) + + HDFS-3174. Fix assert in TestPendingDataNodeMessages. (eli) + BREAKDOWN OF HDFS-1623 SUBTASKS HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd) @@ -713,6 +739,9 @@ Release 0.23.2 - UNRELEASED HDFS-3104. Add tests for HADOOP-8175. (Daryn Sharp via szetszwo) + HDFS-3066. Cap space usage of default log4j rolling policy. + (Patrick Hunt via eli) + OPTIMIZATIONS BUG FIXES @@ -764,6 +793,9 @@ Release 0.23.2 - UNRELEASED HDFS-3101. Cannot read empty file using WebHDFS. (szetszwo) + HDFS-3160. httpfs should exec catalina instead of forking it. + (Roman Shaposhnik via eli) + Release 0.23.1 - 2012-02-17 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index 9bcff9d418..fb409f3170 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -120,7 +120,7 @@ export CLASSPATH=$CLASSPATH #turn security logger on the namenode if [ $COMMAND = "namenode" ]; then - HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,DRFAS}" + HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS}" else HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}" fi diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh index d267e4cd7c..72d9e9057a 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh @@ -76,11 +76,13 @@ fi SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-) -echo "Starting secondary namenodes [$SECONDARY_NAMENODES]" +if [ -n "$SECONDARY_NAMENODES" ]; then + echo "Starting secondary namenodes [$SECONDARY_NAMENODES]" -"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$SECONDARY_NAMENODES" \ - --script "$bin/hdfs" start secondarynamenode + "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ + --config "$HADOOP_CONF_DIR" \ + --hostnames "$SECONDARY_NAMENODES" \ + --script "$bin/hdfs" start secondarynamenode +fi # eof diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh index 33967513c4..dff41526f9 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh @@ -52,11 +52,13 @@ fi SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-) -echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]" +if [ -n "$SECONDARY_NAMENODES" ]; then + echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]" -"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ - --config "$HADOOP_CONF_DIR" \ - --hostnames "$SECONDARY_NAMENODES" \ - --script "$bin/hdfs" stop secondarynamenode + "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \ + --config "$HADOOP_CONF_DIR" \ + --hostnames "$SECONDARY_NAMENODES" \ + --script "$bin/hdfs" stop secondarynamenode +fi # eof diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml index 02d569668e..cfb46edfc0 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml @@ -239,11 +239,6 @@ to the web server.


The name of the group of super-users. -
  • dfs.namenode.upgrade.permission = 0777 -
    The choice of initial mode during upgrade. The x permission is never set for files. - For configuration files, the decimal value 511 may be used. -
  • -
  • fs.permissions.umask-mode = 022
    The umask used when creating files and directories. For configuration files, the decimal value 1810 may be used. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java index f0cfa45bd3..cd85ebae05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java @@ -240,7 +240,7 @@ private static synchronized LocalDatanodeInfo getLocalDatanodeInfo(int port) { private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout, Token token) throws IOException { - LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.ipcPort); + LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort()); BlockLocalPathInfo pathinfo = null; ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node, conf, timeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 6dd75c2a7b..37575e55fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -1340,7 +1340,8 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src, //connect to a datanode sock = socketFactory.createSocket(); NetUtils.connect(sock, - NetUtils.createSocketAddr(datanodes[j].getName()), timeout); + NetUtils.createSocketAddr(datanodes[j].getXferAddr()), + timeout); sock.setSoTimeout(timeout); out = new DataOutputStream( @@ -1349,7 +1350,7 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src, in = new DataInputStream(NetUtils.getInputStream(sock)); if (LOG.isDebugEnabled()) { - LOG.debug("write to " + datanodes[j].getName() + ": " + LOG.debug("write to " + datanodes[j] + ": " + Op.BLOCK_CHECKSUM + ", block=" + block); } // get block MD5 @@ -1364,7 +1365,7 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src, if (LOG.isDebugEnabled()) { LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " + "for file " + src + " for block " + block - + " from datanode " + datanodes[j].getName() + + " from datanode " + datanodes[j] + ". 
Will retry the block once."); } lastRetriedIndex = i; @@ -1374,7 +1375,7 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src, break; } else { throw new IOException("Bad response " + reply + " for block " - + block + " from datanode " + datanodes[j].getName()); + + block + " from datanode " + datanodes[j]); } } @@ -1409,12 +1410,10 @@ else if (bpc != bytesPerCRC) { LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock); } - LOG.debug("got reply from " + datanodes[j].getName() - + ": md5=" + md5); + LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5); } } catch (IOException ie) { - LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" - + datanodes[j].getName(), ie); + LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie); } finally { IOUtils.closeStream(in); IOUtils.closeStream(out); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index aad8c8fd56..4a945b5d5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -107,8 +107,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final long DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600; public static final String DFS_NAMENODE_CHECKPOINT_TXNS_KEY = "dfs.namenode.checkpoint.txns"; public static final long DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 40000; - public static final String DFS_NAMENODE_UPGRADE_PERMISSION_KEY = "dfs.namenode.upgrade.permission"; - public static final int DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT = 00777; public static final String DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval"; public static final int DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000; public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 04089c9c3d..58f807f320 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -543,7 +543,7 @@ private synchronized int readBuffer(ReaderStrategy reader, int off, int len, return reader.doRead(blockReader, off, len); } catch ( ChecksumException ce ) { DFSClient.LOG.warn("Found Checksum error for " - + getCurrentBlock() + " from " + currentNode.getName() + + getCurrentBlock() + " from " + currentNode + " at " + ce.getPos()); ioe = ce; retryCurrentNode = false; @@ -671,7 +671,7 @@ private DNAddrPair chooseDataNode(LocatedBlock block) try { DatanodeInfo chosenNode = bestNode(nodes, deadNodes); InetSocketAddress targetAddr = - NetUtils.createSocketAddr(chosenNode.getName()); + NetUtils.createSocketAddr(chosenNode.getXferAddr()); return new DNAddrPair(chosenNode, targetAddr); } catch (IOException ie) { String blockInfo = block.getBlock() + " file=" + src; @@ -746,7 +746,7 @@ private void fetchBlockByteRange(LocatedBlock block, long start, long end, } catch (ChecksumException e) { DFSClient.LOG.warn("fetchBlockByteRange(). 
Got a checksum exception for " + src + " at " + block.getBlock() + ":" + - e.getPos() + " from " + chosenNode.getName()); + e.getPos() + " from " + chosenNode); // we want to remember what we have tried addIntoCorruptedBlockMap(block.getBlock(), chosenNode, corruptedBlockMap); } catch (AccessControlException ex) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 08e69e78a9..1d4a45c848 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -667,7 +667,7 @@ public void run() { throw new IOException("Bad response " + reply + " for block " + block + " from datanode " + - targets[i].getName()); + targets[i]); } } @@ -898,7 +898,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException { if (errorIndex >= 0) { StringBuilder pipelineMsg = new StringBuilder(); for (int j = 0; j < nodes.length; j++) { - pipelineMsg.append(nodes[j].getName()); + pipelineMsg.append(nodes[j]); if (j < nodes.length - 1) { pipelineMsg.append(", "); } @@ -911,7 +911,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException { } DFSClient.LOG.warn("Error Recovery for block " + block + " in pipeline " + pipelineMsg + - ": bad datanode " + nodes[errorIndex].getName()); + ": bad datanode " + nodes[errorIndex]); failed.add(nodes[errorIndex]); DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1]; @@ -1005,7 +1005,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS, String firstBadLink = ""; if (DFSClient.LOG.isDebugEnabled()) { for (int i = 0; i < nodes.length; i++) { - DFSClient.LOG.debug("pipeline = " + nodes[i].getName()); + DFSClient.LOG.debug("pipeline = " + nodes[i]); } } @@ -1061,7 +1061,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS, // find the datanode that matches if (firstBadLink.length() != 0) { for (int i = 0; i < nodes.length; i++) { - if (nodes[i].getName().equals(firstBadLink)) { + if (nodes[i].getXferAddr().equals(firstBadLink)) { errorIndex = i; break; } @@ -1165,9 +1165,10 @@ private void setLastException(IOException e) { static Socket createSocketForPipeline(final DatanodeInfo first, final int length, final DFSClient client) throws IOException { if(DFSClient.LOG.isDebugEnabled()) { - DFSClient.LOG.debug("Connecting to datanode " + first.getName()); + DFSClient.LOG.debug("Connecting to datanode " + first); } - final InetSocketAddress isa = NetUtils.createSocketAddr(first.getName()); + final InetSocketAddress isa = + NetUtils.createSocketAddr(first.getXferAddr()); final Socket sock = client.socketFactory.createSocket(); final int timeout = client.getDatanodeReadTimeout(length); NetUtils.connect(sock, isa, timeout); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index cbc0f0ea23..6655c1e440 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -295,16 +295,16 @@ public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) { assert idx < nrBlocks : "Incorrect index"; DatanodeInfo[] locations = blk.getLocations(); String[] hosts = new 
String[locations.length]; - String[] names = new String[locations.length]; + String[] xferAddrs = new String[locations.length]; String[] racks = new String[locations.length]; for (int hCnt = 0; hCnt < locations.length; hCnt++) { hosts[hCnt] = locations[hCnt].getHostName(); - names[hCnt] = locations[hCnt].getName(); - NodeBase node = new NodeBase(names[hCnt], + xferAddrs[hCnt] = locations[hCnt].getXferAddr(); + NodeBase node = new NodeBase(xferAddrs[hCnt], locations[hCnt].getNetworkLocation()); racks[hCnt] = node.toString(); } - blkLocations[idx] = new BlockLocation(names, hosts, racks, + blkLocations[idx] = new BlockLocation(xferAddrs, hosts, racks, blk.getStartOffset(), blk.getBlockSize(), blk.isCorrupt()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 7fe6a40a8a..5c63d6a27f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -688,7 +688,7 @@ public boolean reportChecksumFailure(Path f, lblocks[0] = new LocatedBlock(dataBlock, dataNode); LOG.info("Found checksum error in data stream at block=" + dataBlock + " on datanode=" - + dataNode[0].getName()); + + dataNode[0]); // Find block in checksum stream DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums; @@ -700,8 +700,7 @@ public boolean reportChecksumFailure(Path f, DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()}; lblocks[1] = new LocatedBlock(sumsBlock, sumsNode); LOG.info("Found checksum error in checksum stream at block=" - + sumsBlock + " on datanode=" - + sumsNode[0].getName()); + + sumsBlock + " on datanode=" + sumsNode[0]); // Ask client to delete blocks. 
dfs.reportChecksumFailure(f.toString(), lblocks); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java index 75ce9118a9..621dde0380 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java @@ -86,7 +86,6 @@ private static void addDeprecatedKeys() { deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY); deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY); deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY); - deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY); deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY); deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY); deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java index 9c837d291f..849f2496f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.DeprecatedUTF8; +import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableComparable; /** @@ -32,22 +32,32 @@ * Datanodes are identified by how they can be contacted (hostname * and ports) and their storage ID, a unique number that associates * the Datanodes blocks with a particular Datanode. + * + * {@link DatanodeInfo#getName()} should be used to get the network + * location (for topology) of a datanode, instead of using + * {@link DatanodeID#getXferAddr()} here. Helpers are defined below + * for each context in which a DatanodeID is used. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class DatanodeID implements WritableComparable { public static final DatanodeID[] EMPTY_ARRAY = {}; - public String name; // hostname:port (data transfer port) - public String storageID; // unique per cluster storageID - protected int infoPort; // info server port - public int ipcPort; // ipc server port + protected String ipAddr; // IP address + protected String hostName; // hostname + protected String storageID; // unique per cluster storageID + protected int xferPort; // data streaming port + protected int infoPort; // info server port + protected int ipcPort; // IPC server port /** Equivalent to DatanodeID(""). */ public DatanodeID() {this("");} - /** Equivalent to DatanodeID(nodeName, "", -1, -1). */ - public DatanodeID(String nodeName) {this(nodeName, "", -1, -1);} + /** Equivalent to DatanodeID(ipAddr, "", -1, -1, -1). */ + public DatanodeID(String ipAddr) {this(ipAddr, "", "", -1, -1, -1);} + + /** Equivalent to DatanodeID(ipAddr, "", xferPort, -1, -1). 
*/ + public DatanodeID(String ipAddr, int xferPort) {this(ipAddr, "", "", xferPort, -1, -1);} /** * DatanodeID copy constructor @@ -55,29 +65,43 @@ public class DatanodeID implements WritableComparable { * @param from */ public DatanodeID(DatanodeID from) { - this(from.getName(), + this(from.getIpAddr(), + from.getHostName(), from.getStorageID(), + from.getXferPort(), from.getInfoPort(), from.getIpcPort()); } /** * Create DatanodeID - * @param nodeName (hostname:portNumber) + * @param ipAddr IP + * @param hostName hostname * @param storageID data storage ID + * @param xferPort data transfer port * @param infoPort info server port * @param ipcPort ipc server port */ - public DatanodeID(String nodeName, String storageID, - int infoPort, int ipcPort) { - this.name = nodeName; + public DatanodeID(String ipAddr, String hostName, String storageID, + int xferPort, int infoPort, int ipcPort) { + this.ipAddr = ipAddr; + this.hostName = hostName; this.storageID = storageID; + this.xferPort = xferPort; this.infoPort = infoPort; this.ipcPort = ipcPort; } - public void setName(String name) { - this.name = name; + public void setIpAddr(String ipAddr) { + this.ipAddr = ipAddr; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + + public void setXferPort(int xferPort) { + this.xferPort = xferPort; } public void setInfoPort(int infoPort) { @@ -87,19 +111,65 @@ public void setInfoPort(int infoPort) { public void setIpcPort(int ipcPort) { this.ipcPort = ipcPort; } - - /** - * @return hostname:portNumber. - */ - public String getName() { - return name; + + public void setStorageID(String storageID) { + this.storageID = storageID; } - + + /** + * @return ipAddr; + */ + public String getIpAddr() { + return ipAddr; + } + + /** + * @return hostname + */ + public String getHostName() { + return hostName; + } + + /** + * @return IP:xferPort string + */ + public String getXferAddr() { + return ipAddr + ":" + xferPort; + } + + /** + * @return IP:ipcPort string + */ + public String getIpcAddr() { + return ipAddr + ":" + ipcPort; + } + + /** + * @return IP:infoPort string + */ + public String getInfoAddr() { + return ipAddr + ":" + infoPort; + } + + /** + * @return hostname:xferPort + */ + public String getXferAddrWithHostname() { + return hostName + ":" + xferPort; + } + /** * @return data storage ID. */ public String getStorageID() { - return this.storageID; + return storageID; + } + + /** + * @return xferPort (the port for data streaming) + */ + public int getXferPort() { + return xferPort; } /** @@ -116,33 +186,6 @@ public int getIpcPort() { return ipcPort; } - /** - * sets the data storage ID. - */ - public void setStorageID(String storageID) { - this.storageID = storageID; - } - - /** - * @return hostname and no :portNumber. - */ - public String getHost() { - int colon = name.indexOf(":"); - if (colon < 0) { - return name; - } else { - return name.substring(0, colon); - } - } - - public int getPort() { - int colon = name.indexOf(":"); - if (colon < 0) { - return 50010; // default port. 
- } - return Integer.parseInt(name.substring(colon+1)); - } - public boolean equals(Object to) { if (this == to) { return true; @@ -150,16 +193,16 @@ public boolean equals(Object to) { if (!(to instanceof DatanodeID)) { return false; } - return (name.equals(((DatanodeID)to).getName()) && + return (getXferAddr().equals(((DatanodeID)to).getXferAddr()) && storageID.equals(((DatanodeID)to).getStorageID())); } public int hashCode() { - return name.hashCode()^ storageID.hashCode(); + return getXferAddr().hashCode()^ storageID.hashCode(); } public String toString() { - return name; + return getXferAddr(); } /** @@ -167,39 +210,44 @@ public String toString() { * Note that this does not update storageID. */ public void updateRegInfo(DatanodeID nodeReg) { - name = nodeReg.getName(); + ipAddr = nodeReg.getIpAddr(); + hostName = nodeReg.getHostName(); + xferPort = nodeReg.getXferPort(); infoPort = nodeReg.getInfoPort(); ipcPort = nodeReg.getIpcPort(); - // update any more fields added in future. } - /** Comparable. - * Basis of compare is the String name (host:portNumber) only. + /** + * Compare based on data transfer address. + * * @param that - * @return as specified by Comparable. + * @return as specified by Comparable */ public int compareTo(DatanodeID that) { - return name.compareTo(that.getName()); + return getXferAddr().compareTo(that.getXferAddr()); } - ///////////////////////////////////////////////// - // Writable - ///////////////////////////////////////////////// @Override public void write(DataOutput out) throws IOException { - DeprecatedUTF8.writeString(out, name); - DeprecatedUTF8.writeString(out, storageID); + Text.writeString(out, ipAddr); + Text.writeString(out, hostName); + Text.writeString(out, storageID); + out.writeShort(xferPort); out.writeShort(infoPort); + out.writeShort(ipcPort); } @Override public void readFields(DataInput in) throws IOException { - name = DeprecatedUTF8.readString(in); - storageID = DeprecatedUTF8.readString(in); - // the infoPort read could be negative, if the port is a large number (more + ipAddr = Text.readString(in); + hostName = Text.readString(in); + storageID = Text.readString(in); + // The port read could be negative, if the port is a large number (more // than 15 bits in storage size (but less than 16 bits). // So chop off the first two bytes (and hence the signed bits) before // setting the field. 
- this.infoPort = in.readShort() & 0x0000ffff; + xferPort = in.readShort() & 0x0000ffff; + infoPort = in.readShort() & 0x0000ffff; + ipcPort = in.readShort() & 0x0000ffff; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java index 2065ae1d1e..22e0851f05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java @@ -51,9 +51,6 @@ public class DatanodeInfo extends DatanodeID implements Node { protected long lastUpdate; protected int xceiverCount; protected String location = NetworkTopology.DEFAULT_RACK; - - // The FQDN of the IP associated with the Datanode's hostname - protected String hostName = null; // Datanode administrative states public enum AdminStates { @@ -110,30 +107,27 @@ public DatanodeInfo(DatanodeID nodeID) { this.adminState = null; } - public DatanodeInfo(DatanodeID nodeID, String location, String hostName) { + public DatanodeInfo(DatanodeID nodeID, String location) { this(nodeID); this.location = location; - this.hostName = hostName; } - public DatanodeInfo(DatanodeID nodeID, String location, String hostName, + public DatanodeInfo(DatanodeID nodeID, String location, final long capacity, final long dfsUsed, final long remaining, final long blockPoolUsed, final long lastUpdate, final int xceiverCount, final AdminStates adminState) { - this(nodeID.getName(), nodeID.getStorageID(), nodeID.getInfoPort(), nodeID - .getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed, lastUpdate, - xceiverCount, location, hostName, adminState); + this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getXferPort(), + nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining, + blockPoolUsed, lastUpdate, xceiverCount, location, adminState); } /** Constructor */ - public DatanodeInfo(final String name, final String storageID, - final int infoPort, final int ipcPort, + public DatanodeInfo(final String name, final String hostName, + final String storageID, final int xferPort, final int infoPort, final int ipcPort, final long capacity, final long dfsUsed, final long remaining, final long blockPoolUsed, final long lastUpdate, final int xceiverCount, - final String networkLocation, final String hostName, - final AdminStates adminState) { - super(name, storageID, infoPort, ipcPort); - + final String networkLocation, final AdminStates adminState) { + super(name, hostName, storageID, xferPort, infoPort, ipcPort); this.capacity = capacity; this.dfsUsed = dfsUsed; this.remaining = remaining; @@ -141,10 +135,14 @@ public DatanodeInfo(final String name, final String storageID, this.lastUpdate = lastUpdate; this.xceiverCount = xceiverCount; this.location = networkLocation; - this.hostName = hostName; this.adminState = adminState; } + /** Network location name */ + public String getName() { + return getXferAddr(); + } + /** The raw capacity. */ public long getCapacity() { return capacity; } @@ -221,15 +219,7 @@ public void setXceiverCount(int xceiverCount) { public synchronized void setNetworkLocation(String location) { this.location = NodeBase.normalize(location); } - - public String getHostName() { - return (hostName == null || hostName.length()==0) ? 
getHost() : hostName; - } - - public void setHostName(String host) { - hostName = host; - } - + /** A formatted string for reporting the status of the DataNode. */ public String getDatanodeReport() { StringBuilder buffer = new StringBuilder(); @@ -239,9 +229,9 @@ public String getDatanodeReport() { long nonDFSUsed = getNonDfsUsed(); float usedPercent = getDfsUsedPercent(); float remainingPercent = getRemainingPercent(); - String lookupName = NetUtils.getHostNameOfIP(name); + String lookupName = NetUtils.getHostNameOfIP(getName()); - buffer.append("Name: "+ name); + buffer.append("Name: "+ getName()); if (lookupName != null) { buffer.append(" (" + lookupName + ")"); } @@ -275,7 +265,7 @@ public String dumpDatanode() { long c = getCapacity(); long r = getRemaining(); long u = getDfsUsed(); - buffer.append(name); + buffer.append(ipAddr); if (!NetworkTopology.DEFAULT_RACK.equals(location)) { buffer.append(" "+location); } @@ -380,10 +370,6 @@ protected void setAdminState(AdminStates newState) { @Override public void write(DataOutput out) throws IOException { super.write(out); - - //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage - out.writeShort(ipcPort); - out.writeLong(capacity); out.writeLong(dfsUsed); out.writeLong(remaining); @@ -391,17 +377,12 @@ public void write(DataOutput out) throws IOException { out.writeLong(lastUpdate); out.writeInt(xceiverCount); Text.writeString(out, location); - Text.writeString(out, hostName == null? "": hostName); WritableUtils.writeEnum(out, getAdminState()); } @Override public void readFields(DataInput in) throws IOException { super.readFields(in); - - //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage - this.ipcPort = in.readShort() & 0x0000ffff; - this.capacity = in.readLong(); this.dfsUsed = in.readLong(); this.remaining = in.readLong(); @@ -409,7 +390,6 @@ public void readFields(DataInput in) throws IOException { this.lastUpdate = in.readLong(); this.xceiverCount = in.readInt(); this.location = Text.readString(in); - this.hostName = Text.readString(in); setAdminState(WritableUtils.readEnum(in, AdminStates.class)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java index c0b63fe0e9..a43f2d5a6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java @@ -84,8 +84,10 @@ public static ExtendedBlock fromProto(HdfsProtos.ExtendedBlockProto proto) { private static HdfsProtos.DatanodeIDProto toProto( DatanodeID dni) { return HdfsProtos.DatanodeIDProto.newBuilder() - .setName(dni.getName()) + .setIpAddr(dni.getIpAddr()) + .setHostName(dni.getHostName()) .setStorageID(dni.getStorageID()) + .setXferPort(dni.getXferPort()) .setInfoPort(dni.getInfoPort()) .setIpcPort(dni.getIpcPort()) .build(); @@ -93,8 +95,10 @@ private static HdfsProtos.DatanodeIDProto toProto( private static DatanodeID fromProto(HdfsProtos.DatanodeIDProto idProto) { return new DatanodeID( - idProto.getName(), + idProto.getIpAddr(), + idProto.getHostName(), idProto.getStorageID(), + idProto.getXferPort(), idProto.getInfoPort(), idProto.getIpcPort()); } @@ -111,7 +115,6 @@ public static HdfsProtos.DatanodeInfoProto toProto(DatanodeInfo dni) { .setLastUpdate(dni.getLastUpdate()) .setXceiverCount(dni.getXceiverCount()) 
.setLocation(dni.getNetworkLocation()) - .setHostName(dni.getHostName()) .setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.valueOf( dni.getAdminState().name())) .build(); @@ -119,7 +122,7 @@ public static HdfsProtos.DatanodeInfoProto toProto(DatanodeInfo dni) { public static DatanodeInfo fromProto(HdfsProtos.DatanodeInfoProto dniProto) { DatanodeInfo dniObj = new DatanodeInfo(fromProto(dniProto.getId()), - dniProto.getLocation(), dniProto.getHostName()); + dniProto.getLocation()); dniObj.setCapacity(dniProto.getCapacity()); dniObj.setDfsUsed(dniProto.getDfsUsed()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java index c6908ff4ed..941a320a79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java @@ -45,9 +45,8 @@ public UnregisteredNodeException(NodeRegistration nodeReg) { * @param storedNode data-node stored in the system with this storage id */ public UnregisteredNodeException(DatanodeID nodeID, DatanodeInfo storedNode) { - super("Data node " + nodeID.getName() - + " is attempting to report storage ID " + super("Data node " + nodeID + " is attempting to report storage ID " + nodeID.getStorageID() + ". Node " - + storedNode.getName() + " is expected to serve this storage."); + + storedNode + " is expected to serve this storage."); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java index 7382543397..3bcc76474e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java @@ -97,8 +97,7 @@ public ClientDatanodeProtocolTranslatorPB(InetSocketAddress addr, */ public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid, Configuration conf, int socketTimeout) throws IOException { - InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getHost() - + ":" + datanodeid.getIpcPort()); + InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr()); rpcProxy = createClientDatanodeProtocolProxy(addr, UserGroupInformation.getCurrentUser(), conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout); @@ -107,8 +106,7 @@ public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid, static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy( DatanodeID datanodeid, Configuration conf, int socketTimeout, LocatedBlock locatedBlock) throws IOException { - InetSocketAddress addr = NetUtils.createSocketAddr( - datanodeid.getHost() + ":" + datanodeid.getIpcPort()); + InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr()); if (LOG.isDebugEnabled()) { LOG.debug("ClientDatanodeProtocol addr=" + addr); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index b1e7be0a0e..89102956ba 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -204,14 +204,18 @@ public static NamenodeRegistration convert(NamenodeRegistrationProto reg) { // DatanodeId public static DatanodeID convert(DatanodeIDProto dn) { - return new DatanodeID(dn.getName(), dn.getStorageID(), dn.getInfoPort(), - dn.getIpcPort()); + return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(), + dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort()); } public static DatanodeIDProto convert(DatanodeID dn) { - return DatanodeIDProto.newBuilder().setName(dn.getName()) - .setInfoPort(dn.getInfoPort()).setIpcPort(dn.getIpcPort()) - .setStorageID(dn.getStorageID()).build(); + return DatanodeIDProto.newBuilder() + .setIpAddr(dn.getIpAddr()) + .setHostName(dn.getHostName()) + .setStorageID(dn.getStorageID()) + .setXferPort(dn.getXferPort()) + .setInfoPort(dn.getInfoPort()) + .setIpcPort(dn.getIpcPort()).build(); } // Arrays of DatanodeId @@ -442,7 +446,6 @@ static public DatanodeInfo convert(DatanodeInfoProto di) { return new DatanodeInfo( PBHelper.convert(di.getId()), di.hasLocation() ? di.getLocation() : null , - di.hasHostName() ? di.getHostName() : null, di.getCapacity(), di.getDfsUsed(), di.getRemaining(), di.getBlockPoolUsed() , di.getLastUpdate() , di.getXceiverCount() , PBHelper.convert(di.getAdminState())); @@ -451,9 +454,6 @@ static public DatanodeInfo convert(DatanodeInfoProto di) { static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) { if (di == null) return null; DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder(); - if (di.getHostName() != null) { - builder.setHostName(di.getHostName()); - } if (di.getNetworkLocation() != null) { builder.setLocation(di.getNetworkLocation()); } @@ -503,7 +503,6 @@ public static DatanodeInfoProto convert(DatanodeInfo info) { builder.setAdminState(PBHelper.convert(info.getAdminState())); builder.setCapacity(info.getCapacity()) .setDfsUsed(info.getDfsUsed()) - .setHostName(info.getHostName()) .setId(PBHelper.convert((DatanodeID)info)) .setLastUpdate(info.getLastUpdate()) .setLocation(info.getNetworkLocation()) @@ -610,8 +609,8 @@ public static DatanodeRegistrationProto convert( DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto .newBuilder(); return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration)) - .setStorageInfo(PBHelper.convert(registration.storageInfo)) - .setKeys(PBHelper.convert(registration.exportedKeys)).build(); + .setStorageInfo(PBHelper.convert(registration.getStorageInfo())) + .setKeys(PBHelper.convert(registration.getExportedKeys())).build(); } public static DatanodeRegistration convert(DatanodeRegistrationProto proto) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index e808af623c..93270107dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -51,6 +51,7 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.Block; import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -304,8 +305,9 @@ private void dispatch() { DataOutputStream out = null; DataInputStream in = null; try { - sock.connect(NetUtils.createSocketAddr( - target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT); + sock.connect( + NetUtils.createSocketAddr(target.datanode.getXferAddr()), + HdfsServerConstants.READ_TIMEOUT); sock.setKeepAlive(true); out = new DataOutputStream( new BufferedOutputStream( sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE)); @@ -586,7 +588,7 @@ private Source(DatanodeInfo node, BalancingPolicy policy, double threshold) { /** Add a node task */ private void addNodeTask(NodeTask task) { assert (task.datanode != this) : - "Source and target are the same " + datanode.getName(); + "Source and target are the same " + datanode; incScheduledSize(task.getSize()); nodeTasks.add(task); } @@ -1006,7 +1008,7 @@ private boolean chooseTarget(Source source, targetCandidates.remove(); } LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from " - +source.datanode.getName() + " to " + target.datanode.getName()); + +source.datanode + " to " + target.datanode); return true; } return false; @@ -1054,7 +1056,7 @@ private boolean chooseSource(BalancerDatanode target, sourceCandidates.remove(); } LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from " - +source.datanode.getName() + " to " + target.datanode.getName()); + +source.datanode + " to " + target.datanode); return true; } return false; @@ -1550,7 +1552,7 @@ private static void printUsage() { */ public static void main(String[] args) { try { - System.exit(ToolRunner.run(null, new Cli(), args)); + System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args)); } catch (Throwable e) { LOG.error("Exiting balancer due an exception", e); System.exit(-1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 1c9b2aad4f..411a06bc30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -808,9 +808,9 @@ private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode, final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode); if (node == null) { NameNode.stateChangeLog.warn("BLOCK* getBlocks: " - + "Asking for blocks from an unrecorded node " + datanode.getName()); + + "Asking for blocks from an unrecorded node " + datanode); throw new HadoopIllegalArgumentException( - "Datanode " + datanode.getName() + " not found."); + "Datanode " + datanode + " not found."); } int numBlocks = node.numBlocks(); @@ -882,7 +882,7 @@ private void addToInvalidates(Block b) { .hasNext();) { DatanodeDescriptor node = it.next(); invalidateBlocks.add(b, node, false); - datanodes.append(node.getName()).append(" "); + datanodes.append(node).append(" "); } if (datanodes.length() != 0) { NameNode.stateChangeLog.info("BLOCK* addToInvalidates: " @@ -921,7 +921,7 @@ private void markBlockAsCorrupt(BlockInfo storedBlock, if (node == null) { throw new IOException("Cannot mark block " + storedBlock.getBlockName() + - " as corrupt because datanode " + dn.getName() + + " as corrupt because datanode " + dn + " does not exist. 
"); } @@ -955,11 +955,11 @@ private void markBlockAsCorrupt(BlockInfo storedBlock, private void invalidateBlock(Block blk, DatanodeInfo dn) throws IOException { NameNode.stateChangeLog.info("BLOCK* invalidateBlock: " - + blk + " on " + dn.getName()); + + blk + " on " + dn); DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { throw new IOException("Cannot invalidate block " + blk - + " because datanode " + dn.getName() + " does not exist."); + + " because datanode " + dn + " does not exist."); } // Check how many copies we have of the block @@ -977,11 +977,11 @@ private void invalidateBlock(Block blk, DatanodeInfo dn) removeStoredBlock(blk, node); if(NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* invalidateBlocks: " - + blk + " on " + dn.getName() + " listed for deletion."); + + blk + " on " + dn + " listed for deletion."); } } else { NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + blk + " on " - + dn.getName() + " is the only copy and was not deleted."); + + dn + " is the only copy and was not deleted."); } } @@ -1224,11 +1224,11 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { StringBuilder targetList = new StringBuilder("datanode(s)"); for (int k = 0; k < targets.length; k++) { targetList.append(' '); - targetList.append(targets[k].getName()); + targetList.append(targets[k]); } NameNode.stateChangeLog.info( "BLOCK* ask " - + rw.srcNode.getName() + " to replicate " + + rw.srcNode + " to replicate " + rw.block + " to " + targetList); } } @@ -1410,15 +1410,15 @@ public void processReport(final DatanodeID nodeID, final String poolId, try { final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID); if (node == null || !node.isAlive) { - throw new IOException("ProcessReport from dead or unregistered node: " - + nodeID.getName()); + throw new IOException( + "ProcessReport from dead or unregistered node: " + nodeID); } // To minimize startup time, we discard any second (or later) block reports // that we receive while still in startup phase. 
if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) { NameNode.stateChangeLog.info("BLOCK* processReport: " - + "discarded non-initial block report from " + nodeID.getName() + + "discarded non-initial block report from " + nodeID + " because namenode still in startup phase"); return; } @@ -1451,7 +1451,7 @@ public void processReport(final DatanodeID nodeID, final String poolId, // Log the block report processing stats from Namenode perspective NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime)); NameNode.stateChangeLog.info("BLOCK* processReport: from " - + nodeID.getName() + ", blocks: " + newReport.getNumberOfBlocks() + + nodeID + ", blocks: " + newReport.getNumberOfBlocks() + ", processing time: " + (endTime - startTime) + " msecs"); } @@ -1511,7 +1511,7 @@ private void processReport(final DatanodeDescriptor node, } for (Block b : toInvalidate) { NameNode.stateChangeLog.info("BLOCK* processReport: block " - + b + " on " + node.getName() + " size " + b.getNumBytes() + + b + " on " + node + " size " + b.getNumBytes() + " does not belong to any file."); addToInvalidates(b, node); } @@ -1662,7 +1662,7 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn, if(LOG.isDebugEnabled()) { LOG.debug("Reported block " + block - + " on " + dn.getName() + " size " + block.getNumBytes() + + " on " + dn + " size " + block.getNumBytes() + " replicaState = " + reportedState); } @@ -1837,7 +1837,7 @@ private BlockToMarkCorrupt checkReplicaCorrupt( // closed. So, ignore this report, assuming we will get a // FINALIZED replica later. See HDFS-2791 LOG.info("Received an RBW replica for block " + storedBlock + - " on " + dn.getName() + ": ignoring it, since the block is " + + " on " + dn + ": ignoring it, since the block is " + "complete with the same generation stamp."); return null; } else { @@ -1850,7 +1850,7 @@ private BlockToMarkCorrupt checkReplicaCorrupt( default: String msg = "Unexpected replica state " + reportedState + " for block: " + storedBlock + - " on " + dn.getName() + " size " + storedBlock.getNumBytes(); + " on " + dn + " size " + storedBlock.getNumBytes(); // log here at WARN level since this is really a broken HDFS // invariant LOG.warn(msg); @@ -1949,7 +1949,7 @@ private Block addStoredBlock(final BlockInfo block, if (storedBlock == null || storedBlock.getINode() == null) { // If this block does not belong to anyfile, then we are done. NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on " - + node.getName() + " size " + block.getNumBytes() + + node + " size " + block.getNumBytes() + " but it does not belong to any file."); // we could add this block to invalidate set of this datanode. // it will happen in next block report otherwise. 
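(Illustrative note, not part of the patch: the log-message simplifications in the hunks above and below work because DatanodeID.toString() now returns the transfer address, per the DatanodeID rework earlier in this change. A minimal sketch of the pattern follows; the class name, logger, and node values are hypothetical.)

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeLoggingSketch {
  private static final Log LOG = LogFactory.getLog(DatanodeLoggingSketch.class);

  public static void main(String[] args) {
    // Constructor added by this patch: ip, hostname, storage ID, xferPort, infoPort, ipcPort.
    DatanodeID dn = new DatanodeID("10.0.0.1", "dn1.example.com", "DS-1234",
        50010, 50075, 50020);
    // Before: LOG.info("... on " + dn.getName());
    // After: appending the object directly prints the IP:xferPort string.
    LOG.info("block report received from " + dn);          // "... from 10.0.0.1:50010"
    LOG.info("transfer address is " + dn.getXferAddr());   // equivalent explicit form
  }
}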
@@ -1972,7 +1972,7 @@ private Block addStoredBlock(final BlockInfo block, curReplicaDelta = 0; NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: " + "Redundant addStoredBlock request received for " + storedBlock - + " on " + node.getName() + " size " + storedBlock.getNumBytes()); + + " on " + node + " size " + storedBlock.getNumBytes()); } // Now check for completion of blocks and safe block count @@ -2035,7 +2035,7 @@ private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) { StringBuilder sb = new StringBuilder(500); sb.append("BLOCK* addStoredBlock: blockMap updated: ") - .append(node.getName()) + .append(node) .append(" is added to "); storedBlock.appendStringTo(sb); sb.append(" size " ) @@ -2069,7 +2069,7 @@ private void invalidateCorruptReplicas(Block blk) { } catch (IOException e) { NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " + "error in deleting bad block " + blk + - " on " + node + e); + " on " + node, e); gotException = true; } } @@ -2335,7 +2335,7 @@ private void chooseExcessReplicates(Collection nonExcess, // addToInvalidates(b, cur); NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: " - +"("+cur.getName()+", "+b+") is added to invalidated blocks set."); + +"("+cur+", "+b+") is added to invalidated blocks set."); } } @@ -2350,7 +2350,7 @@ private void addToExcessReplicate(DatanodeInfo dn, Block block) { excessBlocksCount++; if(NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* addToExcessReplicate:" - + " (" + dn.getName() + ", " + block + + " (" + dn + ", " + block + ") is added to excessReplicateMap"); } } @@ -2363,7 +2363,7 @@ private void addToExcessReplicate(DatanodeInfo dn, Block block) { public void removeStoredBlock(Block block, DatanodeDescriptor node) { if(NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: " - + block + " from " + node.getName()); + + block + " from " + node); } assert (namesystem.hasWriteLock()); { @@ -2476,7 +2476,7 @@ private void processAndHandleReportedBlock(DatanodeDescriptor node, Block block, } for (Block b : toInvalidate) { NameNode.stateChangeLog.info("BLOCK* addBlock: block " - + b + " on " + node.getName() + " size " + b.getNumBytes() + + b + " on " + node + " size " + b.getNumBytes() + " does not belong to any file."); addToInvalidates(b, node); } @@ -2504,7 +2504,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID, NameNode.stateChangeLog .warn("BLOCK* processIncrementalBlockReport" + " is received from dead or unregistered node " - + nodeID.getName()); + + nodeID); throw new IOException( "Got incremental block report from unregistered or dead node"); } @@ -2526,7 +2526,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID, break; default: String msg = - "Unknown block status code reported by " + nodeID.getName() + + "Unknown block status code reported by " + nodeID + ": " + rdbi; NameNode.stateChangeLog.warn(msg); assert false : msg; // if assertions are enabled, throw. 
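(Illustrative note, not part of the patch: node identity in these managers now hinges on the transfer address rather than the old host:port name; DatanodeManager.getDatanode below, for example, compares getXferAddr(). A small sketch of the resulting semantics with hypothetical node values; hostname and info port do not participate in equality or ordering.)

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeIdentitySketch {
  public static void main(String[] args) {
    // Same IP, transfer port and storage ID; different hostname and info port.
    DatanodeID a = new DatanodeID("10.0.0.1", "dn1.example.com", "DS-42", 50010, 50075, 50020);
    DatanodeID b = new DatanodeID("10.0.0.1", "dn1",             "DS-42", 50010, 1006,  50020);

    System.out.println(a.equals(b));                   // true: getXferAddr() and storage ID match
    System.out.println(a.compareTo(b));                // 0: ordering is by transfer address only
    System.out.println(a.hashCode() == b.hashCode());  // true, consistent with equals()
  }
}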
@@ -2535,14 +2535,14 @@ public void processIncrementalBlockReport(final DatanodeID nodeID, if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* block " + (rdbi.getStatus()) + ": " + rdbi.getBlock() - + " is received from " + nodeID.getName()); + + " is received from " + nodeID); } } } finally { namesystem.writeUnlock(); NameNode.stateChangeLog .debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from " - + nodeID.getName() + + nodeID + " receiving: " + receiving + ", " + " received: " + received + ", " + " deleted: " + deleted); @@ -2618,7 +2618,7 @@ private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode, StringBuilder nodeList = new StringBuilder(); while (nodeIter.hasNext()) { DatanodeDescriptor node = nodeIter.next(); - nodeList.append(node.name); + nodeList.append(node); nodeList.append(" "); } LOG.info("Block: " + block + ", Expected Replicas: " @@ -2628,7 +2628,7 @@ private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode, + ", excess replicas: " + num.excessReplicas() + ", Is Open File: " + fileINode.isUnderConstruction() + ", Datanodes having this block: " + nodeList + ", Current Datanode: " - + srcNode.name + ", Is current datanode decommissioning: " + + srcNode + ", Is current datanode decommissioning: " + srcNode.isDecommissionInProgress()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java index 083d39ef33..440e3d4056 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java @@ -65,14 +65,14 @@ public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn, nodes.add(dn); NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ blk.getBlockName() + - " added as corrupt on " + dn.getName() + + " added as corrupt on " + dn + " by " + Server.getRemoteIp() + reasonText); } else { NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ "duplicate requested for " + blk.getBlockName() + " to add as corrupt " + - "on " + dn.getName() + + "on " + dn + " by " + Server.getRemoteIp() + reasonText); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index f01cd0e3f6..bfae110ac1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -175,19 +175,7 @@ public DatanodeDescriptor(DatanodeID nodeID) { */ public DatanodeDescriptor(DatanodeID nodeID, String networkLocation) { - this(nodeID, networkLocation, null); - } - - /** DatanodeDescriptor constructor - * - * @param nodeID id of the data node - * @param networkLocation location of the data node in network - * @param hostName it could be different from host specified for DatanodeID - */ - public DatanodeDescriptor(DatanodeID nodeID, - String networkLocation, - String hostName) { - this(nodeID, networkLocation, hostName, 0L, 0L, 0L, 0L, 0, 0); + 
this(nodeID, networkLocation, 0L, 0L, 0L, 0L, 0, 0); } /** DatanodeDescriptor constructor @@ -223,14 +211,13 @@ public DatanodeDescriptor(DatanodeID nodeID, */ public DatanodeDescriptor(DatanodeID nodeID, String networkLocation, - String hostName, long capacity, long dfsUsed, long remaining, long bpused, int xceiverCount, int failedVolumes) { - super(nodeID, networkLocation, hostName); + super(nodeID, networkLocation); updateHeartbeat(capacity, dfsUsed, remaining, bpused, xceiverCount, failedVolumes); } @@ -436,23 +423,6 @@ public Block[] getInvalidateBlocks(int maxblocks) { } } - /** Serialization for FSEditLog */ - public void readFieldsFromFSEditLog(DataInput in) throws IOException { - this.name = DeprecatedUTF8.readString(in); - this.storageID = DeprecatedUTF8.readString(in); - this.infoPort = in.readShort() & 0x0000ffff; - - this.capacity = in.readLong(); - this.dfsUsed = in.readLong(); - this.remaining = in.readLong(); - this.blockPoolUsed = in.readLong(); - this.lastUpdate = in.readLong(); - this.xceiverCount = in.readInt(); - this.location = Text.readString(in); - this.hostName = Text.readString(in); - setAdminState(WritableUtils.readEnum(in, AdminStates.class)); - } - /** * @return Approximate number of blocks currently scheduled to be written * to this datanode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 8c59ccba5f..5b4cebbd80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -238,7 +238,7 @@ public DatanodeDescriptor getDatanode(DatanodeID nodeID final DatanodeDescriptor node = getDatanode(nodeID.getStorageID()); if (node == null) return null; - if (!node.getName().equals(nodeID.getName())) { + if (!node.getXferAddr().equals(nodeID.getXferAddr())) { final UnregisteredNodeException e = new UnregisteredNodeException( nodeID, node); NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: " @@ -270,7 +270,7 @@ private void removeDatanode(DatanodeDescriptor nodeInfo) { networktopology.remove(nodeInfo); if (LOG.isDebugEnabled()) { - LOG.debug("remove datanode " + nodeInfo.getName()); + LOG.debug("remove datanode " + nodeInfo); } namesystem.checkSafeMode(); } @@ -288,7 +288,7 @@ public void removeDatanode(final DatanodeID node removeDatanode(descriptor); } else { NameNode.stateChangeLog.warn("BLOCK* removeDatanode: " - + node.getName() + " does not exist"); + + node + " does not exist"); } } finally { namesystem.writeUnlock(); @@ -306,7 +306,7 @@ void removeDeadDatanode(final DatanodeID nodeID) { } if (d != null && isDatanodeDead(d)) { NameNode.stateChangeLog.info( - "BLOCK* removeDeadDatanode: lost heartbeat from " + d.getName()); + "BLOCK* removeDeadDatanode: lost heartbeat from " + d); removeDatanode(d); } } @@ -332,19 +332,19 @@ private void addDatanode(final DatanodeDescriptor node) { if (LOG.isDebugEnabled()) { LOG.debug(getClass().getSimpleName() + ".addDatanode: " - + "node " + node.getName() + " is added to datanodeMap."); + + "node " + node + " is added to datanodeMap."); } } /** Physically remove node from datanodeMap. 
*/ - private void wipeDatanode(final DatanodeID node) throws IOException { + private void wipeDatanode(final DatanodeID node) { final String key = node.getStorageID(); synchronized (datanodeMap) { host2DatanodeMap.remove(datanodeMap.remove(key)); } if (LOG.isDebugEnabled()) { LOG.debug(getClass().getSimpleName() + ".wipeDatanode(" - + node.getName() + "): storage " + key + + node + "): storage " + key + " is removed from datanodeMap."); } } @@ -354,7 +354,7 @@ private void resolveNetworkLocation (DatanodeDescriptor node) { List names = new ArrayList(1); if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) { // get the node's IP address - names.add(node.getHost()); + names.add(node.getIpAddr()); } else { // get the node's host name String hostName = node.getHostName(); @@ -376,12 +376,12 @@ private void resolveNetworkLocation (DatanodeDescriptor node) { node.setNetworkLocation(networkLocation); } - private boolean inHostsList(DatanodeID node, String ipAddr) { - return checkInList(node, ipAddr, hostsReader.getHosts(), false); + private boolean inHostsList(DatanodeID node) { + return checkInList(node, hostsReader.getHosts(), false); } - private boolean inExcludedHostsList(DatanodeID node, String ipAddr) { - return checkInList(node, ipAddr, hostsReader.getExcludedHosts(), true); + private boolean inExcludedHostsList(DatanodeID node) { + return checkInList(node, hostsReader.getExcludedHosts(), true); } /** @@ -419,7 +419,7 @@ private void removeDecomNodeFromList(final List nodeList) { for (Iterator it = nodeList.iterator(); it.hasNext();) { DatanodeDescriptor node = it.next(); - if ((!inHostsList(node, null)) && (!inExcludedHostsList(node, null)) + if ((!inHostsList(node)) && (!inExcludedHostsList(node)) && node.isDecommissioned()) { // Include list is not empty, an existing datanode does not appear // in both include or exclude lists and it has been decommissioned. @@ -430,37 +430,23 @@ private void removeDecomNodeFromList(final List nodeList) { } /** - * Check if the given node (of DatanodeID or ipAddress) is in the (include or - * exclude) list. If ipAddress in null, check only based upon the given - * DatanodeID. If ipAddress is not null, the ipAddress should refers to the - * same host that given DatanodeID refers to. + * Check if the given DatanodeID is in the given (include or exclude) list. 
* - * @param node, the host DatanodeID - * @param ipAddress, if not null, should refers to the same host - * that DatanodeID refers to - * @param hostsList, the list of hosts in the include/exclude file - * @param isExcludeList, boolean, true if this is the exclude list - * @return boolean, if in the list + * @param node the DatanodeID to check + * @param hostsList the list of hosts in the include/exclude file + * @param isExcludeList true if this is the exclude list + * @return true if the node is in the list, false otherwise */ private static boolean checkInList(final DatanodeID node, - final String ipAddress, final Set hostsList, final boolean isExcludeList) { final InetAddress iaddr; - if (ipAddress != null) { - try { - iaddr = InetAddress.getByName(ipAddress); - } catch (UnknownHostException e) { - LOG.warn("Unknown ip address: " + ipAddress, e); - return isExcludeList; - } - } else { - try { - iaddr = InetAddress.getByName(node.getHost()); - } catch (UnknownHostException e) { - LOG.warn("Unknown host: " + node.getHost(), e); - return isExcludeList; - } + + try { + iaddr = InetAddress.getByName(node.getIpAddr()); + } catch (UnknownHostException e) { + LOG.warn("Unknown IP: " + node.getIpAddr(), e); + return isExcludeList; } // if include list is empty, host is in include list @@ -470,10 +456,10 @@ private static boolean checkInList(final DatanodeID node, return // compare ipaddress(:port) (hostsList.contains(iaddr.getHostAddress().toString())) || (hostsList.contains(iaddr.getHostAddress().toString() + ":" - + node.getPort())) + + node.getXferPort())) // compare hostname(:port) || (hostsList.contains(iaddr.getHostName())) - || (hostsList.contains(iaddr.getHostName() + ":" + node.getPort())) + || (hostsList.contains(iaddr.getHostName() + ":" + node.getXferPort())) || ((node instanceof DatanodeInfo) && hostsList .contains(((DatanodeInfo) node).getHostName())); } @@ -481,10 +467,9 @@ private static boolean checkInList(final DatanodeID node, /** * Decommission the node if it is in exclude list. */ - private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) - throws IOException { + private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) { // If the registered node is in exclude list, then decommission it - if (inExcludedHostsList(nodeReg, ipAddr)) { + if (inExcludedHostsList(nodeReg)) { startDecommission(nodeReg); } } @@ -499,16 +484,16 @@ boolean checkDecommissionState(DatanodeDescriptor node) { if (node.isDecommissionInProgress()) { if (!blockManager.isReplicationInProgress(node)) { node.setDecommissioned(); - LOG.info("Decommission complete for node " + node.getName()); + LOG.info("Decommission complete for node " + node); } } return node.isDecommissioned(); } /** Start decommissioning the specified datanode. */ - private void startDecommission(DatanodeDescriptor node) throws IOException { + private void startDecommission(DatanodeDescriptor node) { if (!node.isDecommissionInProgress() && !node.isDecommissioned()) { - LOG.info("Start Decommissioning node " + node.getName() + " with " + + LOG.info("Start Decommissioning node " + node + " with " + node.numBlocks() + " blocks."); heartbeatManager.startDecommission(node); node.decommissioningStatus.setStartTime(now()); @@ -519,9 +504,9 @@ private void startDecommission(DatanodeDescriptor node) throws IOException { } /** Stop decommissioning the specified datanodes. 
*/ - void stopDecommission(DatanodeDescriptor node) throws IOException { + void stopDecommission(DatanodeDescriptor node) { if (node.isDecommissionInProgress() || node.isDecommissioned()) { - LOG.info("Stop Decommissioning node " + node.getName()); + LOG.info("Stop Decommissioning node " + node); heartbeatManager.stopDecommission(node); blockManager.processOverReplicatedBlocksOnReCommission(node); } @@ -545,41 +530,44 @@ private String newStorageID() { return newID; } - public void registerDatanode(DatanodeRegistration nodeReg - ) throws IOException { + /** + * Register the given datanode with the namenode. NB: the given + * registration is mutated and given back to the datanode. + * + * @param nodeReg the datanode registration + * @throws DisallowedDatanodeException if the registration request is + * denied because the datanode does not match includes/excludes + */ + public void registerDatanode(DatanodeRegistration nodeReg) + throws DisallowedDatanodeException { String dnAddress = Server.getRemoteAddress(); if (dnAddress == null) { // Mostly called inside an RPC. // But if not, use address passed by the data-node. - dnAddress = nodeReg.getHost(); - } + dnAddress = nodeReg.getIpAddr(); + } + + // Update the IP to the address of the RPC request that is + // registering this datanode. + nodeReg.setIpAddr(dnAddress); + nodeReg.setExportedKeys(blockManager.getBlockKeys()); // Checks if the node is not on the hosts list. If it is not, then // it will be disallowed from registering. - if (!inHostsList(nodeReg, dnAddress)) { + if (!inHostsList(nodeReg)) { throw new DisallowedDatanodeException(nodeReg); } - - String hostName = nodeReg.getHost(); - - // update the datanode's name with ip:port - DatanodeID dnReg = new DatanodeID(dnAddress + ":" + nodeReg.getPort(), - nodeReg.getStorageID(), - nodeReg.getInfoPort(), - nodeReg.getIpcPort()); - nodeReg.updateRegInfo(dnReg); - nodeReg.exportedKeys = blockManager.getBlockKeys(); NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: " - + "node registration from " + nodeReg.getName() + + "node registration from " + nodeReg + " storage " + nodeReg.getStorageID()); DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID()); - DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getName()); + DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getXferAddr()); if (nodeN != null && nodeN != nodeS) { NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: " - + "node from name: " + nodeN.getName()); + + "node from name: " + nodeN); // nodeN previously served a different data storage, // which is not served by anybody anymore. 
removeDatanode(nodeN); @@ -608,15 +596,14 @@ nodes with its data cleared (or user can just remove the StorageID but this is might not work if VERSION file format has changed */ NameNode.stateChangeLog.info( "BLOCK* NameSystem.registerDatanode: " - + "node " + nodeS.getName() - + " is replaced by " + nodeReg.getName() + + + "node " + nodeS + + " is replaced by " + nodeReg + " with the same storageID " + nodeReg.getStorageID()); } // update cluster map getNetworkTopology().remove(nodeS); nodeS.updateRegInfo(nodeReg); - nodeS.setHostName(hostName); nodeS.setDisallowed(false); // Node is in the include list // resolve network location @@ -630,11 +617,11 @@ nodes with its data cleared (or user can just remove the StorageID } // this is a new datanode serving a new data storage - if (nodeReg.getStorageID().equals("")) { + if ("".equals(nodeReg.getStorageID())) { // this data storage has never been registered // it is either empty or was created by pre-storageID version of DFS - nodeReg.storageID = newStorageID(); - if(NameNode.stateChangeLog.isDebugEnabled()) { + nodeReg.setStorageID(newStorageID()); + if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug( "BLOCK* NameSystem.registerDatanode: " + "new storageID " + nodeReg.getStorageID() + " assigned."); @@ -642,7 +629,7 @@ nodes with its data cleared (or user can just remove the StorageID } // register new datanode DatanodeDescriptor nodeDescr - = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName); + = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK); resolveNetworkLocation(nodeDescr); addDatanode(nodeDescr); checkDecommissioning(nodeDescr, dnAddress); @@ -690,10 +677,10 @@ private void refreshHostsReader(Configuration conf) throws IOException { private void refreshDatanodes() throws IOException { for(DatanodeDescriptor node : datanodeMap.values()) { // Check if not include. - if (!inHostsList(node, null)) { + if (!inHostsList(node)) { node.setDisallowed(true); // case 2. } else { - if (inExcludedHostsList(node, null)) { + if (inExcludedHostsList(node)) { startDecommission(node); // case 3. } else { stopDecommission(node); // case 4. @@ -820,16 +807,16 @@ public List getDatanodeListForReport( } //Remove any form of the this datanode in include/exclude lists. 
try { - InetAddress inet = InetAddress.getByName(dn.getHost()); + InetAddress inet = InetAddress.getByName(dn.getIpAddr()); // compare hostname(:port) mustList.remove(inet.getHostName()); - mustList.remove(inet.getHostName()+":"+dn.getPort()); + mustList.remove(inet.getHostName()+":"+dn.getXferPort()); // compare ipaddress(:port) mustList.remove(inet.getHostAddress().toString()); - mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getPort()); + mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getXferPort()); } catch ( UnknownHostException e ) { mustList.remove(dn.getName()); - mustList.remove(dn.getHost()); + mustList.remove(dn.getIpAddr()); LOG.warn(e); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java index 4072ba92d2..68ea1f1710 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java @@ -39,10 +39,10 @@ boolean contains(DatanodeDescriptor node) { return false; } - String host = node.getHost(); + String ipAddr = node.getIpAddr(); hostmapLock.readLock().lock(); try { - DatanodeDescriptor[] nodes = map.get(host); + DatanodeDescriptor[] nodes = map.get(ipAddr); if (nodes != null) { for(DatanodeDescriptor containedNode:nodes) { if (node==containedNode) { @@ -66,8 +66,8 @@ boolean add(DatanodeDescriptor node) { return false; } - String host = node.getHost(); - DatanodeDescriptor[] nodes = map.get(host); + String ipAddr = node.getIpAddr(); + DatanodeDescriptor[] nodes = map.get(ipAddr); DatanodeDescriptor[] newNodes; if (nodes==null) { newNodes = new DatanodeDescriptor[1]; @@ -77,7 +77,7 @@ boolean add(DatanodeDescriptor node) { System.arraycopy(nodes, 0, newNodes, 0, nodes.length); newNodes[nodes.length] = node; } - map.put(host, newNodes); + map.put(ipAddr, newNodes); return true; } finally { hostmapLock.writeLock().unlock(); @@ -92,17 +92,17 @@ boolean remove(DatanodeDescriptor node) { return false; } - String host = node.getHost(); + String ipAddr = node.getIpAddr(); hostmapLock.writeLock().lock(); try { - DatanodeDescriptor[] nodes = map.get(host); + DatanodeDescriptor[] nodes = map.get(ipAddr); if (nodes==null) { return false; } if (nodes.length==1) { if (nodes[0]==node) { - map.remove(host); + map.remove(ipAddr); return true; } else { return false; @@ -122,7 +122,7 @@ boolean remove(DatanodeDescriptor node) { newNodes = new DatanodeDescriptor[nodes.length-1]; System.arraycopy(nodes, 0, newNodes, 0, i); System.arraycopy(nodes, i+1, newNodes, i, nodes.length-i-1); - map.put(host, newNodes); + map.put(ipAddr, newNodes); return true; } } finally { @@ -130,17 +130,18 @@ boolean remove(DatanodeDescriptor node) { } } - /** get a data node by its host. - * @return DatanodeDescriptor if found; otherwise null. + /** + * Get a data node by its IP address. 
+ * @return DatanodeDescriptor if found, null otherwise */ - DatanodeDescriptor getDatanodeByHost(String host) { - if (host==null) { + DatanodeDescriptor getDatanodeByHost(String ipAddr) { + if (ipAddr == null) { return null; } hostmapLock.readLock().lock(); try { - DatanodeDescriptor[] nodes = map.get(host); + DatanodeDescriptor[] nodes = map.get(ipAddr); // no entry if (nodes== null) { return null; @@ -155,40 +156,4 @@ DatanodeDescriptor getDatanodeByHost(String host) { hostmapLock.readLock().unlock(); } } - - /** - * Find data node by its name. - * - * @return DatanodeDescriptor if found or null otherwise - */ - public DatanodeDescriptor getDatanodeByName(String name) { - if (name==null) { - return null; - } - - int colon = name.indexOf(":"); - String host; - if (colon < 0) { - host = name; - } else { - host = name.substring(0, colon); - } - - hostmapLock.readLock().lock(); - try { - DatanodeDescriptor[] nodes = map.get(host); - // no entry - if (nodes== null) { - return null; - } - for(DatanodeDescriptor containedNode:nodes) { - if (name.equals(containedNode.getName())) { - return containedNode; - } - } - return null; - } finally { - hostmapLock.readLock().unlock(); - } - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java index 5c7e0bdca1..d4c0f1c469 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java @@ -75,7 +75,7 @@ synchronized void add(final Block block, final DatanodeInfo datanode, numBlocks++; if (log) { NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName() - + ": add " + block + " to " + datanode.getName()); + + ": add " + block + " to " + datanode); } } } @@ -111,7 +111,8 @@ synchronized void dump(final PrintWriter out) { for(Map.Entry> entry : node2blocks.entrySet()) { final LightWeightHashSet blocks = entry.getValue(); if (blocks.size() > 0) { - out.println(datanodeManager.getDatanode(entry.getKey()).getName() + blocks); + out.println(datanodeManager.getDatanode(entry.getKey())); + out.println(blocks); } } } @@ -135,7 +136,7 @@ int invalidateWork(final String storageId) { if (NameNode.stateChangeLog.isInfoEnabled()) { NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName() - + ": ask " + dn.getName() + " to delete " + toInvalidate); + + ": ask " + dn + " to delete " + toInvalidate); } return toInvalidate.size(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index e07aeeb375..5a13a612cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -88,9 +88,6 @@ private JspHelper() {} private static class NodeRecord extends DatanodeInfo { int frequency; - public NodeRecord() { - frequency = -1; - } public NodeRecord(DatanodeInfo info, int count) { super(info); this.frequency = count; @@ -172,7 +169,7 @@ public static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom, //just ping to check whether the node is alive InetSocketAddress 
targetAddr = NetUtils.createSocketAddr( - chosenNode.getHost() + ":" + chosenNode.getInfoPort()); + chosenNode.getInfoAddr()); try { s = NetUtils.getDefaultSocketFactory(conf).createSocket(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index c76d24c6a7..5ed6e35886 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -64,18 +64,12 @@ public abstract class Storage extends StorageInfo { public static final Log LOG = LogFactory.getLog(Storage.class.getName()); - // Constants - // last layout version that did not support upgrades public static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3; - // this corresponds to Hadoop-0.14. - public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7; - protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.14"; - - /* this should be removed when LAST_UPGRADABLE_LV goes beyond -13. - * any upgrade code that uses this constant should also be removed. */ - public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13; + // this corresponds to Hadoop-0.18 + public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -16; + protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.18"; /** Layout versions of 0.20.203 release */ public static final int[] LAYOUT_VERSIONS_203 = {-19, -31}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index bef6e4da5c..02010d358d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -325,10 +325,10 @@ synchronized void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOExcep void registrationSucceeded(BPServiceActor bpServiceActor, DatanodeRegistration reg) throws IOException { if (bpRegistration != null) { - checkNSEquality(bpRegistration.storageInfo.getNamespaceID(), - reg.storageInfo.getNamespaceID(), "namespace ID"); - checkNSEquality(bpRegistration.storageInfo.getClusterID(), - reg.storageInfo.getClusterID(), "cluster ID"); + checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(), + reg.getStorageInfo().getNamespaceID(), "namespace ID"); + checkNSEquality(bpRegistration.getStorageInfo().getClusterID(), + reg.getStorageInfo().getClusterID(), "cluster ID"); } else { bpRegistration = reg; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 8f813ce0c7..a0850f8b4e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -602,7 +602,7 @@ void register() throws IOException { while (shouldRun()) { try { - // Use returned registration from namenode with updated machine name. 
+ // Use returned registration from namenode with updated fields bpRegistration = bpNamenode.registerDatanode(bpRegistration); break; } catch(SocketTimeoutException e) { // namenode is busy diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 586084b0c9..b1635a9ae3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -164,9 +164,9 @@ import org.mortbay.util.ajax.JSON; import com.google.common.base.Preconditions; +import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.BlockingService; - /********************************************************** * DataNode is a class (and program) that stores a set of * blocks for a DFS deployment. A single deployment can @@ -244,9 +244,10 @@ public static InetSocketAddress createSocketAddr(String target) { private DataStorage storage = null; private HttpServer infoServer = null; DataNodeMetrics metrics; - private InetSocketAddress selfAddr; + private InetSocketAddress streamingAddr; - private volatile String hostName; // Host name of this datanode + private String hostName; + private DatanodeID id; boolean isBlockTokenEnabled; BlockPoolTokenSecretManager blockPoolTokenSecretManager; @@ -288,6 +289,7 @@ public static InetSocketAddress createSocketAddr(String target) { .get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY); try { hostName = getHostName(conf); + LOG.info("Configured hostname is " + hostName); startDataNode(conf, dataDirs, resources); } catch (IOException ie) { shutdown(); @@ -305,16 +307,25 @@ private synchronized void setClusterId(final String nsCid, final String bpid clusterId = nsCid; } + /** + * Returns the hostname for this datanode. If the hostname is not + * explicitly configured in the given config, then it is determined + * via the DNS class. + * + * @param config + * @return the hostname (NB: may not be a FQDN) + * @throws UnknownHostException if the dfs.datanode.dns.interface + * option is used and the hostname can not be determined + */ private static String getHostName(Configuration config) throws UnknownHostException { - // use configured nameserver & interface to get local hostname String name = config.get(DFS_DATANODE_HOST_NAME_KEY); if (name == null) { - name = DNS - .getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY, - DFS_DATANODE_DNS_INTERFACE_DEFAULT), config.get( - DFS_DATANODE_DNS_NAMESERVER_KEY, - DFS_DATANODE_DNS_NAMESERVER_DEFAULT)); + name = DNS.getDefaultHost( + config.get(DFS_DATANODE_DNS_INTERFACE_KEY, + DFS_DATANODE_DNS_INTERFACE_DEFAULT), + config.get(DFS_DATANODE_DNS_NAMESERVER_KEY, + DFS_DATANODE_DNS_NAMESERVER_DEFAULT)); } return name; } @@ -485,23 +496,22 @@ private synchronized void shutdownDirectoryScanner() { } private void initDataXceiver(Configuration conf) throws IOException { - InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf); - // find free port or use privileged port provided ServerSocket ss; - if(secureResources == null) { + if (secureResources == null) { + InetSocketAddress addr = DataNode.getStreamingAddr(conf); ss = (dnConf.socketWriteTimeout > 0) ? 
ServerSocketChannel.open().socket() : new ServerSocket(); - Server.bind(ss, streamingAddr, 0); + Server.bind(ss, addr, 0); } else { ss = secureResources.getStreamingSocket(); } ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); - // adjust machine name with the actual port - int tmpPort = ss.getLocalPort(); - selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), - tmpPort); - LOG.info("Opened streaming server at " + selfAddr); + + streamingAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), + ss.getLocalPort()); + + LOG.info("Opened streaming server at " + streamingAddr); this.threadGroup = new ThreadGroup("dataXceiverServer"); this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this)); @@ -646,7 +656,7 @@ void startDataNode(Configuration conf, this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager(); initIpcServer(conf); - metrics = DataNodeMetrics.create(conf, getMachineName()); + metrics = DataNodeMetrics.create(conf, getDisplayName()); blockPoolManager = new BlockPoolManager(this); blockPoolManager.refreshNamenodes(conf); @@ -657,14 +667,18 @@ void startDataNode(Configuration conf, * @param nsInfo the namespace info from the first part of the NN handshake */ DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) { - DatanodeRegistration bpRegistration = createUnknownBPRegistration(); - String blockPoolId = nsInfo.getBlockPoolID(); - + final String xferIp = streamingAddr.getAddress().getHostAddress(); + DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp); + bpRegistration.setXferPort(getXferPort()); + bpRegistration.setInfoPort(getInfoPort()); + bpRegistration.setIpcPort(getIpcPort()); + bpRegistration.setHostName(hostName); bpRegistration.setStorageID(getStorageId()); - StorageInfo storageInfo = storage.getBPStorage(blockPoolId); + + StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID()); if (storageInfo == null) { // it's null in the case of SimulatedDataSet - bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION; + bpRegistration.getStorageInfo().layoutVersion = HdfsConstants.LAYOUT_VERSION; bpRegistration.setStorageInfo(nsInfo); } else { bpRegistration.setStorageInfo(storageInfo); @@ -679,17 +693,18 @@ DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) { * Also updates the block pool's state in the secret manager. */ synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration, - String blockPoolId) - throws IOException { - hostName = bpRegistration.getHost(); + String blockPoolId) throws IOException { + // Set the ID if we haven't already + if (null == id) { + id = bpRegistration; + } if (storage.getStorageID().equals("")) { - // This is a fresh datanode -- take the storage ID provided by the - // NN and persist it. + // This is a fresh datanode, persist the NN-provided storage ID storage.setStorageID(bpRegistration.getStorageID()); storage.writeAll(); LOG.info("New storage id " + bpRegistration.getStorageID() - + " is assigned to data-node " + bpRegistration.getName()); + + " is assigned to data-node " + bpRegistration); } else if(!storage.getStorageID().equals(bpRegistration.getStorageID())) { throw new IOException("Inconsistent storage IDs. 
Name-node returned " + bpRegistration.getStorageID() @@ -708,7 +723,7 @@ synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration, */ private void registerBlockPoolWithSecretManager(DatanodeRegistration bpRegistration, String blockPoolId) throws IOException { - ExportedBlockKeys keys = bpRegistration.exportedKeys; + ExportedBlockKeys keys = bpRegistration.getExportedKeys(); isBlockTokenEnabled = keys.isBlockTokenEnabled(); // TODO should we check that all federated nns are either enabled or // disabled? @@ -728,8 +743,8 @@ private void registerBlockPoolWithSecretManager(DatanodeRegistration bpRegistrat } blockPoolTokenSecretManager.setKeys(blockPoolId, - bpRegistration.exportedKeys); - bpRegistration.exportedKeys = ExportedBlockKeys.DUMMY_KEYS; + bpRegistration.getExportedKeys()); + bpRegistration.setExportedKeys(ExportedBlockKeys.DUMMY_KEYS); } /** @@ -783,18 +798,6 @@ void initBlockPool(BPOfferService bpos) throws IOException { data.addBlockPool(nsInfo.getBlockPoolID(), conf); } - /** - * Create a DatanodeRegistration object with no valid StorageInfo. - * This is used when reporting an error during handshake - ie - * before we can load any specific block pool. - */ - private DatanodeRegistration createUnknownBPRegistration() { - DatanodeRegistration reg = new DatanodeRegistration(getMachineName()); - reg.setInfoPort(infoServer.getPort()); - reg.setIpcPort(getIpcPort()); - return reg; - } - BPOfferService[] getAllBpOs() { return blockPoolManager.getAllNamenodeThreads(); } @@ -844,23 +847,37 @@ private void registerMXBean() { MBeans.register("DataNode", "DataNodeInfo", this); } - int getPort() { - return selfAddr.getPort(); + int getXferPort() { + return streamingAddr.getPort(); } String getStorageId() { return storage.getStorageID(); } - - /** - * Get host:port with host set to Datanode host and port set to the - * port {@link DataXceiver} is serving. - * @return host:port string + + /** + * @return name useful for logging */ - public String getMachineName() { - return hostName + ":" + getPort(); + public String getDisplayName() { + // NB: our DatanodeID may not be set yet + return hostName + ":" + getIpcPort(); } - + + /** + * NB: The datanode can perform data transfer on the streaming + * address however clients are given the IPC IP address for data + * transfer, and that may be a different address. + * + * @return socket address for data transfer + */ + public InetSocketAddress getXferAddress() { + return streamingAddr; + } + + /** + * @return the datanode's IPC port + */ + @VisibleForTesting public int getIpcPort() { return ipcServer.getListenerAddress().getPort(); } @@ -880,25 +897,6 @@ DatanodeRegistration getDNRegistrationForBP(String bpid) return bpos.bpRegistration; } - /** - * get BP registration by machine and port name (host:port) - * @param mName - the name that the NN used - * @return BP registration - * @throws IOException - */ - DatanodeRegistration getDNRegistrationByMachineName(String mName) { - // TODO: all the BPs should have the same name as each other, they all come - // from getName() here! and the use cases only are in tests where they just - // call with getName(). So we could probably just make this method return - // the first BPOS's registration. See HDFS-2609.
- BPOfferService [] bposArray = blockPoolManager.getAllNamenodeThreads(); - for (BPOfferService bpos : bposArray) { - if(bpos.bpRegistration.getName().equals(mName)) - return bpos.bpRegistration; - } - return null; - } - /** * Creates either NIO or regular depending on socketWriteTimeout. */ @@ -918,8 +916,8 @@ DatanodeProtocolClientSideTranslatorPB connectToNN( public static InterDatanodeProtocol createInterDataNodeProtocolProxy( DatanodeID datanodeid, final Configuration conf, final int socketTimeout) throws IOException { - final InetSocketAddress addr = NetUtils.createSocketAddr( - datanodeid.getHost() + ":" + datanodeid.getIpcPort()); + final InetSocketAddress addr = + NetUtils.createSocketAddr(datanodeid.getIpcAddr()); if (InterDatanodeProtocol.LOG.isDebugEnabled()) { InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr); } @@ -936,10 +934,6 @@ public InterDatanodeProtocol run() throws IOException { throw new IOException(ie.getMessage()); } } - - public InetSocketAddress getSelfAddr() { - return selfAddr; - } DataNodeMetrics getMetrics() { return metrics; @@ -947,7 +941,7 @@ DataNodeMetrics getMetrics() { public static void setNewStorageID(DatanodeID dnId) { LOG.info("Datanode is " + dnId); - dnId.storageID = createNewStorageId(dnId.getPort()); + dnId.setStorageID(createNewStorageId(dnId.getXferPort())); } static String createNewStorageId(int port) { @@ -1223,7 +1217,7 @@ private void transferBlock( ExtendedBlock block, if (LOG.isInfoEnabled()) { StringBuilder xfersBuilder = new StringBuilder(); for (int i = 0; i < numTargets; i++) { - xfersBuilder.append(xferTargets[i].getName()); + xfersBuilder.append(xferTargets[i]); xfersBuilder.append(" "); } LOG.info(bpReg + " Starting thread to transfer block " + @@ -1381,7 +1375,7 @@ public void run() { try { InetSocketAddress curTarget = - NetUtils.createSocketAddr(targets[0].getName()); + NetUtils.createSocketAddr(targets[0].getXferAddr()); sock = newSocket(); NetUtils.connect(sock, curTarget, dnConf.socketTimeout); sock.setSoTimeout(targets.length * dnConf.socketTimeout); @@ -1434,9 +1428,8 @@ public void run() { } } } catch (IOException ie) { - LOG.warn( - bpReg + ":Failed to transfer " + b + " to " + targets[0].getName() - + " got ", ie); + LOG.warn(bpReg + ":Failed to transfer " + b + " to " + + targets[0] + " got ", ie); // check if there are any disk problem checkDiskError(); @@ -1632,7 +1625,7 @@ static ArrayList getDataDirsFromURIs(Collection dataDirs, @Override public String toString() { - return "DataNode{data=" + data + ", localName='" + getMachineName() + return "DataNode{data=" + data + ", localName='" + getDisplayName() + "', storageID='" + getStorageId() + "', xmitsInProgress=" + xmitsInProgress.get() + "}"; } @@ -1990,15 +1983,14 @@ void syncBlock(RecoveringBlock rBlock, private static void logRecoverBlock(String who, ExtendedBlock block, DatanodeID[] targets) { - StringBuilder msg = new StringBuilder(targets[0].getName()); + StringBuilder msg = new StringBuilder(targets[0].toString()); for (int i = 1; i < targets.length; i++) { - msg.append(", " + targets[i].getName()); + msg.append(", " + targets[i]); } LOG.info(who + " calls recoverBlock(block=" + block + ", targets=[" + msg + "])"); } - // ClientDataNodeProtocol implementation @Override // ClientDataNodeProtocol public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException { checkWriteAccess(block); @@ -2076,8 +2068,7 @@ void finalizeUpgradeForPool(String blockPoolId) throws IOException { storage.finalizeUpgrade(blockPoolId); } - // 
Determine a Datanode's streaming address - public static InetSocketAddress getStreamingAddr(Configuration conf) { + static InetSocketAddress getStreamingAddr(Configuration conf) { return NetUtils.createSocketAddr( conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT)); } @@ -2099,8 +2090,11 @@ public String getHttpPort(){ return this.getConf().get("dfs.datanode.info.port"); } - public int getInfoPort(){ - return this.infoServer.getPort(); + /** + * @return the datanode's http port + */ + public int getInfoPort() { + return infoServer.getPort(); } /** @@ -2142,7 +2136,7 @@ public void refreshNamenodes(Configuration conf) throws IOException { blockPoolManager.refreshNamenodes(conf); } - @Override //ClientDatanodeProtocol + @Override // ClientDatanodeProtocol public void refreshNamenodes() throws IOException { conf = new Configuration(); refreshNamenodes(conf); @@ -2204,10 +2198,9 @@ public boolean isDatanodeFullyStarted() { return true; } - /** Methods used by fault injection tests */ + @VisibleForTesting public DatanodeID getDatanodeId() { - return new DatanodeID(getMachineName(), getStorageId(), - infoServer.getPort(), getIpcPort()); + return id; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 16244c725b..9b5f2bb31f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -73,9 +73,6 @@ public class DataStorage extends Storage { public final static String STORAGE_DIR_FINALIZED = "finalized"; public final static String STORAGE_DIR_TMP = "tmp"; - private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN = - Pattern.compile("(.*blk_[-]*\\d+)\\.meta$"); - /** Access to this variable is guarded by "this" */ private String storageID; @@ -197,7 +194,7 @@ synchronized void recoverTransitionRead(DataNode datanode, } // make sure we have storage id set - if not - generate new one - createStorageID(datanode.getPort()); + createStorageID(datanode.getXferPort()); // 3. Update all storages. Some of them might have just been formatted. this.writeAll(); @@ -669,13 +666,6 @@ static void linkBlocks(File from, File to, int oldLV, HardLink hl) in.close(); } } else { - - //check if we are upgrading from pre-generation stamp version. - if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) { - // Link to the new file name. - to = new File(convertMetatadataFileName(to.getAbsolutePath())); - } - HardLink.createHardLink(from, to); hl.linkStats.countSingleLinks++; } @@ -687,50 +677,32 @@ static void linkBlocks(File from, File to, int oldLV, HardLink hl) if (!to.mkdirs()) throw new IOException("Cannot create directory " + to); - //If upgrading from old stuff, need to munge the filenames. That has to - //be done one file at a time, so hardlink them one at a time (slow). 
- if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) { - String[] blockNames = from.list(new java.io.FilenameFilter() { - public boolean accept(File dir, String name) { - return name.startsWith(BLOCK_SUBDIR_PREFIX) - || name.startsWith(BLOCK_FILE_PREFIX) - || name.startsWith(COPY_FILE_PREFIX); - } - }); - if (blockNames.length == 0) { - hl.linkStats.countEmptyDirs++; + String[] blockNames = from.list(new java.io.FilenameFilter() { + public boolean accept(File dir, String name) { + return name.startsWith(BLOCK_FILE_PREFIX); } - else for(int i = 0; i < blockNames.length; i++) - linkBlocks(new File(from, blockNames[i]), - new File(to, blockNames[i]), oldLV, hl); - } - else { - //If upgrading from a relatively new version, we only need to create - //links with the same filename. This can be done in bulk (much faster). - String[] blockNames = from.list(new java.io.FilenameFilter() { + }); + + // Block files just need hard links with the same file names + // but a different directory + if (blockNames.length > 0) { + HardLink.createHardLinkMult(from, blockNames, to); + hl.linkStats.countMultLinks++; + hl.linkStats.countFilesMultLinks += blockNames.length; + } else { + hl.linkStats.countEmptyDirs++; + } + + // Now take care of the rest of the files and subdirectories + String[] otherNames = from.list(new java.io.FilenameFilter() { public boolean accept(File dir, String name) { - return name.startsWith(BLOCK_FILE_PREFIX); + return name.startsWith(BLOCK_SUBDIR_PREFIX) + || name.startsWith(COPY_FILE_PREFIX); } }); - if (blockNames.length > 0) { - HardLink.createHardLinkMult(from, blockNames, to); - hl.linkStats.countMultLinks++; - hl.linkStats.countFilesMultLinks += blockNames.length; - } else { - hl.linkStats.countEmptyDirs++; - } - - //now take care of the rest of the files and subdirectories - String[] otherNames = from.list(new java.io.FilenameFilter() { - public boolean accept(File dir, String name) { - return name.startsWith(BLOCK_SUBDIR_PREFIX) - || name.startsWith(COPY_FILE_PREFIX); - } - }); - for(int i = 0; i < otherNames.length; i++) - linkBlocks(new File(from, otherNames[i]), - new File(to, otherNames[i]), oldLV, hl); - } + for(int i = 0; i < otherNames.length; i++) + linkBlocks(new File(from, otherNames[i]), + new File(to, otherNames[i]), oldLV, hl); } private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um, @@ -741,22 +713,6 @@ private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um, um.initializeUpgrade(nsInfo); } - /** - * This is invoked on target file names when upgrading from pre generation - * stamp version (version -13) to correct the metatadata file name. - * @param oldFileName - * @return the new metadata file name with the default generation stamp. 
- */ - private static String convertMetatadataFileName(String oldFileName) { - Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName); - if (matcher.matches()) { - //return the current metadata file name - return DatanodeUtil.getMetaFileName(matcher.group(1), - GenerationStamp.GRANDFATHER_GENERATION_STAMP); - } - return oldFileName; - } - /** * Add bpStorage into bpStorageMap */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index da9bc79048..995840066d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -168,13 +168,13 @@ public void run() { ++opsProcessed; } while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0); } catch (Throwable t) { - LOG.error(datanode.getMachineName() + ":DataXceiver error processing " + + LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " + ((op == null) ? "unknown" : op.name()) + " operation " + " src: " + remoteAddress + " dest: " + localAddress, t); } finally { if (LOG.isDebugEnabled()) { - LOG.debug(datanode.getMachineName() + ":Number of active connections is: " + LOG.debug(datanode.getDisplayName() + ":Number of active connections is: " + datanode.getXceiverCount()); } updateCurrentThreadName("Cleaning up"); @@ -352,7 +352,7 @@ public void writeBlock(final ExtendedBlock block, if (targets.length > 0) { InetSocketAddress mirrorTarget = null; // Connect to backup machine - mirrorNode = targets[0].getName(); + mirrorNode = targets[0].getXferAddr(); mirrorTarget = NetUtils.createSocketAddr(mirrorNode); mirrorSock = datanode.newSocket(); try { @@ -667,8 +667,8 @@ public void replaceBlock(final ExtendedBlock block, try { // get the output stream to the proxy - InetSocketAddress proxyAddr = NetUtils.createSocketAddr( - proxySource.getName()); + InetSocketAddress proxyAddr = + NetUtils.createSocketAddr(proxySource.getXferAddr()); proxySock = datanode.newSocket(); NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout); proxySock.setSoTimeout(dnConf.socketTimeout); @@ -820,7 +820,7 @@ private void checkAccess(DataOutputStream out, final boolean reply, if (mode == BlockTokenSecretManager.AccessMode.WRITE) { DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk.getBlockPoolId()); - resp.setFirstBadLink(dnR.getName()); + resp.setFirstBadLink(dnR.getXferAddr()); } resp.build().writeDelimitedTo(out); out.flush(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java index 8c932d71cc..f32b2968f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java @@ -152,11 +152,11 @@ public void run() { // another thread closed our listener socket - that's expected during shutdown, // but not in other circumstances if (datanode.shouldRun) { - LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ace); + LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ace); } } catch (IOException ie) { IOUtils.closeSocket(s); - 
LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ie); + LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ie); } catch (OutOfMemoryError ie) { IOUtils.closeSocket(s); // DataNode can run out of memory if there is too many transfers. @@ -169,7 +169,7 @@ public void run() { // ignore } } catch (Throwable te) { - LOG.error(datanode.getMachineName() + LOG.error(datanode.getDisplayName() + ":DataXceiverServer: Exiting due to: ", te); datanode.shouldRun = false; } @@ -177,7 +177,7 @@ public void run() { try { ss.close(); } catch (IOException ie) { - LOG.warn(datanode.getMachineName() + LOG.warn(datanode.getDisplayName() + " :DataXceiverServer: close exception", ie); } } @@ -188,7 +188,7 @@ void kill() { try { this.ss.close(); } catch (IOException ie) { - LOG.warn(datanode.getMachineName() + ":DataXceiverServer.kill(): ", ie); + LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie); } // close all the sockets that were accepted earlier diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java index 3682ccb7ad..62a2f53ac7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java @@ -136,10 +136,8 @@ static void generateDirectoryStructure(JspWriter out, out.print("Empty file"); } else { DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf); - String fqdn = canonicalize(chosenNode.getHost()); - String datanodeAddr = chosenNode.getName(); - int datanodePort = Integer.parseInt(datanodeAddr.substring( - datanodeAddr.indexOf(':') + 1, datanodeAddr.length())); + String fqdn = canonicalize(chosenNode.getIpAddr()); + int datanodePort = chosenNode.getXferPort(); String redirectLocation = "http://" + fqdn + ":" + chosenNode.getInfoPort() + "/browseBlock.jsp?blockId=" + firstBlock.getBlock().getBlockId() + "&blockSize=" @@ -313,7 +311,7 @@ static void generateFileDetails(JspWriter out, dfs.close(); return; } - String fqdn = canonicalize(chosenNode.getHost()); + String fqdn = canonicalize(chosenNode.getIpAddr()); String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort() + "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8") + "&namenodeInfoPort=" + namenodeInfoPort @@ -360,10 +358,9 @@ static void generateFileDetails(JspWriter out, out.print("" + blockidstring + ":"); DatanodeInfo[] locs = cur.getLocations(); for (int j = 0; j < locs.length; j++) { - String datanodeAddr = locs[j].getName(); - datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr - .indexOf(':') + 1, datanodeAddr.length())); - fqdn = canonicalize(locs[j].getHost()); + String datanodeAddr = locs[j].getXferAddr(); + datanodePort = locs[j].getXferPort(); + fqdn = canonicalize(locs[j].getIpAddr()); String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort() + "/browseBlock.jsp?blockId=" + blockidstring + "&blockSize=" + blockSize @@ -519,10 +516,8 @@ static void generateFileChunks(JspWriter out, HttpServletRequest req, nextStartOffset = 0; nextBlockSize = nextBlock.getBlock().getNumBytes(); DatanodeInfo d = JspHelper.bestNode(nextBlock, conf); - String datanodeAddr = d.getName(); - nextDatanodePort = Integer.parseInt(datanodeAddr.substring( - datanodeAddr.indexOf(':') + 1, datanodeAddr.length())); - nextHost = 
d.getHost(); + nextDatanodePort = d.getXferPort(); + nextHost = d.getIpAddr(); nextPort = d.getInfoPort(); } } @@ -573,10 +568,8 @@ static void generateFileChunks(JspWriter out, HttpServletRequest req, prevStartOffset = 0; prevBlockSize = prevBlock.getBlock().getNumBytes(); DatanodeInfo d = JspHelper.bestNode(prevBlock, conf); - String datanodeAddr = d.getName(); - prevDatanodePort = Integer.parseInt(datanodeAddr.substring( - datanodeAddr.indexOf(':') + 1, datanodeAddr.length())); - prevHost = d.getHost(); + prevDatanodePort = d.getXferPort(); + prevHost = d.getIpAddr(); prevPort = d.getInfoPort(); } } @@ -693,7 +686,8 @@ static void generateFileChunksForTail(JspWriter out, HttpServletRequest req, dfs.close(); return; } - InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName()); + InetSocketAddress addr = + NetUtils.createSocketAddr(chosenNode.getXferAddr()); // view the last chunkSizeToView bytes while Tailing final long startOffset = blockSize >= chunkSizeToView ? blockSize - chunkSizeToView : 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java index 9ada40fd5f..6b080013fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java @@ -55,7 +55,7 @@ synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException { if( ! super.initializeUpgrade()) return; // distr upgrade is not needed DataNode.LOG.info("\n Distributed upgrade for DataNode " - + dataNode.getMachineName() + + dataNode.getDisplayName() + " version " + getUpgradeVersion() + " to current LV " + HdfsConstants.LAYOUT_VERSION + " is initialized."); UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first(); @@ -113,7 +113,7 @@ public synchronized boolean startUpgrade() throws IOException { upgradeDaemon = new Daemon(curUO); upgradeDaemon.start(); DataNode.LOG.info("\n Distributed upgrade for DataNode " - + dataNode.getMachineName() + + dataNode.getDisplayName() + " version " + getUpgradeVersion() + " to current LV " + HdfsConstants.LAYOUT_VERSION + " is started."); return true; @@ -128,7 +128,7 @@ synchronized void processUpgradeCommand(UpgradeCommand command if(startUpgrade()) // upgrade started return; throw new IOException( - "Distributed upgrade for DataNode " + dataNode.getMachineName() + "Distributed upgrade for DataNode " + dataNode.getDisplayName() + " version " + getUpgradeVersion() + " to current LV " + HdfsConstants.LAYOUT_VERSION + " cannot be started. 
" + "The upgrade object is not defined."); @@ -143,7 +143,7 @@ public synchronized void completeUpgrade() throws IOException { currentUpgrades = null; upgradeDaemon = null; DataNode.LOG.info("\n Distributed upgrade for DataNode " - + dataNode.getMachineName() + + dataNode.getDisplayName() + " version " + getUpgradeVersion() + " to current LV " + HdfsConstants.LAYOUT_VERSION + " is complete."); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index aa6b70cf4e..ad8ddc0628 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.util.Holder; -import org.apache.hadoop.io.IOUtils; import com.google.common.base.Joiner; @@ -231,37 +230,13 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, // get name and replication final short replication = fsNamesys.getBlockManager( ).adjustReplication(addCloseOp.replication); - PermissionStatus permissions = fsNamesys.getUpgradePermission(); - if (addCloseOp.permissions != null) { - permissions = addCloseOp.permissions; - } - long blockSize = addCloseOp.blockSize; - - // Versions of HDFS prior to 0.17 may log an OP_ADD transaction - // which includes blocks in it. When we update the minimum - // upgrade version to something more recent than 0.17, we can - // simplify this code by asserting that OP_ADD transactions - // don't have any blocks. - - // Older versions of HDFS does not store the block size in inode. - // If the file has more than one block, use the size of the - // first block as the blocksize. Otherwise use the default - // block size. - if (-8 <= logVersion && blockSize == 0) { - if (addCloseOp.blocks.length > 1) { - blockSize = addCloseOp.blocks[0].getNumBytes(); - } else { - long first = ((addCloseOp.blocks.length == 1)? - addCloseOp.blocks[0].getNumBytes(): 0); - blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first); - } - } + assert addCloseOp.blocks.length == 0; // add to the file tree newFile = (INodeFile)fsDir.unprotectedAddFile( - addCloseOp.path, permissions, + addCloseOp.path, addCloseOp.permissions, replication, addCloseOp.mtime, - addCloseOp.atime, blockSize, + addCloseOp.atime, addCloseOp.blockSize, true, addCloseOp.clientName, addCloseOp.clientMachine); fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path); @@ -373,12 +348,7 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, } case OP_MKDIR: { MkdirOp mkdirOp = (MkdirOp)op; - PermissionStatus permissions = fsNamesys.getUpgradePermission(); - if (mkdirOp.permissions != null) { - permissions = mkdirOp.permissions; - } - - fsDir.unprotectedMkdir(mkdirOp.path, permissions, + fsDir.unprotectedMkdir(mkdirOp.path, mkdirOp.permissions, mkdirOp.timestamp); break; } @@ -493,9 +463,6 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, // no data in here currently. 
break; } - case OP_DATANODE_ADD: - case OP_DATANODE_REMOVE: - break; default: throw new IOException("Invalid operation read " + op.opCode); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 949554dbda..3c9dab4b0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -30,11 +30,8 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; -import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; -import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.util.PureJavaCrc32; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*; @@ -81,8 +78,6 @@ protected EnumMap initialValue() { instances.put(OP_DELETE, new DeleteOp()); instances.put(OP_MKDIR, new MkdirOp()); instances.put(OP_SET_GENSTAMP, new SetGenstampOp()); - instances.put(OP_DATANODE_ADD, new DatanodeAddOp()); - instances.put(OP_DATANODE_REMOVE, new DatanodeRemoveOp()); instances.put(OP_SET_PERMISSIONS, new SetPermissionsOp()); instances.put(OP_SET_OWNER, new SetOwnerOp()); instances.put(OP_SET_NS_QUOTA, new SetNSQuotaOp()); @@ -147,7 +142,6 @@ static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatin PermissionStatus permissions; String clientName; String clientMachine; - //final DatanodeDescriptor[] dataNodeDescriptors; UNUSED private AddCloseOp(FSEditLogOpCodes opCode) { super(opCode); @@ -226,13 +220,10 @@ void writeFields(DataOutputStream out) throws IOException { @Override void readFields(DataInputStream in, int logVersion) throws IOException { - // versions > 0 support per file replication - // get name and replication if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.length = in.readInt(); } - if (-7 == logVersion && length != 3|| - -17 < logVersion && logVersion < -7 && length != 4 || + if ((-17 < logVersion && length != 4) || (logVersion <= -17 && length != 5 && !LayoutVersion.supports( Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) { throw new IOException("Incorrect data format." + @@ -259,49 +250,26 @@ void readFields(DataInputStream in, int logVersion) } else { this.atime = 0; } - if (logVersion < -7) { - if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { - this.blockSize = FSImageSerialization.readLong(in); - } else { - this.blockSize = readLong(in); - } + + if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { + this.blockSize = FSImageSerialization.readLong(in); } else { - this.blockSize = 0; + this.blockSize = readLong(in); } - // get blocks this.blocks = readBlocks(in, logVersion); - - if (logVersion <= -11) { - this.permissions = PermissionStatus.read(in); - } else { - this.permissions = null; - } + this.permissions = PermissionStatus.read(in); // clientname, clientMachine and block locations of last block. 
- if (this.opCode == OP_ADD && logVersion <= -12) { + if (this.opCode == OP_ADD) { this.clientName = FSImageSerialization.readString(in); this.clientMachine = FSImageSerialization.readString(in); - if (-13 <= logVersion) { - readDatanodeDescriptorArray(in); - } } else { this.clientName = ""; this.clientMachine = ""; } } - /** This method is defined for compatibility reason. */ - private static DatanodeDescriptor[] readDatanodeDescriptorArray(DataInput in) - throws IOException { - DatanodeDescriptor[] locations = new DatanodeDescriptor[in.readInt()]; - for (int i = 0; i < locations.length; i++) { - locations[i] = new DatanodeDescriptor(); - locations[i].readFieldsFromFSEditLog(in); - } - return locations; - } - private static Block[] readBlocks( DataInputStream in, int logVersion) throws IOException { @@ -309,14 +277,7 @@ private static Block[] readBlocks( Block[] blocks = new Block[numBlocks]; for (int i = 0; i < numBlocks; i++) { Block blk = new Block(); - if (logVersion <= -14) { - blk.readFields(in); - } else { - BlockTwo oldblk = new BlockTwo(); - oldblk.readFields(in); - blk.set(oldblk.blkid, oldblk.len, - GenerationStamp.GRANDFATHER_GENERATION_STAMP); - } + blk.readFields(in); blocks[i] = blk; } return blocks; @@ -788,17 +749,14 @@ void writeFields(DataOutputStream out) throws IOException { } @Override - void readFields(DataInputStream in, int logVersion) - throws IOException { - + void readFields(DataInputStream in, int logVersion) throws IOException { if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { this.length = in.readInt(); } if (-17 < logVersion && length != 2 || logVersion <= -17 && length != 3 && !LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { - throw new IOException("Incorrect data format. " - + "Mkdir operation."); + throw new IOException("Incorrect data format. Mkdir operation."); } this.path = FSImageSerialization.readString(in); if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { @@ -811,7 +769,6 @@ void readFields(DataInputStream in, int logVersion) // However, currently this is not being updated/used because of // performance reasons. if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) { - /* unused this.atime = */ if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) { FSImageSerialization.readLong(in); } else { @@ -819,11 +776,7 @@ void readFields(DataInputStream in, int logVersion) } } - if (logVersion <= -11) { - this.permissions = PermissionStatus.read(in); - } else { - this.permissions = null; - } + this.permissions = PermissionStatus.read(in); } @Override @@ -888,77 +841,6 @@ public String toString() { } } - @SuppressWarnings("deprecation") - static class DatanodeAddOp extends FSEditLogOp { - private DatanodeAddOp() { - super(OP_DATANODE_ADD); - } - - static DatanodeAddOp getInstance() { - return (DatanodeAddOp)opInstances.get() - .get(OP_DATANODE_ADD); - } - - @Override - void writeFields(DataOutputStream out) throws IOException { - throw new IOException("Deprecated, should not write"); - } - - @Override - void readFields(DataInputStream in, int logVersion) - throws IOException { - //Datanodes are not persistent any more. 
- FSImageSerialization.DatanodeImage.skipOne(in); - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("DatanodeAddOp [opCode="); - builder.append(opCode); - builder.append(", txid="); - builder.append(txid); - builder.append("]"); - return builder.toString(); - } - } - - @SuppressWarnings("deprecation") - static class DatanodeRemoveOp extends FSEditLogOp { - private DatanodeRemoveOp() { - super(OP_DATANODE_REMOVE); - } - - static DatanodeRemoveOp getInstance() { - return (DatanodeRemoveOp)opInstances.get() - .get(OP_DATANODE_REMOVE); - } - - @Override - void writeFields(DataOutputStream out) throws IOException { - throw new IOException("Deprecated, should not write"); - } - - @Override - void readFields(DataInputStream in, int logVersion) - throws IOException { - DatanodeID nodeID = new DatanodeID(); - nodeID.readFields(in); - //Datanodes are not persistent any more. - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("DatanodeRemoveOp [opCode="); - builder.append(opCode); - builder.append(", txid="); - builder.append(txid); - builder.append("]"); - return builder.toString(); - } - } - static class SetPermissionsOp extends FSEditLogOp { String src; FsPermission permissions; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java index 1f809c12b2..c08a5a92a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java @@ -36,8 +36,8 @@ public enum FSEditLogOpCodes { OP_DELETE ((byte) 2), OP_MKDIR ((byte) 3), OP_SET_REPLICATION ((byte) 4), - @Deprecated OP_DATANODE_ADD ((byte) 5), - @Deprecated OP_DATANODE_REMOVE((byte) 6), + @Deprecated OP_DATANODE_ADD ((byte) 5), // obsolete + @Deprecated OP_DATANODE_REMOVE((byte) 6), // obsolete OP_SET_PERMISSIONS ((byte) 7), OP_SET_OWNER ((byte) 8), OP_CLOSE ((byte) 9), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index e029b24022..f666f35b74 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -131,34 +131,22 @@ void load(File curFile) DataInputStream in = new DataInputStream(fin); try { - /* - * Note: Remove any checks for version earlier than - * Storage.LAST_UPGRADABLE_LAYOUT_VERSION since we should never get - * to here with older images. - */ - - /* - * TODO we need to change format of the image file - * it should not contain version and namespace fields - */ // read image version: first appeared in version -1 int imgVersion = in.readInt(); - if(getLayoutVersion() != imgVersion) + if (getLayoutVersion() != imgVersion) { throw new InconsistentFSStateException(curFile, "imgVersion " + imgVersion + " expected to be " + getLayoutVersion()); + } // read namespaceID: first appeared in version -2 in.readInt(); - // read number of files - long numFiles = readNumFiles(in); + long numFiles = in.readLong(); // read in the last generation stamp. 
- if (imgVersion <= -12) { - long genstamp = in.readLong(); - namesystem.setGenerationStamp(genstamp); - } + long genstamp = in.readLong(); + namesystem.setGenerationStamp(genstamp); // read the transaction ID of the last edit represented by // this image @@ -167,7 +155,6 @@ void load(File curFile) } else { imgTxId = 0; } - // read compression related info FSImageCompression compression; @@ -189,13 +176,9 @@ void load(File curFile) loadFullNameINodes(numFiles, in); } - // load datanode info - this.loadDatanodes(in); + loadFilesUnderConstruction(in); - // load Files Under Construction - this.loadFilesUnderConstruction(in); - - this.loadSecretManagerState(in); + loadSecretManagerState(in); // make sure to read to the end of file int eof = in.read(); @@ -335,89 +318,44 @@ private INode loadINode(DataInputStream in) if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) { atime = in.readLong(); } - if (imgVersion <= -8) { - blockSize = in.readLong(); - } + blockSize = in.readLong(); int numBlocks = in.readInt(); BlockInfo blocks[] = null; - // for older versions, a blocklist of size 0 - // indicates a directory. - if ((-9 <= imgVersion && numBlocks > 0) || - (imgVersion < -9 && numBlocks >= 0)) { + if (numBlocks >= 0) { blocks = new BlockInfo[numBlocks]; for (int j = 0; j < numBlocks; j++) { blocks[j] = new BlockInfo(replication); - if (-14 < imgVersion) { - blocks[j].set(in.readLong(), in.readLong(), - GenerationStamp.GRANDFATHER_GENERATION_STAMP); - } else { - blocks[j].readFields(in); - } - } - } - // Older versions of HDFS does not store the block size in inode. - // If the file has more than one block, use the size of the - // first block as the blocksize. Otherwise use the default block size. - // - if (-8 <= imgVersion && blockSize == 0) { - if (numBlocks > 1) { - blockSize = blocks[0].getNumBytes(); - } else { - long first = ((numBlocks == 1) ? blocks[0].getNumBytes(): 0); - blockSize = Math.max(namesystem.getDefaultBlockSize(), first); + blocks[j].readFields(in); } } // get quota only when the node is a directory long nsQuota = -1L; - if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion) - && blocks == null && numBlocks == -1) { - nsQuota = in.readLong(); - } - long dsQuota = -1L; - if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion) - && blocks == null && numBlocks == -1) { - dsQuota = in.readLong(); - } - - // Read the symlink only when the node is a symlink - String symlink = ""; - if (numBlocks == -2) { - symlink = Text.readString(in); - } - - PermissionStatus permissions = namesystem.getUpgradePermission(); - if (imgVersion <= -11) { - permissions = PermissionStatus.read(in); - } - - return INode.newINode(permissions, blocks, symlink, replication, - modificationTime, atime, nsQuota, dsQuota, blockSize); + if (blocks == null && numBlocks == -1) { + nsQuota = in.readLong(); + } + long dsQuota = -1L; + if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion) + && blocks == null && numBlocks == -1) { + dsQuota = in.readLong(); } - private void loadDatanodes(DataInputStream in) - throws IOException { - int imgVersion = getLayoutVersion(); - - if (imgVersion > -3) // pre datanode image version - return; - if (imgVersion <= -12) { - return; // new versions do not store the datanodes any more. - } - int size = in.readInt(); - for(int i = 0; i < size; i++) { - // We don't need to add these descriptors any more. 
- FSImageSerialization.DatanodeImage.skipOne(in); - } + // Read the symlink only when the node is a symlink + String symlink = ""; + if (numBlocks == -2) { + symlink = Text.readString(in); } + + PermissionStatus permissions = PermissionStatus.read(in); + + return INode.newINode(permissions, blocks, symlink, replication, + modificationTime, atime, nsQuota, dsQuota, blockSize); + } private void loadFilesUnderConstruction(DataInputStream in) throws IOException { FSDirectory fsDir = namesystem.dir; - int imgVersion = getLayoutVersion(); - if (imgVersion > -13) // pre lease image version - return; int size = in.readInt(); LOG.info("Number of files under construction = " + size); @@ -457,17 +395,6 @@ private int getLayoutVersion() { return namesystem.getFSImage().getStorage().getLayoutVersion(); } - private long readNumFiles(DataInputStream in) - throws IOException { - int imgVersion = getLayoutVersion(); - - if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)) { - return in.readLong(); - } else { - return in.readInt(); - } - } - private boolean isRoot(byte[][] path) { return path.length == 1 && path[0] == null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index f5084339e8..d6453fa8b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -17,9 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.io.DataInput; import java.io.DataInputStream; -import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; @@ -31,7 +29,6 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DeprecatedUTF8; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; @@ -39,7 +36,6 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.ShortWritable; import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; /** @@ -107,13 +103,10 @@ static INodeFileUnderConstruction readINodeUnderConstruction( String clientName = readString(in); String clientMachine = readString(in); - // These locations are not used at all + // We previously stored locations for the last block, now we + // just record that there are none int numLocs = in.readInt(); - DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs]; - for (i = 0; i < numLocs; i++) { - locations[i] = new DatanodeDescriptor(); - locations[i].readFields(in); - } + assert numLocs == 0 : "Unexpected block locations"; return new INodeFileUnderConstruction(name, blockReplication, @@ -320,53 +313,4 @@ public static Block[] readCompactBlockArray( } return ret; } - - /** - * DatanodeImage is used to store persistent information - * about datanodes into the fsImage. 
- */ - static class DatanodeImage implements Writable { - DatanodeDescriptor node = new DatanodeDescriptor(); - - static void skipOne(DataInput in) throws IOException { - DatanodeImage nodeImage = new DatanodeImage(); - nodeImage.readFields(in); - } - - ///////////////////////////////////////////////// - // Writable - ///////////////////////////////////////////////// - /** - * Public method that serializes the information about a - * Datanode to be stored in the fsImage. - */ - public void write(DataOutput out) throws IOException { - new DatanodeID(node).write(out); - out.writeLong(node.getCapacity()); - out.writeLong(node.getRemaining()); - out.writeLong(node.getLastUpdate()); - out.writeInt(node.getXceiverCount()); - } - - /** - * Public method that reads a serialized Datanode - * from the fsImage. - */ - public void readFields(DataInput in) throws IOException { - DatanodeID id = new DatanodeID(); - id.readFields(in); - long capacity = in.readLong(); - long remaining = in.readLong(); - long lastUpdate = in.readLong(); - int xceiverCount = in.readInt(); - - // update the DatanodeDescriptor with the data we read in - node.updateRegInfo(id); - node.setStorageID(id.getStorageID()); - node.setCapacity(capacity); - node.setRemaining(remaining); - node.setLastUpdate(lastUpdate); - node.setXceiverCount(xceiverCount); - } - } } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 9d647f2353..06e613aee0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -52,8 +52,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY; @@ -118,7 +116,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.ha.ServiceFailedException; -import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -267,7 +264,6 @@ private static final void logAuditEvent(UserGroupInformation ugi, private boolean persistBlocks; private UserGroupInformation fsOwner; private String supergroup; - private PermissionStatus defaultPermission; private boolean standbyShouldCheckpoint; // Scan interval is not configurable. 
@@ -846,11 +842,6 @@ private void setConfigurationParameters(Configuration conf) "must not be specified if HA is not enabled."); } - short filePermission = (short)conf.getInt(DFS_NAMENODE_UPGRADE_PERMISSION_KEY, - DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT); - this.defaultPermission = PermissionStatus.createImmutable( - fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission)); - this.serverDefaults = new FsServerDefaults( conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT), conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT), @@ -878,14 +869,6 @@ private void setConfigurationParameters(Configuration conf) DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT); } - /** - * Return the default path permission when upgrading from releases with no - * permissions (<=0.15) to releases with permissions (>=0.16) - */ - protected PermissionStatus getUpgradePermission() { - return defaultPermission; - } - NamespaceInfo getNamespaceInfo() { readLock(); try { @@ -5072,6 +5055,8 @@ public String getLiveNodes() { innerinfo.put("lastContact", getLastContact(node)); innerinfo.put("usedSpace", getDfsUsed(node)); innerinfo.put("adminState", node.getAdminState().toString()); + innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed()); + innerinfo.put("capacity", node.getCapacity()); info.put(node.getHostName(), innerinfo); } return JSON.toString(info); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java index 321d0398c5..9c9b0fdda0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java @@ -59,7 +59,7 @@ private URL createRedirectURL(UserGroupInformation ugi, DatanodeID host, HttpServletRequest request, NameNode nn) throws IOException { final String hostname = host instanceof DatanodeInfo - ? ((DatanodeInfo)host).getHostName() : host.getHost(); + ? ((DatanodeInfo)host).getHostName() : host.getIpAddr(); final String scheme = request.getScheme(); final int port = "https".equals(scheme) ? (Integer)getServletContext().getAttribute("datanode.https.port") diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java index 1604ec128b..c8ccca16d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java @@ -59,7 +59,7 @@ private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus st if (host instanceof DatanodeInfo) { hostname = ((DatanodeInfo)host).getHostName(); } else { - hostname = host.getHost(); + hostname = host.getIpAddr(); } final int port = "https".equals(scheme) ? 
(Integer)getServletContext().getAttribute("datanode.https.port") diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 61ad180e86..ca4ab24c21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -854,7 +854,7 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg, BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks()); if(stateChangeLog.isDebugEnabled()) { stateChangeLog.debug("*BLOCK* NameNode.blockReport: " - + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks() + + "from " + nodeReg + " " + blist.getNumberOfBlocks() + " blocks"); } @@ -870,7 +870,7 @@ public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId, verifyRequest(nodeReg); if(stateChangeLog.isDebugEnabled()) { stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: " - +"from "+nodeReg.getName()+" "+receivedAndDeletedBlocks.length + +"from "+nodeReg+" "+receivedAndDeletedBlocks.length +" blocks."); } namesystem.getBlockManager().processIncrementalBlockReport( @@ -880,7 +880,8 @@ public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId, @Override // DatanodeProtocol public void errorReport(DatanodeRegistration nodeReg, int errorCode, String msg) throws IOException { - String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName()); + String dnName = + (nodeReg == null) ? "Unknown DataNode" : nodeReg.toString(); if (errorCode == DatanodeProtocol.NOTIFY) { LOG.info("Error report from " + dnName + ": " + msg); @@ -909,13 +910,10 @@ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOExcept } /** - * Verify request. + * Verifies the given registration. * - * Verifies correctness of the datanode version, registration ID, and - * if the datanode does not need to be shutdown. 
- * - * @param nodeReg data node registration - * @throws IOException + * @param nodeReg node registration + * @throws UnregisteredNodeException if the registration is invalid */ void verifyRequest(NodeRegistration nodeReg) throws IOException { verifyVersion(nodeReg.getVersion()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index 8763c93e3b..7cb868b179 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -496,7 +496,7 @@ private void copyBlock(DFSClient dfs, LocatedBlock lblock, try { chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes); - targetAddr = NetUtils.createSocketAddr(chosenNode.getName()); + targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr()); } catch (IOException ie) { if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) { throw new IOException("Could not obtain block " + lblock); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index 44c07510ba..2dfa59751f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -260,14 +260,14 @@ void generateHealthReport(JspWriter out, NameNode nn, // Find out common suffix. Should this be before or after the sort? String port_suffix = null; if (live.size() > 0) { - String name = live.get(0).getName(); + String name = live.get(0).getXferAddr(); int idx = name.indexOf(':'); if (idx > 0) { port_suffix = name.substring(idx); } for (int i = 1; port_suffix != null && i < live.size(); i++) { - if (live.get(i).getName().endsWith(port_suffix) == false) { + if (live.get(i).getXferAddr().endsWith(port_suffix) == false) { port_suffix = null; break; } @@ -404,7 +404,7 @@ static void redirectToRandomDataNode(ServletContext context, final String nodeToRedirect; int redirectPort; if (datanode != null) { - nodeToRedirect = datanode.getHost(); + nodeToRedirect = datanode.getIpAddr(); redirectPort = datanode.getInfoPort(); } else { nodeToRedirect = nn.getHttpAddress().getHostName(); @@ -466,14 +466,14 @@ private void generateNodeDataHeader(JspWriter out, DatanodeDescriptor d, + URLEncoder.encode("/", "UTF-8") + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnaddr); - String name = d.getHostName() + ":" + d.getPort(); + String name = d.getXferAddrWithHostname(); if (!name.matches("\\d+\\.\\d+.\\d+\\.\\d+.*")) name = name.replaceAll("\\.[^.:]*", ""); int idx = (suffix != null && name.endsWith(suffix)) ? name .indexOf(suffix) : -1; - out.print(rowTxt() + "" + out.print(rowTxt() + "" + ((idx > 0) ? name.substring(0, idx) : name) + "" + ((alive) ? "" : "\n")); } @@ -599,14 +599,14 @@ void generateNodesList(ServletContext context, JspWriter out, // Find out common suffix. Should this be before or after the sort? 
String port_suffix = null; if (live.size() > 0) { - String name = live.get(0).getName(); + String name = live.get(0).getXferAddr(); int idx = name.indexOf(':'); if (idx > 0) { port_suffix = name.substring(idx); } for (int i = 1; port_suffix != null && i < live.size(); i++) { - if (live.get(i).getName().endsWith(port_suffix) == false) { + if (live.get(i).getXferAddr().endsWith(port_suffix) == false) { port_suffix = null; break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java index c36fb69ee4..9439c631d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java @@ -80,9 +80,8 @@ public interface DatanodeProtocol { * * @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration) * @param registration datanode registration information - * @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains - * new storageID if the datanode did not have one and - * registration ID for further communication. + * @return the given {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration} with + * updated registration information */ public DatanodeRegistration registerDatanode(DatanodeRegistration registration ) throws IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java index d21b92ed1b..228fb62262 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java @@ -49,8 +49,8 @@ public class DatanodeRegistration extends DatanodeID }); } - public StorageInfo storageInfo; - public ExportedBlockKeys exportedKeys; + private StorageInfo storageInfo; + private ExportedBlockKeys exportedKeys; /** * Default constructor. 
@@ -62,8 +62,8 @@ public DatanodeRegistration() { /** * Create DatanodeRegistration */ - public DatanodeRegistration(String nodeName) { - this(nodeName, new StorageInfo(), new ExportedBlockKeys()); + public DatanodeRegistration(String ipAddr) { + this(ipAddr, new StorageInfo(), new ExportedBlockKeys()); } public DatanodeRegistration(DatanodeID dn, StorageInfo info, @@ -73,9 +73,9 @@ public DatanodeRegistration(DatanodeID dn, StorageInfo info, this.exportedKeys = keys; } - public DatanodeRegistration(String nodeName, StorageInfo info, + public DatanodeRegistration(String ipAddr, StorageInfo info, ExportedBlockKeys keys) { - super(nodeName); + super(ipAddr); this.storageInfo = info; this.exportedKeys = keys; } @@ -83,7 +83,19 @@ public DatanodeRegistration(String nodeName, StorageInfo info, public void setStorageInfo(StorageInfo storage) { this.storageInfo = new StorageInfo(storage); } - + + public StorageInfo getStorageInfo() { + return storageInfo; + } + + public void setExportedKeys(ExportedBlockKeys keys) { + this.exportedKeys = keys; + } + + public ExportedBlockKeys getExportedKeys() { + return exportedKeys; + } + @Override // NodeRegistration public int getVersion() { return storageInfo.getLayoutVersion(); @@ -96,13 +108,13 @@ public String getRegistrationID() { @Override // NodeRegistration public String getAddress() { - return getName(); + return getXferAddr(); } @Override public String toString() { return getClass().getSimpleName() - + "(" + name + + "(" + ipAddr + ", storageID=" + storageID + ", infoPort=" + infoPort + ", ipcPort=" + ipcPort diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java index a22ef5fca0..a68f1c2c8f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java @@ -38,6 +38,6 @@ public class DisallowedDatanodeException extends IOException { private static final long serialVersionUID = 1L; public DisallowedDatanodeException(DatanodeID nodeID) { - super("Datanode denied communication with namenode: " + nodeID.getName()); + super("Datanode denied communication with namenode: " + nodeID); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java index a33854b883..a684418545 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java @@ -28,7 +28,7 @@ public interface NodeRegistration { /** * Get address of the server node. 
- * @return hostname:portNumber + * @return ipAddr:portNumber */ public String getAddress(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index 970f1dc610..d27b664f6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -280,10 +280,11 @@ private static Map toJsonMap(final DatanodeInfo datanodeinfo) { } final Map m = new TreeMap(); - m.put("name", datanodeinfo.getName()); + m.put("ipAddr", datanodeinfo.getIpAddr()); + m.put("hostName", datanodeinfo.getHostName()); m.put("storageID", datanodeinfo.getStorageID()); + m.put("xferPort", datanodeinfo.getXferPort()); m.put("infoPort", datanodeinfo.getInfoPort()); - m.put("ipcPort", datanodeinfo.getIpcPort()); m.put("capacity", datanodeinfo.getCapacity()); @@ -293,7 +294,6 @@ private static Map toJsonMap(final DatanodeInfo datanodeinfo) { m.put("lastUpdate", datanodeinfo.getLastUpdate()); m.put("xceiverCount", datanodeinfo.getXceiverCount()); m.put("networkLocation", datanodeinfo.getNetworkLocation()); - m.put("hostName", datanodeinfo.getHostName()); m.put("adminState", datanodeinfo.getAdminState().name()); return m; } @@ -306,7 +306,9 @@ private static DatanodeInfo toDatanodeInfo(final Map m) { return new DatanodeInfo( (String)m.get("name"), + (String)m.get("hostName"), (String)m.get("storageID"), + (int)(long)(Long)m.get("xferPort"), (int)(long)(Long)m.get("infoPort"), (int)(long)(Long)m.get("ipcPort"), @@ -317,7 +319,6 @@ private static DatanodeInfo toDatanodeInfo(final Map m) { (Long)m.get("lastUpdate"), (int)(long)(Long)m.get("xceiverCount"), (String)m.get("networkLocation"), - (String)m.get("hostName"), AdminStates.valueOf((String)m.get("adminState"))); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index cc45593b29..a0b055642f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -48,10 +48,12 @@ message BlockTokenIdentifierProto { * Identifies a Datanode */ message DatanodeIDProto { - required string name = 1; // hostname:portNumber - required string storageID = 2; // Unique storage id - required uint32 infoPort = 3; // the port where the infoserver is running - required uint32 ipcPort = 4; // the port where the ipc Server is running + required string ipAddr = 1; // IP address + required string hostName = 2; // hostname + required string storageID = 3; // unique storage id + required uint32 xferPort = 4; // data streaming port + required uint32 infoPort = 5; // info server port + required uint32 ipcPort = 6; // ipc server port } /** @@ -73,7 +75,6 @@ message DatanodeInfoProto { optional uint64 lastUpdate = 6 [default = 0]; optional uint32 xceiverCount = 7 [default = 0]; optional string location = 8; - optional string hostName = 9; enum AdminState { NORMAL = 0; DECOMMISSION_INPROGRESS = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java index a920865f42..80503e67ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java @@ -143,7 +143,7 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR Socket sock = null; ExtendedBlock block = testBlock.getBlock(); DatanodeInfo[] nodes = testBlock.getLocations(); - targetAddr = NetUtils.createSocketAddr(nodes[0].getName()); + targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr()); sock = NetUtils.getDefaultSocketFactory(conf).createSocket(); sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); @@ -162,7 +162,7 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR */ public DataNode getDataNode(LocatedBlock testBlock) { DatanodeInfo[] nodes = testBlock.getLocations(); - int ipcport = nodes[0].ipcPort; + int ipcport = nodes[0].getIpcPort(); return cluster.getDataNode(ipcport); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 7854f95f88..a11b927fce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -339,7 +339,7 @@ public static void waitCorruptReplicas(FileSystem fs, FSNamesystem ns, } /* - * Wait up to 20s for the given DN (host:port) to be decommissioned. + * Wait up to 20s for the given DN (IP:port) to be decommissioned */ public static void waitForDecommission(FileSystem fs, String name) throws IOException, InterruptedException, TimeoutException { @@ -351,7 +351,7 @@ public static void waitForDecommission(FileSystem fs, String name) Thread.sleep(1000); DistributedFileSystem dfs = (DistributedFileSystem)fs; for (DatanodeInfo info : dfs.getDataNodeStats()) { - if (name.equals(info.getName())) { + if (name.equals(info.getXferAddr())) { dn = info; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 6717a01dab..35619f8fd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -1041,9 +1041,9 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes, // hadoop.security.token.service.use_ip=true //since the HDFS does things based on IP:port, we need to add the mapping //for IP:port to rackId - String ipAddr = dn.getSelfAddr().getAddress().getHostAddress(); + String ipAddr = dn.getXferAddress().getAddress().getHostAddress(); if (racks != null) { - int port = dn.getSelfAddr().getPort(); + int port = dn.getXferAddress().getPort(); LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack " + racks[i-curDatanodesNum]); StaticMapping.addNodeToRack(ipAddr + ":" + port, @@ -1422,7 +1422,7 @@ public synchronized DataNodeProperties stopDataNode(int i) { DataNodeProperties dnprop = dataNodes.remove(i); DataNode dn = dnprop.datanode; LOG.info("MiniDFSCluster Stopping DataNode " + - dn.getMachineName() + + dn.getDisplayName() + " from a total of " + (dataNodes.size() + 1) + " datanodes."); dn.shutdown(); @@ -1433,16 +1433,13 @@ public synchronized DataNodeProperties stopDataNode(int i) { /* * Shutdown a datanode by name. 
*/ - public synchronized DataNodeProperties stopDataNode(String name) { + public synchronized DataNodeProperties stopDataNode(String dnName) { int i; for (i = 0; i < dataNodes.size(); i++) { DataNode dn = dataNodes.get(i).datanode; - // get BP registration - DatanodeRegistration dnR = - DataNodeTestUtils.getDNRegistrationByMachineName(dn, name); - LOG.info("for name=" + name + " found bp=" + dnR + - "; with dnMn=" + dn.getMachineName()); - if(dnR != null) { + LOG.info("DN name=" + dnName + " found DN=" + dn + + " with name=" + dn.getDisplayName()); + if (dnName.equals(dn.getDatanodeId().getXferAddr())) { break; } } @@ -1472,9 +1469,9 @@ public synchronized boolean restartDataNode(DataNodeProperties dnprop, String[] args = dnprop.dnArgs; Configuration newconf = new HdfsConfiguration(conf); // save cloned config if (keepPort) { - InetSocketAddress addr = dnprop.datanode.getSelfAddr(); - conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" - + addr.getPort()); + InetSocketAddress addr = dnprop.datanode.getXferAddress(); + conf.set(DFS_DATANODE_ADDRESS_KEY, + addr.getAddress().getHostAddress() + ":" + addr.getPort()); } dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf), newconf, args)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java index dea2ba0ba3..327dd7c7ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java @@ -220,7 +220,7 @@ private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl, final DataNode dn = cluster.getDataNode(dninfo.getIpcPort()); corruptBlock(block, dn); LOG.debug("Corrupted block " + block.getBlockName() + " on data node " - + dninfo.getName()); + + dninfo); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java index 6673bf547b..335734d5b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java @@ -158,7 +158,7 @@ public void testSocketCache() throws IOException { testFile.toString(), 0, FILE_SIZE) .getLocatedBlocks().get(0); DataNode dn = util.getDataNode(block); - InetSocketAddress dnAddr = dn.getSelfAddr(); + InetSocketAddress dnAddr = dn.getXferAddress(); // Make some sockets to the DN Socket[] dnSockets = new Socket[CACHE_SIZE]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java index 4d614b8d18..d21592e485 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java @@ -50,7 +50,7 @@ public void testDFSAddressConfig() throws IOException { ArrayList dns = cluster.getDataNodes(); DataNode dn = dns.get(0); - String selfSocketAddr = dn.getSelfAddr().toString(); + String selfSocketAddr = dn.getXferAddress().toString(); System.out.println("DN Self Socket Addr == " + selfSocketAddr); 
assertTrue(selfSocketAddr.contains("/127.0.0.1:")); @@ -75,7 +75,7 @@ public void testDFSAddressConfig() throws IOException { dns = cluster.getDataNodes(); dn = dns.get(0); - selfSocketAddr = dn.getSelfAddr().toString(); + selfSocketAddr = dn.getXferAddress().toString(); System.out.println("DN Self Socket Addr == " + selfSocketAddr); // assert that default self socket address is 127.0.0.1 assertTrue(selfSocketAddr.contains("/127.0.0.1:")); @@ -100,7 +100,7 @@ public void testDFSAddressConfig() throws IOException { dns = cluster.getDataNodes(); dn = dns.get(0); - selfSocketAddr = dn.getSelfAddr().toString(); + selfSocketAddr = dn.getXferAddress().toString(); System.out.println("DN Self Socket Addr == " + selfSocketAddr); // assert that default self socket address is 0.0.0.0 assertTrue(selfSocketAddr.contains("/0.0.0.0:")); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index 1e39b9a40d..f3b980f5fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -334,7 +334,7 @@ private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) { LocatedBlock badLocatedBlock = new LocatedBlock( goodLocatedBlock.getBlock(), new DatanodeInfo[] { - new DatanodeInfo(new DatanodeID("255.255.255.255:234")) + new DatanodeInfo(new DatanodeID("255.255.255.255", 234)) }, goodLocatedBlock.getStartOffset(), false); @@ -608,7 +608,7 @@ public void testGetFileChecksum() throws Exception { cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE) .getLocatedBlocks(); final DatanodeInfo first = locatedblocks.get(0).getLocations()[0]; - cluster.stopDataNode(first.getName()); + cluster.stopDataNode(first.getXferAddr()); //get checksum again final FileChecksum cs2 = fs.getFileChecksum(p); @@ -629,7 +629,7 @@ public void testClientDNProtocolTimeout() throws IOException { final InetSocketAddress addr = NetUtils.getConnectAddress(server); DatanodeID fakeDnId = new DatanodeID( - "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort()); + "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort()); ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L)); LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index 567fbabddd..ba92c569d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -52,7 +52,6 @@ public class TestDFSUpgradeFromImage extends TestCase { .getLog(TestDFSUpgradeFromImage.class); private static File TEST_ROOT_DIR = new File(MiniDFSCluster.getBaseDirectory()); - private static final String HADOOP14_IMAGE = "hadoop-14-dfs-dir.tgz"; private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt"; private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz"; @@ -68,10 +67,6 @@ private static class ReferenceFileInfo { boolean printChecksum = false; - public void unpackStorage() throws IOException { - unpackStorage(HADOOP14_IMAGE); - } - private void unpackStorage(String 
tarFileName) throws IOException { String tarFile = System.getProperty("test.cache.data", "build/test/cache") @@ -227,14 +222,6 @@ public void testFailOnPreUpgradeImage() throws IOException { } } - /** - * Test upgrade from an 0.14 image - */ - public void testUpgradeFromRel14Image() throws IOException { - unpackStorage(); - upgradeAndVerify(); - } - /** * Test upgrade from 0.22 image */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index af0bf6a19d..71ad9afa95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -128,8 +128,7 @@ private void sendRecvData(String testDescription, if (eofExpected) { throw new IOException("Did not recieve IOException when an exception " + - "is expected while reading from " + - datanode.getName()); + "is expected while reading from " + datanode); } byte[] needed = recvBuf.toByteArray(); @@ -215,7 +214,7 @@ private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long n String poolId = cluster.getNamesystem().getBlockPoolId(); datanode = DataNodeTestUtils.getDNRegistrationForBP( cluster.getDataNodes().get(0), poolId); - dnAddr = NetUtils.createSocketAddr(datanode.getName()); + dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr()); FileSystem fileSys = cluster.getFileSystem(); /* Test writing to finalized replicas */ @@ -349,7 +348,7 @@ private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long n new InetSocketAddress("localhost", cluster.getNameNodePort()), conf); datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0]; - dnAddr = NetUtils.createSocketAddr(datanode.getName()); + dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr()); FileSystem fileSys = cluster.getFileSystem(); int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java index 288d432d84..c1b775939b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java @@ -269,7 +269,7 @@ private void blockCorruptionRecoveryPolicy(int numDataNodes, if (corruptReplica(block, i)) { corruptReplicasDNIDs[j++] = i; LOG.info("successfully corrupted block " + block + " on node " - + i + " " + cluster.getDataNodes().get(i).getSelfAddr()); + + i + " " + cluster.getDataNodes().get(i).getDisplayName()); } } @@ -281,7 +281,7 @@ private void blockCorruptionRecoveryPolicy(int numDataNodes, for (int i = numCorruptReplicas - 1; i >= 0 ; i--) { LOG.info("restarting node with corrupt replica: position " + i + " node " + corruptReplicasDNIDs[i] + " " - + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getSelfAddr()); + + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getDisplayName()); cluster.restartDataNode(corruptReplicasDNIDs[i]); } @@ -343,7 +343,7 @@ public void testTruncatedBlockReport() throws Exception { if (!changeReplicaLength(block, 0, -1)) { throw new IOException( "failed to find or change length of replica on node 0 " - + 
cluster.getDataNodes().get(0).getSelfAddr()); + + cluster.getDataNodes().get(0).getDisplayName()); } } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java index e271bb95a8..fbe98dce5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java @@ -389,9 +389,8 @@ private void simpleTest(int datanodeToKill) throws IOException { cluster.stopDataNode(victim); } else { int victim = datanodeToKill; - System.out.println("SimpleTest stopping datanode " + - targets[victim].getName()); - cluster.stopDataNode(targets[victim].getName()); + System.out.println("SimpleTest stopping datanode " + targets[victim]); + cluster.stopDataNode(targets[victim].getXferAddr()); } System.out.println("SimpleTest stopping datanode complete"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 6997ebc2e7..877ad1841c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -151,27 +151,27 @@ private String checkFile(FileSystem fileSys, Path name, int repl, int hasdown = 0; DatanodeInfo[] nodes = blk.getLocations(); for (int j = 0; j < nodes.length; j++) { // for each replica - if (isNodeDown && nodes[j].getName().equals(downnode)) { + if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) { hasdown++; //Downnode must actually be decommissioned if (!nodes[j].isDecommissioned()) { return "For block " + blk.getBlock() + " replica on " + - nodes[j].getName() + " is given as downnode, " + + nodes[j] + " is given as downnode, " + "but is not decommissioned"; } //Decommissioned node (if any) should only be last node in list. if (j != nodes.length - 1) { return "For block " + blk.getBlock() + " decommissioned node " - + nodes[j].getName() + " was not last node in list: " + + nodes[j] + " was not last node in list: " + (j + 1) + " of " + nodes.length; } LOG.info("Block " + blk.getBlock() + " replica on " + - nodes[j].getName() + " is decommissioned."); + nodes[j] + " is decommissioned."); } else { //Non-downnodes must not be decommissioned if (nodes[j].isDecommissioned()) { return "For block " + blk.getBlock() + " replica on " + - nodes[j].getName() + " is unexpectedly decommissioned"; + nodes[j] + " is unexpectedly decommissioned"; } } } @@ -215,7 +215,7 @@ private DatanodeInfo decommissionNode(int nnIndex, found = true; } } - String nodename = info[index].getName(); + String nodename = info[index].getXferAddr(); LOG.info("Decommissioning node: " + nodename); // write nodename into the exclude file. 
@@ -236,7 +236,7 @@ private DatanodeInfo decommissionNode(int nnIndex, /* stop decommission of the datanode and wait for each to reach the NORMAL state */ private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException { - LOG.info("Recommissioning node: " + decommissionedNode.getName()); + LOG.info("Recommissioning node: " + decommissionedNode); writeConfigFile(excludeFile, null); refreshNodes(cluster.getNamesystem(), conf); waitNodeState(decommissionedNode, AdminStates.NORMAL); @@ -373,7 +373,7 @@ private void testDecommission(int numNamenodes, int numDatanodes) DFSClient client = getDfsClient(cluster.getNameNode(i), conf); assertEquals("All datanodes must be alive", numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length); - assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes)); + assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes)); cleanupFile(fileSys, file1); } } @@ -414,7 +414,7 @@ private void testRecommission(int numNamenodes, int numDatanodes) DFSClient client = getDfsClient(cluster.getNameNode(i), conf); assertEquals("All datanodes must be alive", numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length); - assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes)); + assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes)); // stop decommission and check if the new replicas are removed recomissionNode(decomNode); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java index a9000ed5fc..87848f33a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java @@ -844,7 +844,7 @@ public void testLeaseExpireHardLimit() throws Exception { LocatedBlock locatedblock = locations.getLocatedBlocks().get(0); int successcount = 0; for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) { - DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort); + DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort()); ExtendedBlock blk = locatedblock.getBlock(); Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock( blk.getBlockPoolId(), blk.getBlockId()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java index 3b617c71d5..7e2630ec30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java @@ -147,7 +147,7 @@ private void testDataNodeRedirect(Path path) throws IOException { // if we were redirected to the right DN. 
BlockLocation[] locations = hdfs.getFileBlockLocations(path, 0, 10); - String locationName = locations[0].getNames()[0]; + String xferAddr = locations[0].getNames()[0]; // Connect to the NN to get redirected URL u = hftpFs.getNamenodeURL( @@ -164,7 +164,7 @@ private void testDataNodeRedirect(Path path) throws IOException { for (DataNode node : cluster.getDataNodes()) { DatanodeRegistration dnR = DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId); - if (dnR.getName().equals(locationName)) { + if (dnR.getXferAddr().equals(xferAddr)) { checked = true; assertEquals(dnR.getInfoPort(), conn.getURL().getPort()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java index 3e90665590..65a0465bd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.net.NetUtils; @@ -58,8 +59,9 @@ public static void setUp() throws Exception { cluster = (new MiniDFSCluster.Builder(conf)) .numDataNodes(1).build(); nnAddress = cluster.getNameNode().getNameNodeAddress(); - dnAddress = new InetSocketAddress(cluster.getDataNodes().get(0) - .getDatanodeId().getHost(), cluster.getDataNodes().get(0).getIpcPort()); + DataNode dn = cluster.getDataNodes().get(0); + dnAddress = new InetSocketAddress(dn.getDatanodeId().getIpAddr(), + dn.getIpcPort()); } @AfterClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java index e211d20977..c05ccee7ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java @@ -117,7 +117,7 @@ private void checkFile(FileSystem fileSys, Path name, int repl) isOnSameRack = false; isNotOnSameRack = false; for (int i = 0; i < datanodes.length-1; i++) { - LOG.info("datanode "+ i + ": "+ datanodes[i].getName()); + LOG.info("datanode "+ i + ": "+ datanodes[i]); boolean onRack = false; for( int j=i+1; j expected, @Test public void testConvertLocatedBlock() { DatanodeInfo [] dnInfos = new DatanodeInfo[3]; - dnInfos[0] = new DatanodeInfo("host0", "0", 5000, 5001, 20000, 10001, 9999, - 59, 69, 32, "local", "host0", AdminStates.DECOMMISSION_INPROGRESS); - dnInfos[1] = new DatanodeInfo("host1", "1", 5000, 5001, 20000, 10001, 9999, - 59, 69, 32, "local", "host1", AdminStates.DECOMMISSIONED); - dnInfos[2] = new DatanodeInfo("host2", "2", 5000, 5001, 20000, 10001, 9999, - 59, 69, 32, "local", "host1", AdminStates.NORMAL); + dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999, + 59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS); + dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 
20000, 10001, 9999, + 59, 69, 32, "local", AdminStates.DECOMMISSIONED); + dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999, + 59, 69, 32, "local", AdminStates.NORMAL); LocatedBlock lb = new LocatedBlock( new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false); LocatedBlockProto lbProto = PBHelper.convert(lb); @@ -423,7 +423,7 @@ public void testConvertLocatedBlock() { @Test public void testConvertDatanodeRegistration() { - DatanodeID dnId = new DatanodeID("host", "xyz", 1, 0); + DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0); BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) }; ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10, getBlockKey(1), keys); @@ -431,8 +431,8 @@ public void testConvertDatanodeRegistration() { new StorageInfo(), expKeys); DatanodeRegistrationProto proto = PBHelper.convert(reg); DatanodeRegistration reg2 = PBHelper.convert(proto); - compare(reg.storageInfo, reg2.storageInfo); - compare(reg.exportedKeys, reg2.exportedKeys); + compare(reg.getStorageInfo(), reg2.getStorageInfo()); + compare(reg.getExportedKeys(), reg2.getExportedKeys()); compare((DatanodeID)reg, (DatanodeID)reg2); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java index 01725b1bce..ea335d2612 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java @@ -279,8 +279,8 @@ public void testBlockTokenRpcLeak() throws Exception { server.start(); final InetSocketAddress addr = NetUtils.getConnectAddress(server); - DatanodeID fakeDnId = new DatanodeID("localhost:" + addr.getPort(), - "fake-storage", 0, addr.getPort()); + DatanodeID fakeDnId = new DatanodeID("localhost", + "localhost", "fake-storage", addr.getPort(), 0, addr.getPort()); ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L)); LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java index 1ec75112f8..41dbf1368c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java @@ -165,7 +165,7 @@ public static void noticeDeadDatanode(NameNode nn, String dnName) { DatanodeDescriptor[] dnds = hbm.getDatanodes(); DatanodeDescriptor theDND = null; for (DatanodeDescriptor dnd : dnds) { - if (dnd.getName().equals(dnName)) { + if (dnd.getXferAddr().equals(dnName)) { theDND = dnd; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index dbecfe7f78..0be519dd46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -48,12 +48,12 @@ public class TestBlockManager { private final List nodes = ImmutableList.of( - new DatanodeDescriptor(new DatanodeID("h1:5020"), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h2:5020"), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h3:5020"), "/rackA"), - new DatanodeDescriptor(new DatanodeID("h4:5020"), "/rackB"), - new DatanodeDescriptor(new DatanodeID("h5:5020"), "/rackB"), - new DatanodeDescriptor(new DatanodeID("h6:5020"), "/rackB") + new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"), + new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"), + new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"), + new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB") ); private final List rackA = nodes.subList(0, 3); private final List rackB = nodes.subList(3, 6); @@ -272,7 +272,7 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception { // the block is still under-replicated. Add a new node. This should allow // the third off-rack replica. - DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7:5020"), "/rackC"); + DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC"); addNodes(ImmutableList.of(rackCNode)); try { DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java index 0a25ef7983..c9436e4f33 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java @@ -137,7 +137,7 @@ private static void tryRead(Configuration conf, LocatedBlock lblock, ExtendedBlock block = lblock.getBlock(); try { DatanodeInfo[] nodes = lblock.getLocations(); - targetAddr = NetUtils.createSocketAddr(nodes[0].getName()); + targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr()); s = NetUtils.getDefaultSocketFactory(conf).createSocket(); s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java index d34cf1c4c6..08607093db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java @@ -28,13 +28,13 @@ public class TestHost2NodesMap { private Host2NodesMap map = new Host2NodesMap(); private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"), + new 
DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"), }; private final DatanodeDescriptor NULL_NODE = null; - private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"), + private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040), "/d1/r4"); @Before @@ -56,24 +56,11 @@ public void testContains() throws Exception { @Test public void testGetDatanodeByHost() throws Exception { - assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]); - assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]); - DatanodeDescriptor node = map.getDatanodeByHost("h3"); + assertTrue(map.getDatanodeByHost("ip1")==dataNodes[0]); + assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]); + DatanodeDescriptor node = map.getDatanodeByHost("ip3"); assertTrue(node==dataNodes[2] || node==dataNodes[3]); - assertTrue(null==map.getDatanodeByHost("h4")); - } - - @Test - public void testGetDatanodeByName() throws Exception { - assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]); - assertTrue(map.getDatanodeByName("h1:5030")==null); - assertTrue(map.getDatanodeByName("h2:5020")==dataNodes[1]); - assertTrue(map.getDatanodeByName("h2:5030")==null); - assertTrue(map.getDatanodeByName("h3:5020")==dataNodes[2]); - assertTrue(map.getDatanodeByName("h3:5030")==dataNodes[3]); - assertTrue(map.getDatanodeByName("h3:5040")==null); - assertTrue(map.getDatanodeByName("h4")==null); - assertTrue(map.getDatanodeByName(null)==null); + assertTrue(null==map.getDatanodeByHost("ip4")); } @Test @@ -81,21 +68,21 @@ public void testRemove() throws Exception { assertFalse(map.remove(NODE)); assertTrue(map.remove(dataNodes[0])); - assertTrue(map.getDatanodeByHost("h1")==null); - assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]); - DatanodeDescriptor node = map.getDatanodeByHost("h3"); + assertTrue(map.getDatanodeByHost("ip1")==null); + assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]); + DatanodeDescriptor node = map.getDatanodeByHost("ip3"); assertTrue(node==dataNodes[2] || node==dataNodes[3]); - assertTrue(null==map.getDatanodeByHost("h4")); + assertTrue(null==map.getDatanodeByHost("ip4")); assertTrue(map.remove(dataNodes[2])); - assertTrue(map.getDatanodeByHost("h1")==null); - assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]); - assertTrue(map.getDatanodeByHost("h3")==dataNodes[3]); + assertTrue(map.getDatanodeByHost("ip1")==null); + assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]); + assertTrue(map.getDatanodeByHost("ip3")==dataNodes[3]); assertTrue(map.remove(dataNodes[3])); - assertTrue(map.getDatanodeByHost("h1")==null); - assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]); - assertTrue(map.getDatanodeByHost("h3")==null); + assertTrue(map.getDatanodeByHost("ip1")==null); + assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]); + assertTrue(map.getDatanodeByHost("ip3")==null); assertFalse(map.remove(NULL_NODE)); assertTrue(map.remove(dataNodes[1])); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java index d47f110344..2c92b66bb0 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java @@ -78,11 +78,11 @@ public void testNodeCount() throws Exception { // bring down first datanode DatanodeDescriptor datanode = datanodes[0]; - DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName()); + DataNodeProperties dnprop = cluster.stopDataNode(datanode.getXferAddr()); // make sure that NN detects that the datanode is down BlockManagerTestUtil.noticeDeadDatanode( - cluster.getNameNode(), datanode.getName()); + cluster.getNameNode(), datanode.getXferAddr()); // the block will be replicated DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR); @@ -112,10 +112,10 @@ public void testNodeCount() throws Exception { assertTrue(nonExcessDN!=null); // bring down non excessive datanode - dnprop = cluster.stopDataNode(nonExcessDN.getName()); + dnprop = cluster.stopDataNode(nonExcessDN.getXferAddr()); // make sure that NN detects that the datanode is down BlockManagerTestUtil.noticeDeadDatanode( - cluster.getNameNode(), nonExcessDN.getName()); + cluster.getNameNode(), nonExcessDN.getXferAddr()); // The block should be replicated initializeTimeout(TIMEOUT); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java index f7a5c0e065..cd4dfb94a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java @@ -91,9 +91,9 @@ public void testProcesOverReplicateBlock() throws IOException { synchronized(hm) { // set live datanode's remaining space to be 0 // so they will be chosen to be deleted when over-replication occurs - String corruptMachineName = corruptDataNode.getName(); + String corruptMachineName = corruptDataNode.getXferAddr(); for (DatanodeDescriptor datanode : hm.getDatanodes()) { - if (!corruptMachineName.equals(datanode.getName())) { + if (!corruptMachineName.equals(datanode.getXferAddr())) { datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0, 0); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java index 16977bb820..f73245860a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java @@ -40,7 +40,7 @@ public class TestPendingDataNodeMessages { private final Block block2Gs1 = new Block(2, 0, 1); private final DatanodeDescriptor fakeDN = new DatanodeDescriptor( - new DatanodeID("fake")); + new DatanodeID("fake", 100)); @Test public void testQueues() { @@ -56,8 +56,8 @@ public void testQueues() { Queue q = msgs.takeBlockQueue(block1Gs2DifferentInstance); assertEquals( - "ReportedBlockInfo [block=blk_1_1, dn=fake, reportedState=FINALIZED]," + - "ReportedBlockInfo [block=blk_1_2, dn=fake, reportedState=FINALIZED]", + 
"ReportedBlockInfo [block=blk_1_1, dn=fake:100, reportedState=FINALIZED]," + + "ReportedBlockInfo [block=blk_1_2, dn=fake:100, reportedState=FINALIZED]", Joiner.on(",").join(q)); assertEquals(0, msgs.count()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index f5926281ee..49925ab885 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -52,16 +52,16 @@ public class TestReplicationPolicy { private static final String filename = "/dummyfile.txt"; private static final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h4:5020"), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d2/r3"), - new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3") + new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"), + new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3") }; private final static DatanodeDescriptor NODE = - new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r4"); + new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4"); static { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java deleted file mode 100644 index 25dce520a6..0000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java +++ /dev/null @@ -1,267 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ -package org.apache.hadoop.hdfs.server.common; - -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.LAYOUT_VERSION; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; -import org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode; -import org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode; -import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand; -import org.apache.hadoop.hdfs.tools.DFSAdmin; -import org.apache.hadoop.test.GenericTestUtils; - -import org.junit.Test; -import static org.junit.Assert.*; - -/** - */ -public class TestDistributedUpgrade { - private static final Log LOG = LogFactory.getLog(TestDistributedUpgrade.class); - private Configuration conf; - private int testCounter = 0; - private MiniDFSCluster cluster = null; - private String clusterId = "testClsterId"; - - /** - * Writes an INFO log message containing the parameters. - */ - void log(String label, int numDirs) { - LOG.info("============================================================"); - LOG.info("***TEST " + (testCounter++) + "*** " - + label + ":" - + " numDirs="+numDirs); - } - - /** - * Attempts to start a NameNode with the given operation. Starting - * the NameNode should throw an exception. - */ - void startNameNodeShouldFail(StartupOption operation, - String exceptionSubstring) { - try { - //cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).startupOption(operation).build(); // should fail - // we set manage dirs to true as NN has to start from untar'ed image with - // nn dirs set to name1 and name2 - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) - .format(false) - .clusterId(clusterId) - .startupOption(operation) - .build(); // should fail - throw new AssertionError("NameNode should have failed to start"); - } catch (Exception expected) { - GenericTestUtils.assertExceptionContains( - exceptionSubstring, expected); - } - } - - /** - * Attempts to start a DataNode with the given operation. Starting - * the DataNode should throw an exception. 
- */ - void startDataNodeShouldFail(StartupOption operation) { - try { - cluster.startDataNodes(conf, 1, false, operation, null); // should fail - throw new AssertionError("DataNode should have failed to start"); - } catch (Exception expected) { - // expected - assertFalse(cluster.isDataNodeUp()); - } - } - - /** - */ - @Test(timeout=300000) // 5 min timeout - public void testDistributedUpgrade() throws Exception { - int numDirs = 1; - TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage(); - testImg.unpackStorage(); - int numDNs = testImg.numDataNodes; - - // register new upgrade objects (ignore all existing) - UpgradeObjectCollection.initialize(); - UpgradeObjectCollection.registerUpgrade(new UO_Datanode1()); - UpgradeObjectCollection.registerUpgrade(new UO_Namenode1()); - UpgradeObjectCollection.registerUpgrade(new UO_Datanode2()); - UpgradeObjectCollection.registerUpgrade(new UO_Namenode2()); - UpgradeObjectCollection.registerUpgrade(new UO_Datanode3()); - UpgradeObjectCollection.registerUpgrade(new UO_Namenode3()); - - conf = new HdfsConfiguration(); - if (System.getProperty("test.build.data") == null) { // to test to be run outside of ant - System.setProperty("test.build.data", "build/test/data"); - } - conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off - - log("NameNode start in regular mode when dustributed upgrade is required", numDirs); - startNameNodeShouldFail(StartupOption.REGULAR, "contains an old layout version"); - - log("Start NameNode only distributed upgrade", numDirs); - // cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false) - // .startupOption(StartupOption.UPGRADE).build(); - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) - .format(false) - .clusterId(clusterId) - .startupOption(StartupOption.UPGRADE) - .build(); - cluster.shutdown(); - - log("NameNode start in regular mode when dustributed upgrade has been started", numDirs); - startNameNodeShouldFail(StartupOption.REGULAR, - "Previous distributed upgrade was not completed"); - - log("NameNode rollback to the old version that require a dustributed upgrade", numDirs); - startNameNodeShouldFail(StartupOption.ROLLBACK, - "Cannot rollback to storage version -7 using this version"); - - log("Normal distributed upgrade for the cluster", numDirs); - cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(numDNs) - .format(false) - .clusterId(clusterId) - .startupOption(StartupOption.UPGRADE) - .build(); - DFSAdmin dfsAdmin = new DFSAdmin(); - dfsAdmin.setConf(conf); - dfsAdmin.run(new String[] {"-safemode", "wait"}); - cluster.shutdown(); - - // it should be ok to start in regular mode - log("NameCluster regular startup after the upgrade", numDirs); - cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(numDNs) - .clusterId(clusterId) - .format(false) - .startupOption(StartupOption.REGULAR) - .build(); - - cluster.waitActive(); - cluster.shutdown(); - } - - public static void main(String[] args) throws Exception { - new TestDistributedUpgrade().testDistributedUpgrade(); - LOG.info("=== DONE ==="); - } -} - -/** - * Upgrade object for data-node - */ -class UO_Datanode extends UpgradeObjectDatanode { - int version; - - UO_Datanode(int v) { - this.status = (short)0; - version = v; - } - - public int getVersion() { - return version; - } - - public void doUpgrade() throws IOException { - this.status = (short)100; - DatanodeProtocol nn = getNamenode(); - nn.processUpgradeCommand( - new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS, - 
getVersion(), getUpgradeStatus())); - } - - public UpgradeCommand startUpgrade() throws IOException { - return null; - } -} - -/** - * Upgrade object for name-node - */ -class UO_Namenode extends UpgradeObjectNamenode { - int version; - - UO_Namenode(int v) { - status = (short)0; - version = v; - } - - public int getVersion() { - return version; - } - - synchronized public UpgradeCommand processUpgradeCommand( - UpgradeCommand command) throws IOException { - switch(command.getAction()) { - case UpgradeCommand.UC_ACTION_REPORT_STATUS: - this.status += command.getCurrentStatus()/8; // 4 reports needed - break; - default: - this.status++; - } - return null; - } - - public UpgradeCommand completeUpgrade() throws IOException { - return null; - } -} - -class UO_Datanode1 extends UO_Datanode { - UO_Datanode1() { - super(LAYOUT_VERSION+1); - } -} - -class UO_Namenode1 extends UO_Namenode { - UO_Namenode1() { - super(LAYOUT_VERSION+1); - } -} - -class UO_Datanode2 extends UO_Datanode { - UO_Datanode2() { - super(LAYOUT_VERSION+2); - } -} - -class UO_Namenode2 extends UO_Namenode { - UO_Namenode2() { - super(LAYOUT_VERSION+2); - } -} - -class UO_Datanode3 extends UO_Datanode { - UO_Datanode3() { - super(LAYOUT_VERSION+3); - } -} - -class UO_Namenode3 extends UO_Namenode { - UO_Namenode3() { - super(LAYOUT_VERSION+3); - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java index 726c5d3ce3..f05bf653c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java @@ -36,12 +36,7 @@ * Utility class for accessing package-private DataNode information during tests. 
* */ -public class DataNodeTestUtils { - public static DatanodeRegistration - getDNRegistrationByMachineName(DataNode dn, String mName) { - return dn.getDNRegistrationByMachineName(mName); - } - +public class DataNodeTestUtils { public static DatanodeRegistration getDNRegistrationForBP(DataNode dn, String bpid) throws IOException { return dn.getDNRegistrationForBP(bpid); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 8d9ee07ea0..e69b1c3021 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -383,7 +383,7 @@ private SimulatedBPStorage getBPStorage(String bpid) throws IOException { public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration conf) { if (storage != null) { - storage.createStorageID(datanode.getPort()); + storage.createStorageID(datanode.getXferPort()); this.storageId = storage.getStorageID(); } else { this.storageId = "unknownStorageId" + new Random().nextInt(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index 7531f8e3d4..d575d44efd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -197,9 +197,9 @@ private void testSyncReplicas(ReplicaRecoveryInfo replica1, locs, RECOVERY_ID); ArrayList syncList = new ArrayList(2); BlockRecord record1 = new BlockRecord( - new DatanodeID("xx", "yy", 44, 55), dn1, replica1); + new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1); BlockRecord record2 = new BlockRecord( - new DatanodeID("aa", "bb", 11, 22), dn2, replica2); + new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2); syncList.add(record1); syncList.add(record2); @@ -402,7 +402,7 @@ public void testRWRReplicas() throws IOException { private Collection initRecoveringBlocks() throws IOException { Collection blocks = new ArrayList(1); DatanodeInfo mockOtherDN = new DatanodeInfo( - new DatanodeID("127.0.0.1", "storage-1234", 0, 0)); + new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0)); DatanodeInfo[] locs = new DatanodeInfo[] { new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())), mockOtherDN }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java index 07c2425835..78d20ad655 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java @@ -162,16 +162,16 @@ public void testBlockReplacement() throws IOException, TimeoutException { // start to replace the block // case 1: proxySource does not contain the block - LOG.info("Testcase 1: Proxy " + newNode.getName() + LOG.info("Testcase 1: Proxy " + newNode + " does not contain the block " 
+ b); assertFalse(replaceBlock(b, source, newNode, proxies.get(0))); // case 2: destination already contains the block - LOG.info("Testcase 2: Destination " + proxies.get(1).getName() + LOG.info("Testcase 2: Destination " + proxies.get(1) + " contains the block " + b); assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1))); // case 3: correct case - LOG.info("Testcase 3: Source=" + source.getName() + " Proxy=" + - proxies.get(0).getName() + " Destination=" + newNode.getName() ); + LOG.info("Testcase 3: Source=" + source + " Proxy=" + + proxies.get(0) + " Destination=" + newNode ); assertTrue(replaceBlock(b, source, proxies.get(0), newNode)); // after cluster has time to resolve the over-replication, // block locations should contain two proxies and newNode @@ -181,7 +181,7 @@ public void testBlockReplacement() throws IOException, TimeoutException { DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client); // case 4: proxies.get(0) is not a valid del hint // expect either source or newNode replica to be deleted instead - LOG.info("Testcase 4: invalid del hint " + proxies.get(0).getName() ); + LOG.info("Testcase 4: invalid del hint " + proxies.get(0) ); assertTrue(replaceBlock(b, proxies.get(0), proxies.get(1), source)); // after cluster has time to resolve the over-replication, // block locations should contain two proxies, @@ -222,7 +222,7 @@ private void checkBlocks(DatanodeInfo[] includeNodes, String fileName, for (DatanodeInfo node : includeNodes) { if (!nodeLocations.contains(node) ) { notDone=true; - LOG.info("Block is not located at " + node.getName() ); + LOG.info("Block is not located at " + node ); break; } } @@ -231,9 +231,9 @@ private void checkBlocks(DatanodeInfo[] includeNodes, String fileName, String expectedNodesList = ""; String currentNodesList = ""; for (DatanodeInfo dn : includeNodes) - expectedNodesList += dn.getName() + ", "; + expectedNodesList += dn + ", "; for (DatanodeInfo dn : nodes) - currentNodesList += dn.getName() + ", "; + currentNodesList += dn + ", "; LOG.info("Expected replica nodes are: " + expectedNodesList); LOG.info("Current actual replica nodes are: " + currentNodesList); throw new TimeoutException( @@ -254,7 +254,7 @@ private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source, DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException { Socket sock = new Socket(); sock.connect(NetUtils.createSocketAddr( - destination.getName()), HdfsServerConstants.READ_TIMEOUT); + destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT); sock.setKeepAlive(true); // sendRequest DataOutputStream out = new DataOutputStream(sock.getOutputStream()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java index 985900030e..f2cb248ae1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java @@ -679,8 +679,9 @@ private void startDNandWait(Path filePath, boolean waitReplicas) assertEquals(datanodes.size(), 2); if(LOG.isDebugEnabled()) { + int lastDn = datanodes.size() - 1; LOG.debug("New datanode " - + cluster.getDataNodes().get(datanodes.size() - 1).getMachineName() + + cluster.getDataNodes().get(lastDn).getDisplayName() + " has been started"); } if (waitReplicas) DFSTestUtil.waitReplication(fs, 
filePath, REPL_FACTOR); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java index 20a16c3166..351a61cc45 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java @@ -161,8 +161,8 @@ public void testFedSingleNN() throws IOException { assertEquals("number of volumes is wrong", 2, volInfos.size()); for (BPOfferService bpos : dn.getAllBpOs()) { - LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration.name + "; sid=" - + bpos.bpRegistration.storageID + "; nna=" + + LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid=" + + bpos.bpRegistration.getStorageID() + "; nna=" + getNNSocketAddress(bpos)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java index e66dc56900..04aa71d8b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java @@ -270,7 +270,7 @@ private void accessBlock(DatanodeInfo datanode, LocatedBlock lblock) Socket s = null; ExtendedBlock block = lblock.getBlock(); - targetAddr = NetUtils.createSocketAddr(datanode.getName()); + targetAddr = NetUtils.createSocketAddr(datanode.getXferAddr()); s = NetUtils.getDefaultSocketFactory(conf).createSocket(); s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java index 2ff075c8ad..080f47ca9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java @@ -183,7 +183,7 @@ public void testDfsAdminDeleteBlockPool() throws Exception { Assert.assertEquals(1, dn1.getAllBpOs().length); DFSAdmin admin = new DFSAdmin(nn1Conf); - String dn1Address = dn1.getSelfAddr().getHostName()+":"+dn1.getIpcPort(); + String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort(); String[] args = { "-deleteBlockPool", dn1Address, bpid2 }; int ret = admin.run(args); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java index 74434e5a44..7a83bf3408 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java @@ -136,7 +136,7 @@ public void testReplicationError() throws Exception { DataNode datanode = cluster.getDataNodes().get(sndNode); // replicate the block to the second datanode - InetSocketAddress target = 
datanode.getSelfAddr(); + InetSocketAddress target = datanode.getXferAddress(); Socket s = new Socket(target.getAddress(), target.getPort()); // write the header. DataOutputStream out = new DataOutputStream(s.getOutputStream()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java index cecf2eddbb..9f96eac70f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java @@ -348,7 +348,7 @@ public void testInterDNProtocolTimeout() throws Throwable { final InetSocketAddress addr = NetUtils.getConnectAddress(server); DatanodeID fakeDnId = new DatanodeID( - "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort()); + "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort()); DatanodeInfo dInfo = new DatanodeInfo(fakeDnId); InterDatanodeProtocol proxy = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 61d7516863..73884c5b1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -766,28 +766,33 @@ private static class TinyDatanode implements Comparable<String> { long[] blockReportList; /** - * Get data-node in the form - * <host name> : <port> - * where port is a 6 digit integer. + * Return a 6 digit integer port. * This is necessary in order to provide lexicographic ordering. * Host names are all the same, the ordering goes by port numbers.
*/ - private static String getNodeName(int port) throws IOException { - String machineName = DNS.getDefaultHost("default", "default"); - String sPort = String.valueOf(100000 + port); - if(sPort.length() > 6) - throw new IOException("Too many data-nodes."); - return machineName + ":" + sPort; + private static int getNodePort(int num) throws IOException { + int port = 100000 + num; + if (String.valueOf(port).length() > 6) { + throw new IOException("Too many data-nodes"); + } + return port; } TinyDatanode(int dnIdx, int blockCapacity) throws IOException { - dnRegistration = new DatanodeRegistration(getNodeName(dnIdx)); + String hostName = DNS.getDefaultHost("default", "default"); + dnRegistration = new DatanodeRegistration(hostName); + dnRegistration.setXferPort(getNodePort(dnIdx)); + dnRegistration.setHostName(hostName); this.blocks = new ArrayList(blockCapacity); this.nrBlocks = 0; } - String getName() { - return dnRegistration.getName(); + public String toString() { + return dnRegistration.toString(); + } + + String getXferAddr() { + return dnRegistration.getXferAddr(); } void register() throws IOException { @@ -850,8 +855,8 @@ long[] getBlockReportList() { return blockReportList; } - public int compareTo(String name) { - return getName().compareTo(name); + public int compareTo(String xferAddr) { + return getXferAddr().compareTo(xferAddr); } /** @@ -889,10 +894,12 @@ private int transferBlocks( Block blocks[], for(int t = 0; t < blockTargets.length; t++) { DatanodeInfo dnInfo = blockTargets[t]; DatanodeRegistration receivedDNReg; - receivedDNReg = new DatanodeRegistration(dnInfo.getName()); + receivedDNReg = new DatanodeRegistration(dnInfo.getIpAddr()); receivedDNReg.setStorageInfo( new DataStorage(nsInfo, dnInfo.getStorageID())); + receivedDNReg.setXferPort(dnInfo.getXferPort()); receivedDNReg.setInfoPort(dnInfo.getInfoPort()); + receivedDNReg.setIpcPort(dnInfo.getIpcPort()); ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo( blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, @@ -977,10 +984,10 @@ void generateInputs(int[] ignore) throws IOException { for(int idx=0; idx < nrDatanodes; idx++) { datanodes[idx] = new TinyDatanode(idx, blocksPerReport); datanodes[idx].register(); - assert datanodes[idx].getName().compareTo(prevDNName) > 0 + assert datanodes[idx].getXferAddr().compareTo(prevDNName) > 0 : "Data-nodes must be sorted lexicographically."; datanodes[idx].sendHeartbeat(); - prevDNName = datanodes[idx].getName(); + prevDNName = datanodes[idx].getXferAddr(); } // create files @@ -1010,7 +1017,7 @@ private ExtendedBlock addBlocks(String fileName, String clientName) LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName, prevBlock, null); prevBlock = loc.getBlock(); for(DatanodeInfo dnInfo : loc.getLocations()) { - int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName()); + int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr()); datanodes[dnIdx].addBlock(loc.getBlock().getLocalBlock()); ReceivedDeletedBlockInfo[] rdBlocks = { new ReceivedDeletedBlockInfo( loc.getBlock().getLocalBlock(), @@ -1165,9 +1172,9 @@ private void decommissionNodes() throws IOException { for(int i=0; i < nodesToDecommission; i++) { TinyDatanode dn = blockReportObject.datanodes[nrDatanodes-1-i]; numDecommissionedBlocks += dn.nrBlocks; - excludeFile.write(dn.getName().getBytes()); + excludeFile.write(dn.getXferAddr().getBytes()); excludeFile.write('\n'); - LOG.info("Datanode " + dn.getName() + " is decommissioned."); + LOG.info("Datanode " + dn + " is 
decommissioned."); } excludeFile.close(); nameNodeProto.refreshNodes(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java index 9934a6f534..463cd952fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java @@ -156,7 +156,7 @@ private String decommissionNode(FSNamesystem namesystem, throws IOException { DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE); - String nodename = info[nodeIndex].getName(); + String nodename = info[nodeIndex].getXferAddr(); System.out.println("Decommissioning node: " + nodename); // write nodename into the exclude file. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 6647042145..97a81d3a77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -93,6 +93,15 @@ public void testNameNodeMXBeanInfo() throws Exception { // get attribute alivenodeinfo String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes")); + Map> liveNodes = + (Map>) JSON.parse(alivenodeinfo); + assertTrue(liveNodes.size() > 0); + for (Map liveNode : liveNodes.values()) { + assertTrue(liveNode.containsKey("nonDfsUsedSpace")); + assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0); + assertTrue(liveNode.containsKey("capacity")); + assertTrue(((Long)liveNode.get("capacity")) > 0); + } Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo); // get attribute deadnodeinfo String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java index ddfd573b4c..d7f2ff9789 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyIsHot.java @@ -167,7 +167,7 @@ public void testDatanodeRestarts() throws Exception { // Stop the DN. DataNode dn = cluster.getDataNodes().get(0); - String dnName = dn.getDatanodeId().getName(); + String dnName = dn.getDatanodeId().getXferAddr(); DataNodeProperties dnProps = cluster.stopDataNode(0); // Make sure both NNs register it as dead. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java index 9ebc13e5ed..36b2220641 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java @@ -65,12 +65,11 @@ public class TestOfflineEditsViewer { * * These are the opcodes that are not used anymore, some * are marked deprecated, we need to include them here to make - * sure we exclude them when checking for completness of testing, + * sure we exclude them when checking for completeness of testing, * that's why the "deprecation" warnings are suppressed. */ @SuppressWarnings("deprecation") private static void initializeObsoleteOpCodes() { - // these are obsolete obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_ADD, true); obsoleteOpCodes.put(FSEditLogOpCodes.OP_DATANODE_REMOVE, true); obsoleteOpCodes.put(FSEditLogOpCodes.OP_SET_NS_QUOTA, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java index 5a4643f410..36c38ef4da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java @@ -30,16 +30,16 @@ public class TestNetworkTopology extends TestCase { private final static NetworkTopology cluster = new NetworkTopology(); private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] { - new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"), - new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h4:5020"), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d1/r2"), - new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3"), - new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r3") + new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"), + new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d1/r2"), + new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3"), + new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r3") }; private final static DatanodeDescriptor NODE = - new DatanodeDescriptor(new DatanodeID("h8:5020"), "/d2/r4"); + new DatanodeDescriptor(new DatanodeID("h8", 5020), "/d2/r4"); static { for(int i=0; i map = new TreeMap(); + if (dfs != null) { + map.put("namenode_port", dfs.getNameNodePort()); + } + + FileWriter fw = new FileWriter(new File(writeDetails)); + fw.write(new JSON().toJSON(map)); + fw.close(); + } + } + + /** + * Parses arguments and fills out the member variables. + * @param args Command-line arguments. + * @return true on successful parse; false to indicate that the + * program should exit. 
+ */ + private boolean parseArguments(String[] args) { + Options options = makeOptions(); + CommandLine cli; + try { + CommandLineParser parser = new GnuParser(); + cli = parser.parse(options, args); + } catch(ParseException e) { + LOG.warn("options parsing failed: "+e.getMessage()); + new HelpFormatter().printHelp("...", options); + return false; + } + + if (cli.hasOption("help")) { + new HelpFormatter().printHelp("...", options); + return false; + } + + if (cli.getArgs().length > 0) { + for (String arg : cli.getArgs()) { + LOG.error("Unrecognized option: " + arg); + new HelpFormatter().printHelp("...", options); + return false; + } + } + + // HDFS + numDataNodes = intArgument(cli, "datanodes", 1); + nameNodePort = intArgument(cli, "nnport", 0); + dfsOpts = cli.hasOption("format") ? + StartupOption.FORMAT : StartupOption.REGULAR; + + // Runner + writeDetails = cli.getOptionValue("writeDetails"); + writeConfig = cli.getOptionValue("writeConfig"); + + // General + conf = new HdfsConfiguration(); + updateConfiguration(conf, cli.getOptionValues("D")); + + return true; + } + + /** + * Updates configuration based on what's given on the command line. + * + * @param conf2 The configuration object + * @param keyvalues An array of interleaved key value pairs. + */ + private void updateConfiguration(Configuration conf2, String[] keyvalues) { + int num_confs_updated = 0; + if (keyvalues != null) { + for (String prop : keyvalues) { + String[] keyval = prop.split("=", 2); + if (keyval.length == 2) { + conf2.set(keyval[0], keyval[1]); + num_confs_updated++; + } else { + LOG.warn("Ignoring -D option " + prop); + } + } + } + LOG.info("Updated " + num_confs_updated + + " configuration settings from command line."); + } + + /** + * Extracts an integer argument with specified default value. + */ + private int intArgument(CommandLine cli, String argName, int defaultValue) { + String o = cli.getOptionValue(argName); + try { + if (o != null) { + return Integer.parseInt(o); + } + } catch (NumberFormatException ex) { + LOG.error("Couldn't parse value (" + o + ") for option " + + argName + ". Using default: " + defaultValue); + } + + return defaultValue; + } + + /** + * Starts a MiniDFSClusterManager with parameters drawn from the command line. + */ + public static void main(String[] args) throws IOException { + new MiniDFSClusterManager().run(args); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-14-dfs-dir.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-14-dfs-dir.tgz deleted file mode 100644 index 4d571a5951..0000000000 Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-14-dfs-dir.tgz and /dev/null differ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-dfs-dir.txt b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-dfs-dir.txt index 3d1b67dae3..0fbc3189aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-dfs-dir.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-dfs-dir.txt @@ -19,18 +19,6 @@ # See HADOOP-1629 for more info if needed. # These two files are used by unit test TestDFSUpgradeFromImage.java # -# hadoop-14-dfs-dir.tgz : -# --------------------- -# This file contains the HDFS directory structure for one namenode and 4 datanodes. -# The structure is setup similar to the structure used in MiniDFSCluster. -# The directory was created with Hadoo-0.14.x. -# -# In the test, this directory is unpacked and MiniDFSCluster is run with -# "-upgrade" option. 
The test waits for the upgrade to complete -# (leave safe mode) and then all the files are read. The test checks that the -# directory structure and file checksums exactly match the information -# in this file. -# # hadoop-dfs-dir.txt : # --------------------- # Along with this description this file contains the expected files and @@ -43,9 +31,6 @@ # For e.g. "top-dir-1Mb-512" contains files created with dfs.blocksize of 1Mb # and io.bytes.per.checksum of 512. # -# In the future, when Hadoop project no longer supports upgrade from -# Hadoop-0.12, then a new DFS directory image must be created. -# # To generate checksum info for new files : # --------------------------------------- # Uncomment the last coment (starts with "printChecksums") and run the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml index 7f9432a5f8..8f769cec38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml @@ -15420,7 +15420,7 @@ SubstringComparator - setSpaceQuota: java.io.FileNotFoundException: Directory does not exist: /test1 + setSpaceQuota: Directory does not exist: /test1 @@ -15486,7 +15486,7 @@ SubstringComparator - clrQuota: java.io.FileNotFoundException: Directory does not exist: /test1 + clrQuota: Directory does not exist: /test1 @@ -15506,7 +15506,7 @@ RegexpComparator - put: org.apache.hadoop.hdfs.protocol.DSQuotaExceededException: The DiskSpace quota of /dir1 is exceeded: quota=1.0k diskspace consumed=[0-9.]+[kmg]* + put: The DiskSpace quota of /dir1 is exceeded: quota=1.0k diskspace consumed=[0-9.]+[kmg]* @@ -15526,7 +15526,7 @@ SubstringComparator - mkdir: org.apache.hadoop.hdfs.protocol.NSQuotaExceededException: The NameSpace quota (directories and files) of directory /dir1 is exceeded: quota=1 file count=2 + mkdir: The NameSpace quota (directories and files) of directory /dir1 is exceeded: quota=1 file count=2 @@ -15739,6 +15739,10 @@ RegexpComparator Name: [0-9\.:]+ \([-.a-zA-z0-9\.]+\) + + RegexpComparator + Hostname: [-.a-zA-z0-9\.]+ + RegexpComparator Decommission Status : [a-zA-Z]+ @@ -15792,7 +15796,7 @@ TokenComparator - saveNamespace: java.io.IOException: Safe mode should be turned ON in order to create namespace image. + saveNamespace: Safe mode should be turned ON in order to create namespace image. @@ -15836,6 +15840,10 @@ RegexpComparator Name: [0-9\.:]+ \([-.a-zA-z0-9\.]+\) + + RegexpComparator + Hostname: [-.a-zA-z0-9\.]+ + RegexpComparator Decommission Status : [a-zA-Z]+ @@ -16168,7 +16176,7 @@ SubstringComparator - setQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode. + setQuota: Cannot set quota on /test. Name node is in safe mode. @@ -16187,7 +16195,7 @@ SubstringComparator - clrQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode. + clrQuota: Cannot set quota on /test. Name node is in safe mode. @@ -16207,7 +16215,7 @@ SubstringComparator - setSpaceQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode. + setSpaceQuota: Cannot set quota on /test. Name node is in safe mode. @@ -16226,7 +16234,7 @@ SubstringComparator - clrSpaceQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode. + clrSpaceQuota: Cannot set quota on /test. 
Name node is in safe mode. diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index d0c538e888..a28d38f2bb 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -52,6 +52,9 @@ Trunk (unreleased changes) BUG FIXES + MAPREDUCE-4087. [Gridmix] GenerateDistCacheData job of Gridmix can + become slow in some cases (ravigummadi). + MAPREDUCE-3953. [Gridmix] Gridmix throws NPE and does not simulate a job if the trace contains null taskStatus for a task. (ravigummadi) @@ -133,6 +136,9 @@ Release 2.0.0 - UNRELEASED MAPREDUCE-3353. Add a channel between RM and AM to get information on nodes. (Bikas Saha via acmurthy) + MAPREDUCE-3955. Change MR to use ProtobufRpcEngine from hadoop-common + instead of ProtoOverHadoopRpcEngine. (Jitendra Nath Pandey via sseth) + OPTIMIZATIONS BUG FIXES @@ -176,6 +182,19 @@ Release 2.0.0 - UNRELEASED MAPREDUCE-4066. Use default value when fetching MR_AM_STAGING_DIR (xieguiming via harsh) + MAPREDUCE-3377. Added a unit test to ensure OutputCommitter.checkOutputSpecs + is called prior to copying job.xml. (Jane Chen via acmurthy) + + MAPREDUCE-4081. TestMROutputFormat.java does not compile (Jason Lowe via + bobby) + + MAPREDUCE-4082. hadoop-mapreduce-client-app's mrapp-generated-classpath + file should not be in the module JAR (tucu) + + MAPREDUCE-3916. various issues with running yarn proxyserver (devaraj via tucu) + + MAPREDUCE-4091. tools testcases failing because of MAPREDUCE-4082 (tucu) + Release 0.23.2 - UNRELEASED INCOMPATIBLE CHANGES @@ -219,6 +238,9 @@ Release 0.23.2 - UNRELEASED MAPREDUCE-4043. Secret keys set in Credentials are not seen by tasks (Jason Lowe via bobby) + MAPREDUCE-3989. Cap space usage of default log4j rolling policy. + (Patrick Hunt via eli) + OPTIMIZATIONS MAPREDUCE-3901. 
Modified JobHistory records in YARN to lazily load job and diff --git a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh index 2272ae9564..4cd6eb1ec8 100644 --- a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh +++ b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh @@ -90,7 +90,7 @@ fi # some variables export HADOOP_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log -export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,DRFA} +export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,RFA} export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA} log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml index 2059d28038..b63d181b6a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml @@ -86,6 +86,11 @@ test-compile + + + mrapp-generated-classpath + + maven-dependency-plugin diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java index b94e4122a8..2b8efea9e5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java @@ -21,12 +21,12 @@ import java.lang.annotation.Annotation; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.SecurityInfo; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenInfo; import org.apache.hadoop.security.token.TokenSelector; -import org.apache.hadoop.yarn.proto.MRClientProtocol; import org.apache.hadoop.yarn.security.client.ClientTokenSelector; public class MRClientSecurityInfo extends SecurityInfo { @@ -38,7 +38,7 @@ public KerberosInfo getKerberosInfo(Class protocol, Configuration conf) { @Override public TokenInfo getTokenInfo(Class protocol, Configuration conf) { - if (!protocol.equals(MRClientProtocol.MRClientProtocolService.BlockingInterface.class)) { + if (!protocol.equals(MRClientProtocolPB.class)) { return null; } return new TokenInfo() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/ClientHSPolicyProvider.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/ClientHSPolicyProvider.java index 968d0423a7..dfb7469490 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/ClientHSPolicyProvider.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/ClientHSPolicyProvider.java @@ -19,10 +19,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; -import org.apache.hadoop.yarn.proto.HSClientProtocol; /** * {@link PolicyProvider} for YARN MapReduce protocols. @@ -35,7 +35,7 @@ public class ClientHSPolicyProvider extends PolicyProvider { new Service[] { new Service( JHAdminConfig.MR_HS_SECURITY_SERVICE_AUTHORIZATION, - HSClientProtocol.HSClientProtocolService.BlockingInterface.class) + HSClientProtocolPB.class) }; @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java index 3f6ecb4386..24d0c2f7f9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java @@ -21,9 +21,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.mapred.TaskUmbilicalProtocol; import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; -import org.apache.hadoop.yarn.proto.MRClientProtocol; /** * {@link PolicyProvider} for YARN MapReduce protocols. @@ -39,7 +39,7 @@ public class MRAMPolicyProvider extends PolicyProvider { TaskUmbilicalProtocol.class), new Service( MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT, - MRClientProtocol.MRClientProtocolService.BlockingInterface.class) + MRClientProtocolPB.class) }; @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSClientProtocolPB.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSClientProtocolPB.java new file mode 100644 index 0000000000..8c9a007b9b --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSClientProtocolPB.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.v2.api; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService; + +@ProtocolInfo(protocolName = "org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB", + protocolVersion = 1) +public interface HSClientProtocolPB extends HSClientProtocolService.BlockingInterface { + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocolPB.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocolPB.java new file mode 100644 index 0000000000..835a161bec --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocolPB.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce.v2.api; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService; + +@ProtocolInfo( + protocolName = "org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB", + protocolVersion = 1) +public interface MRClientProtocolPB extends MRClientProtocolService.BlockingInterface { + +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java index aa5d40e8e7..156930325b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java @@ -22,10 +22,10 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol; -import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine; -import org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService; +import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB; public class HSClientProtocolPBClientImpl extends MRClientProtocolPBClientImpl implements HSClientProtocol { @@ -33,9 +33,9 @@ public class HSClientProtocolPBClientImpl extends MRClientProtocolPBClientImpl public HSClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { super(); - RPC.setProtocolEngine(conf, HSClientProtocolService.BlockingInterface.class, - ProtoOverHadoopRpcEngine.class); - proxy = (HSClientProtocolService.BlockingInterface)RPC.getProxy( - HSClientProtocolService.BlockingInterface.class, clientVersion, addr, conf); + RPC.setProtocolEngine(conf, HSClientProtocolPB.class, + ProtobufRpcEngine.class); + proxy = (HSClientProtocolPB)RPC.getProxy( + HSClientProtocolPB.class, clientVersion, addr, conf); } } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java index 1fb57f972c..cf14532902 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java @@ -23,8 +23,10 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; +import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB; import 
org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest; @@ -86,21 +88,20 @@ import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine; -import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService; +import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl; import com.google.protobuf.ServiceException; public class MRClientProtocolPBClientImpl implements MRClientProtocol { - protected MRClientProtocolService.BlockingInterface proxy; + protected MRClientProtocolPB proxy; public MRClientProtocolPBClientImpl() {}; public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, MRClientProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class); - proxy = (MRClientProtocolService.BlockingInterface)RPC.getProxy( - MRClientProtocolService.BlockingInterface.class, clientVersion, addr, conf); + RPC.setProtocolEngine(conf, MRClientProtocolPB.class, ProtobufRpcEngine.class); + proxy = (MRClientProtocolPB)RPC.getProxy( + MRClientProtocolPB.class, clientVersion, addr, conf); } @Override @@ -110,13 +111,7 @@ public GetJobReportResponse getJobReport(GetJobReportRequest request) try { return new GetJobReportResponsePBImpl(proxy.getJobReport(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -127,13 +122,7 @@ public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) try { return new GetTaskReportResponsePBImpl(proxy.getTaskReport(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -144,13 +133,7 @@ public GetTaskAttemptReportResponse getTaskAttemptReport( try { return new GetTaskAttemptReportResponsePBImpl(proxy.getTaskAttemptReport(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -161,13 +144,7 @@ public GetCountersResponse getCounters(GetCountersRequest request) try { return new GetCountersResponsePBImpl(proxy.getCounters(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof 
UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -178,13 +155,7 @@ public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents( try { return new GetTaskAttemptCompletionEventsResponsePBImpl(proxy.getTaskAttemptCompletionEvents(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -195,13 +166,7 @@ public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request) try { return new GetTaskReportsResponsePBImpl(proxy.getTaskReports(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -212,13 +177,7 @@ public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request) try { return new GetDiagnosticsResponsePBImpl(proxy.getDiagnostics(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -231,13 +190,7 @@ public GetDelegationTokenResponse getDelegationToken( return new GetDelegationTokenResponsePBImpl(proxy.getDelegationToken( null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -248,13 +201,7 @@ public KillJobResponse killJob(KillJobRequest request) try { return new KillJobResponsePBImpl(proxy.killJob(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -265,13 +212,7 @@ public KillTaskResponse killTask(KillTaskRequest request) try { return new KillTaskResponsePBImpl(proxy.killTask(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -282,13 +223,7 @@ public 
KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) try { return new KillTaskAttemptResponsePBImpl(proxy.killTaskAttempt(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -299,13 +234,7 @@ public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) try { return new FailTaskAttemptResponsePBImpl(proxy.failTaskAttempt(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/HSClientProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/HSClientProtocolPBServiceImpl.java index cc9fd81278..4c4882a6f2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/HSClientProtocolPBServiceImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/HSClientProtocolPBServiceImpl.java @@ -19,10 +19,10 @@ package org.apache.hadoop.mapreduce.v2.api.impl.pb.service; import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol; -import org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService.BlockingInterface; +import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB; public class HSClientProtocolPBServiceImpl extends MRClientProtocolPBServiceImpl - implements BlockingInterface { + implements HSClientProtocolPB { public HSClientProtocolPBServiceImpl(HSClientProtocol impl) { super(impl); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java index 17725a7c40..90881215fa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapreduce.v2.api.impl.pb.service; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; +import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import 
org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest; @@ -91,12 +92,11 @@ import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService.BlockingInterface; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -public class MRClientProtocolPBServiceImpl implements BlockingInterface { +public class MRClientProtocolPBServiceImpl implements MRClientProtocolPB { private MRClientProtocol real; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java index 187bab06cb..43fc815232 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java @@ -21,20 +21,20 @@ import java.lang.annotation.Annotation; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.SecurityInfo; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenInfo; import org.apache.hadoop.security.token.TokenSelector; -import org.apache.hadoop.yarn.proto.HSClientProtocol; public class ClientHSSecurityInfo extends SecurityInfo { @Override public KerberosInfo getKerberosInfo(Class protocol, Configuration conf) { if (!protocol - .equals(HSClientProtocol.HSClientProtocolService.BlockingInterface.class)) { + .equals(HSClientProtocolPB.class)) { return null; } return new KerberosInfo() { @@ -59,7 +59,7 @@ public String clientPrincipal() { @Override public TokenInfo getTokenInfo(Class protocol, Configuration conf) { if (!protocol - .equals(HSClientProtocol.HSClientProtocolService.BlockingInterface.class)) { + .equals(HSClientProtocolPB.class)) { return null; } return new TokenInfo() { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/yarn/proto/HSClientProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/yarn/proto/HSClientProtocol.java index 574836c4e4..c0f3a0aa8a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/yarn/proto/HSClientProtocol.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/yarn/proto/HSClientProtocol.java @@ -18,14 +18,23 @@ package org.apache.hadoop.yarn.proto; +import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB; +import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService; + /** * Fake protocol to differentiate the 
blocking interfaces in the * security info class loaders. */ public interface HSClientProtocol { public abstract class HSClientProtocolService { - public interface BlockingInterface extends - MRClientProtocol.MRClientProtocolService.BlockingInterface { + public interface BlockingInterface extends MRClientProtocolPB { + } + + public static com.google.protobuf.BlockingService newReflectiveBlockingService( + final HSClientProtocolService.BlockingInterface impl) { + // The cast is safe + return MRClientProtocolService + .newReflectiveBlockingService((MRClientProtocolService.BlockingInterface) impl); } } } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index b9a76b44c2..5c7b55270e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -36,6 +36,7 @@ import org.apache.hadoop.ipc.Server; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.TypeConverter; +import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; @@ -96,7 +97,7 @@ public class HistoryClientService extends AbstractService { private static final Log LOG = LogFactory.getLog(HistoryClientService.class); - private MRClientProtocol protocolHandler; + private HSClientProtocol protocolHandler; private Server server; private WebApp webApp; private InetSocketAddress bindAddress; @@ -107,7 +108,7 @@ public HistoryClientService(HistoryContext history, JHSDelegationTokenSecretManager jhsDTSecretManager) { super("HistoryClientService"); this.history = history; - this.protocolHandler = new MRClientProtocolHandler(); + this.protocolHandler = new HSClientProtocolHandler(); this.jhsDTSecretManager = jhsDTSecretManager; } @@ -128,7 +129,7 @@ public void start() { } server = - rpc.getServer(MRClientProtocol.class, protocolHandler, address, + rpc.getServer(HSClientProtocol.class, protocolHandler, address, conf, jhsDTSecretManager, conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT, JHAdminConfig.DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT)); @@ -177,7 +178,7 @@ public InetSocketAddress getBindAddress() { return this.bindAddress; } - private class MRClientProtocolHandler implements MRClientProtocol { + private class HSClientProtocolHandler implements HSClientProtocol { private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java index 769ae9e89b..b51166a11c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java @@ -34,6 +34,7 @@ import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse; @@ -340,9 +341,10 @@ public GetDelegationTokenResponse getDelegationToken( } } - class HistoryService extends AMService { + class HistoryService extends AMService implements HSClientProtocol { public HistoryService() { super(HSHOSTADDRESS); + this.protocol = HSClientProtocol.class; } @Override @@ -357,6 +359,7 @@ public GetCountersResponse getCounters(GetCountersRequest request) throws YarnRe class AMService extends AbstractService implements MRClientProtocol { + protected Class protocol; private InetSocketAddress bindAddress; private Server server; private final String hostAddress; @@ -367,6 +370,7 @@ public AMService() { public AMService(String hostAddress) { super("AMService"); + this.protocol = MRClientProtocol.class; this.hostAddress = hostAddress; } @@ -383,7 +387,7 @@ public void start(Configuration conf) { } server = - rpc.getServer(MRClientProtocol.class, this, address, + rpc.getServer(protocol, this, address, conf, null, 1); server.start(); this.bindAddress = diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMROutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMROutputFormat.java new file mode 100644 index 0000000000..88d118803d --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMROutputFormat.java @@ -0,0 +1,206 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.mapreduce;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapred.JobConf;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+public class TestMROutputFormat {
+
+  @Test
+  public void testJobSubmission() throws Exception {
+    JobConf conf = new JobConf();
+    Job job = new Job(conf);
+    job.setInputFormatClass(TestInputFormat.class);
+    job.setMapperClass(TestMapper.class);
+    job.setOutputFormatClass(TestOutputFormat.class);
+    job.setOutputKeyClass(IntWritable.class);
+    job.setOutputValueClass(IntWritable.class);
+    job.waitForCompletion(true);
+    assertTrue(job.isSuccessful());
+  }
+
+  public static class TestMapper
+  extends Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
+    public void map(IntWritable key, IntWritable value, Context context)
+    throws IOException, InterruptedException {
+      context.write(key, value);
+    }
+  }
+}
+
+class TestInputFormat extends InputFormat<IntWritable, IntWritable> {
+
+  @Override
+  public RecordReader<IntWritable, IntWritable> createRecordReader(
+      InputSplit split, TaskAttemptContext context) throws IOException,
+      InterruptedException {
+    return new RecordReader<IntWritable, IntWritable>() {
+
+      private boolean done = false;
+
+      @Override
+      public void close() throws IOException {
+      }
+
+      @Override
+      public IntWritable getCurrentKey() throws IOException,
+          InterruptedException {
+        return new IntWritable(0);
+      }
+
+      @Override
+      public IntWritable getCurrentValue() throws IOException,
+          InterruptedException {
+        return new IntWritable(0);
+      }
+
+      @Override
+      public float getProgress() throws IOException, InterruptedException {
+        return done ? 0 : 1;
+      }
+
+      @Override
+      public void initialize(InputSplit split, TaskAttemptContext context)
+          throws IOException, InterruptedException {
+      }
+
+      @Override
+      public boolean nextKeyValue() throws IOException, InterruptedException {
+        if (!done) {
+          done = true;
+          return true;
+        }
+        return false;
+      }
+    };
+  }
+
+  @Override
+  public List<InputSplit> getSplits(JobContext context) throws IOException,
+      InterruptedException {
+    List<InputSplit> list = new ArrayList<InputSplit>();
+    list.add(new TestInputSplit());
+    return list;
+  }
+}
+
+class TestInputSplit extends InputSplit implements Writable {
+
+  @Override
+  public long getLength() throws IOException, InterruptedException {
+    return 1;
+  }
+
+  @Override
+  public String[] getLocations() throws IOException, InterruptedException {
+    String[] hosts = {"localhost"};
+    return hosts;
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+  }
+}
+
+class TestOutputFormat extends OutputFormat<IntWritable, IntWritable>
+implements Configurable {
+
+  public static final String TEST_CONFIG_NAME = "mapred.test.jobsubmission";
+  private Configuration conf;
+
+  @Override
+  public void checkOutputSpecs(JobContext context) throws IOException,
+      InterruptedException {
+    conf.setBoolean(TEST_CONFIG_NAME, true);
+  }
+
+  @Override
+  public OutputCommitter getOutputCommitter(TaskAttemptContext context)
+      throws IOException, InterruptedException {
+    return new OutputCommitter() {
+
+      @Override
+      public void abortTask(TaskAttemptContext taskContext) throws IOException {
+      }
+
+      @Override
+      public void commitTask(TaskAttemptContext taskContext) throws IOException {
+      }
+
+      @Override
+      public boolean needsTaskCommit(TaskAttemptContext taskContext)
+          throws IOException {
+        return false;
+      }
+
+      @Override
+      public void setupJob(JobContext jobContext) throws IOException {
+      }
+
+      @Override
+      public void setupTask(TaskAttemptContext taskContext) throws IOException {
+      }
+    };
+  }
+
+  @Override
+  public RecordWriter<IntWritable, IntWritable> getRecordWriter(
+      TaskAttemptContext context) throws IOException, InterruptedException {
+    assertTrue(context.getConfiguration().getBoolean(TEST_CONFIG_NAME, false));
+    return new RecordWriter<IntWritable, IntWritable>() {
+
+      @Override
+      public void close(TaskAttemptContext context) throws IOException,
+          InterruptedException {
+      }
+
+      @Override
+      public void write(IntWritable key, IntWritable value) throws IOException,
+          InterruptedException {
+      }
+    };
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
index d65a198c20..0808eed922 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.TypeConverter;
-import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; @@ -142,7 +142,7 @@ public void testJobHistoryData() throws IOException, InterruptedException, LOG.info("CounterMR " + counterMR); Assert.assertEquals(counterHS, counterMR); - MRClientProtocol historyClient = instantiateHistoryProxy(); + HSClientProtocol historyClient = instantiateHistoryProxy(); GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class); gjReq.setJobId(jobId); JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport(); @@ -164,12 +164,12 @@ private void verifyJobReport(JobReport jobReport, JobId jobId) { && jobReport.getFinishTime() >= jobReport.getStartTime()); } - private MRClientProtocol instantiateHistoryProxy() { + private HSClientProtocol instantiateHistoryProxy() { final String serviceAddr = mrCluster.getConfig().get(JHAdminConfig.MR_HISTORY_ADDRESS); final YarnRPC rpc = YarnRPC.create(conf); - MRClientProtocol historyClient = - (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, + HSClientProtocol historyClient = + (HSClientProtocol) rpc.getProxy(HSClientProtocol.class, NetUtils.createSocketAddr(serviceAddr), mrCluster.getConfig()); return historyClient; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh index c36e99cac7..89ae9d87be 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh +++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-daemon.sh @@ -90,7 +90,7 @@ fi # some variables export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log -export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA} +export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,RFA} log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocolPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocolPB.java new file mode 100644 index 0000000000..d8f0ab7797 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/AMRMProtocolPB.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.api; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.yarn.proto.AMRMProtocol.AMRMProtocolService; + +@ProtocolInfo(protocolName = "org.apache.hadoop.yarn.api.AMRMProtocolPB", + protocolVersion = 1) +public interface AMRMProtocolPB extends AMRMProtocolService.BlockingInterface { + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocolPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocolPB.java new file mode 100644 index 0000000000..d5930873ef --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocolPB.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.api; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService; + +@ProtocolInfo(protocolName = "org.apache.hadoop.yarn.api.ClientRMProtocolPB", + protocolVersion = 1) +public interface ClientRMProtocolPB extends ClientRMProtocolService.BlockingInterface { + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagerPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagerPB.java new file mode 100644 index 0000000000..306eaf637e --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManagerPB.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.api; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.yarn.proto.ContainerManager.ContainerManagerService; + +@ProtocolInfo( + protocolName = "org.apache.hadoop.yarn.api.ContainerManagerPB", + protocolVersion = 1) +public interface ContainerManagerPB extends ContainerManagerService.BlockingInterface { + +} diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java index 615b072f25..ae17ed0f8e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/impl/pb/YarnRemoteExceptionPBImpl.java @@ -18,12 +18,16 @@ package org.apache.hadoop.yarn.exceptions.impl.pb; +import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; +import java.lang.reflect.UndeclaredThrowableException; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.proto.YarnProtos.YarnRemoteExceptionProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnRemoteExceptionProtoOrBuilder; +import com.google.protobuf.ServiceException; public class YarnRemoteExceptionPBImpl extends YarnRemoteException { @@ -105,4 +109,30 @@ private void maybeInitBuilder() { } viaProto = false; } + + /** + * Utility method that unwraps and throws appropriate exception. + * @param se ServiceException + * @throws YarnRemoteException + * @throws UndeclaredThrowableException + */ + public static YarnRemoteException unwrapAndThrowException(ServiceException se) + throws UndeclaredThrowableException { + if (se.getCause() instanceof RemoteException) { + try { + throw ((RemoteException) se.getCause()) + .unwrapRemoteException(YarnRemoteExceptionPBImpl.class); + } catch (YarnRemoteException ex) { + return ex; + } catch (IOException e1) { + throw new UndeclaredThrowableException(e1); + } + } else if (se.getCause() instanceof YarnRemoteException) { + return (YarnRemoteException)se.getCause(); + } else if (se.getCause() instanceof UndeclaredThrowableException) { + throw (UndeclaredThrowableException)se.getCause(); + } else { + throw new UndeclaredThrowableException(se); + } + } } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java index 26ec40b87e..c43863c57b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/AMRMProtocolPBClientImpl.java @@ -19,12 +19,13 @@ package org.apache.hadoop.yarn.api.impl.pb.client; import java.io.IOException; -import java.lang.reflect.UndeclaredThrowableException; import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import 
org.apache.hadoop.yarn.api.AMRMProtocol; +import org.apache.hadoop.yarn.api.AMRMProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; @@ -38,8 +39,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine; -import org.apache.hadoop.yarn.proto.AMRMProtocol.AMRMProtocolService; +import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl; import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto; @@ -48,12 +48,12 @@ public class AMRMProtocolPBClientImpl implements AMRMProtocol { - private AMRMProtocolService.BlockingInterface proxy; + private AMRMProtocolPB proxy; public AMRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, AMRMProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class); - proxy = (AMRMProtocolService.BlockingInterface)RPC.getProxy( - AMRMProtocolService.BlockingInterface.class, clientVersion, addr, conf); + RPC.setProtocolEngine(conf, AMRMProtocolPB.class, ProtobufRpcEngine.class); + proxy = (AMRMProtocolPB)RPC.getProxy( + AMRMProtocolPB.class, clientVersion, addr, conf); } @@ -64,13 +64,7 @@ public AllocateResponse allocate(AllocateRequest request) try { return new AllocateResponsePBImpl(proxy.allocate(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -83,13 +77,7 @@ public FinishApplicationMasterResponse finishApplicationMaster( try { return new FinishApplicationMasterResponsePBImpl(proxy.finishApplicationMaster(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -100,13 +88,7 @@ public RegisterApplicationMasterResponse registerApplicationMaster( try { return new RegisterApplicationMasterResponsePBImpl(proxy.registerApplicationMaster(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } } diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java index 81333258bd..4167e29b9d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java @@ -19,12 +19,13 @@ package org.apache.hadoop.yarn.api.impl.pb.client; import java.io.IOException; -import java.lang.reflect.UndeclaredThrowableException; import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.api.ClientRMProtocol; +import org.apache.hadoop.yarn.api.ClientRMProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; @@ -66,8 +67,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine; -import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService; +import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto; @@ -83,12 +83,12 @@ public class ClientRMProtocolPBClientImpl implements ClientRMProtocol { - private ClientRMProtocolService.BlockingInterface proxy; + private ClientRMProtocolPB proxy; public ClientRMProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, ClientRMProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class); - proxy = (ClientRMProtocolService.BlockingInterface)RPC.getProxy( - ClientRMProtocolService.BlockingInterface.class, clientVersion, addr, conf); + RPC.setProtocolEngine(conf, ClientRMProtocolPB.class, ProtobufRpcEngine.class); + proxy = (ClientRMProtocolPB)RPC.getProxy( + ClientRMProtocolPB.class, clientVersion, addr, conf); } @Override @@ -98,13 +98,7 @@ public KillApplicationResponse forceKillApplication( try { return new KillApplicationResponsePBImpl(proxy.forceKillApplication(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -115,13 +109,7 @@ public GetApplicationReportResponse getApplicationReport( try { return new GetApplicationReportResponsePBImpl(proxy.getApplicationReport(null, requestProto)); } catch (ServiceException e) { - if 
(e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -132,13 +120,7 @@ public GetClusterMetricsResponse getClusterMetrics( try { return new GetClusterMetricsResponsePBImpl(proxy.getClusterMetrics(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -149,13 +131,7 @@ public GetNewApplicationResponse getNewApplication( try { return new GetNewApplicationResponsePBImpl(proxy.getNewApplication(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -166,13 +142,7 @@ public SubmitApplicationResponse submitApplication( try { return new SubmitApplicationResponsePBImpl(proxy.submitApplication(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -185,13 +155,7 @@ public GetAllApplicationsResponse getAllApplications( return new GetAllApplicationsResponsePBImpl( proxy.getAllApplications(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -204,13 +168,7 @@ public GetClusterNodesResponse getClusterNodes( return new GetClusterNodesResponsePBImpl( proxy.getClusterNodes(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -223,13 +181,7 @@ public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) return new GetQueueInfoResponsePBImpl( proxy.getQueueInfo(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw 
YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -242,13 +194,7 @@ public GetQueueUserAclsInfoResponse getQueueUserAcls( return new GetQueueUserAclsInfoResponsePBImpl( proxy.getQueueUserAcls(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -261,13 +207,7 @@ public GetDelegationTokenResponse getDelegationToken( return new GetDelegationTokenResponsePBImpl( proxy.getDelegationToken(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java index 34ad56073e..e97accedcd 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ContainerManagerPBClientImpl.java @@ -19,12 +19,13 @@ package org.apache.hadoop.yarn.api.impl.pb.client; import java.io.IOException; -import java.lang.reflect.UndeclaredThrowableException; import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.api.ContainerManager; +import org.apache.hadoop.yarn.api.ContainerManagerPB; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; @@ -38,8 +39,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerResponsePBImpl; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine; -import org.apache.hadoop.yarn.proto.ContainerManager.ContainerManagerService; +import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerRequestProto; @@ -48,12 +48,12 @@ public class ContainerManagerPBClientImpl implements ContainerManager { - private ContainerManagerService.BlockingInterface proxy; + private ContainerManagerPB proxy; public ContainerManagerPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, ContainerManagerService.BlockingInterface.class, 
ProtoOverHadoopRpcEngine.class); - proxy = (ContainerManagerService.BlockingInterface)RPC.getProxy( - ContainerManagerService.BlockingInterface.class, clientVersion, addr, conf); + RPC.setProtocolEngine(conf, ContainerManagerPB.class, ProtobufRpcEngine.class); + proxy = (ContainerManagerPB)RPC.getProxy( + ContainerManagerPB.class, clientVersion, addr, conf); } public void close() { @@ -69,13 +69,7 @@ public GetContainerStatusResponse getContainerStatus( try { return new GetContainerStatusResponsePBImpl(proxy.getContainerStatus(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -86,31 +80,20 @@ public StartContainerResponse startContainer(StartContainerRequest request) try { return new StartContainerResponsePBImpl(proxy.startContainer(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @Override public StopContainerResponse stopContainer(StopContainerRequest request) throws YarnRemoteException { - StopContainerRequestProto requestProto = ((StopContainerRequestPBImpl)request).getProto(); + StopContainerRequestProto requestProto = ((StopContainerRequestPBImpl) request) + .getProto(); try { - return new StopContainerResponsePBImpl(proxy.stopContainer(null, requestProto)); + return new StopContainerResponsePBImpl(proxy.stopContainer(null, + requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } - } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java index bc8f695cdb..4211690ffc 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/AMRMProtocolPBServiceImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.api.impl.pb.service; import org.apache.hadoop.yarn.api.AMRMProtocol; +import org.apache.hadoop.yarn.api.AMRMProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; @@ -29,7 +30,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl; import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.proto.AMRMProtocol.AMRMProtocolService.BlockingInterface; import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto; @@ -40,7 +40,7 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -public class AMRMProtocolPBServiceImpl implements BlockingInterface { +public class AMRMProtocolPBServiceImpl implements AMRMProtocolPB { private AMRMProtocol real; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java index 7bece03657..2f0e89c5c2 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.api.impl.pb.service; import org.apache.hadoop.yarn.api.ClientRMProtocol; +import org.apache.hadoop.yarn.api.ClientRMProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; @@ -50,7 +51,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.proto.ClientRMProtocol.ClientRMProtocolService.BlockingInterface; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; @@ -75,7 +75,7 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -public class ClientRMProtocolPBServiceImpl implements BlockingInterface { +public class ClientRMProtocolPBServiceImpl implements ClientRMProtocolPB { private ClientRMProtocol real; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java index 5f3cf17383..4b0af8156d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ContainerManagerPBServiceImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.api.impl.pb.service; import org.apache.hadoop.yarn.api.ContainerManager; +import 
org.apache.hadoop.yarn.api.ContainerManagerPB; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse; @@ -29,7 +30,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainerResponsePBImpl; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.proto.ContainerManager.ContainerManagerService.BlockingInterface; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto; @@ -40,7 +40,7 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -public class ContainerManagerPBServiceImpl implements BlockingInterface { +public class ContainerManagerPBServiceImpl implements ContainerManagerPB { private ContainerManager real; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java index f1f2892162..0ea9d1c65e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java @@ -26,19 +26,23 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.factories.RpcServerFactory; -import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine; import com.google.protobuf.BlockingService; public class RpcServerFactoryPBImpl implements RpcServerFactory { + private static final Log LOG = LogFactory.getLog(RpcServerFactoryPBImpl.class); private static final String PROTO_GEN_PACKAGE_NAME = "org.apache.hadoop.yarn.proto"; private static final String PROTO_GEN_CLASS_SUFFIX = "Service"; private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb.service"; @@ -96,6 +100,7 @@ public Server getServer(Class protocol, Object instance, throw new YarnException(e); } + Class pbProtocol = service.getClass().getInterfaces()[0]; Method method = protoCache.get(protocol); if (method == null) { Class protoClazz = null; @@ -106,7 +111,8 @@ public Server getServer(Class protocol, Object instance, + getProtoClassName(protocol) + "]", e); } try { - method = protoClazz.getMethod("newReflectiveBlockingService", service.getClass().getInterfaces()[0]); + method = protoClazz.getMethod("newReflectiveBlockingService", + pbProtocol.getInterfaces()[0]); method.setAccessible(true); protoCache.putIfAbsent(protocol, method); } catch 
(NoSuchMethodException e) { @@ -115,7 +121,7 @@ public Server getServer(Class protocol, Object instance, } try { - return createServer(addr, conf, secretManager, numHandlers, + return createServer(pbProtocol, addr, conf, secretManager, numHandlers, (BlockingService)method.invoke(null, service)); } catch (InvocationTargetException e) { throw new YarnException(e); @@ -148,13 +154,15 @@ private String getPackageName(Class clazz) { return clazz.getPackage().getName(); } - private Server createServer(InetSocketAddress addr, Configuration conf, + private Server createServer(Class pbProtocol, InetSocketAddress addr, Configuration conf, SecretManager secretManager, int numHandlers, BlockingService blockingService) throws IOException { - RPC.setProtocolEngine(conf, BlockingService.class, ProtoOverHadoopRpcEngine.class); - Server server = RPC.getServer(BlockingService.class, blockingService, + RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class); + RPC.Server server = RPC.getServer(pbProtocol, blockingService, addr.getHostName(), addr.getPort(), numHandlers, false, conf, secretManager); + LOG.info("Adding protocol "+pbProtocol.getCanonicalName()+" to the server"); + server.addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, pbProtocol, blockingService); return server; } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java deleted file mode 100644 index ca65a27beb..0000000000 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/ProtoOverHadoopRpcEngine.java +++ /dev/null @@ -1,404 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.yarn.ipc; - -import java.io.Closeable; -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.net.InetSocketAddress; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import javax.net.SocketFactory; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.DataOutputOutputStream; -import org.apache.hadoop.io.ObjectWritable; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtocolMetaInfoPB; -import org.apache.hadoop.ipc.ProtocolProxy; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.RpcEngine; -import org.apache.hadoop.ipc.ClientCache; -import org.apache.hadoop.ipc.Client.ConnectionId; -import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.SecretManager; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.util.ProtoUtil; -import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl; -import org.apache.hadoop.yarn.ipc.RpcProtos.ProtoSpecificRpcRequest; -import org.apache.hadoop.yarn.ipc.RpcProtos.ProtoSpecificRpcResponse; - -import com.google.protobuf.BlockingService; -import com.google.protobuf.Descriptors.MethodDescriptor; -import com.google.protobuf.Message; -import com.google.protobuf.ServiceException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - - - -@InterfaceStability.Evolving -public class ProtoOverHadoopRpcEngine implements RpcEngine { - private static final Log LOG = LogFactory.getLog(RPC.class); - - private static final ClientCache CLIENTS=new ClientCache(); - - @Override - @SuppressWarnings("unchecked") - public ProtocolProxy getProxy(Class protocol, long clientVersion, - InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, - SocketFactory factory, int rpcTimeout) throws IOException { - return new ProtocolProxy(protocol, (T) Proxy.newProxyInstance(protocol - .getClassLoader(), new Class[] { protocol }, new Invoker(protocol, - addr, ticket, conf, factory, rpcTimeout)), false); - } - - @Override - public ProtocolProxy getProtocolMetaInfoProxy( - ConnectionId connId, Configuration conf, SocketFactory factory) - throws IOException { - Class protocol = ProtocolMetaInfoPB.class; - return new ProtocolProxy(protocol, - (ProtocolMetaInfoPB) Proxy.newProxyInstance(protocol.getClassLoader(), - new Class[] { protocol }, new Invoker(protocol, connId, conf, - factory)), false); - } - - private static class Invoker implements InvocationHandler, Closeable { - private Map returnTypes = new ConcurrentHashMap(); - private boolean isClosed = false; - private Client.ConnectionId remoteId; - private Client client; - - public Invoker(Class protocol, InetSocketAddress addr, - UserGroupInformation ticket, Configuration conf, SocketFactory factory, - int rpcTimeout) throws IOException { - this(protocol, Client.ConnectionId.getConnectionId(addr, protocol, - ticket, rpcTimeout, conf), conf, factory); - } - - public Invoker(Class protocol, Client.ConnectionId connId, - Configuration conf, SocketFactory factory) { - this.remoteId = connId; - this.client = CLIENTS.getClient(conf, 
factory, - ProtoSpecificResponseWritable.class); - } - - private ProtoSpecificRpcRequest constructRpcRequest(Method method, - Object[] params) throws ServiceException { - ProtoSpecificRpcRequest rpcRequest; - ProtoSpecificRpcRequest.Builder builder; - - builder = ProtoSpecificRpcRequest.newBuilder(); - builder.setMethodName(method.getName()); - - if (params.length != 2) { // RpcController + Message - throw new ServiceException("Too many parameters for request. Method: [" - + method.getName() + "]" + ", Expected: 2, Actual: " - + params.length); - } - if (params[1] == null) { - throw new ServiceException("null param while calling Method: [" - + method.getName() + "]"); - } - - Message param = (Message) params[1]; - builder.setRequestProto(param.toByteString()); - - rpcRequest = builder.build(); - return rpcRequest; - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws Throwable { - long startTime = 0; - if (LOG.isDebugEnabled()) { - startTime = System.currentTimeMillis(); - } - - ProtoSpecificRpcRequest rpcRequest = constructRpcRequest(method, args); - ProtoSpecificResponseWritable val = null; - try { - val = (ProtoSpecificResponseWritable) client.call( - new ProtoSpecificRequestWritable(rpcRequest), remoteId); - } catch (Exception e) { - throw new ServiceException(e); - } - - ProtoSpecificRpcResponse response = val.message; - - if (LOG.isDebugEnabled()) { - long callTime = System.currentTimeMillis() - startTime; - LOG.debug("Call: " + method.getName() + " " + callTime); - } - - if (response.hasIsError() && response.getIsError() == true) { - YarnRemoteExceptionPBImpl exception = new YarnRemoteExceptionPBImpl(response.getException()); - exception.fillInStackTrace(); - ServiceException se = new ServiceException(exception); - throw se; - } - - Message prototype = null; - try { - prototype = getReturnProtoType(method); - } catch (Exception e) { - throw new ServiceException(e); - } - Message actualReturnMessage = prototype.newBuilderForType() - .mergeFrom(response.getResponseProto()).build(); - return actualReturnMessage; - } - - @Override - public void close() throws IOException { - if (!isClosed) { - isClosed = true; - CLIENTS.stopClient(client); - } - } - - private Message getReturnProtoType(Method method) throws Exception { - if (returnTypes.containsKey(method.getName())) { - return returnTypes.get(method.getName()); - } else { - Class returnType = method.getReturnType(); - - Method newInstMethod = returnType.getMethod("getDefaultInstance"); - newInstMethod.setAccessible(true); - Message prototype = (Message) newInstMethod.invoke(null, - (Object[]) null); - returnTypes.put(method.getName(), prototype); - return prototype; - } - } - } - - /** - * Writable Wrapper for Protocol Buffer Requests - */ - private static class ProtoSpecificRequestWritable implements Writable { - ProtoSpecificRpcRequest message; - - @SuppressWarnings("unused") - public ProtoSpecificRequestWritable() { - } - - ProtoSpecificRequestWritable(ProtoSpecificRpcRequest message) { - this.message = message; - } - - @Override - public void write(DataOutput out) throws IOException { - ((Message)message).writeDelimitedTo( - DataOutputOutputStream.constructOutputStream(out)); - } - - @Override - public void readFields(DataInput in) throws IOException { - int length = ProtoUtil.readRawVarint32(in); - byte[] bytes = new byte[length]; - in.readFully(bytes); - message = ProtoSpecificRpcRequest.parseFrom(bytes); - } - } - - /** - * Writable Wrapper for Protocol Buffer Responses - */ - public 
static class ProtoSpecificResponseWritable implements Writable { - ProtoSpecificRpcResponse message; - - public ProtoSpecificResponseWritable() { - } - - public ProtoSpecificResponseWritable(ProtoSpecificRpcResponse message) { - this.message = message; - } - - @Override - public void write(DataOutput out) throws IOException { - ((Message)message).writeDelimitedTo( - DataOutputOutputStream.constructOutputStream(out)); - } - - @Override - public void readFields(DataInput in) throws IOException { - int length = ProtoUtil.readRawVarint32(in); - byte[] bytes = new byte[length]; - in.readFully(bytes); - message = ProtoSpecificRpcResponse.parseFrom(bytes); - } - } - - @Override - public Object[] call(Method method, Object[][] params, - InetSocketAddress[] addrs, UserGroupInformation ticket, Configuration conf) - throws IOException, InterruptedException { - throw new UnsupportedOperationException(); - } - - // for unit testing only - @InterfaceAudience.Private - @InterfaceStability.Unstable - static Client getClient(Configuration conf) { - return CLIENTS.getClient(conf, SocketFactory.getDefault(), - ProtoSpecificResponseWritable.class); - } - - public static class Server extends RPC.Server { - - private BlockingService service; - private boolean verbose; -// -// /** -// * Construct an RPC server. -// * -// * @param instance -// * the instance whose methods will be called -// * @param conf -// * the configuration to use -// * @param bindAddress -// * the address to bind on to listen for connection -// * @param port -// * the port to listen for connections on -// */ -// public Server(Object instance, Configuration conf, String bindAddress, -// int port) throws IOException { -// this(instance, conf, bindAddress, port, 1, false, null); -// } - - private static String classNameBase(String className) { - String[] names = className.split("\\.", -1); - if (names == null || names.length == 0) { - return className; - } - return names[names.length - 1]; - } - - /** - * Construct an RPC server. 
- * - * @param instance - * the instance whose methods will be called - * @param conf - * the configuration to use - * @param bindAddress - * the address to bind on to listen for connection - * @param port - * the port to listen for connections on - * @param numHandlers - * the number of method handler threads to run - * @param verbose - * whether each call should be logged - */ - public Server(Object instance, Configuration conf, String bindAddress, - int port, int numHandlers, int numReaders, - int queueSizePerHandler, boolean verbose, - SecretManager secretManager) - throws IOException { - super(bindAddress, port, ProtoSpecificRequestWritable.class, numHandlers, - numReaders, queueSizePerHandler, conf, classNameBase(instance.getClass().getName()), secretManager); - this.service = (BlockingService) instance; - this.verbose = verbose; - } - - @Override - public Writable call(RpcKind rpcKind, String protocol, - Writable writableRequest, long receiveTime) throws IOException { - ProtoSpecificRequestWritable request = (ProtoSpecificRequestWritable) writableRequest; - ProtoSpecificRpcRequest rpcRequest = request.message; - String methodName = rpcRequest.getMethodName(); - if (verbose) { - log("Call: protocol=" + protocol + ", method=" - + methodName); - } - MethodDescriptor methodDescriptor = service.getDescriptorForType() - .findMethodByName(methodName); - if (methodDescriptor == null) { - String msg = "Unknown method " + methodName + " called on " - + protocol + " protocol."; - LOG.warn(msg); - return handleException(new IOException(msg)); - } - Message prototype = service.getRequestPrototype(methodDescriptor); - Message param = prototype.newBuilderForType() - .mergeFrom(rpcRequest.getRequestProto()).build(); - Message result; - try { - result = service.callBlockingMethod(methodDescriptor, null, param); - } catch (ServiceException e) { - e.printStackTrace(); - return handleException(e); - } catch (Exception e) { - return handleException(e); - } - - ProtoSpecificRpcResponse response = constructProtoSpecificRpcSuccessResponse(result); - return new ProtoSpecificResponseWritable(response); - } - - private ProtoSpecificResponseWritable handleException(Throwable e) { - ProtoSpecificRpcResponse.Builder builder = ProtoSpecificRpcResponse - .newBuilder(); - builder.setIsError(true); - if (e.getCause() instanceof YarnRemoteExceptionPBImpl) { - builder.setException(((YarnRemoteExceptionPBImpl) e.getCause()) - .getProto()); - } else { - builder.setException(new YarnRemoteExceptionPBImpl(e).getProto()); - } - ProtoSpecificRpcResponse response = builder.build(); - return new ProtoSpecificResponseWritable(response); - } - - private ProtoSpecificRpcResponse constructProtoSpecificRpcSuccessResponse( - Message message) { - ProtoSpecificRpcResponse res = ProtoSpecificRpcResponse.newBuilder() - .setResponseProto(message.toByteString()).build(); - return res; - } - } - - private static void log(String value) { - if (value != null && value.length() > 55) - value = value.substring(0, 55) + "..."; - LOG.info(value); - } - - @Override - public RPC.Server getServer(Class protocol, Object instance, - String bindAddress, int port, int numHandlers,int numReaders, - int queueSizePerHandler, boolean verbose, - Configuration conf, SecretManager secretManager) - throws IOException { - return new Server(instance, conf, bindAddress, port, numHandlers, numReaders, queueSizePerHandler, - verbose, secretManager); - } -} diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java
index aaf5ff0be7..c7112e3c01 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerManagerSecurityInfo.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.yarn.proto.ContainerManager;
+import org.apache.hadoop.yarn.api.ContainerManagerPB;
 
 public class ContainerManagerSecurityInfo extends SecurityInfo {
 
@@ -38,7 +38,7 @@ public KerberosInfo getKerberosInfo(Class protocol, Configuration conf) {
   @Override
   public TokenInfo getTokenInfo(Class protocol, Configuration conf) {
     if (!protocol
-        .equals(ContainerManager.ContainerManagerService.BlockingInterface.class)) {
+        .equals(ContainerManagerPB.class)) {
       return null;
     }
     return new TokenInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java
index 9f63b5f1aa..583e2f46e1 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/SchedulerSecurityInfo.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.yarn.proto.AMRMProtocol;
+import org.apache.hadoop.yarn.api.AMRMProtocolPB;
 
 public class SchedulerSecurityInfo extends SecurityInfo {
 
@@ -37,7 +37,7 @@ public KerberosInfo getKerberosInfo(Class protocol, Configuration conf) {
   @Override
   public TokenInfo getTokenInfo(Class protocol, Configuration conf) {
-    if (!protocol.equals(AMRMProtocol.AMRMProtocolService.BlockingInterface.class)) {
+    if (!protocol.equals(AMRMProtocolPB.class)) {
       return null;
     }
     return new TokenInfo() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java
index 0a21c902b5..0e1e6781a5 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientRMSecurityInfo.java
@@ -26,15 +26,15 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.yarn.api.ClientRMProtocolPB;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.proto.ClientRMProtocol;
 
 public class ClientRMSecurityInfo extends
SecurityInfo { @Override public KerberosInfo getKerberosInfo(Class protocol, Configuration conf) { if (!protocol - .equals(ClientRMProtocol.ClientRMProtocolService.BlockingInterface.class)) { + .equals(ClientRMProtocolPB.class)) { return null; } return new KerberosInfo() { @@ -59,7 +59,7 @@ public String clientPrincipal() { @Override public TokenInfo getTokenInfo(Class protocol, Configuration conf) { if (!protocol - .equals(ClientRMProtocol.ClientRMProtocolService.BlockingInterface.class)) { + .equals(ClientRMProtocolPB.class)) { return null; } return new TokenInfo() { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/proxy/.keep b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/proxy/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 0a419a4488..686cb49210 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -477,8 +477,7 @@ The address for the web proxy as HOST:PORT, if this is not - given or if it matches yarn.resourcemanager.address then the proxy will - run as part of the RM + given then the proxy will run as part of the RM yarn.web-proxy.address diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index e007ad6fc6..2c127cc6a2 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -22,11 +22,14 @@ import junit.framework.Assert; -import org.apache.hadoop.ipc.Server; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.ContainerManager; +import org.apache.hadoop.yarn.api.ContainerManagerPB; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; @@ -84,6 +87,8 @@ public void testUnknownCall() { "Unknown method getNewApplication called on.*" + "org.apache.hadoop.yarn.proto.ClientRMProtocol" + "\\$ClientRMProtocolService\\$BlockingInterface protocol.")); + } catch (Exception e) { + e.printStackTrace(); } } @@ -101,6 +106,7 @@ private void test(String rpcClass) throws Exception { Server server = rpc.getServer(ContainerManager.class, new DummyContainerManager(), addr, conf, null, 1); server.start(); + RPC.setProtocolEngine(conf, ContainerManagerPB.class, ProtobufRpcEngine.class); ContainerManager proxy = (ContainerManager) rpc.getProxy(ContainerManager.class, NetUtils.createSocketAddr("localhost:" + server.getPort()), conf); @@ -144,11 +150,11 @@ private void test(String rpcClass) throws Exception { proxy.stopContainer(stopRequest); } catch (YarnRemoteException e) { exception = true; - 
System.err.println(e.getMessage()); - System.err.println(e.getCause().getMessage()); - Assert.assertTrue(EXCEPTION_MSG.equals(e.getMessage())); - Assert.assertTrue(EXCEPTION_CAUSE.equals(e.getCause().getMessage())); + Assert.assertTrue(e.getMessage().contains(EXCEPTION_MSG)); + Assert.assertTrue(e.getMessage().contains(EXCEPTION_CAUSE)); System.out.println("Test Exception is " + RPCUtil.toString(e)); + } catch (Exception ex) { + ex.printStackTrace(); } Assert.assertTrue(exception); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java index 1b23b77322..f4940398fe 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/RMNMSecurityInfoClass.java @@ -25,13 +25,13 @@ import org.apache.hadoop.security.SecurityInfo; import org.apache.hadoop.security.token.TokenInfo; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.proto.ResourceTracker; +import org.apache.hadoop.yarn.server.api.ResourceTrackerPB; public class RMNMSecurityInfoClass extends SecurityInfo { @Override public KerberosInfo getKerberosInfo(Class protocol, Configuration conf) { - if (!protocol.equals(ResourceTracker.ResourceTrackerService.BlockingInterface.class)) { + if (!protocol.equals(ResourceTrackerPB.class)) { return null; } return new KerberosInfo() { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTrackerPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTrackerPB.java new file mode 100644 index 0000000000..840976c805 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTrackerPB.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.yarn.server.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService;
+
+@ProtocolInfo(
+    protocolName = "org.apache.hadoop.yarn.server.api.ResourceTrackerPB",
+    protocolVersion = 1)
+public interface ResourceTrackerPB extends ResourceTrackerService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
index d2160d1977..88c3b0d524 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceTrackerPBClientImpl.java
@@ -19,17 +19,17 @@ package org.apache.hadoop.yarn.server.api.impl.pb.client;
 import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto;
 import org.apache.hadoop.yarn.server.api.ResourceTracker;
+import org.apache.hadoop.yarn.server.api.ResourceTrackerPB;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
@@ -43,12 +43,12 @@ public class ResourceTrackerPBClientImpl implements ResourceTracker {
 
-private ResourceTrackerService.BlockingInterface proxy;
+private ResourceTrackerPB proxy;
 
 public ResourceTrackerPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
-  RPC.setProtocolEngine(conf, ResourceTrackerService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
-  proxy = (ResourceTrackerService.BlockingInterface)RPC.getProxy(
-      ResourceTrackerService.BlockingInterface.class, clientVersion, addr, conf);
+  RPC.setProtocolEngine(conf, ResourceTrackerPB.class, ProtobufRpcEngine.class);
+  proxy = (ResourceTrackerPB)RPC.getProxy(
+      ResourceTrackerPB.class, clientVersion, addr, conf);
 }
 
 @Override
@@ -58,13 +58,7 @@ public RegisterNodeManagerResponse registerNodeManager(
   try {
     return new RegisterNodeManagerResponsePBImpl(proxy.registerNodeManager(null, requestProto));
   } catch (ServiceException e) {
-    if (e.getCause() instanceof YarnRemoteException) {
-      throw (YarnRemoteException)e.getCause();
-    } else if (e.getCause() instanceof UndeclaredThrowableException) {
-      throw (UndeclaredThrowableException)e.getCause();
-    } else {
-      throw new UndeclaredThrowableException(e);
-    }
+    throw
YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -75,13 +69,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) try { return new NodeHeartbeatResponsePBImpl(proxy.nodeHeartbeat(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java index d4d20bf59b..18c5dcb61b 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceTrackerPBServiceImpl.java @@ -19,12 +19,12 @@ package org.apache.hadoop.yarn.server.api.impl.pb.service; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.proto.ResourceTracker.ResourceTrackerService.BlockingInterface; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerRequestProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.RegisterNodeManagerResponseProto; import org.apache.hadoop.yarn.server.api.ResourceTracker; +import org.apache.hadoop.yarn.server.api.ResourceTrackerPB; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeHeartbeatRequestPBImpl; @@ -35,7 +35,7 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -public class ResourceTrackerPBServiceImpl implements BlockingInterface { +public class ResourceTrackerPBServiceImpl implements ResourceTrackerPB { private ResourceTracker real; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocolPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocolPB.java new file mode 100644 index 0000000000..4bfa9f22c9 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/LocalizationProtocolPB.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.LocalizationProtocol.LocalizationProtocolService;
+
+@ProtocolInfo(protocolName = "org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB",
+    protocolVersion = 1)
+public interface LocalizationProtocolPB extends LocalizationProtocolService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java
index 1cd981cfee..80b3f79869 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/client/LocalizationProtocolPBClientImpl.java
@@ -18,32 +18,31 @@ package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.client;
 import java.io.IOException;
-
-import java.lang.reflect.UndeclaredThrowableException;
 import java.net.InetSocketAddress;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.LocalizationProtocol.LocalizationProtocolService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
 import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
+import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB;
 import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
 import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
 import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerHeartbeatResponsePBImpl;
 import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.LocalizerStatusPBImpl;
-import static org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto;
 
 import com.google.protobuf.ServiceException;
 
 public class LocalizationProtocolPBClientImpl implements LocalizationProtocol {
 
-  private LocalizationProtocolService.BlockingInterface proxy;
+  private LocalizationProtocolPB proxy;
 
   public LocalizationProtocolPBClientImpl(long clientVersion, InetSocketAddress addr,
Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, LocalizationProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class); - proxy = (LocalizationProtocolService.BlockingInterface)RPC.getProxy( - LocalizationProtocolService.BlockingInterface.class, clientVersion, addr, conf); + RPC.setProtocolEngine(conf, LocalizationProtocolPB.class, ProtobufRpcEngine.class); + proxy = (LocalizationProtocolPB)RPC.getProxy( + LocalizationProtocolPB.class, clientVersion, addr, conf); } @Override @@ -54,13 +53,7 @@ public LocalizerHeartbeatResponse heartbeat(LocalizerStatus status) return new LocalizerHeartbeatResponsePBImpl( proxy.heartbeat(null, statusProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java index d69a4f95c3..31111d30f3 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/service/LocalizationProtocolPBServiceImpl.java @@ -24,13 +24,13 @@ import com.google.protobuf.ServiceException; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.proto.LocalizationProtocol.LocalizationProtocolService.BlockingInterface; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerHeartbeatResponseProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerServiceProtos.LocalizerStatusProto; import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol; +import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB; import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse; -public class LocalizationProtocolPBServiceImpl implements BlockingInterface { +public class LocalizationProtocolPBServiceImpl implements LocalizationProtocolPB { private LocalizationProtocol real; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java index 050b9922a3..04fec51241 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java +++ 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/security/LocalizerSecurityInfo.java @@ -26,7 +26,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenInfo; import org.apache.hadoop.security.token.TokenSelector; -import org.apache.hadoop.yarn.proto.LocalizationProtocol; +import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB; public class LocalizerSecurityInfo extends SecurityInfo { @@ -38,7 +38,7 @@ public KerberosInfo getKerberosInfo(Class protocol, Configuration conf) { @Override public TokenInfo getTokenInfo(Class protocol, Configuration conf) { if (!protocol - .equals(LocalizationProtocol.LocalizationProtocolService.BlockingInterface.class)) { + .equals(LocalizationProtocolPB.class)) { return null; } return new TokenInfo() { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java index 0f818bd3b2..11c470edf8 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java @@ -21,9 +21,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; -import org.apache.hadoop.yarn.proto.ContainerManager; +import org.apache.hadoop.yarn.api.ContainerManagerPB; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.proto.LocalizationProtocol; +import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB; /** * {@link PolicyProvider} for YARN NodeManager protocols. @@ -36,9 +36,9 @@ public class NMPolicyProvider extends PolicyProvider { new Service[] { new Service( YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGER, - ContainerManager.ContainerManagerService.BlockingInterface.class), + ContainerManagerPB.class), new Service(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER, - LocalizationProtocol.LocalizationProtocolService.BlockingInterface.class) + LocalizationProtocolPB.class) }; @Override diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java new file mode 100644 index 0000000000..551189463f --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/RMAdminProtocolPB.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.api;
+
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService;
+
+@ProtocolInfo(
+    protocolName = "org.apache.hadoop.yarn.server.nodemanager.api.RMAdminProtocolPB",
+    protocolVersion = 1)
+public interface RMAdminProtocolPB extends RMAdminProtocolService.BlockingInterface {
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
index cf2ce894ee..80df1b9c8c 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/client/RMAdminProtocolPBClientImpl.java
@@ -23,10 +23,10 @@ import java.net.InetSocketAddress;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
-import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
-import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService;
+import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
 import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol;
+import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB;
 import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesRequest;
@@ -64,14 +65,14 @@ public class RMAdminProtocolPBClientImpl implements RMAdminProtocol {
 
-  private
RMAdminProtocolService.BlockingInterface proxy; + private RMAdminProtocolPB proxy; public RMAdminProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { - RPC.setProtocolEngine(conf, RMAdminProtocolService.BlockingInterface.class, - ProtoOverHadoopRpcEngine.class); - proxy = (RMAdminProtocolService.BlockingInterface)RPC.getProxy( - RMAdminProtocolService.BlockingInterface.class, clientVersion, addr, conf); + RPC.setProtocolEngine(conf, RMAdminProtocolPB.class, + ProtobufRpcEngine.class); + proxy = (RMAdminProtocolPB)RPC.getProxy( + RMAdminProtocolPB.class, clientVersion, addr, conf); } @Override @@ -83,13 +84,7 @@ public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) return new RefreshQueuesResponsePBImpl( proxy.refreshQueues(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -102,13 +97,7 @@ public RefreshNodesResponse refreshNodes(RefreshNodesRequest request) return new RefreshNodesResponsePBImpl( proxy.refreshNodes(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -122,13 +111,7 @@ public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfigu return new RefreshSuperUserGroupsConfigurationResponsePBImpl( proxy.refreshSuperUserGroupsConfiguration(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -141,13 +124,7 @@ public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( return new RefreshUserToGroupsMappingsResponsePBImpl( proxy.refreshUserToGroupsMappings(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -160,13 +137,7 @@ public RefreshAdminAclsResponse refreshAdminAcls( return new RefreshAdminAclsResponsePBImpl( proxy.refreshAdminAcls(null, requestProto)); } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); } } @@ -175,18 +146,12 @@ public RefreshServiceAclsResponse refreshServiceAcls( 
RefreshServiceAclsRequest request) throws YarnRemoteException { RefreshServiceAclsRequestProto requestProto = ((RefreshServiceAclsRequestPBImpl)request).getProto(); - try { - return new RefreshServiceAclsResponsePBImpl( - proxy.refreshServiceAcls(null, requestProto)); - } catch (ServiceException e) { - if (e.getCause() instanceof YarnRemoteException) { - throw (YarnRemoteException)e.getCause(); - } else if (e.getCause() instanceof UndeclaredThrowableException) { - throw (UndeclaredThrowableException)e.getCause(); - } else { - throw new UndeclaredThrowableException(e); - } - } + try { + return new RefreshServiceAclsResponsePBImpl(proxy.refreshServiceAcls( + null, requestProto)); + } catch (ServiceException e) { + throw YarnRemoteExceptionPBImpl.unwrapAndThrowException(e); + } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java index f6b6760b53..948e86ee8f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/api/impl/pb/service/RMAdminProtocolPBServiceImpl.java @@ -19,11 +19,11 @@ package org.apache.hadoop.yarn.server.resourcemanager.api.impl.pb.service; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.proto.RMAdminProtocol.RMAdminProtocolService.BlockingInterface; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*; import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocol; +import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB; import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshAdminAclsResponse; import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshNodesResponse; import org.apache.hadoop.yarn.server.resourcemanager.api.protocolrecords.RefreshQueuesResponse; @@ -46,7 +46,7 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -public class RMAdminProtocolPBServiceImpl implements BlockingInterface { +public class RMAdminProtocolPBServiceImpl implements RMAdminProtocolPB { private RMAdminProtocol real; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java index 48eda6930a..275da39334 100644 --- 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/admin/AdminSecurityInfo.java @@ -25,13 +25,13 @@ import org.apache.hadoop.security.SecurityInfo; import org.apache.hadoop.security.token.TokenInfo; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.proto.RMAdminProtocol; +import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB; public class AdminSecurityInfo extends SecurityInfo { @Override public KerberosInfo getKerberosInfo(Class protocol, Configuration conf) { - if (!protocol.equals(RMAdminProtocol.RMAdminProtocolService.BlockingInterface.class)) { + if (!protocol.equals(RMAdminProtocolPB.class)) { return null; } return new KerberosInfo() { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java index 6fe2c1912e..ba58f3e0d3 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/authorize/RMPolicyProvider.java @@ -21,12 +21,12 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; +import org.apache.hadoop.yarn.api.AMRMProtocolPB; +import org.apache.hadoop.yarn.api.ClientRMProtocolPB; +import org.apache.hadoop.yarn.api.ContainerManagerPB; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.proto.ContainerManager; -import org.apache.hadoop.yarn.proto.ResourceTracker; -import org.apache.hadoop.yarn.proto.RMAdminProtocol; -import org.apache.hadoop.yarn.proto.ClientRMProtocol; -import org.apache.hadoop.yarn.proto.AMRMProtocol; +import org.apache.hadoop.yarn.server.api.ResourceTrackerPB; +import org.apache.hadoop.yarn.server.resourcemanager.api.RMAdminProtocolPB; /** * {@link PolicyProvider} for YARN ResourceManager protocols. 
@@ -39,19 +39,19 @@ public class RMPolicyProvider extends PolicyProvider { new Service[] { new Service( YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER, - ResourceTracker.ResourceTrackerService.BlockingInterface.class), + ResourceTrackerPB.class), new Service( YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CLIENT_RESOURCEMANAGER, - ClientRMProtocol.ClientRMProtocolService.BlockingInterface.class), + ClientRMProtocolPB.class), new Service( YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_RESOURCEMANAGER, - AMRMProtocol.AMRMProtocolService.BlockingInterface.class), + AMRMProtocolPB.class), new Service( YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_ADMIN, - RMAdminProtocol.RMAdminProtocolService.BlockingInterface.class), + RMAdminProtocolPB.class), new Service( YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGER, - ContainerManager.ContainerManagerService.BlockingInterface.class), + ContainerManagerPB.class), }; @Override diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java index d7868d5199..5ac5deef33 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAMAuthorization.java @@ -235,10 +235,11 @@ public AMRMProtocol run() { client.registerApplicationMaster(request); Assert.fail("Should fail with authorization error"); } catch (YarnRemoteException e) { - Assert.assertEquals("Unauthorized request from ApplicationMaster. " - + "Expected ApplicationAttemptID: " - + applicationAttemptId.toString() + " Found: " - + otherAppAttemptId.toString(), e.getMessage()); + Assert.assertTrue(e.getMessage().contains( + "Unauthorized request from ApplicationMaster. 
" + + "Expected ApplicationAttemptID: " + + applicationAttemptId.toString() + " Found: " + + otherAppAttemptId.toString())); } finally { rm.stop(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java index d86673c6b3..ea27be32da 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java @@ -323,8 +323,10 @@ private void verifyEnemyAccess() throws Exception { Assert.fail("App killing by the enemy should fail!!"); } catch (YarnRemoteException e) { LOG.info("Got exception while killing app as the enemy", e); - Assert.assertEquals("User enemy cannot perform operation MODIFY_APP on " - + applicationId, e.getMessage()); + Assert + .assertTrue(e.getMessage().contains( + "User enemy cannot perform operation MODIFY_APP on " + + applicationId)); } rmClient.forceKillApplication(finishAppRequest); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java index 765234665f..226bccded5 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java @@ -247,10 +247,12 @@ public Void run() { Assert.assertEquals( java.lang.reflect.UndeclaredThrowableException.class .getCanonicalName(), e.getClass().getCanonicalName()); - Assert.assertEquals( - "DIGEST-MD5: digest response format violation. " - + "Mismatched response.", e.getCause().getCause() - .getMessage()); + Assert.assertTrue(e + .getCause() + .getMessage() + .contains( + "DIGEST-MD5: digest response format violation. " + + "Mismatched response.")); } return null; } @@ -468,9 +470,10 @@ void callWithIllegalContainerID(ContainerManager client, + "access is expected to fail."); } catch (YarnRemoteException e) { LOG.info("Got exception : ", e); - Assert.assertEquals("Unauthorized request to start container. " - + "\nExpected containerId: " + tokenId.getContainerID() - + " Found: " + newContainerId.toString(), e.getMessage()); + Assert.assertTrue(e.getMessage().contains( + "Unauthorized request to start container. 
" + + "\nExpected containerId: " + tokenId.getContainerID() + + " Found: " + newContainerId.toString())); } } diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java index c90e17c6bf..fad34ab7df 100644 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java +++ b/hadoop-mapreduce-project/src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java @@ -156,10 +156,11 @@ public void map(LongWritable key, BytesWritable value, Context context) FSDataOutputStream dos = FileSystem.create(fs, path, new FsPermission((short)0755)); - for (long bytes = key.get(); bytes > 0; bytes -= val.getLength()) { + int size = 0; + for (long bytes = key.get(); bytes > 0; bytes -= size) { r.nextBytes(val.getBytes()); - val.setSize((int)Math.min(val.getLength(), bytes)); - dos.write(val.getBytes(), 0, val.getLength());// Write to distCache file + size = (int)Math.min(val.getLength(), bytes); + dos.write(val.getBytes(), 0, size);// Write to distCache file } dos.close(); } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index b37b0bad95..69dd16bf92 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -283,7 +283,7 @@ commons-net commons-net - 1.4.1 + 3.1 javax.servlet