Merge trunk into auto-failover branch
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3042@1308260 13f79535-47bb-0310-9956-ffa450edef68
commit 2fd05aa597
@@ -231,10 +231,20 @@ Release 2.0.0 - UNRELEASED
HADOOP-8216. Address log4j.properties inconsistencies btw main and
template dirs. (Patrick Hunt via eli)

HADOOP-8149. Cap space usage of default log4j rolling policy.
(Patrick Hunt via eli)

HADOOP-8211. Update commons-net version to 3.1. (eli)

HADOOP-8236. haadmin should have configurable timeouts for failover
commands. (todd)

OPTIMIZATIONS

BUG FIXES

HADOOP-8199. Fix issues in start-all.sh and stop-all.sh (Devaraj K via umamahesh)

HADOOP-7635. RetryInvocationHandler should release underlying resources on
close. (atm)

@@ -295,6 +305,9 @@ Release 2.0.0 - UNRELEASED
HADOOP-8218. RPC.closeProxy shouldn't throw error when closing a mock
(todd)

HADOOP-8238. NetUtils#getHostNameOfIP blows up if given ip:port
string w/o port. (eli)

BREAKDOWN OF HADOOP-7454 SUBTASKS

HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -445,6 +458,8 @@ Release 0.23.2 - UNRELEASED
HADOOP-8088. User-group mapping cache incorrectly does negative caching on
transient failures (Kihwal Lee via bobby)

HADOOP-8208. Disallow self failover. (eli)

Release 0.23.1 - 2012-02-17

INCOMPATIBLE CHANGES
@@ -107,8 +107,8 @@ fi

# some variables
export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
export HADOOP_ROOT_LOGGER="INFO,DRFA"
export HADOOP_SECURITY_LOGGER="INFO,DRFAS"
export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
@@ -18,7 +18,7 @@

# Start all hadoop daemons. Run this on master node.

echo "This script is Deprecated. Instead use start-dfs.sh and start-mapred.sh"
echo "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh"

bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
@@ -28,6 +28,11 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh

# start hdfs daemons if hdfs is present
if [ -f "${HADOOP_HDFS_HOME}"/bin/start-dfs.sh ]; then
"${HADOOP_HDFS_HOME}"/bin/start-dfs.sh --config $HADOOP_CONF_DIR
if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then
"${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR
fi

# start yarn daemons if yarn is present
if [ -f "${YARN_HOME}"/sbin/start-yarn.sh ]; then
"${YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
fi
@@ -18,7 +18,7 @@

# Stop all hadoop daemons. Run this on master node.

echo "This script is Deprecated. Instead use stop-dfs.sh and stop-mapred.sh"
echo "This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh"

bin=`dirname "${BASH_SOURCE-$0}"`
bin=`cd "$bin"; pwd`
@@ -28,6 +28,11 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh

# stop hdfs daemons if hdfs is present
if [ -f "${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh ]; then
"${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh --config $HADOOP_CONF_DIR
if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh ]; then
"${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh --config $HADOOP_CONF_DIR
fi

# stop yarn daemons if yarn is present
if [ -f "${YARN_HOME}"/sbin/stop-yarn.sh ]; then
"${YARN_HOME}"/sbin/stop-yarn.sh --config $HADOOP_CONF_DIR
fi
@@ -21,7 +21,6 @@ hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log


# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter

@@ -31,6 +30,25 @@ log4j.threshold=ALL
# Null Appender
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender

#
# Rolling File Appender - cap space usage at 5gb.
#
hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}

log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}

log4j.appender.RFA.layout=org.apache.log4j.PatternLayout

# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n


#
# Daily Rolling File Appender
#
@@ -85,54 +103,55 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#Security appender
#
hadoop.security.logger=INFO,console
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}

#
# Daily Rolling Security appender
#
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd


#
# hdfs audit logging
#
hdfs.audit.logger=INFO,console
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}

#
# mapred audit logging
#
mapred.audit.logger=INFO,console
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd

#
# Rolling File Appender
#

#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}

# Logfile size and 30-day backups
#log4j.appender.RFA.MaxFileSize=1MB
#log4j.appender.RFA.MaxBackupIndex=30

#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}

# Custom Logging levels
@@ -153,16 +172,19 @@ log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
# Job Summary Appender
#
# Use following logger to send summary to separate file defined by
# hadoop.mapreduce.jobsummary.log.file rolled daily:
# hadoop.mapreduce.jobsummary.log.file :
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.appender.JSA.DatePattern=.yyyy-MM-dd
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false

@@ -174,7 +196,7 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
# Set the ResourceManager summary log level and appender
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY

# Appender for ResourceManager Application Summary Log - rolled daily
# Appender for ResourceManager Application Summary Log
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
@@ -182,8 +204,9 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false

#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
#log4j.appender.RMSUMMARY.MaxFileSize=256MB
#log4j.appender.RMSUMMARY.MaxBackupIndex=20
#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd
@@ -145,5 +145,21 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
public static final String HA_HM_RPC_TIMEOUT_KEY =
"ha.health-monitor.rpc-timeout.ms";
public static final int HA_HM_RPC_TIMEOUT_DEFAULT = 45000;

/* Timeout that the FC waits for the new active to become active */
public static final String HA_FC_NEW_ACTIVE_TIMEOUT_KEY =
"ha.failover-controller.new-active.rpc-timeout.ms";
public static final int HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT = 60000;

/* Timeout that the FC waits for the old active to go to standby */
public static final String HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY =
"ha.failover-controller.graceful-fence.rpc-timeout.ms";
public static final int HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT = 5000;

/* Timeout that the CLI (manual) FC waits for monitorHealth, getServiceState */
public static final String HA_FC_CLI_CHECK_TIMEOUT_KEY =
"ha.failover-controller.cli-check.rpc-timeout.ms";
public static final int HA_FC_CLI_CHECK_TIMEOUT_DEFAULT = 20000;

}
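For context, a minimal sketch (not part of this patch; the class name below is hypothetical) of how these new failover timeout keys might be read from a standard Hadoop Configuration, falling back to the defaults declared above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

// Hypothetical illustration of reading the HADOOP-8236 timeout keys.
public class FailoverTimeoutExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Each value falls back to the *_DEFAULT constant if unset in *-site.xml.
    int newActiveTimeout = conf.getInt(
        CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_KEY,
        CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT);
    int gracefulFenceTimeout = conf.getInt(
        CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY,
        CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT);
    System.out.println("new-active rpc timeout (ms): " + newActiveTimeout);
    System.out.println("graceful-fence rpc timeout (ms): " + gracefulFenceTimeout);
  }
}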
@@ -25,6 +25,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ipc.RPC;

@@ -42,7 +43,22 @@ public class FailoverController {

private static final Log LOG = LogFactory.getLog(FailoverController.class);

private static final int GRACEFUL_FENCE_TIMEOUT = 5000;
private final int gracefulFenceTimeout;
private final int rpcTimeoutToNewActive;

private final Configuration conf;


public FailoverController(Configuration conf) {
this.conf = conf;

this.gracefulFenceTimeout = conf.getInt(
CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY,
CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT);
this.rpcTimeoutToNewActive = conf.getInt(
CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_KEY,
CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT);
}

/**
* Perform pre-failover checks on the given service we plan to
@@ -54,18 +70,25 @@ public class FailoverController {
* allow it to become active, eg because it triggers a log roll
* so the standby can learn about new blocks and leave safemode.
*
* @param from currently active service
* @param target service to make active
* @param forceActive ignore toSvc if it reports that it is not ready
* @throws FailoverFailedException if we should avoid failover
*/
private static void preFailoverChecks(HAServiceTarget target,
private void preFailoverChecks(HAServiceTarget from,
HAServiceTarget target,
boolean forceActive)
throws FailoverFailedException {
HAServiceStatus toSvcStatus;
HAServiceProtocol toSvc;

if (from.getAddress().equals(target.getAddress())) {
throw new FailoverFailedException(
"Can't failover a service to itself");
}

try {
toSvc = target.getProxy();
toSvc = target.getProxy(conf, rpcTimeoutToNewActive);
toSvcStatus = toSvc.getServiceStatus();
} catch (IOException e) {
String msg = "Unable to get service state for " + target;
@@ -108,11 +131,10 @@ private static void preFailoverChecks(HAServiceTarget target,
* and no retries. Its only purpose is to avoid fencing a node that
* has already restarted.
*/
static boolean tryGracefulFence(Configuration conf,
HAServiceTarget svc) {
boolean tryGracefulFence(HAServiceTarget svc) {
HAServiceProtocol proxy = null;
try {
proxy = svc.getProxy(conf, GRACEFUL_FENCE_TIMEOUT);
proxy = svc.getProxy(conf, gracefulFenceTimeout);
proxy.transitionToStandby();
return true;
} catch (ServiceFailedException sfe) {
@@ -139,19 +161,19 @@ static boolean tryGracefulFence(Configuration conf,
* @param forceActive try to make toSvc active even if it is not ready
* @throws FailoverFailedException if the failover fails
*/
public static void failover(HAServiceTarget fromSvc,
public void failover(HAServiceTarget fromSvc,
HAServiceTarget toSvc,
boolean forceFence,
boolean forceActive)
throws FailoverFailedException {
Preconditions.checkArgument(fromSvc.getFencer() != null,
"failover requires a fencer");
preFailoverChecks(toSvc, forceActive);
preFailoverChecks(fromSvc, toSvc, forceActive);

// Try to make fromSvc standby
boolean tryFence = true;

if (tryGracefulFence(new Configuration(), fromSvc)) {
if (tryGracefulFence(fromSvc)) {
tryFence = forceFence;
}

@@ -167,7 +189,8 @@ public static void failover(HAServiceTarget fromSvc,
boolean failed = false;
Throwable cause = null;
try {
HAServiceProtocolHelper.transitionToActive(toSvc.getProxy());
HAServiceProtocolHelper.transitionToActive(
toSvc.getProxy(conf, rpcTimeoutToNewActive));
} catch (ServiceFailedException sfe) {
LOG.error("Unable to make " + toSvc + " active (" +
sfe.getMessage() + "). Failing back.");
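As a rough, hedged sketch of the API shift above (FailoverController moves from static methods to a configured instance), a caller such as HAAdmin below might now drive a failover roughly like this; the target arguments are placeholders resolved elsewhere, and this class is illustrative, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.FailoverController;
import org.apache.hadoop.ha.FailoverFailedException;
import org.apache.hadoop.ha.HAServiceTarget;

// Hypothetical caller-side sketch of the instance-based API.
class FailoverDriverSketch {
  static void doFailover(HAServiceTarget fromSvc, HAServiceTarget toSvc,
      Configuration conf) {
    // Timeouts (graceful fence, new-active RPC) are read from conf.
    FailoverController fc = new FailoverController(conf);
    try {
      fc.failover(fromSvc, toSvc, false /* forceFence */, false /* forceActive */);
    } catch (FailoverFailedException ffe) {
      System.err.println("Failover failed: " + ffe.getLocalizedMessage());
    }
  }
}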
@@ -30,7 +30,9 @@
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

@@ -49,6 +51,8 @@ public abstract class HAAdmin extends Configured implements Tool {
private static final String FORCEACTIVE = "forceactive";
private static final Log LOG = LogFactory.getLog(HAAdmin.class);

private int rpcTimeoutForChecks = -1;

private static Map<String, UsageInfo> USAGE =
ImmutableMap.<String, UsageInfo>builder()
.put("-transitionToActive",
@@ -165,9 +169,10 @@ private int failover(final String[] argv)
HAServiceTarget fromNode = resolveTarget(args[0]);
HAServiceTarget toNode = resolveTarget(args[1]);

FailoverController fc = new FailoverController(getConf());

try {
FailoverController.failover(fromNode, toNode,
forceFence, forceActive);
fc.failover(fromNode, toNode, forceFence, forceActive);
out.println("Failover from "+args[0]+" to "+args[1]+" successful");
} catch (FailoverFailedException ffe) {
errOut.println("Failover failed: " + ffe.getLocalizedMessage());
@@ -184,7 +189,8 @@ private int checkHealth(final String[] argv)
return -1;
}

HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
getConf(), rpcTimeoutForChecks);
try {
HAServiceProtocolHelper.monitorHealth(proto);
} catch (HealthCheckFailedException e) {
@@ -202,7 +208,8 @@ private int getServiceState(final String[] argv)
return -1;
}

HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
getConf(), rpcTimeoutForChecks);
out.println(proto.getServiceStatus().getState());
return 0;
}
@@ -215,6 +222,16 @@ protected String getServiceAddr(String serviceId) {
return serviceId;
}

@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
rpcTimeoutForChecks = conf.getInt(
CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
}
}

@Override
public int run(String[] argv) throws Exception {
try {
@@ -330,8 +330,8 @@ public void fenceOldActive(byte[] data) {
HAServiceTarget target = dataToTarget(data);

LOG.info("Should fence: " + target);
boolean gracefulWorked =
FailoverController.tryGracefulFence(conf, target);
boolean gracefulWorked = new FailoverController(conf)
.tryGracefulFence(target);
if (gracefulWorked) {
// It's possible that it's in standby but just about to go into active,
// no? Is there some race here?
@@ -570,31 +570,29 @@ public static void verifyHostnames(String[] names) throws UnknownHostException {
}
}

private static final Pattern ipPattern = // Pattern for matching hostname to ip:port
Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:?\\d*");
private static final Pattern ipPortPattern = // Pattern for matching ip[:port]
Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d+)?");

/**
* Attempt to obtain the host name of a name specified by ip address.
* Check that the node name is an ip addr and if so, attempt to determine
* its host name. If the name is not an IP addr, or the actual name cannot
* be determined, return null.
* Attempt to obtain the host name of the given string which contains
* an IP address and an optional port.
*
* @return Host name or null
* @param ipPort string of form ip[:port]
* @return Host name or null if the name can not be determined
*/
public static String getHostNameOfIP(String ip) {
// If name is not an ip addr, don't bother looking it up
if(!ipPattern.matcher(ip).matches())
return null;

String hostname = "";
try {
String n = ip.substring(0, ip.indexOf(':'));
hostname = InetAddress.getByName(n).getHostName();
} catch (UnknownHostException e) {
public static String getHostNameOfIP(String ipPort) {
if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
return null;
}

return hostname;
try {
int colonIdx = ipPort.indexOf(':');
String ip = (-1 == colonIdx) ? ipPort
: ipPort.substring(0, ipPort.indexOf(':'));
return InetAddress.getByName(ip).getHostName();
} catch (UnknownHostException e) {
return null;
}
}

/**
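For illustration only (a hedged usage sketch, not part of the patch): after this change, getHostNameOfIP accepts an ip[:port] string and returns null rather than throwing when the port is missing or the input is not an IP.

import org.apache.hadoop.net.NetUtils;

// Hypothetical usage of the reworked NetUtils#getHostNameOfIP.
public class HostNameLookupExample {
  public static void main(String[] args) {
    // Port is optional; null is returned for non-IP input or a failed lookup.
    System.out.println(NetUtils.getHostNameOfIP("127.0.0.1"));      // e.g. "localhost"
    System.out.println(NetUtils.getHostNameOfIP("127.0.0.1:8020")); // port is ignored
    System.out.println(NetUtils.getHostNameOfIP("not-an-ip"));      // null
  }
}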
@@ -48,10 +48,10 @@ done
export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"

# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"

export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"

# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
@@ -86,8 +86,6 @@ Deprecated Properties
*---+---+
|dfs.socket.timeout | dfs.client.socket-timeout
*---+---+
|dfs.upgrade.permission | dfs.namenode.upgrade.permission
*---+---+
|dfs.write.packet.size | dfs.client-write-packet-size
*---+---+
|fs.checkpoint.dir | dfs.namenode.checkpoint.dir
@@ -25,11 +25,13 @@
import static org.mockito.Mockito.verify;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
import org.apache.hadoop.ha.TestNodeFencer.AlwaysFailFencer;
import static org.apache.hadoop.ha.TestNodeFencer.setupFencer;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.test.MockitoUtil;

import org.junit.Test;
import org.mockito.Mockito;
@@ -41,6 +43,8 @@ public class TestFailoverController {
private InetSocketAddress svc1Addr = new InetSocketAddress("svc1", 1234);
private InetSocketAddress svc2Addr = new InetSocketAddress("svc2", 5678);

private Configuration conf = new Configuration();

HAServiceStatus STATE_NOT_READY = new HAServiceStatus(HAServiceState.STANDBY)
.setNotReadyToBecomeActive("injected not ready");

@@ -51,13 +55,13 @@ public void testFailoverAndFailback() throws Exception {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

AlwaysSucceedFencer.fenceCalled = 0;
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);

AlwaysSucceedFencer.fenceCalled = 0;
FailoverController.failover(svc2, svc1, false, false);
doFailover(svc2, svc1, false, false);
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
@@ -69,7 +73,7 @@ public void testFailoverFromStandbyToStandby() throws Exception {
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@@ -81,7 +85,7 @@ public void testFailoverFromActiveToActive() throws Exception {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Can't failover to an already active service");
} catch (FailoverFailedException ffe) {
// Expected
@@ -102,7 +106,7 @@ public void testFailoverWithoutPermission() throws Exception {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Can't failover when access is denied");
} catch (FailoverFailedException ffe) {
assertTrue(ffe.getCause().getMessage().contains("Access denied"));
@@ -118,7 +122,7 @@ public void testFailoverToUnreadyService() throws Exception {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Can't failover to a service that's not ready");
} catch (FailoverFailedException ffe) {
// Expected
@@ -131,7 +135,7 @@ public void testFailoverToUnreadyService() throws Exception {
assertEquals(HAServiceState.STANDBY, svc2.state);

// Forcing it means we ignore readyToBecomeActive
FailoverController.failover(svc1, svc2, false, true);
doFailover(svc1, svc2, false, true);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@@ -145,7 +149,7 @@ public void testFailoverToUnhealthyServiceFailsAndFailsback() throws Exception {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Failover to unhealthy service");
} catch (FailoverFailedException ffe) {
// Expected
@@ -165,7 +169,7 @@ public void testFailoverFromFaultyServiceSucceeds() throws Exception {

AlwaysSucceedFencer.fenceCalled = 0;
try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
} catch (FailoverFailedException ffe) {
fail("Faulty active prevented failover");
}
@@ -188,7 +192,7 @@ public void testFailoverFromFaultyServiceFencingFailure() throws Exception {

AlwaysFailFencer.fenceCalled = 0;
try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Failed over even though fencing failed");
} catch (FailoverFailedException ffe) {
// Expected
@@ -208,7 +212,7 @@ public void testFencingFailureDuringFailover() throws Exception {

AlwaysFailFencer.fenceCalled = 0;
try {
FailoverController.failover(svc1, svc2, true, false);
doFailover(svc1, svc2, true, false);
fail("Failed over even though fencing requested and failed");
} catch (FailoverFailedException ffe) {
// Expected
@@ -232,15 +236,25 @@ public void testFailoverFromNonExistantServiceWithFencer() throws Exception {
.defaultAnswer(new ThrowsException(
new IOException("Could not connect to host")))
.extraInterfaces(Closeable.class));
Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy();
Mockito.doNothing().when((Closeable)errorThrowingProxy).close();

Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy(
Mockito.<Configuration>any(),
Mockito.anyInt());
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
} catch (FailoverFailedException ffe) {
fail("Non-existant active prevented failover");
}
// Verify that the proxy created to try to make it go to standby
// gracefully used the right rpc timeout
Mockito.verify(svc1).getProxy(
Mockito.<Configuration>any(),
Mockito.eq(
CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT));

// Don't check svc1 because we can't reach it, but that's OK, it's been fenced.
assertEquals(HAServiceState.ACTIVE, svc2.state);
@@ -256,7 +270,7 @@ public void testFailoverToNonExistantServiceFails() throws Exception {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Failed over to a non-existant standby");
} catch (FailoverFailedException ffe) {
// Expected
@@ -274,7 +288,7 @@ public void testFailoverToFaultyServiceFailsbackOK() throws Exception {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Failover to already active service");
} catch (FailoverFailedException ffe) {
// Expected
@@ -296,7 +310,7 @@ public void testWeDontFailbackIfActiveWasFenced() throws Exception {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

try {
FailoverController.failover(svc1, svc2, true, false);
doFailover(svc1, svc2, true, false);
fail("Failed over to service that won't transition to active");
} catch (FailoverFailedException ffe) {
// Expected
@@ -318,7 +332,7 @@ public void testWeFenceOnFailbackIfTransitionToActiveFails() throws Exception {
AlwaysSucceedFencer.fenceCalled = 0;

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Failed over to service that won't transition to active");
} catch (FailoverFailedException ffe) {
// Expected
@@ -342,7 +356,7 @@ public void testFailureToFenceOnFailbackFailsTheFailback() throws Exception {
AlwaysFailFencer.fenceCalled = 0;

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Failed over to service that won't transition to active");
} catch (FailoverFailedException ffe) {
// Expected
@@ -368,7 +382,7 @@ public void testFailbackToFaultyServiceFails() throws Exception {
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

try {
FailoverController.failover(svc1, svc2, false, false);
doFailover(svc1, svc2, false, false);
fail("Failover to already active service");
} catch (FailoverFailedException ffe) {
// Expected
@@ -377,4 +391,37 @@ public void testFailbackToFaultyServiceFails() throws Exception {
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
}

@Test
public void testSelfFailoverFails() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
AlwaysSucceedFencer.fenceCalled = 0;

try {
doFailover(svc1, svc1, false, false);
fail("Can't failover to yourself");
} catch (FailoverFailedException ffe) {
// Expected
}
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.ACTIVE, svc1.state);

try {
doFailover(svc2, svc2, false, false);
fail("Can't failover to yourself");
} catch (FailoverFailedException ffe) {
// Expected
}
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.STANDBY, svc2.state);
}

private void doFailover(HAServiceTarget tgt1, HAServiceTarget tgt2,
boolean forceFence, boolean forceActive) throws FailoverFailedException {
FailoverController fc = new FailoverController(conf);
fc.failover(tgt1, tgt2, forceFence, forceActive);
}

}
@@ -499,6 +499,18 @@ public void testCanonicalUriWithNoPortNoDefaultPort() {
assertEquals("scheme://host.a.b/path", uri.toString());
}

@Test
public void testGetHostNameOfIP() {
assertNull(NetUtils.getHostNameOfIP(null));
assertNull(NetUtils.getHostNameOfIP(""));
assertNull(NetUtils.getHostNameOfIP("crazytown"));
assertNull(NetUtils.getHostNameOfIP("127.0.0.1:")); // no port
assertNull(NetUtils.getHostNameOfIP("127.0.0.1:-1")); // bogus port
assertNull(NetUtils.getHostNameOfIP("127.0.0.1:A")); // bogus port
assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1"));
assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1:1"));
}

private <T> void assertBetterArrayEquals(T[] expect, T[]got) {
String expectStr = StringUtils.join(expect, ", ");
String gotStr = StringUtils.join(got, ", ");
@@ -55,8 +55,8 @@ if [ "${1}" = "stop" ]; then
fi

if [ "${HTTPFS_SILENT}" != "true" ]; then
${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@"
exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@"
else
${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null
exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null
fi
@@ -117,6 +117,12 @@ Release 2.0.0 - UNRELEASED

HDFS-2303. Unbundle jsvc. (Roman Shaposhnik and Mingjie Lai via eli)

HDFS-3137. Bump LAST_UPGRADABLE_LAYOUT_VERSION to -16. (eli)

HDFS-3138. Move DatanodeInfo#ipcPort to DatanodeID. (eli)

HDFS-3164. Move DatanodeInfo#hostName to DatanodeID. (eli)

NEW FEATURES

HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)
@@ -171,6 +177,8 @@ Release 2.0.0 - UNRELEASED
DistributedFileSystem to @InterfaceAudience.LimitedPrivate.
(harsh via szetszwo)

HDFS-3167. CLI-based driver for MiniDFSCluster. (Henry Robinson via atm)

IMPROVEMENTS

HDFS-2018. Move all journal stream management code into one place.
@@ -279,6 +287,15 @@ Release 2.0.0 - UNRELEASED

HDFS-3155. Clean up FSDataset implementation related code. (szetszwo)

HDFS-3158. LiveNodes member of NameNodeMXBean should list non-DFS used
space and capacity per DN. (atm)

HDFS-3172. dfs.upgrade.permission is dead code. (eli)

HDFS-3171. The DatanodeID "name" field is overloaded. (eli)

HDFS-3144. Refactor DatanodeID#getName by use. (eli)

OPTIMIZATIONS

HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -366,6 +383,15 @@ Release 2.0.0 - UNRELEASED

HDFS-3143. TestGetBlocks.testGetBlocks is failing. (Arpit Gupta via atm)

HDFS-3142. TestHDFSCLI.testAll is failing. (Brandon Li via atm)

HDFS-3070. HDFS balancer doesn't ensure that hdfs-site.xml is loaded. (atm)

HDFS-2995. start-dfs.sh should only start the 2NN for namenodes
with dfs.namenode.secondary.http-address configured. (eli)

HDFS-3174. Fix assert in TestPendingDataNodeMessages. (eli)

BREAKDOWN OF HDFS-1623 SUBTASKS

HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -713,6 +739,9 @@ Release 0.23.2 - UNRELEASED

HDFS-3104. Add tests for HADOOP-8175. (Daryn Sharp via szetszwo)

HDFS-3066. Cap space usage of default log4j rolling policy.
(Patrick Hunt via eli)

OPTIMIZATIONS

BUG FIXES
@@ -764,6 +793,9 @@ Release 0.23.2 - UNRELEASED

HDFS-3101. Cannot read empty file using WebHDFS. (szetszwo)

HDFS-3160. httpfs should exec catalina instead of forking it.
(Roman Shaposhnik via eli)

Release 0.23.1 - 2012-02-17

INCOMPATIBLE CHANGES
@@ -120,7 +120,7 @@ export CLASSPATH=$CLASSPATH

#turn security logger on the namenode
if [ $COMMAND = "namenode" ]; then
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,DRFAS}"
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS}"
else
HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
fi
@@ -76,11 +76,13 @@ fi

SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)

echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
if [ -n "$SECONDARY_NAMENODES" ]; then
echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"

"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$SECONDARY_NAMENODES" \
--script "$bin/hdfs" start secondarynamenode
fi

# eof
@@ -52,11 +52,13 @@ fi

SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)

echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
if [ -n "$SECONDARY_NAMENODES" ]; then
echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"

"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$SECONDARY_NAMENODES" \
--script "$bin/hdfs" stop secondarynamenode
fi

# eof
@@ -239,11 +239,6 @@ to the web server.</p>
<br />The name of the group of super-users.
</li>

<li><code>dfs.namenode.upgrade.permission = 0777</code>
<br />The choice of initial mode during upgrade. The <em>x</em> permission is <em>never</em> set for files.
For configuration files, the decimal value <em>511<sub>10</sub></em> may be used.
</li>

<li><code>fs.permissions.umask-mode = 022</code>
<br />The <code>umask</code> used when creating files and directories. For configuration files, the decimal
value <em>18<sub>10</sub></em> may be used.
@@ -240,7 +240,7 @@ private static synchronized LocalDatanodeInfo getLocalDatanodeInfo(int port) {
private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk,
DatanodeInfo node, Configuration conf, int timeout,
Token<BlockTokenIdentifier> token) throws IOException {
LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.ipcPort);
LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
BlockLocalPathInfo pathinfo = null;
ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node,
conf, timeout);
@@ -1340,7 +1340,8 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
//connect to a datanode
sock = socketFactory.createSocket();
NetUtils.connect(sock,
NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
NetUtils.createSocketAddr(datanodes[j].getXferAddr()),
timeout);
sock.setSoTimeout(timeout);

out = new DataOutputStream(
@@ -1349,7 +1350,7 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
in = new DataInputStream(NetUtils.getInputStream(sock));

if (LOG.isDebugEnabled()) {
LOG.debug("write to " + datanodes[j].getName() + ": "
LOG.debug("write to " + datanodes[j] + ": "
+ Op.BLOCK_CHECKSUM + ", block=" + block);
}
// get block MD5
@@ -1364,7 +1365,7 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
if (LOG.isDebugEnabled()) {
LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
+ "for file " + src + " for block " + block
+ " from datanode " + datanodes[j].getName()
+ " from datanode " + datanodes[j]
+ ". Will retry the block once.");
}
lastRetriedIndex = i;
@@ -1374,7 +1375,7 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
break;
} else {
throw new IOException("Bad response " + reply + " for block "
+ block + " from datanode " + datanodes[j].getName());
+ block + " from datanode " + datanodes[j]);
}
}

@@ -1409,12 +1410,10 @@ else if (bpc != bytesPerCRC) {
LOG.debug("set bytesPerCRC=" + bytesPerCRC
+ ", crcPerBlock=" + crcPerBlock);
}
LOG.debug("got reply from " + datanodes[j].getName()
+ ": md5=" + md5);
LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
}
} catch (IOException ie) {
LOG.warn("src=" + src + ", datanodes[" + j + "].getName()="
+ datanodes[j].getName(), ie);
LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
} finally {
IOUtils.closeStream(in);
IOUtils.closeStream(out);
@@ -107,8 +107,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final long DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600;
public static final String DFS_NAMENODE_CHECKPOINT_TXNS_KEY = "dfs.namenode.checkpoint.txns";
public static final long DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 40000;
public static final String DFS_NAMENODE_UPGRADE_PERMISSION_KEY = "dfs.namenode.upgrade.permission";
public static final int DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT = 00777;
public static final String DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
public static final int DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
@@ -543,7 +543,7 @@ private synchronized int readBuffer(ReaderStrategy reader, int off, int len,
return reader.doRead(blockReader, off, len);
} catch ( ChecksumException ce ) {
DFSClient.LOG.warn("Found Checksum error for "
+ getCurrentBlock() + " from " + currentNode.getName()
+ getCurrentBlock() + " from " + currentNode
+ " at " + ce.getPos());
ioe = ce;
retryCurrentNode = false;
@@ -671,7 +671,7 @@ private DNAddrPair chooseDataNode(LocatedBlock block)
try {
DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
InetSocketAddress targetAddr =
NetUtils.createSocketAddr(chosenNode.getName());
NetUtils.createSocketAddr(chosenNode.getXferAddr());
return new DNAddrPair(chosenNode, targetAddr);
} catch (IOException ie) {
String blockInfo = block.getBlock() + " file=" + src;
@@ -746,7 +746,7 @@ private void fetchBlockByteRange(LocatedBlock block, long start, long end,
} catch (ChecksumException e) {
DFSClient.LOG.warn("fetchBlockByteRange(). Got a checksum exception for " +
src + " at " + block.getBlock() + ":" +
e.getPos() + " from " + chosenNode.getName());
e.getPos() + " from " + chosenNode);
// we want to remember what we have tried
addIntoCorruptedBlockMap(block.getBlock(), chosenNode, corruptedBlockMap);
} catch (AccessControlException ex) {
@ -667,7 +667,7 @@ public void run() {
|
||||
throw new IOException("Bad response " + reply +
|
||||
" for block " + block +
|
||||
" from datanode " +
|
||||
targets[i].getName());
|
||||
targets[i]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -898,7 +898,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException {
|
||||
if (errorIndex >= 0) {
|
||||
StringBuilder pipelineMsg = new StringBuilder();
|
||||
for (int j = 0; j < nodes.length; j++) {
|
||||
pipelineMsg.append(nodes[j].getName());
|
||||
pipelineMsg.append(nodes[j]);
|
||||
if (j < nodes.length - 1) {
|
||||
pipelineMsg.append(", ");
|
||||
}
|
||||
@ -911,7 +911,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException {
|
||||
}
|
||||
DFSClient.LOG.warn("Error Recovery for block " + block +
|
||||
" in pipeline " + pipelineMsg +
|
||||
": bad datanode " + nodes[errorIndex].getName());
|
||||
": bad datanode " + nodes[errorIndex]);
|
||||
failed.add(nodes[errorIndex]);
|
||||
|
||||
DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
|
||||
@ -1005,7 +1005,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
|
||||
String firstBadLink = "";
|
||||
if (DFSClient.LOG.isDebugEnabled()) {
|
||||
for (int i = 0; i < nodes.length; i++) {
|
||||
DFSClient.LOG.debug("pipeline = " + nodes[i].getName());
|
||||
DFSClient.LOG.debug("pipeline = " + nodes[i]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1061,7 +1061,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes, long newGS,
|
||||
// find the datanode that matches
|
||||
if (firstBadLink.length() != 0) {
|
||||
for (int i = 0; i < nodes.length; i++) {
|
||||
if (nodes[i].getName().equals(firstBadLink)) {
|
||||
if (nodes[i].getXferAddr().equals(firstBadLink)) {
|
||||
errorIndex = i;
|
||||
break;
|
||||
}
|
||||
@ -1165,9 +1165,10 @@ private void setLastException(IOException e) {
|
||||
static Socket createSocketForPipeline(final DatanodeInfo first,
|
||||
final int length, final DFSClient client) throws IOException {
|
||||
if(DFSClient.LOG.isDebugEnabled()) {
|
||||
DFSClient.LOG.debug("Connecting to datanode " + first.getName());
|
||||
DFSClient.LOG.debug("Connecting to datanode " + first);
|
||||
}
|
||||
final InetSocketAddress isa = NetUtils.createSocketAddr(first.getName());
|
||||
final InetSocketAddress isa =
|
||||
NetUtils.createSocketAddr(first.getXferAddr());
|
||||
final Socket sock = client.socketFactory.createSocket();
|
||||
final int timeout = client.getDatanodeReadTimeout(length);
|
||||
NetUtils.connect(sock, isa, timeout);
|
||||
|
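For context, the pattern above — take the datanode's ip:port transfer address, turn it into a socket address, and connect with an explicit timeout — can be sketched in plain Java as follows. The class name, the sample address and the timeout are illustrative stand-ins, not the DFSClient/NetUtils API.

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class XferAddrConnect {
      // Parse an "ip:port" transfer address; both pieces are assumed present.
      static InetSocketAddress toSocketAddr(String xferAddr) {
        int colon = xferAddr.lastIndexOf(':');
        String ip = xferAddr.substring(0, colon);
        int port = Integer.parseInt(xferAddr.substring(colon + 1));
        return new InetSocketAddress(ip, port);
      }

      // Connect with a bounded timeout so a dead datanode fails fast.
      static Socket connect(String xferAddr, int timeoutMillis) throws IOException {
        Socket sock = new Socket();
        sock.connect(toSocketAddr(xferAddr), timeoutMillis);
        sock.setSoTimeout(timeoutMillis);
        return sock;
      }

      public static void main(String[] args) {
        System.out.println(toSocketAddr("127.0.0.1:50010")); // /127.0.0.1:50010
      }
    }
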
@ -295,16 +295,16 @@ public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
|
||||
assert idx < nrBlocks : "Incorrect index";
|
||||
DatanodeInfo[] locations = blk.getLocations();
|
||||
String[] hosts = new String[locations.length];
|
||||
String[] names = new String[locations.length];
|
||||
String[] xferAddrs = new String[locations.length];
|
||||
String[] racks = new String[locations.length];
|
||||
for (int hCnt = 0; hCnt < locations.length; hCnt++) {
|
||||
hosts[hCnt] = locations[hCnt].getHostName();
|
||||
names[hCnt] = locations[hCnt].getName();
|
||||
NodeBase node = new NodeBase(names[hCnt],
|
||||
xferAddrs[hCnt] = locations[hCnt].getXferAddr();
|
||||
NodeBase node = new NodeBase(xferAddrs[hCnt],
|
||||
locations[hCnt].getNetworkLocation());
|
||||
racks[hCnt] = node.toString();
|
||||
}
|
||||
blkLocations[idx] = new BlockLocation(names, hosts, racks,
|
||||
blkLocations[idx] = new BlockLocation(xferAddrs, hosts, racks,
|
||||
blk.getStartOffset(),
|
||||
blk.getBlockSize(),
|
||||
blk.isCorrupt());
|
||||
|
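A minimal sketch of the same bookkeeping, assuming a simplified replica record rather than the real DatanodeInfo/BlockLocation types: one array of transfer addresses, one of hostnames, and one of rack-qualified paths per block. The sample IPs and racks are made up.

    import java.util.Arrays;

    public class BlockLocationSketch {
      // Simplified stand-in for one replica's datanode.
      static class Replica {
        final String ipAddr; final int xferPort; final String hostName; final String rack;
        Replica(String ipAddr, int xferPort, String hostName, String rack) {
          this.ipAddr = ipAddr; this.xferPort = xferPort; this.hostName = hostName; this.rack = rack;
        }
        String xferAddr() { return ipAddr + ":" + xferPort; }
      }

      public static void main(String[] args) {
        Replica[] locations = {
          new Replica("10.0.0.1", 50010, "dn1.example.com", "/rack1"),
          new Replica("10.0.0.2", 50010, "dn2.example.com", "/rack2"),
        };
        String[] xferAddrs = new String[locations.length];
        String[] hosts = new String[locations.length];
        String[] racks = new String[locations.length];
        for (int i = 0; i < locations.length; i++) {
          xferAddrs[i] = locations[i].xferAddr();                       // ip:port "name" of the node
          hosts[i] = locations[i].hostName;                             // hostname for the client
          racks[i] = locations[i].rack + "/" + locations[i].xferAddr(); // rack-qualified path
        }
        System.out.println(Arrays.toString(xferAddrs));
        System.out.println(Arrays.toString(hosts));
        System.out.println(Arrays.toString(racks));
      }
    }
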
@ -688,7 +688,7 @@ public boolean reportChecksumFailure(Path f,
|
||||
lblocks[0] = new LocatedBlock(dataBlock, dataNode);
|
||||
LOG.info("Found checksum error in data stream at block="
|
||||
+ dataBlock + " on datanode="
|
||||
+ dataNode[0].getName());
|
||||
+ dataNode[0]);
|
||||
|
||||
// Find block in checksum stream
|
||||
DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums;
|
||||
@ -700,8 +700,7 @@ public boolean reportChecksumFailure(Path f,
|
||||
DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()};
|
||||
lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
|
||||
LOG.info("Found checksum error in checksum stream at block="
|
||||
+ sumsBlock + " on datanode="
|
||||
+ sumsNode[0].getName());
|
||||
+ sumsBlock + " on datanode=" + sumsNode[0]);
|
||||
|
||||
// Ask client to delete blocks.
|
||||
dfs.reportChecksumFailure(f.toString(), lblocks);
|
||||
|
@ -86,7 +86,6 @@ private static void addDeprecatedKeys() {
|
||||
deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
|
||||
deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
|
||||
deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
|
||||
deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY);
|
||||
deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
|
||||
deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
|
||||
deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);
|
||||
|
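The deprecate(...) calls above only register a mapping from old key names to their replacements; a self-contained sketch of that lookup in plain Java (not the Hadoop Configuration API) looks like this:

    import java.util.HashMap;
    import java.util.Map;

    public class DeprecatedKeys {
      private static final Map<String, String> DEPRECATIONS = new HashMap<String, String>();

      static void deprecate(String oldKey, String newKey) {
        DEPRECATIONS.put(oldKey, newKey);
      }

      // Resolve a possibly-deprecated key to its current name.
      static String resolve(String key) {
        String newKey = DEPRECATIONS.get(key);
        return newKey != null ? newKey : key;
      }

      public static void main(String[] args) {
        deprecate("fs.checkpoint.period", "dfs.namenode.checkpoint.period");
        System.out.println(resolve("fs.checkpoint.period")); // dfs.namenode.checkpoint.period
        System.out.println(resolve("dfs.blocksize"));        // unchanged
      }
    }
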
@ -24,7 +24,7 @@

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

/**
@ -32,22 +32,32 @@
* Datanodes are identified by how they can be contacted (hostname
* and ports) and their storage ID, a unique number that associates
* the Datanodes blocks with a particular Datanode.
*
* {@link DatanodeInfo#getName()} should be used to get the network
* location (for topology) of a datanode, instead of using
* {@link DatanodeID#getXferAddr()} here. Helpers are defined below
* for each context in which a DatanodeID is used.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeID implements WritableComparable<DatanodeID> {
public static final DatanodeID[] EMPTY_ARRAY = {};

public String name; // hostname:port (data transfer port)
public String storageID; // unique per cluster storageID
protected String ipAddr; // IP address
protected String hostName; // hostname
protected String storageID; // unique per cluster storageID
protected int xferPort; // data streaming port
protected int infoPort; // info server port
public int ipcPort; // ipc server port
protected int ipcPort; // IPC server port

/** Equivalent to DatanodeID(""). */
public DatanodeID() {this("");}

/** Equivalent to DatanodeID(nodeName, "", -1, -1). */
public DatanodeID(String nodeName) {this(nodeName, "", -1, -1);}
/** Equivalent to DatanodeID(ipAddr, "", -1, -1, -1). */
public DatanodeID(String ipAddr) {this(ipAddr, "", "", -1, -1, -1);}

/** Equivalent to DatanodeID(ipAddr, "", xferPort, -1, -1). */
public DatanodeID(String ipAddr, int xferPort) {this(ipAddr, "", "", xferPort, -1, -1);}

/**
|
||||
* DatanodeID copy constructor
|
||||
@ -55,29 +65,43 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
|
||||
* @param from
|
||||
*/
|
||||
public DatanodeID(DatanodeID from) {
|
||||
this(from.getName(),
|
||||
this(from.getIpAddr(),
|
||||
from.getHostName(),
|
||||
from.getStorageID(),
|
||||
from.getXferPort(),
|
||||
from.getInfoPort(),
|
||||
from.getIpcPort());
|
||||
}
|
||||
|
||||
/**
|
||||
* Create DatanodeID
|
||||
* @param nodeName (hostname:portNumber)
|
||||
* @param ipAddr IP
|
||||
* @param hostName hostname
|
||||
* @param storageID data storage ID
|
||||
* @param xferPort data transfer port
|
||||
* @param infoPort info server port
|
||||
* @param ipcPort ipc server port
|
||||
*/
|
||||
public DatanodeID(String nodeName, String storageID,
|
||||
int infoPort, int ipcPort) {
|
||||
this.name = nodeName;
|
||||
public DatanodeID(String ipAddr, String hostName, String storageID,
|
||||
int xferPort, int infoPort, int ipcPort) {
|
||||
this.ipAddr = ipAddr;
|
||||
this.hostName = hostName;
|
||||
this.storageID = storageID;
|
||||
this.xferPort = xferPort;
|
||||
this.infoPort = infoPort;
|
||||
this.ipcPort = ipcPort;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
public void setIpAddr(String ipAddr) {
|
||||
this.ipAddr = ipAddr;
|
||||
}
|
||||
|
||||
public void setHostName(String hostName) {
|
||||
this.hostName = hostName;
|
||||
}
|
||||
|
||||
public void setXferPort(int xferPort) {
|
||||
this.xferPort = xferPort;
|
||||
}
|
||||
|
||||
public void setInfoPort(int infoPort) {
|
||||
@ -88,18 +112,64 @@ public void setIpcPort(int ipcPort) {
|
||||
this.ipcPort = ipcPort;
|
||||
}
|
||||
|
||||
public void setStorageID(String storageID) {
|
||||
this.storageID = storageID;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return hostname:portNumber.
|
||||
* @return ipAddr;
|
||||
*/
|
||||
public String getName() {
|
||||
return name;
|
||||
public String getIpAddr() {
|
||||
return ipAddr;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return hostname
|
||||
*/
|
||||
public String getHostName() {
|
||||
return hostName;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return IP:xferPort string
|
||||
*/
|
||||
public String getXferAddr() {
|
||||
return ipAddr + ":" + xferPort;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return IP:ipcPort string
|
||||
*/
|
||||
public String getIpcAddr() {
|
||||
return ipAddr + ":" + ipcPort;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return IP:infoPort string
|
||||
*/
|
||||
public String getInfoAddr() {
|
||||
return ipAddr + ":" + infoPort;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return hostname:xferPort
|
||||
*/
|
||||
public String getXferAddrWithHostname() {
|
||||
return hostName + ":" + xferPort;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return data storage ID.
|
||||
*/
|
||||
public String getStorageID() {
|
||||
return this.storageID;
|
||||
return storageID;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return xferPort (the port for data streaming)
|
||||
*/
|
||||
public int getXferPort() {
|
||||
return xferPort;
|
||||
}
|
||||
|
||||
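Taken together, the new accessors derive every externally visible address from the ipAddr/hostName fields plus one of the three ports. A compact stand-alone sketch of that pattern; the method names mirror the diff, the sample values are made up:

    public class AddrHelpers {
      private final String ipAddr;
      private final String hostName;
      private final int xferPort;
      private final int infoPort;
      private final int ipcPort;

      AddrHelpers(String ipAddr, String hostName, int xferPort, int infoPort, int ipcPort) {
        this.ipAddr = ipAddr;
        this.hostName = hostName;
        this.xferPort = xferPort;
        this.infoPort = infoPort;
        this.ipcPort = ipcPort;
      }

      String getXferAddr()             { return ipAddr + ":" + xferPort; }   // data streaming
      String getIpcAddr()              { return ipAddr + ":" + ipcPort;  }   // IPC server
      String getInfoAddr()             { return ipAddr + ":" + infoPort; }   // HTTP info server
      String getXferAddrWithHostname() { return hostName + ":" + xferPort; } // for display

      public static void main(String[] args) {
        AddrHelpers id = new AddrHelpers("10.0.0.7", "dn7.example.com", 50010, 50075, 50020);
        System.out.println(id.getXferAddr());             // 10.0.0.7:50010
        System.out.println(id.getXferAddrWithHostname()); // dn7.example.com:50010
      }
    }
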
/**
|
||||
@ -116,33 +186,6 @@ public int getIpcPort() {
|
||||
return ipcPort;
|
||||
}
|
||||
|
||||
/**
|
||||
* sets the data storage ID.
|
||||
*/
|
||||
public void setStorageID(String storageID) {
|
||||
this.storageID = storageID;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return hostname and no :portNumber.
|
||||
*/
|
||||
public String getHost() {
|
||||
int colon = name.indexOf(":");
|
||||
if (colon < 0) {
|
||||
return name;
|
||||
} else {
|
||||
return name.substring(0, colon);
|
||||
}
|
||||
}
|
||||
|
||||
public int getPort() {
|
||||
int colon = name.indexOf(":");
|
||||
if (colon < 0) {
|
||||
return 50010; // default port.
|
||||
}
|
||||
return Integer.parseInt(name.substring(colon+1));
|
||||
}
|
||||
|
||||
public boolean equals(Object to) {
|
||||
if (this == to) {
|
||||
return true;
|
||||
@ -150,16 +193,16 @@ public boolean equals(Object to) {
|
||||
if (!(to instanceof DatanodeID)) {
|
||||
return false;
|
||||
}
|
||||
return (name.equals(((DatanodeID)to).getName()) &&
|
||||
return (getXferAddr().equals(((DatanodeID)to).getXferAddr()) &&
|
||||
storageID.equals(((DatanodeID)to).getStorageID()));
|
||||
}
|
||||
|
||||
public int hashCode() {
|
||||
return name.hashCode()^ storageID.hashCode();
|
||||
return getXferAddr().hashCode()^ storageID.hashCode();
|
||||
}
|
||||
|
||||
public String toString() {
|
||||
return name;
|
||||
return getXferAddr();
|
||||
}
|
||||
|
||||
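Since identity now hangs off the transfer address plus storage ID, equals, hashCode and toString have to agree on those values, while ordering uses the address alone. A minimal self-contained illustration of that contract (not the real class):

    public class NodeIdentity implements Comparable<NodeIdentity> {
      private final String xferAddr;   // ip:port used for data transfer
      private final String storageID;  // unique per cluster

      NodeIdentity(String xferAddr, String storageID) {
        this.xferAddr = xferAddr;
        this.storageID = storageID;
      }

      @Override
      public boolean equals(Object to) {
        if (this == to) return true;
        if (!(to instanceof NodeIdentity)) return false;
        NodeIdentity that = (NodeIdentity) to;
        return xferAddr.equals(that.xferAddr) && storageID.equals(that.storageID);
      }

      @Override
      public int hashCode() {
        // Combines the same fields equals() compares.
        return xferAddr.hashCode() ^ storageID.hashCode();
      }

      @Override
      public int compareTo(NodeIdentity that) {
        // Ordering is by data transfer address only, as in the diff above.
        return xferAddr.compareTo(that.xferAddr);
      }
    }
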
/**
|
||||
@ -167,39 +210,44 @@ public String toString() {
|
||||
* Note that this does not update storageID.
|
||||
*/
|
||||
public void updateRegInfo(DatanodeID nodeReg) {
|
||||
name = nodeReg.getName();
|
||||
ipAddr = nodeReg.getIpAddr();
|
||||
hostName = nodeReg.getHostName();
|
||||
xferPort = nodeReg.getXferPort();
|
||||
infoPort = nodeReg.getInfoPort();
|
||||
ipcPort = nodeReg.getIpcPort();
|
||||
// update any more fields added in future.
|
||||
}
|
||||
|
||||
/** Comparable.
|
||||
* Basis of compare is the String name (host:portNumber) only.
|
||||
/**
|
||||
* Compare based on data transfer address.
|
||||
*
|
||||
* @param that
|
||||
* @return as specified by Comparable.
|
||||
* @return as specified by Comparable
|
||||
*/
|
||||
public int compareTo(DatanodeID that) {
|
||||
return name.compareTo(that.getName());
|
||||
return getXferAddr().compareTo(that.getXferAddr());
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////
|
||||
// Writable
|
||||
/////////////////////////////////////////////////
|
||||
@Override
|
||||
public void write(DataOutput out) throws IOException {
|
||||
DeprecatedUTF8.writeString(out, name);
|
||||
DeprecatedUTF8.writeString(out, storageID);
|
||||
Text.writeString(out, ipAddr);
|
||||
Text.writeString(out, hostName);
|
||||
Text.writeString(out, storageID);
|
||||
out.writeShort(xferPort);
|
||||
out.writeShort(infoPort);
|
||||
out.writeShort(ipcPort);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFields(DataInput in) throws IOException {
|
||||
name = DeprecatedUTF8.readString(in);
|
||||
storageID = DeprecatedUTF8.readString(in);
|
||||
// the infoPort read could be negative, if the port is a large number (more
|
||||
ipAddr = Text.readString(in);
|
||||
hostName = Text.readString(in);
|
||||
storageID = Text.readString(in);
|
||||
// The port read could be negative, if the port is a large number (more
|
||||
// than 15 bits in storage size (but less than 16 bits).
|
||||
// So chop off the first two bytes (and hence the signed bits) before
|
||||
// setting the field.
|
||||
this.infoPort = in.readShort() & 0x0000ffff;
|
||||
xferPort = in.readShort() & 0x0000ffff;
|
||||
infoPort = in.readShort() & 0x0000ffff;
|
||||
ipcPort = in.readShort() & 0x0000ffff;
|
||||
}
|
||||
}
|
||||
|
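The readFields change keeps the old trick for ports: a port above 32767 comes back negative from readShort(), so the sign bits are masked off before use. A tiny round-trip demo of that masking, independent of the Writable types:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class UnsignedPortDemo {
      public static void main(String[] args) throws IOException {
        int port = 50070;  // fits in 16 bits but not in a signed short

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeShort(port);            // low 16 bits are written
        out.close();

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        int raw = in.readShort();        // sign-extended: -15466 here
        int restored = raw & 0x0000ffff; // mask back to the unsigned value
        System.out.println(raw + " -> " + restored); // -15466 -> 50070
      }
    }
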
@ -52,9 +52,6 @@ public class DatanodeInfo extends DatanodeID implements Node {
|
||||
protected int xceiverCount;
|
||||
protected String location = NetworkTopology.DEFAULT_RACK;
|
||||
|
||||
// The FQDN of the IP associated with the Datanode's hostname
|
||||
protected String hostName = null;
|
||||
|
||||
// Datanode administrative states
|
||||
public enum AdminStates {
|
||||
NORMAL("In Service"),
|
||||
@ -110,30 +107,27 @@ public DatanodeInfo(DatanodeID nodeID) {
|
||||
this.adminState = null;
|
||||
}
|
||||
|
||||
public DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
|
||||
public DatanodeInfo(DatanodeID nodeID, String location) {
|
||||
this(nodeID);
|
||||
this.location = location;
|
||||
this.hostName = hostName;
|
||||
}
|
||||
|
||||
public DatanodeInfo(DatanodeID nodeID, String location, String hostName,
|
||||
public DatanodeInfo(DatanodeID nodeID, String location,
|
||||
final long capacity, final long dfsUsed, final long remaining,
|
||||
final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
|
||||
final AdminStates adminState) {
|
||||
this(nodeID.getName(), nodeID.getStorageID(), nodeID.getInfoPort(), nodeID
|
||||
.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed, lastUpdate,
|
||||
xceiverCount, location, hostName, adminState);
|
||||
this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getXferPort(),
|
||||
nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining,
|
||||
blockPoolUsed, lastUpdate, xceiverCount, location, adminState);
|
||||
}
|
||||
|
||||
/** Constructor */
|
||||
public DatanodeInfo(final String name, final String storageID,
|
||||
final int infoPort, final int ipcPort,
|
||||
public DatanodeInfo(final String name, final String hostName,
|
||||
final String storageID, final int xferPort, final int infoPort, final int ipcPort,
|
||||
final long capacity, final long dfsUsed, final long remaining,
|
||||
final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
|
||||
final String networkLocation, final String hostName,
|
||||
final AdminStates adminState) {
|
||||
super(name, storageID, infoPort, ipcPort);
|
||||
|
||||
final String networkLocation, final AdminStates adminState) {
|
||||
super(name, hostName, storageID, xferPort, infoPort, ipcPort);
|
||||
this.capacity = capacity;
|
||||
this.dfsUsed = dfsUsed;
|
||||
this.remaining = remaining;
|
||||
@ -141,10 +135,14 @@ public DatanodeInfo(final String name, final String storageID,
|
||||
this.lastUpdate = lastUpdate;
|
||||
this.xceiverCount = xceiverCount;
|
||||
this.location = networkLocation;
|
||||
this.hostName = hostName;
|
||||
this.adminState = adminState;
|
||||
}
|
||||
|
||||
/** Network location name */
|
||||
public String getName() {
|
||||
return getXferAddr();
|
||||
}
|
||||
|
||||
/** The raw capacity. */
|
||||
public long getCapacity() { return capacity; }
|
||||
|
||||
@ -222,14 +220,6 @@ public synchronized void setNetworkLocation(String location) {
|
||||
this.location = NodeBase.normalize(location);
|
||||
}
|
||||
|
||||
public String getHostName() {
|
||||
return (hostName == null || hostName.length()==0) ? getHost() : hostName;
|
||||
}
|
||||
|
||||
public void setHostName(String host) {
|
||||
hostName = host;
|
||||
}
|
||||
|
||||
/** A formatted string for reporting the status of the DataNode. */
|
||||
public String getDatanodeReport() {
|
||||
StringBuilder buffer = new StringBuilder();
|
||||
@ -239,9 +229,9 @@ public String getDatanodeReport() {
|
||||
long nonDFSUsed = getNonDfsUsed();
|
||||
float usedPercent = getDfsUsedPercent();
|
||||
float remainingPercent = getRemainingPercent();
|
||||
String lookupName = NetUtils.getHostNameOfIP(name);
|
||||
String lookupName = NetUtils.getHostNameOfIP(getName());
|
||||
|
||||
buffer.append("Name: "+ name);
|
||||
buffer.append("Name: "+ getName());
|
||||
if (lookupName != null) {
|
||||
buffer.append(" (" + lookupName + ")");
|
||||
}
|
||||
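getHostNameOfIP takes the ip:port style name and does a reverse lookup on just the IP part; a rough stand-alone equivalent in plain Java (the helper name and the sample address are illustrative, and IPv6 literals are not handled):

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class HostNameOfIp {
      // Accepts "ip" or "ip:port"; returns the resolved hostname or null on failure.
      static String hostNameOfIp(String ipPort) {
        String ip = ipPort.contains(":") ? ipPort.substring(0, ipPort.indexOf(':')) : ipPort;
        try {
          return InetAddress.getByName(ip).getCanonicalHostName();
        } catch (UnknownHostException e) {
          return null;
        }
      }

      public static void main(String[] args) {
        System.out.println(hostNameOfIp("127.0.0.1:50010")); // usually "localhost"
      }
    }
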
@ -275,7 +265,7 @@ public String dumpDatanode() {
|
||||
long c = getCapacity();
|
||||
long r = getRemaining();
|
||||
long u = getDfsUsed();
|
||||
buffer.append(name);
|
||||
buffer.append(ipAddr);
|
||||
if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
|
||||
buffer.append(" "+location);
|
||||
}
|
||||
@ -380,10 +370,6 @@ protected void setAdminState(AdminStates newState) {
|
||||
@Override
|
||||
public void write(DataOutput out) throws IOException {
|
||||
super.write(out);
|
||||
|
||||
//TODO: move it to DatanodeID once DatanodeID is not stored in FSImage
|
||||
out.writeShort(ipcPort);
|
||||
|
||||
out.writeLong(capacity);
|
||||
out.writeLong(dfsUsed);
|
||||
out.writeLong(remaining);
|
||||
@ -391,17 +377,12 @@ public void write(DataOutput out) throws IOException {
|
||||
out.writeLong(lastUpdate);
|
||||
out.writeInt(xceiverCount);
|
||||
Text.writeString(out, location);
|
||||
Text.writeString(out, hostName == null? "": hostName);
|
||||
WritableUtils.writeEnum(out, getAdminState());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFields(DataInput in) throws IOException {
|
||||
super.readFields(in);
|
||||
|
||||
//TODO: move it to DatanodeID once DatanodeID is not stored in FSImage
|
||||
this.ipcPort = in.readShort() & 0x0000ffff;
|
||||
|
||||
this.capacity = in.readLong();
|
||||
this.dfsUsed = in.readLong();
|
||||
this.remaining = in.readLong();
|
||||
@ -409,7 +390,6 @@ public void readFields(DataInput in) throws IOException {
|
||||
this.lastUpdate = in.readLong();
|
||||
this.xceiverCount = in.readInt();
|
||||
this.location = Text.readString(in);
|
||||
this.hostName = Text.readString(in);
|
||||
setAdminState(WritableUtils.readEnum(in, AdminStates.class));
|
||||
}
|
||||
|
||||
|
@ -84,8 +84,10 @@ public static ExtendedBlock fromProto(HdfsProtos.ExtendedBlockProto proto) {
|
||||
private static HdfsProtos.DatanodeIDProto toProto(
|
||||
DatanodeID dni) {
|
||||
return HdfsProtos.DatanodeIDProto.newBuilder()
|
||||
.setName(dni.getName())
|
||||
.setIpAddr(dni.getIpAddr())
|
||||
.setHostName(dni.getHostName())
|
||||
.setStorageID(dni.getStorageID())
|
||||
.setXferPort(dni.getXferPort())
|
||||
.setInfoPort(dni.getInfoPort())
|
||||
.setIpcPort(dni.getIpcPort())
|
||||
.build();
|
||||
@ -93,8 +95,10 @@ private static HdfsProtos.DatanodeIDProto toProto(
|
||||
|
||||
private static DatanodeID fromProto(HdfsProtos.DatanodeIDProto idProto) {
|
||||
return new DatanodeID(
|
||||
idProto.getName(),
|
||||
idProto.getIpAddr(),
|
||||
idProto.getHostName(),
|
||||
idProto.getStorageID(),
|
||||
idProto.getXferPort(),
|
||||
idProto.getInfoPort(),
|
||||
idProto.getIpcPort());
|
||||
}
|
||||
@ -111,7 +115,6 @@ public static HdfsProtos.DatanodeInfoProto toProto(DatanodeInfo dni) {
|
||||
.setLastUpdate(dni.getLastUpdate())
|
||||
.setXceiverCount(dni.getXceiverCount())
|
||||
.setLocation(dni.getNetworkLocation())
|
||||
.setHostName(dni.getHostName())
|
||||
.setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.valueOf(
|
||||
dni.getAdminState().name()))
|
||||
.build();
|
||||
@ -119,7 +122,7 @@ public static HdfsProtos.DatanodeInfoProto toProto(DatanodeInfo dni) {
|
||||
|
||||
public static DatanodeInfo fromProto(HdfsProtos.DatanodeInfoProto dniProto) {
|
||||
DatanodeInfo dniObj = new DatanodeInfo(fromProto(dniProto.getId()),
|
||||
dniProto.getLocation(), dniProto.getHostName());
|
||||
dniProto.getLocation());
|
||||
|
||||
dniObj.setCapacity(dniProto.getCapacity());
|
||||
dniObj.setDfsUsed(dniProto.getDfsUsed());
|
||||
|
@ -45,9 +45,8 @@ public UnregisteredNodeException(NodeRegistration nodeReg) {
|
||||
* @param storedNode data-node stored in the system with this storage id
|
||||
*/
|
||||
public UnregisteredNodeException(DatanodeID nodeID, DatanodeInfo storedNode) {
|
||||
super("Data node " + nodeID.getName()
|
||||
+ " is attempting to report storage ID "
|
||||
super("Data node " + nodeID + " is attempting to report storage ID "
|
||||
+ nodeID.getStorageID() + ". Node "
|
||||
+ storedNode.getName() + " is expected to serve this storage.");
|
||||
+ storedNode + " is expected to serve this storage.");
|
||||
}
|
||||
}
|
||||
|
@ -97,8 +97,7 @@ public ClientDatanodeProtocolTranslatorPB(InetSocketAddress addr,
|
||||
*/
|
||||
public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
|
||||
Configuration conf, int socketTimeout) throws IOException {
|
||||
InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getHost()
|
||||
+ ":" + datanodeid.getIpcPort());
|
||||
InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
|
||||
rpcProxy = createClientDatanodeProtocolProxy(addr,
|
||||
UserGroupInformation.getCurrentUser(), conf,
|
||||
NetUtils.getDefaultSocketFactory(conf), socketTimeout);
|
||||
@ -107,8 +106,7 @@ public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
|
||||
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
|
||||
DatanodeID datanodeid, Configuration conf, int socketTimeout,
|
||||
LocatedBlock locatedBlock) throws IOException {
|
||||
InetSocketAddress addr = NetUtils.createSocketAddr(
|
||||
datanodeid.getHost() + ":" + datanodeid.getIpcPort());
|
||||
InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("ClientDatanodeProtocol addr=" + addr);
|
||||
}
|
||||
|
@ -204,14 +204,18 @@ public static NamenodeRegistration convert(NamenodeRegistrationProto reg) {
|
||||
|
||||
// DatanodeId
|
||||
public static DatanodeID convert(DatanodeIDProto dn) {
|
||||
return new DatanodeID(dn.getName(), dn.getStorageID(), dn.getInfoPort(),
|
||||
dn.getIpcPort());
|
||||
return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(),
|
||||
dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort());
|
||||
}
|
||||
|
||||
public static DatanodeIDProto convert(DatanodeID dn) {
|
||||
return DatanodeIDProto.newBuilder().setName(dn.getName())
|
||||
.setInfoPort(dn.getInfoPort()).setIpcPort(dn.getIpcPort())
|
||||
.setStorageID(dn.getStorageID()).build();
|
||||
return DatanodeIDProto.newBuilder()
|
||||
.setIpAddr(dn.getIpAddr())
|
||||
.setHostName(dn.getHostName())
|
||||
.setStorageID(dn.getStorageID())
|
||||
.setXferPort(dn.getXferPort())
|
||||
.setInfoPort(dn.getInfoPort())
|
||||
.setIpcPort(dn.getIpcPort()).build();
|
||||
}
|
||||
|
||||
// Arrays of DatanodeId
|
||||
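The two converters are a symmetric field-by-field mapping over the same six values. Sketched here against plain value classes so it stands alone; the real code goes through the generated DatanodeIDProto builder shown above.

    public class IdConversionSketch {
      // Plain value object standing in for DatanodeID.
      static class Id {
        final String ipAddr, hostName, storageID;
        final int xferPort, infoPort, ipcPort;
        Id(String ipAddr, String hostName, String storageID,
           int xferPort, int infoPort, int ipcPort) {
          this.ipAddr = ipAddr; this.hostName = hostName; this.storageID = storageID;
          this.xferPort = xferPort; this.infoPort = infoPort; this.ipcPort = ipcPort;
        }
      }

      // Stand-in for the generated protobuf message: same six fields.
      static class IdProto {
        String ipAddr, hostName, storageID;
        int xferPort, infoPort, ipcPort;
      }

      static IdProto toProto(Id dn) {
        IdProto p = new IdProto();
        p.ipAddr = dn.ipAddr; p.hostName = dn.hostName; p.storageID = dn.storageID;
        p.xferPort = dn.xferPort; p.infoPort = dn.infoPort; p.ipcPort = dn.ipcPort;
        return p;
      }

      static Id fromProto(IdProto p) {
        return new Id(p.ipAddr, p.hostName, p.storageID, p.xferPort, p.infoPort, p.ipcPort);
      }
    }
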
@ -442,7 +446,6 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
|
||||
return new DatanodeInfo(
|
||||
PBHelper.convert(di.getId()),
|
||||
di.hasLocation() ? di.getLocation() : null ,
|
||||
di.hasHostName() ? di.getHostName() : null,
|
||||
di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
|
||||
di.getBlockPoolUsed() , di.getLastUpdate() , di.getXceiverCount() ,
|
||||
PBHelper.convert(di.getAdminState()));
|
||||
@ -451,9 +454,6 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
|
||||
static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
|
||||
if (di == null) return null;
|
||||
DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
|
||||
if (di.getHostName() != null) {
|
||||
builder.setHostName(di.getHostName());
|
||||
}
|
||||
if (di.getNetworkLocation() != null) {
|
||||
builder.setLocation(di.getNetworkLocation());
|
||||
}
|
||||
@ -503,7 +503,6 @@ public static DatanodeInfoProto convert(DatanodeInfo info) {
|
||||
builder.setAdminState(PBHelper.convert(info.getAdminState()));
|
||||
builder.setCapacity(info.getCapacity())
|
||||
.setDfsUsed(info.getDfsUsed())
|
||||
.setHostName(info.getHostName())
|
||||
.setId(PBHelper.convert((DatanodeID)info))
|
||||
.setLastUpdate(info.getLastUpdate())
|
||||
.setLocation(info.getNetworkLocation())
|
||||
@ -610,8 +609,8 @@ public static DatanodeRegistrationProto convert(
|
||||
DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
|
||||
.newBuilder();
|
||||
return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
|
||||
.setStorageInfo(PBHelper.convert(registration.storageInfo))
|
||||
.setKeys(PBHelper.convert(registration.exportedKeys)).build();
|
||||
.setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
|
||||
.setKeys(PBHelper.convert(registration.getExportedKeys())).build();
|
||||
}
|
||||
|
||||
public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
|
||||
|
@ -51,6 +51,7 @@
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
@ -304,8 +305,9 @@ private void dispatch() {
|
||||
DataOutputStream out = null;
|
||||
DataInputStream in = null;
|
||||
try {
|
||||
sock.connect(NetUtils.createSocketAddr(
|
||||
target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT);
|
||||
sock.connect(
|
||||
NetUtils.createSocketAddr(target.datanode.getXferAddr()),
|
||||
HdfsServerConstants.READ_TIMEOUT);
|
||||
sock.setKeepAlive(true);
|
||||
out = new DataOutputStream( new BufferedOutputStream(
|
||||
sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
|
||||
@ -586,7 +588,7 @@ private Source(DatanodeInfo node, BalancingPolicy policy, double threshold) {
|
||||
/** Add a node task */
|
||||
private void addNodeTask(NodeTask task) {
|
||||
assert (task.datanode != this) :
|
||||
"Source and target are the same " + datanode.getName();
|
||||
"Source and target are the same " + datanode;
|
||||
incScheduledSize(task.getSize());
|
||||
nodeTasks.add(task);
|
||||
}
|
||||
@ -1006,7 +1008,7 @@ private boolean chooseTarget(Source source,
|
||||
targetCandidates.remove();
|
||||
}
|
||||
LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
|
||||
+source.datanode.getName() + " to " + target.datanode.getName());
|
||||
+source.datanode + " to " + target.datanode);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -1054,7 +1056,7 @@ private boolean chooseSource(BalancerDatanode target,
|
||||
sourceCandidates.remove();
|
||||
}
|
||||
LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
|
||||
+source.datanode.getName() + " to " + target.datanode.getName());
|
||||
+source.datanode + " to " + target.datanode);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -1550,7 +1552,7 @@ private static void printUsage() {
|
||||
*/
|
||||
public static void main(String[] args) {
|
||||
try {
|
||||
System.exit(ToolRunner.run(null, new Cli(), args));
|
||||
System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
|
||||
} catch (Throwable e) {
|
||||
LOG.error("Exiting balancer due an exception", e);
|
||||
System.exit(-1);
|
||||
|
@ -808,9 +808,9 @@ private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
|
||||
final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
|
||||
if (node == null) {
|
||||
NameNode.stateChangeLog.warn("BLOCK* getBlocks: "
|
||||
+ "Asking for blocks from an unrecorded node " + datanode.getName());
|
||||
+ "Asking for blocks from an unrecorded node " + datanode);
|
||||
throw new HadoopIllegalArgumentException(
|
||||
"Datanode " + datanode.getName() + " not found.");
|
||||
"Datanode " + datanode + " not found.");
|
||||
}
|
||||
|
||||
int numBlocks = node.numBlocks();
|
||||
@ -882,7 +882,7 @@ private void addToInvalidates(Block b) {
|
||||
.hasNext();) {
|
||||
DatanodeDescriptor node = it.next();
|
||||
invalidateBlocks.add(b, node, false);
|
||||
datanodes.append(node.getName()).append(" ");
|
||||
datanodes.append(node).append(" ");
|
||||
}
|
||||
if (datanodes.length() != 0) {
|
||||
NameNode.stateChangeLog.info("BLOCK* addToInvalidates: "
|
||||
@ -921,7 +921,7 @@ private void markBlockAsCorrupt(BlockInfo storedBlock,
|
||||
if (node == null) {
|
||||
throw new IOException("Cannot mark block " +
|
||||
storedBlock.getBlockName() +
|
||||
" as corrupt because datanode " + dn.getName() +
|
||||
" as corrupt because datanode " + dn +
|
||||
" does not exist. ");
|
||||
}
|
||||
|
||||
@ -955,11 +955,11 @@ private void markBlockAsCorrupt(BlockInfo storedBlock,
|
||||
private void invalidateBlock(Block blk, DatanodeInfo dn)
|
||||
throws IOException {
|
||||
NameNode.stateChangeLog.info("BLOCK* invalidateBlock: "
|
||||
+ blk + " on " + dn.getName());
|
||||
+ blk + " on " + dn);
|
||||
DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
|
||||
if (node == null) {
|
||||
throw new IOException("Cannot invalidate block " + blk
|
||||
+ " because datanode " + dn.getName() + " does not exist.");
|
||||
+ " because datanode " + dn + " does not exist.");
|
||||
}
|
||||
|
||||
// Check how many copies we have of the block
|
||||
@ -977,11 +977,11 @@ private void invalidateBlock(Block blk, DatanodeInfo dn)
|
||||
removeStoredBlock(blk, node);
|
||||
if(NameNode.stateChangeLog.isDebugEnabled()) {
|
||||
NameNode.stateChangeLog.debug("BLOCK* invalidateBlocks: "
|
||||
+ blk + " on " + dn.getName() + " listed for deletion.");
|
||||
+ blk + " on " + dn + " listed for deletion.");
|
||||
}
|
||||
} else {
|
||||
NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + blk + " on "
|
||||
+ dn.getName() + " is the only copy and was not deleted.");
|
||||
+ dn + " is the only copy and was not deleted.");
|
||||
}
|
||||
}
|
||||
|
||||
@ -1224,11 +1224,11 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
|
||||
StringBuilder targetList = new StringBuilder("datanode(s)");
|
||||
for (int k = 0; k < targets.length; k++) {
|
||||
targetList.append(' ');
|
||||
targetList.append(targets[k].getName());
|
||||
targetList.append(targets[k]);
|
||||
}
|
||||
NameNode.stateChangeLog.info(
|
||||
"BLOCK* ask "
|
||||
+ rw.srcNode.getName() + " to replicate "
|
||||
+ rw.srcNode + " to replicate "
|
||||
+ rw.block + " to " + targetList);
|
||||
}
|
||||
}
|
||||
@ -1410,15 +1410,15 @@ public void processReport(final DatanodeID nodeID, final String poolId,
|
||||
try {
|
||||
final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
|
||||
if (node == null || !node.isAlive) {
|
||||
throw new IOException("ProcessReport from dead or unregistered node: "
|
||||
+ nodeID.getName());
|
||||
throw new IOException(
|
||||
"ProcessReport from dead or unregistered node: " + nodeID);
|
||||
}
|
||||
|
||||
// To minimize startup time, we discard any second (or later) block reports
|
||||
// that we receive while still in startup phase.
|
||||
if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) {
|
||||
NameNode.stateChangeLog.info("BLOCK* processReport: "
|
||||
+ "discarded non-initial block report from " + nodeID.getName()
|
||||
+ "discarded non-initial block report from " + nodeID
|
||||
+ " because namenode still in startup phase");
|
||||
return;
|
||||
}
|
||||
@ -1451,7 +1451,7 @@ public void processReport(final DatanodeID nodeID, final String poolId,
|
||||
// Log the block report processing stats from Namenode perspective
|
||||
NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
|
||||
NameNode.stateChangeLog.info("BLOCK* processReport: from "
|
||||
+ nodeID.getName() + ", blocks: " + newReport.getNumberOfBlocks()
|
||||
+ nodeID + ", blocks: " + newReport.getNumberOfBlocks()
|
||||
+ ", processing time: " + (endTime - startTime) + " msecs");
|
||||
}
|
||||
|
||||
@ -1511,7 +1511,7 @@ private void processReport(final DatanodeDescriptor node,
|
||||
}
|
||||
for (Block b : toInvalidate) {
|
||||
NameNode.stateChangeLog.info("BLOCK* processReport: block "
|
||||
+ b + " on " + node.getName() + " size " + b.getNumBytes()
|
||||
+ b + " on " + node + " size " + b.getNumBytes()
|
||||
+ " does not belong to any file.");
|
||||
addToInvalidates(b, node);
|
||||
}
|
||||
@ -1662,7 +1662,7 @@ private BlockInfo processReportedBlock(final DatanodeDescriptor dn,
|
||||
|
||||
if(LOG.isDebugEnabled()) {
|
||||
LOG.debug("Reported block " + block
|
||||
+ " on " + dn.getName() + " size " + block.getNumBytes()
|
||||
+ " on " + dn + " size " + block.getNumBytes()
|
||||
+ " replicaState = " + reportedState);
|
||||
}
|
||||
|
||||
@ -1837,7 +1837,7 @@ private BlockToMarkCorrupt checkReplicaCorrupt(
|
||||
// closed. So, ignore this report, assuming we will get a
|
||||
// FINALIZED replica later. See HDFS-2791
|
||||
LOG.info("Received an RBW replica for block " + storedBlock +
|
||||
" on " + dn.getName() + ": ignoring it, since the block is " +
|
||||
" on " + dn + ": ignoring it, since the block is " +
|
||||
"complete with the same generation stamp.");
|
||||
return null;
|
||||
} else {
|
||||
@ -1850,7 +1850,7 @@ private BlockToMarkCorrupt checkReplicaCorrupt(
|
||||
default:
|
||||
String msg = "Unexpected replica state " + reportedState
|
||||
+ " for block: " + storedBlock +
|
||||
" on " + dn.getName() + " size " + storedBlock.getNumBytes();
|
||||
" on " + dn + " size " + storedBlock.getNumBytes();
|
||||
// log here at WARN level since this is really a broken HDFS
|
||||
// invariant
|
||||
LOG.warn(msg);
|
||||
@ -1949,7 +1949,7 @@ private Block addStoredBlock(final BlockInfo block,
|
||||
if (storedBlock == null || storedBlock.getINode() == null) {
|
||||
// If this block does not belong to anyfile, then we are done.
|
||||
NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
|
||||
+ node.getName() + " size " + block.getNumBytes()
|
||||
+ node + " size " + block.getNumBytes()
|
||||
+ " but it does not belong to any file.");
|
||||
// we could add this block to invalidate set of this datanode.
|
||||
// it will happen in next block report otherwise.
|
||||
@ -1972,7 +1972,7 @@ private Block addStoredBlock(final BlockInfo block,
|
||||
curReplicaDelta = 0;
|
||||
NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: "
|
||||
+ "Redundant addStoredBlock request received for " + storedBlock
|
||||
+ " on " + node.getName() + " size " + storedBlock.getNumBytes());
|
||||
+ " on " + node + " size " + storedBlock.getNumBytes());
|
||||
}
|
||||
|
||||
// Now check for completion of blocks and safe block count
|
||||
@ -2035,7 +2035,7 @@ private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
|
||||
|
||||
StringBuilder sb = new StringBuilder(500);
|
||||
sb.append("BLOCK* addStoredBlock: blockMap updated: ")
|
||||
.append(node.getName())
|
||||
.append(node)
|
||||
.append(" is added to ");
|
||||
storedBlock.appendStringTo(sb);
|
||||
sb.append(" size " )
|
||||
@ -2069,7 +2069,7 @@ private void invalidateCorruptReplicas(Block blk) {
|
||||
} catch (IOException e) {
|
||||
NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " +
|
||||
"error in deleting bad block " + blk +
|
||||
" on " + node + e);
|
||||
" on " + node, e);
|
||||
gotException = true;
|
||||
}
|
||||
}
|
||||
@ -2335,7 +2335,7 @@ private void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
|
||||
//
|
||||
addToInvalidates(b, cur);
|
||||
NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
|
||||
+"("+cur.getName()+", "+b+") is added to invalidated blocks set.");
|
||||
+"("+cur+", "+b+") is added to invalidated blocks set.");
|
||||
}
|
||||
}
|
||||
|
||||
@ -2350,7 +2350,7 @@ private void addToExcessReplicate(DatanodeInfo dn, Block block) {
|
||||
excessBlocksCount++;
|
||||
if(NameNode.stateChangeLog.isDebugEnabled()) {
|
||||
NameNode.stateChangeLog.debug("BLOCK* addToExcessReplicate:"
|
||||
+ " (" + dn.getName() + ", " + block
|
||||
+ " (" + dn + ", " + block
|
||||
+ ") is added to excessReplicateMap");
|
||||
}
|
||||
}
|
||||
@ -2363,7 +2363,7 @@ private void addToExcessReplicate(DatanodeInfo dn, Block block) {
|
||||
public void removeStoredBlock(Block block, DatanodeDescriptor node) {
|
||||
if(NameNode.stateChangeLog.isDebugEnabled()) {
|
||||
NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
|
||||
+ block + " from " + node.getName());
|
||||
+ block + " from " + node);
|
||||
}
|
||||
assert (namesystem.hasWriteLock());
|
||||
{
|
||||
@ -2476,7 +2476,7 @@ private void processAndHandleReportedBlock(DatanodeDescriptor node, Block block,
|
||||
}
|
||||
for (Block b : toInvalidate) {
|
||||
NameNode.stateChangeLog.info("BLOCK* addBlock: block "
|
||||
+ b + " on " + node.getName() + " size " + b.getNumBytes()
|
||||
+ b + " on " + node + " size " + b.getNumBytes()
|
||||
+ " does not belong to any file.");
|
||||
addToInvalidates(b, node);
|
||||
}
|
||||
@ -2504,7 +2504,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID,
|
||||
NameNode.stateChangeLog
|
||||
.warn("BLOCK* processIncrementalBlockReport"
|
||||
+ " is received from dead or unregistered node "
|
||||
+ nodeID.getName());
|
||||
+ nodeID);
|
||||
throw new IOException(
|
||||
"Got incremental block report from unregistered or dead node");
|
||||
}
|
||||
@ -2526,7 +2526,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID,
|
||||
break;
|
||||
default:
|
||||
String msg =
|
||||
"Unknown block status code reported by " + nodeID.getName() +
|
||||
"Unknown block status code reported by " + nodeID +
|
||||
": " + rdbi;
|
||||
NameNode.stateChangeLog.warn(msg);
|
||||
assert false : msg; // if assertions are enabled, throw.
|
||||
@ -2535,14 +2535,14 @@ public void processIncrementalBlockReport(final DatanodeID nodeID,
|
||||
if (NameNode.stateChangeLog.isDebugEnabled()) {
|
||||
NameNode.stateChangeLog.debug("BLOCK* block "
|
||||
+ (rdbi.getStatus()) + ": " + rdbi.getBlock()
|
||||
+ " is received from " + nodeID.getName());
|
||||
+ " is received from " + nodeID);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
namesystem.writeUnlock();
|
||||
NameNode.stateChangeLog
|
||||
.debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from "
|
||||
+ nodeID.getName()
|
||||
+ nodeID
|
||||
+ " receiving: " + receiving + ", "
|
||||
+ " received: " + received + ", "
|
||||
+ " deleted: " + deleted);
|
||||
@ -2618,7 +2618,7 @@ private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
|
||||
StringBuilder nodeList = new StringBuilder();
|
||||
while (nodeIter.hasNext()) {
|
||||
DatanodeDescriptor node = nodeIter.next();
|
||||
nodeList.append(node.name);
|
||||
nodeList.append(node);
|
||||
nodeList.append(" ");
|
||||
}
|
||||
LOG.info("Block: " + block + ", Expected Replicas: "
|
||||
@ -2628,7 +2628,7 @@ private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
|
||||
+ ", excess replicas: " + num.excessReplicas()
|
||||
+ ", Is Open File: " + fileINode.isUnderConstruction()
|
||||
+ ", Datanodes having this block: " + nodeList + ", Current Datanode: "
|
||||
+ srcNode.name + ", Is current datanode decommissioning: "
|
||||
+ srcNode + ", Is current datanode decommissioning: "
|
||||
+ srcNode.isDecommissionInProgress());
|
||||
}
|
||||
|
||||
|
@ -65,14 +65,14 @@ public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
|
||||
nodes.add(dn);
|
||||
NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
|
||||
blk.getBlockName() +
|
||||
" added as corrupt on " + dn.getName() +
|
||||
" added as corrupt on " + dn +
|
||||
" by " + Server.getRemoteIp() +
|
||||
reasonText);
|
||||
} else {
|
||||
NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
|
||||
"duplicate requested for " +
|
||||
blk.getBlockName() + " to add as corrupt " +
|
||||
"on " + dn.getName() +
|
||||
"on " + dn +
|
||||
" by " + Server.getRemoteIp() +
|
||||
reasonText);
|
||||
}
|
||||
|
@ -175,19 +175,7 @@ public DatanodeDescriptor(DatanodeID nodeID) {
|
||||
*/
|
||||
public DatanodeDescriptor(DatanodeID nodeID,
|
||||
String networkLocation) {
|
||||
this(nodeID, networkLocation, null);
|
||||
}
|
||||
|
||||
/** DatanodeDescriptor constructor
|
||||
*
|
||||
* @param nodeID id of the data node
|
||||
* @param networkLocation location of the data node in network
|
||||
* @param hostName it could be different from host specified for DatanodeID
|
||||
*/
|
||||
public DatanodeDescriptor(DatanodeID nodeID,
|
||||
String networkLocation,
|
||||
String hostName) {
|
||||
this(nodeID, networkLocation, hostName, 0L, 0L, 0L, 0L, 0, 0);
|
||||
this(nodeID, networkLocation, 0L, 0L, 0L, 0L, 0, 0);
|
||||
}
|
||||
|
||||
/** DatanodeDescriptor constructor
|
||||
@ -223,14 +211,13 @@ public DatanodeDescriptor(DatanodeID nodeID,
|
||||
*/
|
||||
public DatanodeDescriptor(DatanodeID nodeID,
|
||||
String networkLocation,
|
||||
String hostName,
|
||||
long capacity,
|
||||
long dfsUsed,
|
||||
long remaining,
|
||||
long bpused,
|
||||
int xceiverCount,
|
||||
int failedVolumes) {
|
||||
super(nodeID, networkLocation, hostName);
|
||||
super(nodeID, networkLocation);
|
||||
updateHeartbeat(capacity, dfsUsed, remaining, bpused, xceiverCount,
|
||||
failedVolumes);
|
||||
}
|
||||
@ -436,23 +423,6 @@ public Block[] getInvalidateBlocks(int maxblocks) {
|
||||
}
|
||||
}
|
||||
|
||||
/** Serialization for FSEditLog */
|
||||
public void readFieldsFromFSEditLog(DataInput in) throws IOException {
|
||||
this.name = DeprecatedUTF8.readString(in);
|
||||
this.storageID = DeprecatedUTF8.readString(in);
|
||||
this.infoPort = in.readShort() & 0x0000ffff;
|
||||
|
||||
this.capacity = in.readLong();
|
||||
this.dfsUsed = in.readLong();
|
||||
this.remaining = in.readLong();
|
||||
this.blockPoolUsed = in.readLong();
|
||||
this.lastUpdate = in.readLong();
|
||||
this.xceiverCount = in.readInt();
|
||||
this.location = Text.readString(in);
|
||||
this.hostName = Text.readString(in);
|
||||
setAdminState(WritableUtils.readEnum(in, AdminStates.class));
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Approximate number of blocks currently scheduled to be written
|
||||
* to this datanode.
|
||||
|
@ -238,7 +238,7 @@ public DatanodeDescriptor getDatanode(DatanodeID nodeID
|
||||
final DatanodeDescriptor node = getDatanode(nodeID.getStorageID());
|
||||
if (node == null)
|
||||
return null;
|
||||
if (!node.getName().equals(nodeID.getName())) {
|
||||
if (!node.getXferAddr().equals(nodeID.getXferAddr())) {
|
||||
final UnregisteredNodeException e = new UnregisteredNodeException(
|
||||
nodeID, node);
|
||||
NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
|
||||
@ -270,7 +270,7 @@ private void removeDatanode(DatanodeDescriptor nodeInfo) {
|
||||
networktopology.remove(nodeInfo);
|
||||
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("remove datanode " + nodeInfo.getName());
|
||||
LOG.debug("remove datanode " + nodeInfo);
|
||||
}
|
||||
namesystem.checkSafeMode();
|
||||
}
|
||||
@ -288,7 +288,7 @@ public void removeDatanode(final DatanodeID node
|
||||
removeDatanode(descriptor);
|
||||
} else {
|
||||
NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
|
||||
+ node.getName() + " does not exist");
|
||||
+ node + " does not exist");
|
||||
}
|
||||
} finally {
|
||||
namesystem.writeUnlock();
|
||||
@ -306,7 +306,7 @@ void removeDeadDatanode(final DatanodeID nodeID) {
|
||||
}
|
||||
if (d != null && isDatanodeDead(d)) {
|
||||
NameNode.stateChangeLog.info(
|
||||
"BLOCK* removeDeadDatanode: lost heartbeat from " + d.getName());
|
||||
"BLOCK* removeDeadDatanode: lost heartbeat from " + d);
|
||||
removeDatanode(d);
|
||||
}
|
||||
}
|
||||
@ -332,19 +332,19 @@ private void addDatanode(final DatanodeDescriptor node) {
|
||||
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug(getClass().getSimpleName() + ".addDatanode: "
|
||||
+ "node " + node.getName() + " is added to datanodeMap.");
|
||||
+ "node " + node + " is added to datanodeMap.");
|
||||
}
|
||||
}
|
||||
|
||||
/** Physically remove node from datanodeMap. */
|
||||
private void wipeDatanode(final DatanodeID node) throws IOException {
|
||||
private void wipeDatanode(final DatanodeID node) {
|
||||
final String key = node.getStorageID();
|
||||
synchronized (datanodeMap) {
|
||||
host2DatanodeMap.remove(datanodeMap.remove(key));
|
||||
}
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
|
||||
+ node.getName() + "): storage " + key
|
||||
+ node + "): storage " + key
|
||||
+ " is removed from datanodeMap.");
|
||||
}
|
||||
}
|
||||
@ -354,7 +354,7 @@ private void resolveNetworkLocation (DatanodeDescriptor node) {
|
||||
List<String> names = new ArrayList<String>(1);
|
||||
if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
|
||||
// get the node's IP address
|
||||
names.add(node.getHost());
|
||||
names.add(node.getIpAddr());
|
||||
} else {
|
||||
// get the node's host name
|
||||
String hostName = node.getHostName();
|
||||
@ -376,12 +376,12 @@ private void resolveNetworkLocation (DatanodeDescriptor node) {
|
||||
node.setNetworkLocation(networkLocation);
|
||||
}
|
||||
|
||||
private boolean inHostsList(DatanodeID node, String ipAddr) {
|
||||
return checkInList(node, ipAddr, hostsReader.getHosts(), false);
|
||||
private boolean inHostsList(DatanodeID node) {
|
||||
return checkInList(node, hostsReader.getHosts(), false);
|
||||
}
|
||||
|
||||
private boolean inExcludedHostsList(DatanodeID node, String ipAddr) {
|
||||
return checkInList(node, ipAddr, hostsReader.getExcludedHosts(), true);
|
||||
private boolean inExcludedHostsList(DatanodeID node) {
|
||||
return checkInList(node, hostsReader.getExcludedHosts(), true);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -419,7 +419,7 @@ private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) {
|
||||
|
||||
for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
|
||||
DatanodeDescriptor node = it.next();
|
||||
if ((!inHostsList(node, null)) && (!inExcludedHostsList(node, null))
|
||||
if ((!inHostsList(node)) && (!inExcludedHostsList(node))
|
||||
&& node.isDecommissioned()) {
|
||||
// Include list is not empty, an existing datanode does not appear
|
||||
// in both include or exclude lists and it has been decommissioned.
|
||||
@ -430,38 +430,24 @@ private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the given node (of DatanodeID or ipAddress) is in the (include or
|
||||
* exclude) list. If ipAddress in null, check only based upon the given
|
||||
* DatanodeID. If ipAddress is not null, the ipAddress should refers to the
|
||||
* same host that given DatanodeID refers to.
|
||||
* Check if the given DatanodeID is in the given (include or exclude) list.
|
||||
*
|
||||
* @param node, the host DatanodeID
|
||||
* @param ipAddress, if not null, should refers to the same host
|
||||
* that DatanodeID refers to
|
||||
* @param hostsList, the list of hosts in the include/exclude file
|
||||
* @param isExcludeList, boolean, true if this is the exclude list
|
||||
* @return boolean, if in the list
|
||||
* @param node the DatanodeID to check
|
||||
* @param hostsList the list of hosts in the include/exclude file
|
||||
* @param isExcludeList true if this is the exclude list
|
||||
* @return true if the node is in the list, false otherwise
|
||||
*/
|
||||
private static boolean checkInList(final DatanodeID node,
|
||||
final String ipAddress,
|
||||
final Set<String> hostsList,
|
||||
final boolean isExcludeList) {
|
||||
final InetAddress iaddr;
|
||||
if (ipAddress != null) {
|
||||
|
||||
try {
|
||||
iaddr = InetAddress.getByName(ipAddress);
|
||||
iaddr = InetAddress.getByName(node.getIpAddr());
|
||||
} catch (UnknownHostException e) {
|
||||
LOG.warn("Unknown ip address: " + ipAddress, e);
|
||||
LOG.warn("Unknown IP: " + node.getIpAddr(), e);
|
||||
return isExcludeList;
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
iaddr = InetAddress.getByName(node.getHost());
|
||||
} catch (UnknownHostException e) {
|
||||
LOG.warn("Unknown host: " + node.getHost(), e);
|
||||
return isExcludeList;
|
||||
}
|
||||
}
|
||||
|
||||
// if include list is empty, host is in include list
|
||||
if ( (!isExcludeList) && (hostsList.isEmpty()) ){
|
||||
@ -470,10 +456,10 @@ private static boolean checkInList(final DatanodeID node,
|
||||
return // compare ipaddress(:port)
|
||||
(hostsList.contains(iaddr.getHostAddress().toString()))
|
||||
|| (hostsList.contains(iaddr.getHostAddress().toString() + ":"
|
||||
+ node.getPort()))
|
||||
+ node.getXferPort()))
|
||||
// compare hostname(:port)
|
||||
|| (hostsList.contains(iaddr.getHostName()))
|
||||
|| (hostsList.contains(iaddr.getHostName() + ":" + node.getPort()))
|
||||
|| (hostsList.contains(iaddr.getHostName() + ":" + node.getXferPort()))
|
||||
|| ((node instanceof DatanodeInfo) && hostsList
|
||||
.contains(((DatanodeInfo) node).getHostName()));
|
||||
}
|
||||
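checkInList boils down to: resolve the node's IP once, then test the list for any of ip, ip:xferPort, hostname, or hostname:xferPort. A stand-alone approximation under the same conventions (an empty include list admits everyone, an unresolvable host falls back to the exclude-list default):

    import java.net.InetAddress;
    import java.net.UnknownHostException;
    import java.util.Set;

    public class HostListCheck {
      static boolean inList(String ipAddr, int xferPort,
                            Set<String> hostsList, boolean isExcludeList) {
        final InetAddress iaddr;
        try {
          iaddr = InetAddress.getByName(ipAddr);
        } catch (UnknownHostException e) {
          // Unresolvable: treat as a hit only when checking the exclude list.
          return isExcludeList;
        }
        // An empty include list means "accept everything".
        if (!isExcludeList && hostsList.isEmpty()) {
          return true;
        }
        return hostsList.contains(iaddr.getHostAddress())
            || hostsList.contains(iaddr.getHostAddress() + ":" + xferPort)
            || hostsList.contains(iaddr.getHostName())
            || hostsList.contains(iaddr.getHostName() + ":" + xferPort);
      }
    }
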
@ -481,10 +467,9 @@ private static boolean checkInList(final DatanodeID node,
|
||||
/**
|
||||
* Decommission the node if it is in exclude list.
|
||||
*/
|
||||
private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr)
|
||||
throws IOException {
|
||||
private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) {
|
||||
// If the registered node is in exclude list, then decommission it
|
||||
if (inExcludedHostsList(nodeReg, ipAddr)) {
|
||||
if (inExcludedHostsList(nodeReg)) {
|
||||
startDecommission(nodeReg);
|
||||
}
|
||||
}
|
||||
@ -499,16 +484,16 @@ boolean checkDecommissionState(DatanodeDescriptor node) {
|
||||
if (node.isDecommissionInProgress()) {
|
||||
if (!blockManager.isReplicationInProgress(node)) {
|
||||
node.setDecommissioned();
|
||||
LOG.info("Decommission complete for node " + node.getName());
|
||||
LOG.info("Decommission complete for node " + node);
|
||||
}
|
||||
}
|
||||
return node.isDecommissioned();
|
||||
}
|
||||
|
||||
/** Start decommissioning the specified datanode. */
|
||||
private void startDecommission(DatanodeDescriptor node) throws IOException {
|
||||
private void startDecommission(DatanodeDescriptor node) {
|
||||
if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
|
||||
LOG.info("Start Decommissioning node " + node.getName() + " with " +
|
||||
LOG.info("Start Decommissioning node " + node + " with " +
|
||||
node.numBlocks() + " blocks.");
|
||||
heartbeatManager.startDecommission(node);
|
||||
node.decommissioningStatus.setStartTime(now());
|
||||
@ -519,9 +504,9 @@ private void startDecommission(DatanodeDescriptor node) throws IOException {
|
||||
}
|
||||
|
||||
/** Stop decommissioning the specified datanodes. */
|
||||
void stopDecommission(DatanodeDescriptor node) throws IOException {
|
||||
void stopDecommission(DatanodeDescriptor node) {
|
||||
if (node.isDecommissionInProgress() || node.isDecommissioned()) {
|
||||
LOG.info("Stop Decommissioning node " + node.getName());
|
||||
LOG.info("Stop Decommissioning node " + node);
|
||||
heartbeatManager.stopDecommission(node);
|
||||
blockManager.processOverReplicatedBlocksOnReCommission(node);
|
||||
}
|
||||
@ -545,41 +530,44 @@ private String newStorageID() {
|
||||
return newID;
|
||||
}
|
||||
|
||||
public void registerDatanode(DatanodeRegistration nodeReg
|
||||
) throws IOException {
|
||||
/**
|
||||
* Register the given datanode with the namenode. NB: the given
|
||||
* registration is mutated and given back to the datanode.
|
||||
*
|
||||
* @param nodeReg the datanode registration
|
||||
* @throws DisallowedDatanodeException if the registration request is
|
||||
* denied because the datanode does not match includes/excludes
|
||||
*/
|
||||
public void registerDatanode(DatanodeRegistration nodeReg)
|
||||
throws DisallowedDatanodeException {
|
||||
String dnAddress = Server.getRemoteAddress();
|
||||
if (dnAddress == null) {
|
||||
// Mostly called inside an RPC.
|
||||
// But if not, use address passed by the data-node.
|
||||
dnAddress = nodeReg.getHost();
|
||||
dnAddress = nodeReg.getIpAddr();
|
||||
}
|
||||
|
||||
// Update the IP to the address of the RPC request that is
|
||||
// registering this datanode.
|
||||
nodeReg.setIpAddr(dnAddress);
|
||||
nodeReg.setExportedKeys(blockManager.getBlockKeys());
|
||||
|
||||
// Checks if the node is not on the hosts list. If it is not, then
|
||||
// it will be disallowed from registering.
|
||||
if (!inHostsList(nodeReg, dnAddress)) {
|
||||
if (!inHostsList(nodeReg)) {
|
||||
throw new DisallowedDatanodeException(nodeReg);
|
||||
}
|
||||
|
||||
String hostName = nodeReg.getHost();
|
||||
|
||||
// update the datanode's name with ip:port
|
||||
DatanodeID dnReg = new DatanodeID(dnAddress + ":" + nodeReg.getPort(),
|
||||
nodeReg.getStorageID(),
|
||||
nodeReg.getInfoPort(),
|
||||
nodeReg.getIpcPort());
|
||||
nodeReg.updateRegInfo(dnReg);
|
||||
nodeReg.exportedKeys = blockManager.getBlockKeys();
|
||||
|
||||
NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
|
||||
+ "node registration from " + nodeReg.getName()
|
||||
+ "node registration from " + nodeReg
|
||||
+ " storage " + nodeReg.getStorageID());
|
||||
|
||||
DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
|
||||
DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getName());
|
||||
DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getXferAddr());
|
||||
|
||||
if (nodeN != null && nodeN != nodeS) {
|
||||
NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
|
||||
+ "node from name: " + nodeN.getName());
|
||||
+ "node from name: " + nodeN);
|
||||
// nodeN previously served a different data storage,
|
||||
// which is not served by anybody anymore.
|
||||
removeDatanode(nodeN);
|
||||
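The reworked registration path is: prefer the address the RPC actually came from over whatever IP the datanode reported, rewrite the registration with it, then reject the node if it is not allowed by the include list. A compressed sketch of that decision with plain types instead of DatanodeRegistration; the exception type and message are placeholders:

    import java.util.Set;

    public class RegisterSketch {
      static class Registration {
        String ipAddr;            // as reported by the datanode
        final String storageID;
        Registration(String ipAddr, String storageID) {
          this.ipAddr = ipAddr;
          this.storageID = storageID;
        }
      }

      /**
       * @param remoteAddr address seen on the RPC connection, or null if unknown
       * @param includes   include list; empty means "allow all"
       */
      static void register(Registration reg, String remoteAddr, Set<String> includes) {
        // Trust the address the request actually came from, when we have it.
        if (remoteAddr != null) {
          reg.ipAddr = remoteAddr;
        }
        if (!includes.isEmpty() && !includes.contains(reg.ipAddr)) {
          throw new IllegalStateException("Datanode not in include list: " + reg.ipAddr);
        }
        // ...continue: look up by storageID, handle re-registration, assign a
        // new storageID if the reported one is empty.
      }
    }
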
@ -608,15 +596,14 @@ nodes with its data cleared (or user can just remove the StorageID
|
||||
but this is might not work if VERSION file format has changed
|
||||
*/
|
||||
NameNode.stateChangeLog.info( "BLOCK* NameSystem.registerDatanode: "
|
||||
+ "node " + nodeS.getName()
|
||||
+ " is replaced by " + nodeReg.getName() +
|
||||
+ "node " + nodeS
|
||||
+ " is replaced by " + nodeReg +
|
||||
" with the same storageID " +
|
||||
nodeReg.getStorageID());
|
||||
}
|
||||
// update cluster map
|
||||
getNetworkTopology().remove(nodeS);
|
||||
nodeS.updateRegInfo(nodeReg);
|
||||
nodeS.setHostName(hostName);
|
||||
nodeS.setDisallowed(false); // Node is in the include list
|
||||
|
||||
// resolve network location
|
||||
@ -630,11 +617,11 @@ nodes with its data cleared (or user can just remove the StorageID
|
||||
}
|
||||
|
||||
// this is a new datanode serving a new data storage
|
||||
if (nodeReg.getStorageID().equals("")) {
|
||||
if ("".equals(nodeReg.getStorageID())) {
|
||||
// this data storage has never been registered
|
||||
// it is either empty or was created by pre-storageID version of DFS
|
||||
nodeReg.storageID = newStorageID();
|
||||
if(NameNode.stateChangeLog.isDebugEnabled()) {
|
||||
nodeReg.setStorageID(newStorageID());
|
||||
if (NameNode.stateChangeLog.isDebugEnabled()) {
|
||||
NameNode.stateChangeLog.debug(
|
||||
"BLOCK* NameSystem.registerDatanode: "
|
||||
+ "new storageID " + nodeReg.getStorageID() + " assigned.");
|
||||
@ -642,7 +629,7 @@ nodes with its data cleared (or user can just remove the StorageID
|
||||
}
|
||||
// register new datanode
|
||||
DatanodeDescriptor nodeDescr
|
||||
= new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName);
|
||||
= new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK);
|
||||
resolveNetworkLocation(nodeDescr);
|
||||
addDatanode(nodeDescr);
|
||||
checkDecommissioning(nodeDescr, dnAddress);
|
||||
@ -690,10 +677,10 @@ private void refreshHostsReader(Configuration conf) throws IOException {
|
||||
private void refreshDatanodes() throws IOException {
|
||||
for(DatanodeDescriptor node : datanodeMap.values()) {
|
||||
// Check if not include.
|
||||
if (!inHostsList(node, null)) {
|
||||
if (!inHostsList(node)) {
|
||||
node.setDisallowed(true); // case 2.
|
||||
} else {
|
||||
if (inExcludedHostsList(node, null)) {
|
||||
if (inExcludedHostsList(node)) {
|
||||
startDecommission(node); // case 3.
|
||||
} else {
|
||||
stopDecommission(node); // case 4.
|
||||
@ -820,16 +807,16 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
|
||||
}
|
||||
//Remove any form of the this datanode in include/exclude lists.
|
||||
try {
|
||||
InetAddress inet = InetAddress.getByName(dn.getHost());
|
||||
InetAddress inet = InetAddress.getByName(dn.getIpAddr());
|
||||
// compare hostname(:port)
|
||||
mustList.remove(inet.getHostName());
|
||||
mustList.remove(inet.getHostName()+":"+dn.getPort());
|
||||
mustList.remove(inet.getHostName()+":"+dn.getXferPort());
|
||||
// compare ipaddress(:port)
|
||||
mustList.remove(inet.getHostAddress().toString());
|
||||
mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getPort());
|
||||
mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getXferPort());
|
||||
} catch ( UnknownHostException e ) {
|
||||
mustList.remove(dn.getName());
|
||||
mustList.remove(dn.getHost());
|
||||
mustList.remove(dn.getIpAddr());
|
||||
LOG.warn(e);
|
||||
}
|
||||
}
|
||||
|
@ -39,10 +39,10 @@ boolean contains(DatanodeDescriptor node) {
return false;
}

String host = node.getHost();
String ipAddr = node.getIpAddr();
hostmapLock.readLock().lock();
try {
DatanodeDescriptor[] nodes = map.get(host);
DatanodeDescriptor[] nodes = map.get(ipAddr);
if (nodes != null) {
for(DatanodeDescriptor containedNode:nodes) {
if (node==containedNode) {
@ -66,8 +66,8 @@ boolean add(DatanodeDescriptor node) {
return false;
}

String host = node.getHost();
DatanodeDescriptor[] nodes = map.get(host);
String ipAddr = node.getIpAddr();
DatanodeDescriptor[] nodes = map.get(ipAddr);
DatanodeDescriptor[] newNodes;
if (nodes==null) {
newNodes = new DatanodeDescriptor[1];
@ -77,7 +77,7 @@ boolean add(DatanodeDescriptor node) {
System.arraycopy(nodes, 0, newNodes, 0, nodes.length);
newNodes[nodes.length] = node;
}
map.put(host, newNodes);
map.put(ipAddr, newNodes);
return true;
} finally {
hostmapLock.writeLock().unlock();
@ -92,17 +92,17 @@ boolean remove(DatanodeDescriptor node) {
return false;
}

String host = node.getHost();
String ipAddr = node.getIpAddr();
hostmapLock.writeLock().lock();
try {

DatanodeDescriptor[] nodes = map.get(host);
DatanodeDescriptor[] nodes = map.get(ipAddr);
if (nodes==null) {
return false;
}
if (nodes.length==1) {
if (nodes[0]==node) {
map.remove(host);
map.remove(ipAddr);
return true;
} else {
return false;
@ -122,7 +122,7 @@ boolean remove(DatanodeDescriptor node) {
newNodes = new DatanodeDescriptor[nodes.length-1];
System.arraycopy(nodes, 0, newNodes, 0, i);
System.arraycopy(nodes, i+1, newNodes, i, nodes.length-i-1);
map.put(host, newNodes);
map.put(ipAddr, newNodes);
return true;
}
} finally {
@ -130,17 +130,18 @@ boolean remove(DatanodeDescriptor node) {
}
}

/** get a data node by its host.
* @return DatanodeDescriptor if found; otherwise null.
/**
* Get a data node by its IP address.
* @return DatanodeDescriptor if found, null otherwise
*/
DatanodeDescriptor getDatanodeByHost(String host) {
if (host==null) {
DatanodeDescriptor getDatanodeByHost(String ipAddr) {
if (ipAddr == null) {
return null;
}

hostmapLock.readLock().lock();
try {
DatanodeDescriptor[] nodes = map.get(host);
DatanodeDescriptor[] nodes = map.get(ipAddr);
// no entry
if (nodes== null) {
return null;
@ -155,40 +156,4 @@ DatanodeDescriptor getDatanodeByHost(String host) {
hostmapLock.readLock().unlock();
}
}

/**
* Find data node by its name.
*
* @return DatanodeDescriptor if found or null otherwise
*/
public DatanodeDescriptor getDatanodeByName(String name) {
if (name==null) {
return null;
}

int colon = name.indexOf(":");
String host;
if (colon < 0) {
host = name;
} else {
host = name.substring(0, colon);
}

hostmapLock.readLock().lock();
try {
DatanodeDescriptor[] nodes = map.get(host);
// no entry
if (nodes== null) {
return null;
}
for(DatanodeDescriptor containedNode:nodes) {
if (name.equals(containedNode.getName())) {
return containedNode;
}
}
return null;
} finally {
hostmapLock.readLock().unlock();
}
}
}

@ -75,7 +75,7 @@ synchronized void add(final Block block, final DatanodeInfo datanode,
numBlocks++;
if (log) {
NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+ ": add " + block + " to " + datanode.getName());
+ ": add " + block + " to " + datanode);
}
}
}
@ -111,7 +111,8 @@ synchronized void dump(final PrintWriter out) {
for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
final LightWeightHashSet<Block> blocks = entry.getValue();
if (blocks.size() > 0) {
out.println(datanodeManager.getDatanode(entry.getKey()).getName() + blocks);
out.println(datanodeManager.getDatanode(entry.getKey()));
out.println(blocks);
}
}
}
@ -135,7 +136,7 @@ int invalidateWork(final String storageId) {

if (NameNode.stateChangeLog.isInfoEnabled()) {
NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+ ": ask " + dn.getName() + " to delete " + toInvalidate);
+ ": ask " + dn + " to delete " + toInvalidate);
}
return toInvalidate.size();
}

@ -88,9 +88,6 @@ private JspHelper() {}
private static class NodeRecord extends DatanodeInfo {
int frequency;

public NodeRecord() {
frequency = -1;
}
public NodeRecord(DatanodeInfo info, int count) {
super(info);
this.frequency = count;
@ -172,7 +169,7 @@ public static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom,

//just ping to check whether the node is alive
InetSocketAddress targetAddr = NetUtils.createSocketAddr(
chosenNode.getHost() + ":" + chosenNode.getInfoPort());
chosenNode.getInfoAddr());

try {
s = NetUtils.getDefaultSocketFactory(conf).createSocket();

@ -64,18 +64,12 @@
public abstract class Storage extends StorageInfo {
public static final Log LOG = LogFactory.getLog(Storage.class.getName());

// Constants

// last layout version that did not support upgrades
public static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;

// this corresponds to Hadoop-0.14.
public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7;
protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.14";

/* this should be removed when LAST_UPGRADABLE_LV goes beyond -13.
* any upgrade code that uses this constant should also be removed. */
public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
// this corresponds to Hadoop-0.18
public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -16;
protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.18";

/** Layout versions of 0.20.203 release */
public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};

@ -325,10 +325,10 @@ synchronized void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOExcep
void registrationSucceeded(BPServiceActor bpServiceActor,
DatanodeRegistration reg) throws IOException {
if (bpRegistration != null) {
checkNSEquality(bpRegistration.storageInfo.getNamespaceID(),
reg.storageInfo.getNamespaceID(), "namespace ID");
checkNSEquality(bpRegistration.storageInfo.getClusterID(),
reg.storageInfo.getClusterID(), "cluster ID");
checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
reg.getStorageInfo().getNamespaceID(), "namespace ID");
checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
reg.getStorageInfo().getClusterID(), "cluster ID");
} else {
bpRegistration = reg;
}

@ -602,7 +602,7 @@ void register() throws IOException {

while (shouldRun()) {
try {
// Use returned registration from namenode with updated machine name.
// Use returned registration from namenode with updated fields
bpRegistration = bpNamenode.registerDatanode(bpRegistration);
break;
} catch(SocketTimeoutException e) { // namenode is busy

@ -164,9 +164,9 @@
|
||||
import org.mortbay.util.ajax.JSON;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.protobuf.BlockingService;
|
||||
|
||||
|
||||
/**********************************************************
|
||||
* DataNode is a class (and program) that stores a set of
|
||||
* blocks for a DFS deployment. A single deployment can
|
||||
@ -244,9 +244,10 @@ public static InetSocketAddress createSocketAddr(String target) {
|
||||
private DataStorage storage = null;
|
||||
private HttpServer infoServer = null;
|
||||
DataNodeMetrics metrics;
|
||||
private InetSocketAddress selfAddr;
|
||||
private InetSocketAddress streamingAddr;
|
||||
|
||||
private volatile String hostName; // Host name of this datanode
|
||||
private String hostName;
|
||||
private DatanodeID id;
|
||||
|
||||
boolean isBlockTokenEnabled;
|
||||
BlockPoolTokenSecretManager blockPoolTokenSecretManager;
|
||||
@ -288,6 +289,7 @@ public static InetSocketAddress createSocketAddr(String target) {
|
||||
.get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
|
||||
try {
|
||||
hostName = getHostName(conf);
|
||||
LOG.info("Configured hostname is " + hostName);
|
||||
startDataNode(conf, dataDirs, resources);
|
||||
} catch (IOException ie) {
|
||||
shutdown();
|
||||
@ -305,15 +307,24 @@ private synchronized void setClusterId(final String nsCid, final String bpid
|
||||
clusterId = nsCid;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the hostname for this datanode. If the hostname is not
|
||||
* explicitly configured in the given config, then it is determined
|
||||
* via the DNS class.
|
||||
*
|
||||
* @param config
|
||||
* @return the hostname (NB: may not be a FQDN)
|
||||
* @throws UnknownHostException if the dfs.datanode.dns.interface
|
||||
* option is used and the hostname can not be determined
|
||||
*/
|
||||
private static String getHostName(Configuration config)
|
||||
throws UnknownHostException {
|
||||
// use configured nameserver & interface to get local hostname
|
||||
String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
|
||||
if (name == null) {
|
||||
name = DNS
|
||||
.getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
|
||||
DFS_DATANODE_DNS_INTERFACE_DEFAULT), config.get(
|
||||
DFS_DATANODE_DNS_NAMESERVER_KEY,
|
||||
name = DNS.getDefaultHost(
|
||||
config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
|
||||
DFS_DATANODE_DNS_INTERFACE_DEFAULT),
|
||||
config.get(DFS_DATANODE_DNS_NAMESERVER_KEY,
|
||||
DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
|
||||
}
|
||||
return name;
|
||||
@ -485,23 +496,22 @@ private synchronized void shutdownDirectoryScanner() {
|
||||
}
|
||||
|
||||
private void initDataXceiver(Configuration conf) throws IOException {
|
||||
InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
|
||||
|
||||
// find free port or use privileged port provided
|
||||
ServerSocket ss;
|
||||
if(secureResources == null) {
|
||||
if (secureResources == null) {
|
||||
InetSocketAddress addr = DataNode.getStreamingAddr(conf);
|
||||
ss = (dnConf.socketWriteTimeout > 0) ?
|
||||
ServerSocketChannel.open().socket() : new ServerSocket();
|
||||
Server.bind(ss, streamingAddr, 0);
|
||||
Server.bind(ss, addr, 0);
|
||||
} else {
|
||||
ss = secureResources.getStreamingSocket();
|
||||
}
|
||||
ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
|
||||
// adjust machine name with the actual port
|
||||
int tmpPort = ss.getLocalPort();
|
||||
selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
|
||||
tmpPort);
|
||||
LOG.info("Opened streaming server at " + selfAddr);
|
||||
|
||||
streamingAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
|
||||
ss.getLocalPort());
|
||||
|
||||
LOG.info("Opened streaming server at " + streamingAddr);
|
||||
this.threadGroup = new ThreadGroup("dataXceiverServer");
|
||||
this.dataXceiverServer = new Daemon(threadGroup,
|
||||
new DataXceiverServer(ss, conf, this));
|
||||
@ -646,7 +656,7 @@ void startDataNode(Configuration conf,
|
||||
this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
|
||||
initIpcServer(conf);
|
||||
|
||||
metrics = DataNodeMetrics.create(conf, getMachineName());
|
||||
metrics = DataNodeMetrics.create(conf, getDisplayName());
|
||||
|
||||
blockPoolManager = new BlockPoolManager(this);
|
||||
blockPoolManager.refreshNamenodes(conf);
|
||||
@ -657,14 +667,18 @@ void startDataNode(Configuration conf,
|
||||
* @param nsInfo the namespace info from the first part of the NN handshake
|
||||
*/
|
||||
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
|
||||
DatanodeRegistration bpRegistration = createUnknownBPRegistration();
|
||||
String blockPoolId = nsInfo.getBlockPoolID();
|
||||
|
||||
final String xferIp = streamingAddr.getAddress().getHostAddress();
|
||||
DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp);
|
||||
bpRegistration.setXferPort(getXferPort());
|
||||
bpRegistration.setInfoPort(getInfoPort());
|
||||
bpRegistration.setIpcPort(getIpcPort());
|
||||
bpRegistration.setHostName(hostName);
|
||||
bpRegistration.setStorageID(getStorageId());
|
||||
StorageInfo storageInfo = storage.getBPStorage(blockPoolId);
|
||||
|
||||
StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
|
||||
if (storageInfo == null) {
|
||||
// it's null in the case of SimulatedDataSet
|
||||
bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION;
|
||||
bpRegistration.getStorageInfo().layoutVersion = HdfsConstants.LAYOUT_VERSION;
|
||||
bpRegistration.setStorageInfo(nsInfo);
|
||||
} else {
|
||||
bpRegistration.setStorageInfo(storageInfo);
|
||||
@ -679,17 +693,18 @@ DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
|
||||
* Also updates the block pool's state in the secret manager.
|
||||
*/
|
||||
synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
|
||||
String blockPoolId)
|
||||
throws IOException {
|
||||
hostName = bpRegistration.getHost();
|
||||
String blockPoolId) throws IOException {
|
||||
// Set the ID if we haven't already
|
||||
if (null == id) {
|
||||
id = bpRegistration;
|
||||
}
|
||||
|
||||
if (storage.getStorageID().equals("")) {
|
||||
// This is a fresh datanode -- take the storage ID provided by the
|
||||
// NN and persist it.
|
||||
// This is a fresh datanode, persist the NN-provided storage ID
|
||||
storage.setStorageID(bpRegistration.getStorageID());
|
||||
storage.writeAll();
|
||||
LOG.info("New storage id " + bpRegistration.getStorageID()
|
||||
+ " is assigned to data-node " + bpRegistration.getName());
|
||||
+ " is assigned to data-node " + bpRegistration);
|
||||
} else if(!storage.getStorageID().equals(bpRegistration.getStorageID())) {
|
||||
throw new IOException("Inconsistent storage IDs. Name-node returned "
|
||||
+ bpRegistration.getStorageID()
|
||||
@ -708,7 +723,7 @@ synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
|
||||
*/
|
||||
private void registerBlockPoolWithSecretManager(DatanodeRegistration bpRegistration,
|
||||
String blockPoolId) throws IOException {
|
||||
ExportedBlockKeys keys = bpRegistration.exportedKeys;
|
||||
ExportedBlockKeys keys = bpRegistration.getExportedKeys();
|
||||
isBlockTokenEnabled = keys.isBlockTokenEnabled();
|
||||
// TODO should we check that all federated nns are either enabled or
|
||||
// disabled?
|
||||
@ -728,8 +743,8 @@ private void registerBlockPoolWithSecretManager(DatanodeRegistration bpRegistrat
|
||||
}
|
||||
|
||||
blockPoolTokenSecretManager.setKeys(blockPoolId,
|
||||
bpRegistration.exportedKeys);
|
||||
bpRegistration.exportedKeys = ExportedBlockKeys.DUMMY_KEYS;
|
||||
bpRegistration.getExportedKeys());
|
||||
bpRegistration.setExportedKeys(ExportedBlockKeys.DUMMY_KEYS);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -783,18 +798,6 @@ void initBlockPool(BPOfferService bpos) throws IOException {
|
||||
data.addBlockPool(nsInfo.getBlockPoolID(), conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a DatanodeRegistration object with no valid StorageInfo.
|
||||
* This is used when reporting an error during handshake - ie
|
||||
* before we can load any specific block pool.
|
||||
*/
|
||||
private DatanodeRegistration createUnknownBPRegistration() {
|
||||
DatanodeRegistration reg = new DatanodeRegistration(getMachineName());
|
||||
reg.setInfoPort(infoServer.getPort());
|
||||
reg.setIpcPort(getIpcPort());
|
||||
return reg;
|
||||
}
|
||||
|
||||
BPOfferService[] getAllBpOs() {
|
||||
return blockPoolManager.getAllNamenodeThreads();
|
||||
}
|
||||
@ -844,8 +847,8 @@ private void registerMXBean() {
|
||||
MBeans.register("DataNode", "DataNodeInfo", this);
|
||||
}
|
||||
|
||||
int getPort() {
|
||||
return selfAddr.getPort();
|
||||
int getXferPort() {
|
||||
return streamingAddr.getPort();
|
||||
}
|
||||
|
||||
String getStorageId() {
|
||||
@ -853,14 +856,28 @@ String getStorageId() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Get host:port with host set to Datanode host and port set to the
|
||||
* port {@link DataXceiver} is serving.
|
||||
* @return host:port string
|
||||
* @return name useful for logging
|
||||
*/
|
||||
public String getMachineName() {
|
||||
return hostName + ":" + getPort();
|
||||
public String getDisplayName() {
|
||||
// NB: our DatanodeID may not be set yet
|
||||
return hostName + ":" + getIpcPort();
|
||||
}
|
||||
|
||||
/**
|
||||
* NB: The datanode can perform data transfer on the streaming
|
||||
* address however clients are given the IPC IP address for data
|
||||
* transfer, and that may be be a different address.
|
||||
*
|
||||
* @return socket address for data transfer
|
||||
*/
|
||||
public InetSocketAddress getXferAddress() {
|
||||
return streamingAddr;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the datanode's IPC port
|
||||
*/
|
||||
@VisibleForTesting
|
||||
public int getIpcPort() {
|
||||
return ipcServer.getListenerAddress().getPort();
|
||||
}
|
||||
@ -880,25 +897,6 @@ DatanodeRegistration getDNRegistrationForBP(String bpid)
|
||||
return bpos.bpRegistration;
|
||||
}
|
||||
|
||||
/**
|
||||
* get BP registration by machine and port name (host:port)
|
||||
* @param mName - the name that the NN used
|
||||
* @return BP registration
|
||||
* @throws IOException
|
||||
*/
|
||||
DatanodeRegistration getDNRegistrationByMachineName(String mName) {
|
||||
// TODO: all the BPs should have the same name as each other, they all come
|
||||
// from getName() here! and the use cases only are in tests where they just
|
||||
// call with getName(). So we could probably just make this method return
|
||||
// the first BPOS's registration. See HDFS-2609.
|
||||
BPOfferService [] bposArray = blockPoolManager.getAllNamenodeThreads();
|
||||
for (BPOfferService bpos : bposArray) {
|
||||
if(bpos.bpRegistration.getName().equals(mName))
|
||||
return bpos.bpRegistration;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates either NIO or regular depending on socketWriteTimeout.
|
||||
*/
|
||||
@ -918,8 +916,8 @@ DatanodeProtocolClientSideTranslatorPB connectToNN(
|
||||
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
|
||||
DatanodeID datanodeid, final Configuration conf, final int socketTimeout)
|
||||
throws IOException {
|
||||
final InetSocketAddress addr = NetUtils.createSocketAddr(
|
||||
datanodeid.getHost() + ":" + datanodeid.getIpcPort());
|
||||
final InetSocketAddress addr =
|
||||
NetUtils.createSocketAddr(datanodeid.getIpcAddr());
|
||||
if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
|
||||
InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr);
|
||||
}
|
||||
@ -937,17 +935,13 @@ public InterDatanodeProtocol run() throws IOException {
|
||||
}
|
||||
}
|
||||
|
||||
public InetSocketAddress getSelfAddr() {
|
||||
return selfAddr;
|
||||
}
|
||||
|
||||
DataNodeMetrics getMetrics() {
|
||||
return metrics;
|
||||
}
|
||||
|
||||
public static void setNewStorageID(DatanodeID dnId) {
|
||||
LOG.info("Datanode is " + dnId);
|
||||
dnId.storageID = createNewStorageId(dnId.getPort());
|
||||
dnId.setStorageID(createNewStorageId(dnId.getXferPort()));
|
||||
}
|
||||
|
||||
static String createNewStorageId(int port) {
|
||||
@ -1223,7 +1217,7 @@ private void transferBlock( ExtendedBlock block,
|
||||
if (LOG.isInfoEnabled()) {
|
||||
StringBuilder xfersBuilder = new StringBuilder();
|
||||
for (int i = 0; i < numTargets; i++) {
|
||||
xfersBuilder.append(xferTargets[i].getName());
|
||||
xfersBuilder.append(xferTargets[i]);
|
||||
xfersBuilder.append(" ");
|
||||
}
|
||||
LOG.info(bpReg + " Starting thread to transfer block " +
|
||||
@ -1381,7 +1375,7 @@ public void run() {
|
||||
|
||||
try {
|
||||
InetSocketAddress curTarget =
|
||||
NetUtils.createSocketAddr(targets[0].getName());
|
||||
NetUtils.createSocketAddr(targets[0].getXferAddr());
|
||||
sock = newSocket();
|
||||
NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
|
||||
sock.setSoTimeout(targets.length * dnConf.socketTimeout);
|
||||
@ -1434,9 +1428,8 @@ public void run() {
|
||||
}
|
||||
}
|
||||
} catch (IOException ie) {
|
||||
LOG.warn(
|
||||
bpReg + ":Failed to transfer " + b + " to " + targets[0].getName()
|
||||
+ " got ", ie);
|
||||
LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
|
||||
targets[0] + " got ", ie);
|
||||
// check if there are any disk problem
|
||||
checkDiskError();
|
||||
|
||||
@ -1632,7 +1625,7 @@ static ArrayList<File> getDataDirsFromURIs(Collection<URI> dataDirs,
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "DataNode{data=" + data + ", localName='" + getMachineName()
|
||||
return "DataNode{data=" + data + ", localName='" + getDisplayName()
|
||||
+ "', storageID='" + getStorageId() + "', xmitsInProgress="
|
||||
+ xmitsInProgress.get() + "}";
|
||||
}
|
||||
@ -1990,15 +1983,14 @@ void syncBlock(RecoveringBlock rBlock,
|
||||
|
||||
private static void logRecoverBlock(String who,
|
||||
ExtendedBlock block, DatanodeID[] targets) {
|
||||
StringBuilder msg = new StringBuilder(targets[0].getName());
|
||||
StringBuilder msg = new StringBuilder(targets[0].toString());
|
||||
for (int i = 1; i < targets.length; i++) {
|
||||
msg.append(", " + targets[i].getName());
|
||||
msg.append(", " + targets[i]);
|
||||
}
|
||||
LOG.info(who + " calls recoverBlock(block=" + block
|
||||
+ ", targets=[" + msg + "])");
|
||||
}
|
||||
|
||||
// ClientDataNodeProtocol implementation
|
||||
@Override // ClientDataNodeProtocol
|
||||
public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
|
||||
checkWriteAccess(block);
|
||||
@ -2076,8 +2068,7 @@ void finalizeUpgradeForPool(String blockPoolId) throws IOException {
|
||||
storage.finalizeUpgrade(blockPoolId);
|
||||
}
|
||||
|
||||
// Determine a Datanode's streaming address
|
||||
public static InetSocketAddress getStreamingAddr(Configuration conf) {
|
||||
static InetSocketAddress getStreamingAddr(Configuration conf) {
|
||||
return NetUtils.createSocketAddr(
|
||||
conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
|
||||
}
|
||||
@ -2099,8 +2090,11 @@ public String getHttpPort(){
|
||||
return this.getConf().get("dfs.datanode.info.port");
|
||||
}
|
||||
|
||||
public int getInfoPort(){
|
||||
return this.infoServer.getPort();
|
||||
/**
|
||||
* @return the datanode's http port
|
||||
*/
|
||||
public int getInfoPort() {
|
||||
return infoServer.getPort();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2142,7 +2136,7 @@ public void refreshNamenodes(Configuration conf) throws IOException {
|
||||
blockPoolManager.refreshNamenodes(conf);
|
||||
}
|
||||
|
||||
@Override //ClientDatanodeProtocol
|
||||
@Override // ClientDatanodeProtocol
|
||||
public void refreshNamenodes() throws IOException {
|
||||
conf = new Configuration();
|
||||
refreshNamenodes(conf);
|
||||
@ -2204,10 +2198,9 @@ public boolean isDatanodeFullyStarted() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/** Methods used by fault injection tests */
|
||||
@VisibleForTesting
|
||||
public DatanodeID getDatanodeId() {
|
||||
return new DatanodeID(getMachineName(), getStorageId(),
|
||||
infoServer.getPort(), getIpcPort());
|
||||
return id;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -73,9 +73,6 @@ public class DataStorage extends Storage {
|
||||
public final static String STORAGE_DIR_FINALIZED = "finalized";
|
||||
public final static String STORAGE_DIR_TMP = "tmp";
|
||||
|
||||
private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN =
|
||||
Pattern.compile("(.*blk_[-]*\\d+)\\.meta$");
|
||||
|
||||
/** Access to this variable is guarded by "this" */
|
||||
private String storageID;
|
||||
|
||||
@ -197,7 +194,7 @@ synchronized void recoverTransitionRead(DataNode datanode,
|
||||
}
|
||||
|
||||
// make sure we have storage id set - if not - generate new one
|
||||
createStorageID(datanode.getPort());
|
||||
createStorageID(datanode.getXferPort());
|
||||
|
||||
// 3. Update all storages. Some of them might have just been formatted.
|
||||
this.writeAll();
|
||||
@ -669,13 +666,6 @@ static void linkBlocks(File from, File to, int oldLV, HardLink hl)
|
||||
in.close();
|
||||
}
|
||||
} else {
|
||||
|
||||
//check if we are upgrading from pre-generation stamp version.
|
||||
if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) {
|
||||
// Link to the new file name.
|
||||
to = new File(convertMetatadataFileName(to.getAbsolutePath()));
|
||||
}
|
||||
|
||||
HardLink.createHardLink(from, to);
|
||||
hl.linkStats.countSingleLinks++;
|
||||
}
|
||||
@ -687,31 +677,14 @@ static void linkBlocks(File from, File to, int oldLV, HardLink hl)
|
||||
if (!to.mkdirs())
|
||||
throw new IOException("Cannot create directory " + to);
|
||||
|
||||
//If upgrading from old stuff, need to munge the filenames. That has to
|
||||
//be done one file at a time, so hardlink them one at a time (slow).
|
||||
if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) {
|
||||
String[] blockNames = from.list(new java.io.FilenameFilter() {
|
||||
public boolean accept(File dir, String name) {
|
||||
return name.startsWith(BLOCK_SUBDIR_PREFIX)
|
||||
|| name.startsWith(BLOCK_FILE_PREFIX)
|
||||
|| name.startsWith(COPY_FILE_PREFIX);
|
||||
}
|
||||
});
|
||||
if (blockNames.length == 0) {
|
||||
hl.linkStats.countEmptyDirs++;
|
||||
}
|
||||
else for(int i = 0; i < blockNames.length; i++)
|
||||
linkBlocks(new File(from, blockNames[i]),
|
||||
new File(to, blockNames[i]), oldLV, hl);
|
||||
}
|
||||
else {
|
||||
//If upgrading from a relatively new version, we only need to create
|
||||
//links with the same filename. This can be done in bulk (much faster).
|
||||
String[] blockNames = from.list(new java.io.FilenameFilter() {
|
||||
public boolean accept(File dir, String name) {
|
||||
return name.startsWith(BLOCK_FILE_PREFIX);
|
||||
}
|
||||
});
|
||||
|
||||
// Block files just need hard links with the same file names
|
||||
// but a different directory
|
||||
if (blockNames.length > 0) {
|
||||
HardLink.createHardLinkMult(from, blockNames, to);
|
||||
hl.linkStats.countMultLinks++;
|
||||
@ -720,7 +693,7 @@ public boolean accept(File dir, String name) {
|
||||
hl.linkStats.countEmptyDirs++;
|
||||
}
|
||||
|
||||
//now take care of the rest of the files and subdirectories
|
||||
// Now take care of the rest of the files and subdirectories
|
||||
String[] otherNames = from.list(new java.io.FilenameFilter() {
|
||||
public boolean accept(File dir, String name) {
|
||||
return name.startsWith(BLOCK_SUBDIR_PREFIX)
|
||||
@ -731,7 +704,6 @@ public boolean accept(File dir, String name) {
|
||||
linkBlocks(new File(from, otherNames[i]),
|
||||
new File(to, otherNames[i]), oldLV, hl);
|
||||
}
|
||||
}
|
||||
|
||||
private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um,
|
||||
NamespaceInfo nsInfo
|
||||
@ -741,22 +713,6 @@ private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um,
|
||||
um.initializeUpgrade(nsInfo);
|
||||
}
|
||||
|
||||
/**
|
||||
* This is invoked on target file names when upgrading from pre generation
|
||||
* stamp version (version -13) to correct the metatadata file name.
|
||||
* @param oldFileName
|
||||
* @return the new metadata file name with the default generation stamp.
|
||||
*/
|
||||
private static String convertMetatadataFileName(String oldFileName) {
|
||||
Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName);
|
||||
if (matcher.matches()) {
|
||||
//return the current metadata file name
|
||||
return DatanodeUtil.getMetaFileName(matcher.group(1),
|
||||
GenerationStamp.GRANDFATHER_GENERATION_STAMP);
|
||||
}
|
||||
return oldFileName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add bpStorage into bpStorageMap
|
||||
*/
|
||||
|
@ -168,13 +168,13 @@ public void run() {
|
||||
++opsProcessed;
|
||||
} while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0);
|
||||
} catch (Throwable t) {
|
||||
LOG.error(datanode.getMachineName() + ":DataXceiver error processing " +
|
||||
LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
|
||||
((op == null) ? "unknown" : op.name()) + " operation " +
|
||||
" src: " + remoteAddress +
|
||||
" dest: " + localAddress, t);
|
||||
} finally {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug(datanode.getMachineName() + ":Number of active connections is: "
|
||||
LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
|
||||
+ datanode.getXceiverCount());
|
||||
}
|
||||
updateCurrentThreadName("Cleaning up");
|
||||
@ -352,7 +352,7 @@ public void writeBlock(final ExtendedBlock block,
|
||||
if (targets.length > 0) {
|
||||
InetSocketAddress mirrorTarget = null;
|
||||
// Connect to backup machine
|
||||
mirrorNode = targets[0].getName();
|
||||
mirrorNode = targets[0].getXferAddr();
|
||||
mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
|
||||
mirrorSock = datanode.newSocket();
|
||||
try {
|
||||
@ -667,8 +667,8 @@ public void replaceBlock(final ExtendedBlock block,
|
||||
|
||||
try {
|
||||
// get the output stream to the proxy
|
||||
InetSocketAddress proxyAddr = NetUtils.createSocketAddr(
|
||||
proxySource.getName());
|
||||
InetSocketAddress proxyAddr =
|
||||
NetUtils.createSocketAddr(proxySource.getXferAddr());
|
||||
proxySock = datanode.newSocket();
|
||||
NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
|
||||
proxySock.setSoTimeout(dnConf.socketTimeout);
|
||||
@ -820,7 +820,7 @@ private void checkAccess(DataOutputStream out, final boolean reply,
|
||||
if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
|
||||
DatanodeRegistration dnR =
|
||||
datanode.getDNRegistrationForBP(blk.getBlockPoolId());
|
||||
resp.setFirstBadLink(dnR.getName());
|
||||
resp.setFirstBadLink(dnR.getXferAddr());
|
||||
}
|
||||
resp.build().writeDelimitedTo(out);
|
||||
out.flush();
|
||||
|
@ -152,11 +152,11 @@ public void run() {
|
||||
// another thread closed our listener socket - that's expected during shutdown,
|
||||
// but not in other circumstances
|
||||
if (datanode.shouldRun) {
|
||||
LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ace);
|
||||
LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ace);
|
||||
}
|
||||
} catch (IOException ie) {
|
||||
IOUtils.closeSocket(s);
|
||||
LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ie);
|
||||
LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ie);
|
||||
} catch (OutOfMemoryError ie) {
|
||||
IOUtils.closeSocket(s);
|
||||
// DataNode can run out of memory if there is too many transfers.
|
||||
@ -169,7 +169,7 @@ public void run() {
|
||||
// ignore
|
||||
}
|
||||
} catch (Throwable te) {
|
||||
LOG.error(datanode.getMachineName()
|
||||
LOG.error(datanode.getDisplayName()
|
||||
+ ":DataXceiverServer: Exiting due to: ", te);
|
||||
datanode.shouldRun = false;
|
||||
}
|
||||
@ -177,7 +177,7 @@ public void run() {
|
||||
try {
|
||||
ss.close();
|
||||
} catch (IOException ie) {
|
||||
LOG.warn(datanode.getMachineName()
|
||||
LOG.warn(datanode.getDisplayName()
|
||||
+ " :DataXceiverServer: close exception", ie);
|
||||
}
|
||||
}
|
||||
@ -188,7 +188,7 @@ void kill() {
|
||||
try {
|
||||
this.ss.close();
|
||||
} catch (IOException ie) {
|
||||
LOG.warn(datanode.getMachineName() + ":DataXceiverServer.kill(): ", ie);
|
||||
LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie);
|
||||
}
|
||||
|
||||
// close all the sockets that were accepted earlier
|
||||
|
@ -136,10 +136,8 @@ static void generateDirectoryStructure(JspWriter out,
|
||||
out.print("Empty file");
|
||||
} else {
|
||||
DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
|
||||
String fqdn = canonicalize(chosenNode.getHost());
|
||||
String datanodeAddr = chosenNode.getName();
|
||||
int datanodePort = Integer.parseInt(datanodeAddr.substring(
|
||||
datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
|
||||
String fqdn = canonicalize(chosenNode.getIpAddr());
|
||||
int datanodePort = chosenNode.getXferPort();
|
||||
String redirectLocation = "http://" + fqdn + ":"
|
||||
+ chosenNode.getInfoPort() + "/browseBlock.jsp?blockId="
|
||||
+ firstBlock.getBlock().getBlockId() + "&blockSize="
|
||||
@ -313,7 +311,7 @@ static void generateFileDetails(JspWriter out,
|
||||
dfs.close();
|
||||
return;
|
||||
}
|
||||
String fqdn = canonicalize(chosenNode.getHost());
|
||||
String fqdn = canonicalize(chosenNode.getIpAddr());
|
||||
String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
|
||||
+ "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
|
||||
+ "&namenodeInfoPort=" + namenodeInfoPort
|
||||
@ -360,10 +358,9 @@ static void generateFileDetails(JspWriter out,
|
||||
out.print("<td>" + blockidstring + ":</td>");
|
||||
DatanodeInfo[] locs = cur.getLocations();
|
||||
for (int j = 0; j < locs.length; j++) {
|
||||
String datanodeAddr = locs[j].getName();
|
||||
datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr
|
||||
.indexOf(':') + 1, datanodeAddr.length()));
|
||||
fqdn = canonicalize(locs[j].getHost());
|
||||
String datanodeAddr = locs[j].getXferAddr();
|
||||
datanodePort = locs[j].getXferPort();
|
||||
fqdn = canonicalize(locs[j].getIpAddr());
|
||||
String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
|
||||
+ "/browseBlock.jsp?blockId=" + blockidstring
|
||||
+ "&blockSize=" + blockSize
|
||||
@ -519,10 +516,8 @@ static void generateFileChunks(JspWriter out, HttpServletRequest req,
|
||||
nextStartOffset = 0;
|
||||
nextBlockSize = nextBlock.getBlock().getNumBytes();
|
||||
DatanodeInfo d = JspHelper.bestNode(nextBlock, conf);
|
||||
String datanodeAddr = d.getName();
|
||||
nextDatanodePort = Integer.parseInt(datanodeAddr.substring(
|
||||
datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
|
||||
nextHost = d.getHost();
|
||||
nextDatanodePort = d.getXferPort();
|
||||
nextHost = d.getIpAddr();
|
||||
nextPort = d.getInfoPort();
|
||||
}
|
||||
}
|
||||
@ -573,10 +568,8 @@ static void generateFileChunks(JspWriter out, HttpServletRequest req,
|
||||
prevStartOffset = 0;
|
||||
prevBlockSize = prevBlock.getBlock().getNumBytes();
|
||||
DatanodeInfo d = JspHelper.bestNode(prevBlock, conf);
|
||||
String datanodeAddr = d.getName();
|
||||
prevDatanodePort = Integer.parseInt(datanodeAddr.substring(
|
||||
datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
|
||||
prevHost = d.getHost();
|
||||
prevDatanodePort = d.getXferPort();
|
||||
prevHost = d.getIpAddr();
|
||||
prevPort = d.getInfoPort();
|
||||
}
|
||||
}
|
||||
@ -693,7 +686,8 @@ static void generateFileChunksForTail(JspWriter out, HttpServletRequest req,
|
||||
dfs.close();
|
||||
return;
|
||||
}
|
||||
InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName());
|
||||
InetSocketAddress addr =
|
||||
NetUtils.createSocketAddr(chosenNode.getXferAddr());
|
||||
// view the last chunkSizeToView bytes while Tailing
|
||||
final long startOffset = blockSize >= chunkSizeToView ? blockSize
|
||||
- chunkSizeToView : 0;
|
||||
|
@ -55,7 +55,7 @@ synchronized void initializeUpgrade(NamespaceInfo nsInfo) throws IOException {
|
||||
if( ! super.initializeUpgrade())
|
||||
return; // distr upgrade is not needed
|
||||
DataNode.LOG.info("\n Distributed upgrade for DataNode "
|
||||
+ dataNode.getMachineName()
|
||||
+ dataNode.getDisplayName()
|
||||
+ " version " + getUpgradeVersion() + " to current LV "
|
||||
+ HdfsConstants.LAYOUT_VERSION + " is initialized.");
|
||||
UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
|
||||
@ -113,7 +113,7 @@ public synchronized boolean startUpgrade() throws IOException {
|
||||
upgradeDaemon = new Daemon(curUO);
|
||||
upgradeDaemon.start();
|
||||
DataNode.LOG.info("\n Distributed upgrade for DataNode "
|
||||
+ dataNode.getMachineName()
|
||||
+ dataNode.getDisplayName()
|
||||
+ " version " + getUpgradeVersion() + " to current LV "
|
||||
+ HdfsConstants.LAYOUT_VERSION + " is started.");
|
||||
return true;
|
||||
@ -128,7 +128,7 @@ synchronized void processUpgradeCommand(UpgradeCommand command
|
||||
if(startUpgrade()) // upgrade started
|
||||
return;
|
||||
throw new IOException(
|
||||
"Distributed upgrade for DataNode " + dataNode.getMachineName()
|
||||
"Distributed upgrade for DataNode " + dataNode.getDisplayName()
|
||||
+ " version " + getUpgradeVersion() + " to current LV "
|
||||
+ HdfsConstants.LAYOUT_VERSION + " cannot be started. "
|
||||
+ "The upgrade object is not defined.");
|
||||
@ -143,7 +143,7 @@ public synchronized void completeUpgrade() throws IOException {
|
||||
currentUpgrades = null;
|
||||
upgradeDaemon = null;
|
||||
DataNode.LOG.info("\n Distributed upgrade for DataNode "
|
||||
+ dataNode.getMachineName()
|
||||
+ dataNode.getDisplayName()
|
||||
+ " version " + getUpgradeVersion() + " to current LV "
|
||||
+ HdfsConstants.LAYOUT_VERSION + " is complete.");
|
||||
}
|
||||
|
@ -62,7 +62,6 @@
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
|
||||
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
|
||||
import org.apache.hadoop.hdfs.util.Holder;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
|
||||
@ -231,37 +230,13 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
|
||||
// get name and replication
|
||||
final short replication = fsNamesys.getBlockManager(
|
||||
).adjustReplication(addCloseOp.replication);
|
||||
PermissionStatus permissions = fsNamesys.getUpgradePermission();
|
||||
if (addCloseOp.permissions != null) {
|
||||
permissions = addCloseOp.permissions;
|
||||
}
|
||||
long blockSize = addCloseOp.blockSize;
|
||||
|
||||
// Versions of HDFS prior to 0.17 may log an OP_ADD transaction
|
||||
// which includes blocks in it. When we update the minimum
|
||||
// upgrade version to something more recent than 0.17, we can
|
||||
// simplify this code by asserting that OP_ADD transactions
|
||||
// don't have any blocks.
|
||||
|
||||
// Older versions of HDFS does not store the block size in inode.
|
||||
// If the file has more than one block, use the size of the
|
||||
// first block as the blocksize. Otherwise use the default
|
||||
// block size.
|
||||
if (-8 <= logVersion && blockSize == 0) {
|
||||
if (addCloseOp.blocks.length > 1) {
|
||||
blockSize = addCloseOp.blocks[0].getNumBytes();
|
||||
} else {
|
||||
long first = ((addCloseOp.blocks.length == 1)?
|
||||
addCloseOp.blocks[0].getNumBytes(): 0);
|
||||
blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
|
||||
}
|
||||
}
|
||||
assert addCloseOp.blocks.length == 0;
|
||||
|
||||
// add to the file tree
|
||||
newFile = (INodeFile)fsDir.unprotectedAddFile(
|
||||
addCloseOp.path, permissions,
|
||||
addCloseOp.path, addCloseOp.permissions,
|
||||
replication, addCloseOp.mtime,
|
||||
addCloseOp.atime, blockSize,
|
||||
addCloseOp.atime, addCloseOp.blockSize,
|
||||
true, addCloseOp.clientName, addCloseOp.clientMachine);
|
||||
fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
|
||||
|
||||
@ -373,12 +348,7 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
|
||||
}
|
||||
case OP_MKDIR: {
|
||||
MkdirOp mkdirOp = (MkdirOp)op;
|
||||
PermissionStatus permissions = fsNamesys.getUpgradePermission();
|
||||
if (mkdirOp.permissions != null) {
|
||||
permissions = mkdirOp.permissions;
|
||||
}
|
||||
|
||||
fsDir.unprotectedMkdir(mkdirOp.path, permissions,
|
||||
fsDir.unprotectedMkdir(mkdirOp.path, mkdirOp.permissions,
|
||||
mkdirOp.timestamp);
|
||||
break;
|
||||
}
|
||||
@ -493,9 +463,6 @@ private void applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
|
||||
// no data in here currently.
|
||||
break;
|
||||
}
|
||||
case OP_DATANODE_ADD:
|
||||
case OP_DATANODE_REMOVE:
|
||||
break;
|
||||
default:
|
||||
throw new IOException("Invalid operation read " + op.opCode);
|
||||
}
|
||||
|
@ -30,11 +30,8 @@
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
|
||||
import org.apache.hadoop.util.PureJavaCrc32;
|
||||
|
||||
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
|
||||
@ -81,8 +78,6 @@ protected EnumMap<FSEditLogOpCodes, FSEditLogOp> initialValue() {
|
||||
instances.put(OP_DELETE, new DeleteOp());
|
||||
instances.put(OP_MKDIR, new MkdirOp());
|
||||
instances.put(OP_SET_GENSTAMP, new SetGenstampOp());
|
||||
instances.put(OP_DATANODE_ADD, new DatanodeAddOp());
|
||||
instances.put(OP_DATANODE_REMOVE, new DatanodeRemoveOp());
|
||||
instances.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
|
||||
instances.put(OP_SET_OWNER, new SetOwnerOp());
|
||||
instances.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
|
||||
@ -147,7 +142,6 @@ static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatin
|
||||
PermissionStatus permissions;
|
||||
String clientName;
|
||||
String clientMachine;
|
||||
//final DatanodeDescriptor[] dataNodeDescriptors; UNUSED
|
||||
|
||||
private AddCloseOp(FSEditLogOpCodes opCode) {
|
||||
super(opCode);
|
||||
@ -226,13 +220,10 @@ void writeFields(DataOutputStream out) throws IOException {
|
||||
@Override
|
||||
void readFields(DataInputStream in, int logVersion)
|
||||
throws IOException {
|
||||
// versions > 0 support per file replication
|
||||
// get name and replication
|
||||
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
|
||||
this.length = in.readInt();
|
||||
}
|
||||
if (-7 == logVersion && length != 3||
|
||||
-17 < logVersion && logVersion < -7 && length != 4 ||
|
||||
if ((-17 < logVersion && length != 4) ||
|
||||
(logVersion <= -17 && length != 5 && !LayoutVersion.supports(
|
||||
Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
|
||||
throw new IOException("Incorrect data format." +
|
||||
@ -259,49 +250,26 @@ void readFields(DataInputStream in, int logVersion)
|
||||
} else {
|
||||
this.atime = 0;
|
||||
}
|
||||
if (logVersion < -7) {
|
||||
|
||||
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
|
||||
this.blockSize = FSImageSerialization.readLong(in);
|
||||
} else {
|
||||
this.blockSize = readLong(in);
|
||||
}
|
||||
} else {
|
||||
this.blockSize = 0;
|
||||
}
|
||||
|
||||
// get blocks
|
||||
this.blocks = readBlocks(in, logVersion);
|
||||
|
||||
if (logVersion <= -11) {
|
||||
this.permissions = PermissionStatus.read(in);
|
||||
} else {
|
||||
this.permissions = null;
|
||||
}
|
||||
|
||||
// clientname, clientMachine and block locations of last block.
|
||||
if (this.opCode == OP_ADD && logVersion <= -12) {
|
||||
if (this.opCode == OP_ADD) {
|
||||
this.clientName = FSImageSerialization.readString(in);
|
||||
this.clientMachine = FSImageSerialization.readString(in);
|
||||
if (-13 <= logVersion) {
|
||||
readDatanodeDescriptorArray(in);
|
||||
}
|
||||
} else {
|
||||
this.clientName = "";
|
||||
this.clientMachine = "";
|
||||
}
|
||||
}
|
||||
|
||||
/** This method is defined for compatibility reason. */
|
||||
private static DatanodeDescriptor[] readDatanodeDescriptorArray(DataInput in)
|
||||
throws IOException {
|
||||
DatanodeDescriptor[] locations = new DatanodeDescriptor[in.readInt()];
|
||||
for (int i = 0; i < locations.length; i++) {
|
||||
locations[i] = new DatanodeDescriptor();
|
||||
locations[i].readFieldsFromFSEditLog(in);
|
||||
}
|
||||
return locations;
|
||||
}
|
||||
|
||||
private static Block[] readBlocks(
|
||||
DataInputStream in,
|
||||
int logVersion) throws IOException {
|
||||
@ -309,14 +277,7 @@ private static Block[] readBlocks(
|
||||
Block[] blocks = new Block[numBlocks];
|
||||
for (int i = 0; i < numBlocks; i++) {
|
||||
Block blk = new Block();
|
||||
if (logVersion <= -14) {
|
||||
blk.readFields(in);
|
||||
} else {
|
||||
BlockTwo oldblk = new BlockTwo();
|
||||
oldblk.readFields(in);
|
||||
blk.set(oldblk.blkid, oldblk.len,
|
||||
GenerationStamp.GRANDFATHER_GENERATION_STAMP);
|
||||
}
|
||||
blocks[i] = blk;
|
||||
}
|
||||
return blocks;
|
||||
@ -788,17 +749,14 @@ void writeFields(DataOutputStream out) throws IOException {
|
||||
}
|
||||
|
||||
@Override
|
||||
void readFields(DataInputStream in, int logVersion)
|
||||
throws IOException {
|
||||
|
||||
void readFields(DataInputStream in, int logVersion) throws IOException {
|
||||
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
|
||||
this.length = in.readInt();
|
||||
}
|
||||
if (-17 < logVersion && length != 2 ||
|
||||
logVersion <= -17 && length != 3
|
||||
&& !LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
|
||||
throw new IOException("Incorrect data format. "
|
||||
+ "Mkdir operation.");
|
||||
throw new IOException("Incorrect data format. Mkdir operation.");
|
||||
}
|
||||
this.path = FSImageSerialization.readString(in);
|
||||
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
|
||||
@ -811,7 +769,6 @@ void readFields(DataInputStream in, int logVersion)
|
||||
// However, currently this is not being updated/used because of
|
||||
// performance reasons.
|
||||
if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
|
||||
/* unused this.atime = */
|
||||
if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
|
||||
FSImageSerialization.readLong(in);
|
||||
} else {
|
||||
@ -819,11 +776,7 @@ void readFields(DataInputStream in, int logVersion)
|
||||
}
|
||||
}
|
||||
|
||||
if (logVersion <= -11) {
|
||||
this.permissions = PermissionStatus.read(in);
|
||||
} else {
|
||||
this.permissions = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -888,77 +841,6 @@ public String toString() {
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
static class DatanodeAddOp extends FSEditLogOp {
|
||||
private DatanodeAddOp() {
|
||||
super(OP_DATANODE_ADD);
|
||||
}
|
||||
|
||||
static DatanodeAddOp getInstance() {
|
||||
return (DatanodeAddOp)opInstances.get()
|
||||
.get(OP_DATANODE_ADD);
|
||||
}
|
||||
|
||||
@Override
|
||||
void writeFields(DataOutputStream out) throws IOException {
|
||||
throw new IOException("Deprecated, should not write");
|
||||
}
|
||||
|
||||
@Override
|
||||
void readFields(DataInputStream in, int logVersion)
|
||||
throws IOException {
|
||||
//Datanodes are not persistent any more.
|
||||
FSImageSerialization.DatanodeImage.skipOne(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
builder.append("DatanodeAddOp [opCode=");
|
||||
builder.append(opCode);
|
||||
builder.append(", txid=");
|
||||
builder.append(txid);
|
||||
builder.append("]");
|
||||
return builder.toString();
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
static class DatanodeRemoveOp extends FSEditLogOp {
|
||||
private DatanodeRemoveOp() {
|
||||
super(OP_DATANODE_REMOVE);
|
||||
}
|
||||
|
||||
static DatanodeRemoveOp getInstance() {
|
||||
return (DatanodeRemoveOp)opInstances.get()
|
||||
.get(OP_DATANODE_REMOVE);
|
||||
}
|
||||
|
||||
@Override
|
||||
void writeFields(DataOutputStream out) throws IOException {
|
||||
throw new IOException("Deprecated, should not write");
|
||||
}
|
||||
|
||||
@Override
|
||||
void readFields(DataInputStream in, int logVersion)
|
||||
throws IOException {
|
||||
DatanodeID nodeID = new DatanodeID();
|
||||
nodeID.readFields(in);
|
||||
//Datanodes are not persistent any more.
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
builder.append("DatanodeRemoveOp [opCode=");
|
||||
builder.append(opCode);
|
||||
builder.append(", txid=");
|
||||
builder.append(txid);
|
||||
builder.append("]");
|
||||
return builder.toString();
|
||||
}
|
||||
}
|
||||
|
||||
static class SetPermissionsOp extends FSEditLogOp {
|
||||
String src;
|
||||
FsPermission permissions;
|
||||
|
@ -36,8 +36,8 @@ public enum FSEditLogOpCodes {
OP_DELETE ((byte) 2),
OP_MKDIR ((byte) 3),
OP_SET_REPLICATION ((byte) 4),
@Deprecated OP_DATANODE_ADD ((byte) 5),
@Deprecated OP_DATANODE_REMOVE((byte) 6),
@Deprecated OP_DATANODE_ADD ((byte) 5), // obsolete
@Deprecated OP_DATANODE_REMOVE((byte) 6), // obsolete
OP_SET_PERMISSIONS ((byte) 7),
OP_SET_OWNER ((byte) 8),
OP_CLOSE ((byte) 9),

@ -131,34 +131,22 @@ void load(File curFile)
|
||||
|
||||
DataInputStream in = new DataInputStream(fin);
|
||||
try {
|
||||
/*
|
||||
* Note: Remove any checks for version earlier than
|
||||
* Storage.LAST_UPGRADABLE_LAYOUT_VERSION since we should never get
|
||||
* to here with older images.
|
||||
*/
|
||||
|
||||
/*
|
||||
* TODO we need to change format of the image file
|
||||
* it should not contain version and namespace fields
|
||||
*/
|
||||
// read image version: first appeared in version -1
|
||||
int imgVersion = in.readInt();
|
||||
if(getLayoutVersion() != imgVersion)
|
||||
if (getLayoutVersion() != imgVersion) {
|
||||
throw new InconsistentFSStateException(curFile,
|
||||
"imgVersion " + imgVersion +
|
||||
" expected to be " + getLayoutVersion());
|
||||
}
|
||||
|
||||
// read namespaceID: first appeared in version -2
|
||||
in.readInt();
|
||||
|
||||
// read number of files
|
||||
long numFiles = readNumFiles(in);
|
||||
long numFiles = in.readLong();
|
||||
|
||||
// read in the last generation stamp.
|
||||
if (imgVersion <= -12) {
|
||||
long genstamp = in.readLong();
|
||||
namesystem.setGenerationStamp(genstamp);
|
||||
}
|
||||
|
||||
// read the transaction ID of the last edit represented by
|
||||
// this image
|
||||
@ -168,7 +156,6 @@ void load(File curFile)
|
||||
imgTxId = 0;
|
||||
}
|
||||
|
||||
|
||||
// read compression related info
|
||||
FSImageCompression compression;
|
||||
if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
|
||||
@ -189,13 +176,9 @@ void load(File curFile)
|
||||
loadFullNameINodes(numFiles, in);
|
||||
}
|
||||
|
||||
// load datanode info
|
||||
this.loadDatanodes(in);
|
||||
loadFilesUnderConstruction(in);
|
||||
|
||||
// load Files Under Construction
|
||||
this.loadFilesUnderConstruction(in);
|
||||
|
||||
this.loadSecretManagerState(in);
|
||||
loadSecretManagerState(in);
|
||||
|
||||
// make sure to read to the end of file
|
||||
int eof = in.read();
|
||||
@ -335,44 +318,21 @@ private INode loadINode(DataInputStream in)
|
||||
if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
|
||||
atime = in.readLong();
|
||||
}
|
||||
if (imgVersion <= -8) {
|
||||
blockSize = in.readLong();
|
||||
}
|
||||
int numBlocks = in.readInt();
|
||||
BlockInfo blocks[] = null;
|
||||
|
||||
// for older versions, a blocklist of size 0
|
||||
// indicates a directory.
|
||||
if ((-9 <= imgVersion && numBlocks > 0) ||
|
||||
(imgVersion < -9 && numBlocks >= 0)) {
|
||||
if (numBlocks >= 0) {
|
||||
blocks = new BlockInfo[numBlocks];
|
||||
for (int j = 0; j < numBlocks; j++) {
|
||||
blocks[j] = new BlockInfo(replication);
|
||||
if (-14 < imgVersion) {
|
||||
blocks[j].set(in.readLong(), in.readLong(),
|
||||
GenerationStamp.GRANDFATHER_GENERATION_STAMP);
|
||||
} else {
|
||||
blocks[j].readFields(in);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Older versions of HDFS does not store the block size in inode.
|
||||
// If the file has more than one block, use the size of the
|
||||
// first block as the blocksize. Otherwise use the default block size.
|
||||
//
|
||||
if (-8 <= imgVersion && blockSize == 0) {
|
||||
if (numBlocks > 1) {
|
||||
blockSize = blocks[0].getNumBytes();
|
||||
} else {
|
||||
long first = ((numBlocks == 1) ? blocks[0].getNumBytes(): 0);
|
||||
blockSize = Math.max(namesystem.getDefaultBlockSize(), first);
|
||||
}
|
||||
}
|
||||
|
||||
// get quota only when the node is a directory
|
||||
long nsQuota = -1L;
|
||||
if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)
|
||||
&& blocks == null && numBlocks == -1) {
|
||||
if (blocks == null && numBlocks == -1) {
|
||||
nsQuota = in.readLong();
|
||||
}
|
||||
long dsQuota = -1L;
|
||||
@ -387,37 +347,15 @@ private INode loadINode(DataInputStream in)
|
||||
symlink = Text.readString(in);
|
||||
}
|
||||
|
||||
PermissionStatus permissions = namesystem.getUpgradePermission();
|
||||
if (imgVersion <= -11) {
|
||||
permissions = PermissionStatus.read(in);
|
||||
}
|
||||
PermissionStatus permissions = PermissionStatus.read(in);
|
||||
|
||||
return INode.newINode(permissions, blocks, symlink, replication,
|
||||
modificationTime, atime, nsQuota, dsQuota, blockSize);
|
||||
}
|
||||
|
||||
private void loadDatanodes(DataInputStream in)
|
||||
throws IOException {
|
||||
int imgVersion = getLayoutVersion();
|
||||
|
||||
if (imgVersion > -3) // pre datanode image version
|
||||
return;
|
||||
if (imgVersion <= -12) {
|
||||
return; // new versions do not store the datanodes any more.
|
||||
}
|
||||
int size = in.readInt();
|
||||
for(int i = 0; i < size; i++) {
|
||||
// We don't need to add these descriptors any more.
|
||||
FSImageSerialization.DatanodeImage.skipOne(in);
|
||||
}
|
||||
}
|
||||
|
||||
private void loadFilesUnderConstruction(DataInputStream in)
|
||||
throws IOException {
|
||||
FSDirectory fsDir = namesystem.dir;
|
||||
int imgVersion = getLayoutVersion();
|
||||
if (imgVersion > -13) // pre lease image version
|
||||
return;
|
||||
int size = in.readInt();
|
||||
|
||||
LOG.info("Number of files under construction = " + size);
|
||||
@ -457,17 +395,6 @@ private int getLayoutVersion() {
|
||||
return namesystem.getFSImage().getStorage().getLayoutVersion();
|
||||
}
|
||||
|
||||
private long readNumFiles(DataInputStream in)
|
||||
throws IOException {
|
||||
int imgVersion = getLayoutVersion();
|
||||
|
||||
if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)) {
|
||||
return in.readLong();
|
||||
} else {
|
||||
return in.readInt();
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isRoot(byte[][] path) {
|
||||
return path.length == 1 &&
|
||||
path[0] == null;
|
||||
|
@ -17,9 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;

import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

@ -31,7 +29,6 @@
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@ -39,7 +36,6 @@
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ShortWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

/**
@ -107,13 +103,10 @@ static INodeFileUnderConstruction readINodeUnderConstruction(
String clientName = readString(in);
String clientMachine = readString(in);

// These locations are not used at all
// We previously stored locations for the last block, now we
// just record that there are none
int numLocs = in.readInt();
DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
for (i = 0; i < numLocs; i++) {
locations[i] = new DatanodeDescriptor();
locations[i].readFields(in);
}
assert numLocs == 0 : "Unexpected block locations";

return new INodeFileUnderConstruction(name,
blockReplication,
@ -320,53 +313,4 @@ public static Block[] readCompactBlockArray(
}
return ret;
}

/**
* DatanodeImage is used to store persistent information
* about datanodes into the fsImage.
*/
static class DatanodeImage implements Writable {
DatanodeDescriptor node = new DatanodeDescriptor();

static void skipOne(DataInput in) throws IOException {
DatanodeImage nodeImage = new DatanodeImage();
nodeImage.readFields(in);
}

/////////////////////////////////////////////////
// Writable
/////////////////////////////////////////////////
/**
* Public method that serializes the information about a
* Datanode to be stored in the fsImage.
*/
public void write(DataOutput out) throws IOException {
new DatanodeID(node).write(out);
out.writeLong(node.getCapacity());
out.writeLong(node.getRemaining());
out.writeLong(node.getLastUpdate());
out.writeInt(node.getXceiverCount());
}

/**
* Public method that reads a serialized Datanode
* from the fsImage.
*/
public void readFields(DataInput in) throws IOException {
DatanodeID id = new DatanodeID();
id.readFields(in);
long capacity = in.readLong();
long remaining = in.readLong();
long lastUpdate = in.readLong();
int xceiverCount = in.readInt();

// update the DatanodeDescriptor with the data we read in
node.updateRegInfo(id);
node.setStorageID(id.getStorageID());
node.setCapacity(capacity);
node.setRemaining(remaining);
node.setLastUpdate(lastUpdate);
node.setXceiverCount(xceiverCount);
}
}
}
@ -52,8 +52,6 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
@ -118,7 +116,6 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@ -267,7 +264,6 @@ private static final void logAuditEvent(UserGroupInformation ugi,
private boolean persistBlocks;
private UserGroupInformation fsOwner;
private String supergroup;
private PermissionStatus defaultPermission;
private boolean standbyShouldCheckpoint;

// Scan interval is not configurable.
@ -846,11 +842,6 @@ private void setConfigurationParameters(Configuration conf)
"must not be specified if HA is not enabled.");
}

short filePermission = (short)conf.getInt(DFS_NAMENODE_UPGRADE_PERMISSION_KEY,
DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT);
this.defaultPermission = PermissionStatus.createImmutable(
fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));

this.serverDefaults = new FsServerDefaults(
conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
@ -878,14 +869,6 @@ private void setConfigurationParameters(Configuration conf)
DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
}

/**
* Return the default path permission when upgrading from releases with no
* permissions (<=0.15) to releases with permissions (>=0.16)
*/
protected PermissionStatus getUpgradePermission() {
return defaultPermission;
}

NamespaceInfo getNamespaceInfo() {
readLock();
try {
@ -5072,6 +5055,8 @@ public String getLiveNodes() {
innerinfo.put("lastContact", getLastContact(node));
innerinfo.put("usedSpace", getDfsUsed(node));
innerinfo.put("adminState", node.getAdminState().toString());
innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed());
innerinfo.put("capacity", node.getCapacity());
info.put(node.getHostName(), innerinfo);
}
return JSON.toString(info);

@ -59,7 +59,7 @@ private URL createRedirectURL(UserGroupInformation ugi, DatanodeID host,
HttpServletRequest request, NameNode nn)
throws IOException {
final String hostname = host instanceof DatanodeInfo
? ((DatanodeInfo)host).getHostName() : host.getHost();
? ((DatanodeInfo)host).getHostName() : host.getIpAddr();
final String scheme = request.getScheme();
final int port = "https".equals(scheme)
? (Integer)getServletContext().getAttribute("datanode.https.port")

@ -59,7 +59,7 @@ private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus st
if (host instanceof DatanodeInfo) {
hostname = ((DatanodeInfo)host).getHostName();
} else {
hostname = host.getHost();
hostname = host.getIpAddr();
}
final int port = "https".equals(scheme)
? (Integer)getServletContext().getAttribute("datanode.https.port")

@ -854,7 +854,7 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+ "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks()
+ "from " + nodeReg + " " + blist.getNumberOfBlocks()
+ " blocks");
}

@ -870,7 +870,7 @@ public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
verifyRequest(nodeReg);
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
+"from "+nodeReg.getName()+" "+receivedAndDeletedBlocks.length
+"from "+nodeReg+" "+receivedAndDeletedBlocks.length
+" blocks.");
}
namesystem.getBlockManager().processIncrementalBlockReport(
@ -880,7 +880,8 @@ public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
@Override // DatanodeProtocol
public void errorReport(DatanodeRegistration nodeReg,
int errorCode, String msg) throws IOException {
String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
String dnName =
(nodeReg == null) ? "Unknown DataNode" : nodeReg.toString();

if (errorCode == DatanodeProtocol.NOTIFY) {
LOG.info("Error report from " + dnName + ": " + msg);
@ -909,13 +910,10 @@ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOExcept
}

/**
* Verify request.
* Verifies the given registration.
*
* Verifies correctness of the datanode version, registration ID, and
* if the datanode does not need to be shutdown.
*
* @param nodeReg data node registration
* @throws IOException
* @param nodeReg node registration
* @throws UnregisteredNodeException if the registration is invalid
*/
void verifyRequest(NodeRegistration nodeReg) throws IOException {
verifyVersion(nodeReg.getVersion());

@ -496,7 +496,7 @@ private void copyBlock(DFSClient dfs, LocatedBlock lblock,

try {
chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
} catch (IOException ie) {
if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
throw new IOException("Could not obtain block " + lblock);

@ -260,14 +260,14 @@ void generateHealthReport(JspWriter out, NameNode nn,
// Find out common suffix. Should this be before or after the sort?
String port_suffix = null;
if (live.size() > 0) {
String name = live.get(0).getName();
String name = live.get(0).getXferAddr();
int idx = name.indexOf(':');
if (idx > 0) {
port_suffix = name.substring(idx);
}

for (int i = 1; port_suffix != null && i < live.size(); i++) {
if (live.get(i).getName().endsWith(port_suffix) == false) {
if (live.get(i).getXferAddr().endsWith(port_suffix) == false) {
port_suffix = null;
break;
}
@ -404,7 +404,7 @@ static void redirectToRandomDataNode(ServletContext context,
final String nodeToRedirect;
int redirectPort;
if (datanode != null) {
nodeToRedirect = datanode.getHost();
nodeToRedirect = datanode.getIpAddr();
redirectPort = datanode.getInfoPort();
} else {
nodeToRedirect = nn.getHttpAddress().getHostName();
@ -466,14 +466,14 @@ private void generateNodeDataHeader(JspWriter out, DatanodeDescriptor d,
+ URLEncoder.encode("/", "UTF-8")
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnaddr);

String name = d.getHostName() + ":" + d.getPort();
String name = d.getXferAddrWithHostname();
if (!name.matches("\\d+\\.\\d+.\\d+\\.\\d+.*"))
name = name.replaceAll("\\.[^.:]*", "");
int idx = (suffix != null && name.endsWith(suffix)) ? name
.indexOf(suffix) : -1;

out.print(rowTxt() + "<td class=\"name\"><a title=\"" + d.getHost() + ":"
+ d.getPort() + "\" href=\"" + url + "\">"
out.print(rowTxt() + "<td class=\"name\"><a title=\"" + d.getXferAddr()
+ "\" href=\"" + url + "\">"
+ ((idx > 0) ? name.substring(0, idx) : name) + "</a>"
+ ((alive) ? "" : "\n"));
}
@ -599,14 +599,14 @@ void generateNodesList(ServletContext context, JspWriter out,
// Find out common suffix. Should this be before or after the sort?
String port_suffix = null;
if (live.size() > 0) {
String name = live.get(0).getName();
String name = live.get(0).getXferAddr();
int idx = name.indexOf(':');
if (idx > 0) {
port_suffix = name.substring(idx);
}

for (int i = 1; port_suffix != null && i < live.size(); i++) {
if (live.get(i).getName().endsWith(port_suffix) == false) {
if (live.get(i).getXferAddr().endsWith(port_suffix) == false) {
port_suffix = null;
break;
}

@ -80,9 +80,8 @@ public interface DatanodeProtocol {
*
* @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
* @param registration datanode registration information
* @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
* new storageID if the datanode did not have one and
* registration ID for further communication.
* @return the given {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration} with
* updated registration information
*/
public DatanodeRegistration registerDatanode(DatanodeRegistration registration
) throws IOException;

@ -49,8 +49,8 @@ public class DatanodeRegistration extends DatanodeID
});
}

public StorageInfo storageInfo;
public ExportedBlockKeys exportedKeys;
private StorageInfo storageInfo;
private ExportedBlockKeys exportedKeys;

/**
* Default constructor.
@ -62,8 +62,8 @@ public DatanodeRegistration() {
/**
* Create DatanodeRegistration
*/
public DatanodeRegistration(String nodeName) {
this(nodeName, new StorageInfo(), new ExportedBlockKeys());
public DatanodeRegistration(String ipAddr) {
this(ipAddr, new StorageInfo(), new ExportedBlockKeys());
}

public DatanodeRegistration(DatanodeID dn, StorageInfo info,
@ -73,9 +73,9 @@ public DatanodeRegistration(DatanodeID dn, StorageInfo info,
this.exportedKeys = keys;
}

public DatanodeRegistration(String nodeName, StorageInfo info,
public DatanodeRegistration(String ipAddr, StorageInfo info,
ExportedBlockKeys keys) {
super(nodeName);
super(ipAddr);
this.storageInfo = info;
this.exportedKeys = keys;
}
@ -84,6 +84,18 @@ public void setStorageInfo(StorageInfo storage) {
this.storageInfo = new StorageInfo(storage);
}

public StorageInfo getStorageInfo() {
return storageInfo;
}

public void setExportedKeys(ExportedBlockKeys keys) {
this.exportedKeys = keys;
}

public ExportedBlockKeys getExportedKeys() {
return exportedKeys;
}

@Override // NodeRegistration
public int getVersion() {
return storageInfo.getLayoutVersion();
@ -96,13 +108,13 @@ public String getRegistrationID() {

@Override // NodeRegistration
public String getAddress() {
return getName();
return getXferAddr();
}

@Override
public String toString() {
return getClass().getSimpleName()
+ "(" + name
+ "(" + ipAddr
+ ", storageID=" + storageID
+ ", infoPort=" + infoPort
+ ", ipcPort=" + ipcPort

@ -38,6 +38,6 @@ public class DisallowedDatanodeException extends IOException {
private static final long serialVersionUID = 1L;

public DisallowedDatanodeException(DatanodeID nodeID) {
super("Datanode denied communication with namenode: " + nodeID.getName());
super("Datanode denied communication with namenode: " + nodeID);
}
}

@ -28,7 +28,7 @@
|
||||
public interface NodeRegistration {
|
||||
/**
|
||||
* Get address of the server node.
|
||||
* @return hostname:portNumber
|
||||
* @return ipAddr:portNumber
|
||||
*/
|
||||
public String getAddress();
|
||||
|
||||
|
@ -280,10 +280,11 @@ private static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
|
||||
}
|
||||
|
||||
final Map<String, Object> m = new TreeMap<String, Object>();
|
||||
m.put("name", datanodeinfo.getName());
|
||||
m.put("ipAddr", datanodeinfo.getIpAddr());
|
||||
m.put("hostName", datanodeinfo.getHostName());
|
||||
m.put("storageID", datanodeinfo.getStorageID());
|
||||
m.put("xferPort", datanodeinfo.getXferPort());
|
||||
m.put("infoPort", datanodeinfo.getInfoPort());
|
||||
|
||||
m.put("ipcPort", datanodeinfo.getIpcPort());
|
||||
|
||||
m.put("capacity", datanodeinfo.getCapacity());
|
||||
@ -293,7 +294,6 @@ private static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
|
||||
m.put("lastUpdate", datanodeinfo.getLastUpdate());
|
||||
m.put("xceiverCount", datanodeinfo.getXceiverCount());
|
||||
m.put("networkLocation", datanodeinfo.getNetworkLocation());
|
||||
m.put("hostName", datanodeinfo.getHostName());
|
||||
m.put("adminState", datanodeinfo.getAdminState().name());
|
||||
return m;
|
||||
}
|
||||
@ -306,7 +306,9 @@ private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
|
||||
|
||||
return new DatanodeInfo(
|
||||
(String)m.get("name"),
|
||||
(String)m.get("hostName"),
|
||||
(String)m.get("storageID"),
|
||||
(int)(long)(Long)m.get("xferPort"),
|
||||
(int)(long)(Long)m.get("infoPort"),
|
||||
(int)(long)(Long)m.get("ipcPort"),
|
||||
|
||||
@ -317,7 +319,6 @@ private static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
|
||||
(Long)m.get("lastUpdate"),
|
||||
(int)(long)(Long)m.get("xceiverCount"),
|
||||
(String)m.get("networkLocation"),
|
||||
(String)m.get("hostName"),
|
||||
AdminStates.valueOf((String)m.get("adminState")));
|
||||
}
|
||||
|
||||
|
@ -48,10 +48,12 @@ message BlockTokenIdentifierProto {
|
||||
* Identifies a Datanode
|
||||
*/
|
||||
message DatanodeIDProto {
|
||||
required string name = 1; // hostname:portNumber
|
||||
required string storageID = 2; // Unique storage id
|
||||
required uint32 infoPort = 3; // the port where the infoserver is running
|
||||
required uint32 ipcPort = 4; // the port where the ipc Server is running
|
||||
required string ipAddr = 1; // IP address
|
||||
required string hostName = 2; // hostname
|
||||
required string storageID = 3; // unique storage id
|
||||
required uint32 xferPort = 4; // data streaming port
|
||||
required uint32 infoPort = 5; // info server port
|
||||
required uint32 ipcPort = 6; // ipc server port
|
||||
}
|
||||
|
||||
/**
|
||||
@ -73,7 +75,6 @@ message DatanodeInfoProto {
|
||||
optional uint64 lastUpdate = 6 [default = 0];
|
||||
optional uint32 xceiverCount = 7 [default = 0];
|
||||
optional string location = 8;
|
||||
optional string hostName = 9;
|
||||
enum AdminState {
|
||||
NORMAL = 0;
|
||||
DECOMMISSION_INPROGRESS = 1;
|
||||
|
@ -143,7 +143,7 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR
|
||||
Socket sock = null;
|
||||
ExtendedBlock block = testBlock.getBlock();
|
||||
DatanodeInfo[] nodes = testBlock.getLocations();
|
||||
targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
|
||||
targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
|
||||
sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
|
||||
sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
|
||||
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
|
||||
@ -162,7 +162,7 @@ public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToR
|
||||
*/
|
||||
public DataNode getDataNode(LocatedBlock testBlock) {
|
||||
DatanodeInfo[] nodes = testBlock.getLocations();
|
||||
int ipcport = nodes[0].ipcPort;
|
||||
int ipcport = nodes[0].getIpcPort();
|
||||
return cluster.getDataNode(ipcport);
|
||||
}
|
||||
|
||||
|
@ -339,7 +339,7 @@ public static void waitCorruptReplicas(FileSystem fs, FSNamesystem ns,
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait up to 20s for the given DN (host:port) to be decommissioned.
|
||||
* Wait up to 20s for the given DN (IP:port) to be decommissioned
|
||||
*/
|
||||
public static void waitForDecommission(FileSystem fs, String name)
|
||||
throws IOException, InterruptedException, TimeoutException {
|
||||
@ -351,7 +351,7 @@ public static void waitForDecommission(FileSystem fs, String name)
|
||||
Thread.sleep(1000);
|
||||
DistributedFileSystem dfs = (DistributedFileSystem)fs;
|
||||
for (DatanodeInfo info : dfs.getDataNodeStats()) {
|
||||
if (name.equals(info.getName())) {
|
||||
if (name.equals(info.getXferAddr())) {
|
||||
dn = info;
|
||||
}
|
||||
}
|
||||
|
@ -1041,9 +1041,9 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
|
||||
// hadoop.security.token.service.use_ip=true
|
||||
//since the HDFS does things based on IP:port, we need to add the mapping
|
||||
//for IP:port to rackId
|
||||
String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
|
||||
String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
|
||||
if (racks != null) {
|
||||
int port = dn.getSelfAddr().getPort();
|
||||
int port = dn.getXferAddress().getPort();
|
||||
LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
|
||||
" to rack " + racks[i-curDatanodesNum]);
|
||||
StaticMapping.addNodeToRack(ipAddr + ":" + port,
|
||||
@ -1422,7 +1422,7 @@ public synchronized DataNodeProperties stopDataNode(int i) {
|
||||
DataNodeProperties dnprop = dataNodes.remove(i);
|
||||
DataNode dn = dnprop.datanode;
|
||||
LOG.info("MiniDFSCluster Stopping DataNode " +
|
||||
dn.getMachineName() +
|
||||
dn.getDisplayName() +
|
||||
" from a total of " + (dataNodes.size() + 1) +
|
||||
" datanodes.");
|
||||
dn.shutdown();
|
||||
@ -1433,16 +1433,13 @@ public synchronized DataNodeProperties stopDataNode(int i) {
|
||||
/*
|
||||
* Shutdown a datanode by name.
|
||||
*/
|
||||
public synchronized DataNodeProperties stopDataNode(String name) {
|
||||
public synchronized DataNodeProperties stopDataNode(String dnName) {
|
||||
int i;
|
||||
for (i = 0; i < dataNodes.size(); i++) {
|
||||
DataNode dn = dataNodes.get(i).datanode;
|
||||
// get BP registration
|
||||
DatanodeRegistration dnR =
|
||||
DataNodeTestUtils.getDNRegistrationByMachineName(dn, name);
|
||||
LOG.info("for name=" + name + " found bp=" + dnR +
|
||||
"; with dnMn=" + dn.getMachineName());
|
||||
if(dnR != null) {
|
||||
LOG.info("DN name=" + dnName + " found DN=" + dn +
|
||||
" with name=" + dn.getDisplayName());
|
||||
if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1472,9 +1469,9 @@ public synchronized boolean restartDataNode(DataNodeProperties dnprop,
|
||||
String[] args = dnprop.dnArgs;
|
||||
Configuration newconf = new HdfsConfiguration(conf); // save cloned config
|
||||
if (keepPort) {
|
||||
InetSocketAddress addr = dnprop.datanode.getSelfAddr();
|
||||
conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":"
|
||||
+ addr.getPort());
|
||||
InetSocketAddress addr = dnprop.datanode.getXferAddress();
|
||||
conf.set(DFS_DATANODE_ADDRESS_KEY,
|
||||
addr.getAddress().getHostAddress() + ":" + addr.getPort());
|
||||
}
|
||||
dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf),
|
||||
newconf, args));
|
||||
|
@ -220,7 +220,7 @@ private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
|
||||
final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
|
||||
corruptBlock(block, dn);
|
||||
LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
|
||||
+ dninfo.getName());
|
||||
+ dninfo);
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -158,7 +158,7 @@ public void testSocketCache() throws IOException {
|
||||
testFile.toString(), 0, FILE_SIZE)
|
||||
.getLocatedBlocks().get(0);
|
||||
DataNode dn = util.getDataNode(block);
|
||||
InetSocketAddress dnAddr = dn.getSelfAddr();
|
||||
InetSocketAddress dnAddr = dn.getXferAddress();
|
||||
|
||||
// Make some sockets to the DN
|
||||
Socket[] dnSockets = new Socket[CACHE_SIZE];
|
||||
|
@ -50,7 +50,7 @@ public void testDFSAddressConfig() throws IOException {
|
||||
ArrayList<DataNode> dns = cluster.getDataNodes();
|
||||
DataNode dn = dns.get(0);
|
||||
|
||||
String selfSocketAddr = dn.getSelfAddr().toString();
|
||||
String selfSocketAddr = dn.getXferAddress().toString();
|
||||
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
|
||||
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
|
||||
|
||||
@ -75,7 +75,7 @@ public void testDFSAddressConfig() throws IOException {
|
||||
dns = cluster.getDataNodes();
|
||||
dn = dns.get(0);
|
||||
|
||||
selfSocketAddr = dn.getSelfAddr().toString();
|
||||
selfSocketAddr = dn.getXferAddress().toString();
|
||||
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
|
||||
// assert that default self socket address is 127.0.0.1
|
||||
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
|
||||
@ -100,7 +100,7 @@ public void testDFSAddressConfig() throws IOException {
|
||||
dns = cluster.getDataNodes();
|
||||
dn = dns.get(0);
|
||||
|
||||
selfSocketAddr = dn.getSelfAddr().toString();
|
||||
selfSocketAddr = dn.getXferAddress().toString();
|
||||
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
|
||||
// assert that default self socket address is 0.0.0.0
|
||||
assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
|
||||
|
@ -334,7 +334,7 @@ private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
|
||||
LocatedBlock badLocatedBlock = new LocatedBlock(
|
||||
goodLocatedBlock.getBlock(),
|
||||
new DatanodeInfo[] {
|
||||
new DatanodeInfo(new DatanodeID("255.255.255.255:234"))
|
||||
new DatanodeInfo(new DatanodeID("255.255.255.255", 234))
|
||||
},
|
||||
goodLocatedBlock.getStartOffset(),
|
||||
false);
|
||||
@ -608,7 +608,7 @@ public void testGetFileChecksum() throws Exception {
|
||||
cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE)
|
||||
.getLocatedBlocks();
|
||||
final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
|
||||
cluster.stopDataNode(first.getName());
|
||||
cluster.stopDataNode(first.getXferAddr());
|
||||
|
||||
//get checksum again
|
||||
final FileChecksum cs2 = fs.getFileChecksum(p);
|
||||
@ -629,7 +629,7 @@ public void testClientDNProtocolTimeout() throws IOException {
|
||||
|
||||
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
||||
DatanodeID fakeDnId = new DatanodeID(
|
||||
"localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
|
||||
"localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
|
||||
|
||||
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
|
||||
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
|
||||
|
@ -52,7 +52,6 @@ public class TestDFSUpgradeFromImage extends TestCase {
|
||||
.getLog(TestDFSUpgradeFromImage.class);
|
||||
private static File TEST_ROOT_DIR =
|
||||
new File(MiniDFSCluster.getBaseDirectory());
|
||||
private static final String HADOOP14_IMAGE = "hadoop-14-dfs-dir.tgz";
|
||||
private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
|
||||
private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
|
||||
|
||||
@ -68,10 +67,6 @@ private static class ReferenceFileInfo {
|
||||
|
||||
boolean printChecksum = false;
|
||||
|
||||
public void unpackStorage() throws IOException {
|
||||
unpackStorage(HADOOP14_IMAGE);
|
||||
}
|
||||
|
||||
private void unpackStorage(String tarFileName)
|
||||
throws IOException {
|
||||
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
|
||||
@ -227,14 +222,6 @@ public void testFailOnPreUpgradeImage() throws IOException {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test upgrade from an 0.14 image
|
||||
*/
|
||||
public void testUpgradeFromRel14Image() throws IOException {
|
||||
unpackStorage();
|
||||
upgradeAndVerify();
|
||||
}
|
||||
|
||||
/**
|
||||
* Test upgrade from 0.22 image
|
||||
*/
|
||||
|
@ -128,8 +128,7 @@ private void sendRecvData(String testDescription,
|
||||
|
||||
if (eofExpected) {
|
||||
throw new IOException("Did not recieve IOException when an exception " +
|
||||
"is expected while reading from " +
|
||||
datanode.getName());
|
||||
"is expected while reading from " + datanode);
|
||||
}
|
||||
|
||||
byte[] needed = recvBuf.toByteArray();
|
||||
@ -215,7 +214,7 @@ private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long n
|
||||
String poolId = cluster.getNamesystem().getBlockPoolId();
|
||||
datanode = DataNodeTestUtils.getDNRegistrationForBP(
|
||||
cluster.getDataNodes().get(0), poolId);
|
||||
dnAddr = NetUtils.createSocketAddr(datanode.getName());
|
||||
dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
|
||||
FileSystem fileSys = cluster.getFileSystem();
|
||||
|
||||
/* Test writing to finalized replicas */
|
||||
@ -349,7 +348,7 @@ private void testWrite(ExtendedBlock block, BlockConstructionStage stage, long n
|
||||
new InetSocketAddress("localhost", cluster.getNameNodePort()),
|
||||
conf);
|
||||
datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
|
||||
dnAddr = NetUtils.createSocketAddr(datanode.getName());
|
||||
dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
|
||||
FileSystem fileSys = cluster.getFileSystem();
|
||||
|
||||
int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
|
||||
|
@ -269,7 +269,7 @@ private void blockCorruptionRecoveryPolicy(int numDataNodes,
|
||||
if (corruptReplica(block, i)) {
|
||||
corruptReplicasDNIDs[j++] = i;
|
||||
LOG.info("successfully corrupted block " + block + " on node "
|
||||
+ i + " " + cluster.getDataNodes().get(i).getSelfAddr());
|
||||
+ i + " " + cluster.getDataNodes().get(i).getDisplayName());
|
||||
}
|
||||
}
|
||||
|
||||
@ -281,7 +281,7 @@ private void blockCorruptionRecoveryPolicy(int numDataNodes,
|
||||
for (int i = numCorruptReplicas - 1; i >= 0 ; i--) {
|
||||
LOG.info("restarting node with corrupt replica: position "
|
||||
+ i + " node " + corruptReplicasDNIDs[i] + " "
|
||||
+ cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getSelfAddr());
|
||||
+ cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getDisplayName());
|
||||
cluster.restartDataNode(corruptReplicasDNIDs[i]);
|
||||
}
|
||||
|
||||
@ -343,7 +343,7 @@ public void testTruncatedBlockReport() throws Exception {
|
||||
if (!changeReplicaLength(block, 0, -1)) {
|
||||
throw new IOException(
|
||||
"failed to find or change length of replica on node 0 "
|
||||
+ cluster.getDataNodes().get(0).getSelfAddr());
|
||||
+ cluster.getDataNodes().get(0).getDisplayName());
|
||||
}
|
||||
} finally {
|
||||
cluster.shutdown();
|
||||
|
@ -389,9 +389,8 @@ private void simpleTest(int datanodeToKill) throws IOException {
|
||||
cluster.stopDataNode(victim);
|
||||
} else {
|
||||
int victim = datanodeToKill;
|
||||
System.out.println("SimpleTest stopping datanode " +
|
||||
targets[victim].getName());
|
||||
cluster.stopDataNode(targets[victim].getName());
|
||||
System.out.println("SimpleTest stopping datanode " + targets[victim]);
|
||||
cluster.stopDataNode(targets[victim].getXferAddr());
|
||||
}
|
||||
System.out.println("SimpleTest stopping datanode complete");
|
||||
|
||||
|
@ -151,27 +151,27 @@ private String checkFile(FileSystem fileSys, Path name, int repl,
|
||||
int hasdown = 0;
|
||||
DatanodeInfo[] nodes = blk.getLocations();
|
||||
for (int j = 0; j < nodes.length; j++) { // for each replica
|
||||
if (isNodeDown && nodes[j].getName().equals(downnode)) {
|
||||
if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
|
||||
hasdown++;
|
||||
//Downnode must actually be decommissioned
|
||||
if (!nodes[j].isDecommissioned()) {
|
||||
return "For block " + blk.getBlock() + " replica on " +
|
||||
nodes[j].getName() + " is given as downnode, " +
|
||||
nodes[j] + " is given as downnode, " +
|
||||
"but is not decommissioned";
|
||||
}
|
||||
//Decommissioned node (if any) should only be last node in list.
|
||||
if (j != nodes.length - 1) {
|
||||
return "For block " + blk.getBlock() + " decommissioned node "
|
||||
+ nodes[j].getName() + " was not last node in list: "
|
||||
+ nodes[j] + " was not last node in list: "
|
||||
+ (j + 1) + " of " + nodes.length;
|
||||
}
|
||||
LOG.info("Block " + blk.getBlock() + " replica on " +
|
||||
nodes[j].getName() + " is decommissioned.");
|
||||
nodes[j] + " is decommissioned.");
|
||||
} else {
|
||||
//Non-downnodes must not be decommissioned
|
||||
if (nodes[j].isDecommissioned()) {
|
||||
return "For block " + blk.getBlock() + " replica on " +
|
||||
nodes[j].getName() + " is unexpectedly decommissioned";
|
||||
nodes[j] + " is unexpectedly decommissioned";
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -215,7 +215,7 @@ private DatanodeInfo decommissionNode(int nnIndex,
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
String nodename = info[index].getName();
|
||||
String nodename = info[index].getXferAddr();
|
||||
LOG.info("Decommissioning node: " + nodename);
|
||||
|
||||
// write nodename into the exclude file.
|
||||
@ -236,7 +236,7 @@ private DatanodeInfo decommissionNode(int nnIndex,
|
||||
|
||||
/* stop decommission of the datanode and wait for each to reach the NORMAL state */
|
||||
private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException {
|
||||
LOG.info("Recommissioning node: " + decommissionedNode.getName());
|
||||
LOG.info("Recommissioning node: " + decommissionedNode);
|
||||
writeConfigFile(excludeFile, null);
|
||||
refreshNodes(cluster.getNamesystem(), conf);
|
||||
waitNodeState(decommissionedNode, AdminStates.NORMAL);
|
||||
@ -373,7 +373,7 @@ private void testDecommission(int numNamenodes, int numDatanodes)
|
||||
DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
|
||||
assertEquals("All datanodes must be alive", numDatanodes,
|
||||
client.datanodeReport(DatanodeReportType.LIVE).length);
|
||||
assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes));
|
||||
assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes));
|
||||
cleanupFile(fileSys, file1);
|
||||
}
|
||||
}
|
||||
@ -414,7 +414,7 @@ private void testRecommission(int numNamenodes, int numDatanodes)
|
||||
DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
|
||||
assertEquals("All datanodes must be alive", numDatanodes,
|
||||
client.datanodeReport(DatanodeReportType.LIVE).length);
|
||||
assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes));
|
||||
assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes));
|
||||
|
||||
// stop decommission and check if the new replicas are removed
|
||||
recomissionNode(decomNode);
|
||||
|
@ -844,7 +844,7 @@ public void testLeaseExpireHardLimit() throws Exception {
|
||||
LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
|
||||
int successcount = 0;
|
||||
for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
|
||||
DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
|
||||
DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
|
||||
ExtendedBlock blk = locatedblock.getBlock();
|
||||
Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
|
||||
blk.getBlockPoolId(), blk.getBlockId());
|
||||
|
@ -147,7 +147,7 @@ private void testDataNodeRedirect(Path path) throws IOException {
|
||||
// if we were redirected to the right DN.
|
||||
BlockLocation[] locations =
|
||||
hdfs.getFileBlockLocations(path, 0, 10);
|
||||
String locationName = locations[0].getNames()[0];
|
||||
String xferAddr = locations[0].getNames()[0];
|
||||
|
||||
// Connect to the NN to get redirected
|
||||
URL u = hftpFs.getNamenodeURL(
|
||||
@ -164,7 +164,7 @@ private void testDataNodeRedirect(Path path) throws IOException {
|
||||
for (DataNode node : cluster.getDataNodes()) {
|
||||
DatanodeRegistration dnR =
|
||||
DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId);
|
||||
if (dnR.getName().equals(locationName)) {
|
||||
if (dnR.getXferAddr().equals(xferAddr)) {
|
||||
checked = true;
|
||||
assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
|
||||
}
|
||||
|
@ -32,6 +32,7 @@
|
||||
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
|
||||
import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
|
||||
import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
@ -58,8 +59,9 @@ public static void setUp() throws Exception {
|
||||
cluster = (new MiniDFSCluster.Builder(conf))
|
||||
.numDataNodes(1).build();
|
||||
nnAddress = cluster.getNameNode().getNameNodeAddress();
|
||||
dnAddress = new InetSocketAddress(cluster.getDataNodes().get(0)
|
||||
.getDatanodeId().getHost(), cluster.getDataNodes().get(0).getIpcPort());
|
||||
DataNode dn = cluster.getDataNodes().get(0);
|
||||
dnAddress = new InetSocketAddress(dn.getDatanodeId().getIpAddr(),
|
||||
dn.getIpcPort());
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
|
@ -117,7 +117,7 @@ private void checkFile(FileSystem fileSys, Path name, int repl)
|
||||
isOnSameRack = false;
|
||||
isNotOnSameRack = false;
|
||||
for (int i = 0; i < datanodes.length-1; i++) {
|
||||
LOG.info("datanode "+ i + ": "+ datanodes[i].getName());
|
||||
LOG.info("datanode "+ i + ": "+ datanodes[i]);
|
||||
boolean onRack = false;
|
||||
for( int j=i+1; j<datanodes.length; j++) {
|
||||
if( datanodes[i].getNetworkLocation().equals(
|
||||
|
@ -130,19 +130,19 @@ public void testConvertNamenodeRegistration() {
|
||||
|
||||
@Test
|
||||
public void testConvertDatanodeID() {
|
||||
DatanodeID dn = new DatanodeID("node", "sid", 1, 2);
|
||||
DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2, 3);
|
||||
DatanodeIDProto dnProto = PBHelper.convert(dn);
|
||||
DatanodeID dn2 = PBHelper.convert(dnProto);
|
||||
compare(dn, dn2);
|
||||
}
|
||||
|
||||
void compare(DatanodeID dn, DatanodeID dn2) {
|
||||
assertEquals(dn.getHost(), dn2.getHost());
|
||||
assertEquals(dn.getIpAddr(), dn2.getIpAddr());
|
||||
assertEquals(dn.getHostName(), dn2.getHostName());
|
||||
assertEquals(dn.getStorageID(), dn2.getStorageID());
|
||||
assertEquals(dn.getXferPort(), dn2.getXferPort());
|
||||
assertEquals(dn.getInfoPort(), dn2.getInfoPort());
|
||||
assertEquals(dn.getIpcPort(), dn2.getIpcPort());
|
||||
assertEquals(dn.getName(), dn2.getName());
|
||||
assertEquals(dn.getPort(), dn2.getPort());
|
||||
assertEquals(dn.getStorageID(), dn2.getStorageID());
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -279,8 +279,8 @@ public ExtendedBlock getExtendedBlock(long blkid) {
|
||||
return new ExtendedBlock("bpid", blkid, 100, 2);
|
||||
}
|
||||
|
||||
public DatanodeInfo getDNInfo() {
|
||||
return new DatanodeInfo(new DatanodeID("node", "sid", 1, 2));
|
||||
private DatanodeInfo getDNInfo() {
|
||||
return new DatanodeInfo(new DatanodeID("node", "node", "sid", 0, 1, 2));
|
||||
}
|
||||
|
||||
private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
|
||||
@ -291,7 +291,7 @@ private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
|
||||
assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
|
||||
assertEquals(dn1.getDfsUsed(), dn1.getDfsUsed());
|
||||
assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent());
|
||||
assertEquals(dn1.getHost(), dn2.getHost());
|
||||
assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
|
||||
assertEquals(dn1.getHostName(), dn2.getHostName());
|
||||
assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
|
||||
assertEquals(dn1.getIpcPort(), dn2.getIpcPort());
|
||||
@ -400,12 +400,12 @@ private void compare(Token<BlockTokenIdentifier> expected,
|
||||
@Test
|
||||
public void testConvertLocatedBlock() {
|
||||
DatanodeInfo [] dnInfos = new DatanodeInfo[3];
|
||||
dnInfos[0] = new DatanodeInfo("host0", "0", 5000, 5001, 20000, 10001, 9999,
|
||||
59, 69, 32, "local", "host0", AdminStates.DECOMMISSION_INPROGRESS);
|
||||
dnInfos[1] = new DatanodeInfo("host1", "1", 5000, 5001, 20000, 10001, 9999,
|
||||
59, 69, 32, "local", "host1", AdminStates.DECOMMISSIONED);
|
||||
dnInfos[2] = new DatanodeInfo("host2", "2", 5000, 5001, 20000, 10001, 9999,
|
||||
59, 69, 32, "local", "host1", AdminStates.NORMAL);
|
||||
dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999,
|
||||
59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
|
||||
dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999,
|
||||
59, 69, 32, "local", AdminStates.DECOMMISSIONED);
|
||||
dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999,
|
||||
59, 69, 32, "local", AdminStates.NORMAL);
|
||||
LocatedBlock lb = new LocatedBlock(
|
||||
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
|
||||
LocatedBlockProto lbProto = PBHelper.convert(lb);
|
||||
@ -423,7 +423,7 @@ public void testConvertLocatedBlock() {
|
||||
|
||||
@Test
|
||||
public void testConvertDatanodeRegistration() {
|
||||
DatanodeID dnId = new DatanodeID("host", "xyz", 1, 0);
|
||||
DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0);
|
||||
BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
|
||||
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
|
||||
getBlockKey(1), keys);
|
||||
@ -431,8 +431,8 @@ public void testConvertDatanodeRegistration() {
|
||||
new StorageInfo(), expKeys);
|
||||
DatanodeRegistrationProto proto = PBHelper.convert(reg);
|
||||
DatanodeRegistration reg2 = PBHelper.convert(proto);
|
||||
compare(reg.storageInfo, reg2.storageInfo);
|
||||
compare(reg.exportedKeys, reg2.exportedKeys);
|
||||
compare(reg.getStorageInfo(), reg2.getStorageInfo());
|
||||
compare(reg.getExportedKeys(), reg2.getExportedKeys());
|
||||
compare((DatanodeID)reg, (DatanodeID)reg2);
|
||||
}
|
||||
|
||||
|
@ -279,8 +279,8 @@ public void testBlockTokenRpcLeak() throws Exception {
|
||||
server.start();
|
||||
|
||||
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
||||
DatanodeID fakeDnId = new DatanodeID("localhost:" + addr.getPort(),
|
||||
"fake-storage", 0, addr.getPort());
|
||||
DatanodeID fakeDnId = new DatanodeID("localhost",
|
||||
"localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
|
||||
|
||||
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
|
||||
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
|
||||
|
@ -165,7 +165,7 @@ public static void noticeDeadDatanode(NameNode nn, String dnName) {
|
||||
DatanodeDescriptor[] dnds = hbm.getDatanodes();
|
||||
DatanodeDescriptor theDND = null;
|
||||
for (DatanodeDescriptor dnd : dnds) {
|
||||
if (dnd.getName().equals(dnName)) {
|
||||
if (dnd.getXferAddr().equals(dnName)) {
|
||||
theDND = dnd;
|
||||
}
|
||||
}
|
||||
|
@ -48,12 +48,12 @@
|
||||
|
||||
public class TestBlockManager {
|
||||
private final List<DatanodeDescriptor> nodes = ImmutableList.of(
|
||||
new DatanodeDescriptor(new DatanodeID("h1:5020"), "/rackA"),
|
||||
new DatanodeDescriptor(new DatanodeID("h2:5020"), "/rackA"),
|
||||
new DatanodeDescriptor(new DatanodeID("h3:5020"), "/rackA"),
|
||||
new DatanodeDescriptor(new DatanodeID("h4:5020"), "/rackB"),
|
||||
new DatanodeDescriptor(new DatanodeID("h5:5020"), "/rackB"),
|
||||
new DatanodeDescriptor(new DatanodeID("h6:5020"), "/rackB")
|
||||
new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
|
||||
new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
|
||||
new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
|
||||
new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"),
|
||||
new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"),
|
||||
new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB")
|
||||
);
|
||||
private final List<DatanodeDescriptor> rackA = nodes.subList(0, 3);
|
||||
private final List<DatanodeDescriptor> rackB = nodes.subList(3, 6);
|
||||
@ -272,7 +272,7 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
|
||||
|
||||
// the block is still under-replicated. Add a new node. This should allow
|
||||
// the third off-rack replica.
|
||||
DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7:5020"), "/rackC");
|
||||
DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC");
|
||||
addNodes(ImmutableList.of(rackCNode));
|
||||
try {
|
||||
DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
|
||||
|
@ -137,7 +137,7 @@ private static void tryRead(Configuration conf, LocatedBlock lblock,
|
||||
ExtendedBlock block = lblock.getBlock();
|
||||
try {
|
||||
DatanodeInfo[] nodes = lblock.getLocations();
|
||||
targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
|
||||
targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
|
||||
s = NetUtils.getDefaultSocketFactory(conf).createSocket();
|
||||
s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
|
||||
s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
|
||||
|
@ -28,13 +28,13 @@
|
||||
public class TestHost2NodesMap {
|
||||
private Host2NodesMap map = new Host2NodesMap();
|
||||
private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
|
||||
new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
|
||||
new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
|
||||
new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"),
|
||||
new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"),
|
||||
};
|
||||
private final DatanodeDescriptor NULL_NODE = null;
|
||||
private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
|
||||
private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040),
|
||||
"/d1/r4");
|
||||
|
||||
@Before
|
||||
@ -56,24 +56,11 @@ public void testContains() throws Exception {
|
||||
|
||||
@Test
|
||||
public void testGetDatanodeByHost() throws Exception {
|
||||
assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
|
||||
assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
|
||||
DatanodeDescriptor node = map.getDatanodeByHost("h3");
|
||||
assertTrue(map.getDatanodeByHost("ip1")==dataNodes[0]);
|
||||
assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
|
||||
DatanodeDescriptor node = map.getDatanodeByHost("ip3");
|
||||
assertTrue(node==dataNodes[2] || node==dataNodes[3]);
|
||||
assertTrue(null==map.getDatanodeByHost("h4"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetDatanodeByName() throws Exception {
|
||||
assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
|
||||
assertTrue(map.getDatanodeByName("h1:5030")==null);
|
||||
assertTrue(map.getDatanodeByName("h2:5020")==dataNodes[1]);
|
||||
assertTrue(map.getDatanodeByName("h2:5030")==null);
|
||||
assertTrue(map.getDatanodeByName("h3:5020")==dataNodes[2]);
|
||||
assertTrue(map.getDatanodeByName("h3:5030")==dataNodes[3]);
|
||||
assertTrue(map.getDatanodeByName("h3:5040")==null);
|
||||
assertTrue(map.getDatanodeByName("h4")==null);
|
||||
assertTrue(map.getDatanodeByName(null)==null);
|
||||
assertTrue(null==map.getDatanodeByHost("ip4"));
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -81,21 +68,21 @@ public void testRemove() throws Exception {
|
||||
assertFalse(map.remove(NODE));
|
||||
|
||||
assertTrue(map.remove(dataNodes[0]));
|
||||
assertTrue(map.getDatanodeByHost("h1")==null);
|
||||
assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
|
||||
DatanodeDescriptor node = map.getDatanodeByHost("h3");
|
||||
assertTrue(map.getDatanodeByHost("ip1")==null);
|
||||
assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
|
||||
DatanodeDescriptor node = map.getDatanodeByHost("ip3");
|
||||
assertTrue(node==dataNodes[2] || node==dataNodes[3]);
|
||||
assertTrue(null==map.getDatanodeByHost("h4"));
|
||||
assertTrue(null==map.getDatanodeByHost("ip4"));
|
||||
|
||||
assertTrue(map.remove(dataNodes[2]));
|
||||
assertTrue(map.getDatanodeByHost("h1")==null);
|
||||
assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
|
||||
assertTrue(map.getDatanodeByHost("h3")==dataNodes[3]);
|
||||
assertTrue(map.getDatanodeByHost("ip1")==null);
|
||||
assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
|
||||
assertTrue(map.getDatanodeByHost("ip3")==dataNodes[3]);
|
||||
|
||||
assertTrue(map.remove(dataNodes[3]));
|
||||
assertTrue(map.getDatanodeByHost("h1")==null);
|
||||
assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
|
||||
assertTrue(map.getDatanodeByHost("h3")==null);
|
||||
assertTrue(map.getDatanodeByHost("ip1")==null);
|
||||
assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
|
||||
assertTrue(map.getDatanodeByHost("ip3")==null);
|
||||
|
||||
assertFalse(map.remove(NULL_NODE));
|
||||
assertTrue(map.remove(dataNodes[1]));
|
||||
|
@ -78,11 +78,11 @@ public void testNodeCount() throws Exception {
|
||||
|
||||
// bring down first datanode
|
||||
DatanodeDescriptor datanode = datanodes[0];
|
||||
DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
|
||||
DataNodeProperties dnprop = cluster.stopDataNode(datanode.getXferAddr());
|
||||
|
||||
// make sure that NN detects that the datanode is down
|
||||
BlockManagerTestUtil.noticeDeadDatanode(
|
||||
cluster.getNameNode(), datanode.getName());
|
||||
cluster.getNameNode(), datanode.getXferAddr());
|
||||
|
||||
// the block will be replicated
|
||||
DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
|
||||
@ -112,10 +112,10 @@ public void testNodeCount() throws Exception {
|
||||
assertTrue(nonExcessDN!=null);
|
||||
|
||||
// bring down non excessive datanode
|
||||
dnprop = cluster.stopDataNode(nonExcessDN.getName());
|
||||
dnprop = cluster.stopDataNode(nonExcessDN.getXferAddr());
|
||||
// make sure that NN detects that the datanode is down
|
||||
BlockManagerTestUtil.noticeDeadDatanode(
|
||||
cluster.getNameNode(), nonExcessDN.getName());
|
||||
cluster.getNameNode(), nonExcessDN.getXferAddr());
|
||||
|
||||
// The block should be replicated
|
||||
initializeTimeout(TIMEOUT);
|
||||
|
@ -91,9 +91,9 @@ public void testProcesOverReplicateBlock() throws IOException {
|
||||
synchronized(hm) {
|
||||
// set live datanode's remaining space to be 0
|
||||
// so they will be chosen to be deleted when over-replication occurs
|
||||
String corruptMachineName = corruptDataNode.getName();
|
||||
String corruptMachineName = corruptDataNode.getXferAddr();
|
||||
for (DatanodeDescriptor datanode : hm.getDatanodes()) {
|
||||
if (!corruptMachineName.equals(datanode.getName())) {
|
||||
if (!corruptMachineName.equals(datanode.getXferAddr())) {
|
||||
datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0, 0);
|
||||
}
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ public class TestPendingDataNodeMessages {
|
||||
private final Block block2Gs1 = new Block(2, 0, 1);
|
||||
|
||||
private final DatanodeDescriptor fakeDN = new DatanodeDescriptor(
|
||||
new DatanodeID("fake"));
|
||||
new DatanodeID("fake", 100));
|
||||
|
||||
@Test
|
||||
public void testQueues() {
|
||||
@ -56,8 +56,8 @@ public void testQueues() {
|
||||
Queue<ReportedBlockInfo> q =
|
||||
msgs.takeBlockQueue(block1Gs2DifferentInstance);
|
||||
assertEquals(
|
||||
"ReportedBlockInfo [block=blk_1_1, dn=fake, reportedState=FINALIZED]," +
|
||||
"ReportedBlockInfo [block=blk_1_2, dn=fake, reportedState=FINALIZED]",
|
||||
"ReportedBlockInfo [block=blk_1_1, dn=fake:100, reportedState=FINALIZED]," +
|
||||
"ReportedBlockInfo [block=blk_1_2, dn=fake:100, reportedState=FINALIZED]",
|
||||
Joiner.on(",").join(q));
|
||||
assertEquals(0, msgs.count());
|
||||
|
||||
|
@ -52,16 +52,16 @@ public class TestReplicationPolicy {
|
||||
private static final String filename = "/dummyfile.txt";
|
||||
private static final DatanodeDescriptor dataNodes[] =
|
||||
new DatanodeDescriptor[] {
|
||||
new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
|
||||
new DatanodeDescriptor(new DatanodeID("h4:5020"), "/d1/r2"),
|
||||
new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d2/r3"),
|
||||
new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3")
|
||||
new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
|
||||
new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
|
||||
new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
|
||||
new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"),
|
||||
new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3")
|
||||
};
|
||||
|
||||
private final static DatanodeDescriptor NODE =
|
||||
new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r4");
|
||||
new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4");
|
||||
|
||||
static {
|
||||
try {
|
||||
|
@ -1,267 +0,0 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.server.common;
|
||||
|
||||
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.LAYOUT_VERSION;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage;
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
|
||||
import org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
|
||||
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
|
||||
import org.apache.hadoop.hdfs.tools.DFSAdmin;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class TestDistributedUpgrade {
|
||||
private static final Log LOG = LogFactory.getLog(TestDistributedUpgrade.class);
|
||||
private Configuration conf;
|
||||
private int testCounter = 0;
|
||||
private MiniDFSCluster cluster = null;
|
||||
private String clusterId = "testClsterId";
|
||||

  /**
   * Writes an INFO log message containing the parameters.
   */
  void log(String label, int numDirs) {
    LOG.info("============================================================");
    LOG.info("***TEST " + (testCounter++) + "*** "
        + label + ":"
        + " numDirs=" + numDirs);
  }

  /**
   * Attempts to start a NameNode with the given operation. Starting
   * the NameNode should throw an exception.
   */
  void startNameNodeShouldFail(StartupOption operation,
      String exceptionSubstring) {
    try {
      //cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).startupOption(operation).build(); // should fail
      // we set manage dirs to true as NN has to start from untar'ed image with
      // nn dirs set to name1 and name2
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
          .format(false)
          .clusterId(clusterId)
          .startupOption(operation)
          .build(); // should fail
      throw new AssertionError("NameNode should have failed to start");
    } catch (Exception expected) {
      GenericTestUtils.assertExceptionContains(
          exceptionSubstring, expected);
    }
  }

  /**
   * Attempts to start a DataNode with the given operation. Starting
   * the DataNode should throw an exception.
   */
  void startDataNodeShouldFail(StartupOption operation) {
    try {
      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
      throw new AssertionError("DataNode should have failed to start");
    } catch (Exception expected) {
      // expected
      assertFalse(cluster.isDataNodeUp());
    }
  }

  /**
   * Walks the cluster through a distributed upgrade: regular and rollback
   * starts must fail while the upgrade is pending, the upgrade itself must
   * complete, and a regular start must succeed afterwards.
   */
  @Test(timeout=300000) // 5 min timeout
  public void testDistributedUpgrade() throws Exception {
    int numDirs = 1;
    TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage();
    testImg.unpackStorage();
    int numDNs = testImg.numDataNodes;

    // register new upgrade objects (ignore all existing)
    UpgradeObjectCollection.initialize();
    UpgradeObjectCollection.registerUpgrade(new UO_Datanode1());
    UpgradeObjectCollection.registerUpgrade(new UO_Namenode1());
    UpgradeObjectCollection.registerUpgrade(new UO_Datanode2());
    UpgradeObjectCollection.registerUpgrade(new UO_Namenode2());
    UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
    UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());
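    // Three datanode/namenode upgrade-object pairs are registered above, one
    // pair per layout version (LAYOUT_VERSION+1 .. LAYOUT_VERSION+3, defined
    // at the bottom of this file); the upgrade framework is expected to apply
    // them in order during the distributed upgrade started below.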

    conf = new HdfsConfiguration();
    if (System.getProperty("test.build.data") == null) { // so the test can be run outside of ant
      System.setProperty("test.build.data", "build/test/data");
    }
    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off

    log("NameNode start in regular mode when distributed upgrade is required", numDirs);
    startNameNodeShouldFail(StartupOption.REGULAR, "contains an old layout version");

    log("Start NameNode-only distributed upgrade", numDirs);
    // cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
    //     .startupOption(StartupOption.UPGRADE).build();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(false)
        .clusterId(clusterId)
        .startupOption(StartupOption.UPGRADE)
        .build();
    cluster.shutdown();

    log("NameNode start in regular mode when distributed upgrade has been started", numDirs);
    startNameNodeShouldFail(StartupOption.REGULAR,
        "Previous distributed upgrade was not completed");

    log("NameNode rollback to the old version that requires a distributed upgrade", numDirs);
    startNameNodeShouldFail(StartupOption.ROLLBACK,
        "Cannot rollback to storage version -7 using this version");

    log("Normal distributed upgrade for the cluster", numDirs);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDNs)
        .format(false)
        .clusterId(clusterId)
        .startupOption(StartupOption.UPGRADE)
        .build();
    DFSAdmin dfsAdmin = new DFSAdmin();
    dfsAdmin.setConf(conf);
    dfsAdmin.run(new String[] {"-safemode", "wait"});
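    // The "-safemode wait" above blocks until the NameNode leaves safe mode,
    // which in this scenario should only happen once the distributed upgrade
    // has finished on all nodes.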
    cluster.shutdown();

    // it should be ok to start in regular mode
    log("Cluster regular startup after the upgrade", numDirs);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDNs)
        .clusterId(clusterId)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();

    cluster.waitActive();
    cluster.shutdown();
  }

  public static void main(String[] args) throws Exception {
    new TestDistributedUpgrade().testDistributedUpgrade();
    LOG.info("=== DONE ===");
  }
}
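// The classes below are minimal mock upgrade objects used by the test: each
// datanode-side object reports 100% completion to the NameNode as soon as
// doUpgrade() runs, each namenode-side object simply accumulates the reported
// status, and the UO_*1..3 subclasses pin them to specific layout versions.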

/**
 * Upgrade object for data-node
 */
class UO_Datanode extends UpgradeObjectDatanode {
  int version;

  UO_Datanode(int v) {
    this.status = (short)0;
    version = v;
  }

  public int getVersion() {
    return version;
  }

  public void doUpgrade() throws IOException {
    this.status = (short)100;
    DatanodeProtocol nn = getNamenode();
    nn.processUpgradeCommand(
        new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS,
            getVersion(), getUpgradeStatus()));
  }

  public UpgradeCommand startUpgrade() throws IOException {
    return null;
  }
}

/**
 * Upgrade object for name-node
 */
class UO_Namenode extends UpgradeObjectNamenode {
  int version;

  UO_Namenode(int v) {
    status = (short)0;
    version = v;
  }

  public int getVersion() {
    return version;
  }

  synchronized public UpgradeCommand processUpgradeCommand(
      UpgradeCommand command) throws IOException {
    switch (command.getAction()) {
      case UpgradeCommand.UC_ACTION_REPORT_STATUS:
        this.status += command.getCurrentStatus()/8; // 4 reports needed
        break;
      default:
        this.status++;
    }
    return null;
  }

  public UpgradeCommand completeUpgrade() throws IOException {
    return null;
  }
}

class UO_Datanode1 extends UO_Datanode {
  UO_Datanode1() {
    super(LAYOUT_VERSION+1);
  }
}

class UO_Namenode1 extends UO_Namenode {
  UO_Namenode1() {
    super(LAYOUT_VERSION+1);
  }
}

class UO_Datanode2 extends UO_Datanode {
  UO_Datanode2() {
    super(LAYOUT_VERSION+2);
  }
}

class UO_Namenode2 extends UO_Namenode {
  UO_Namenode2() {
    super(LAYOUT_VERSION+2);
  }
}

class UO_Datanode3 extends UO_Datanode {
  UO_Datanode3() {
    super(LAYOUT_VERSION+3);
  }
}

class UO_Namenode3 extends UO_Namenode {
  UO_Namenode3() {
    super(LAYOUT_VERSION+3);
  }
}
@ -37,11 +37,6 @@
 *
 */
public class DataNodeTestUtils {
  public static DatanodeRegistration
  getDNRegistrationByMachineName(DataNode dn, String mName) {
    return dn.getDNRegistrationByMachineName(mName);
  }

  public static DatanodeRegistration
  getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
    return dn.getDNRegistrationForBP(bpid);

@ -383,7 +383,7 @@ private SimulatedBPStorage getBPStorage(String bpid) throws IOException {
  public SimulatedFSDataset(DataNode datanode, DataStorage storage,
      Configuration conf) {
    if (storage != null) {
      storage.createStorageID(datanode.getPort());
      storage.createStorageID(datanode.getXferPort());
      this.storageId = storage.getStorageID();
    } else {
      this.storageId = "unknownStorageId" + new Random().nextInt();

@ -197,9 +197,9 @@ private void testSyncReplicas(ReplicaRecoveryInfo replica1,
        locs, RECOVERY_ID);
    ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
    BlockRecord record1 = new BlockRecord(
        new DatanodeID("xx", "yy", 44, 55), dn1, replica1);
        new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1);
    BlockRecord record2 = new BlockRecord(
        new DatanodeID("aa", "bb", 11, 22), dn2, replica2);
        new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2);
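    // The paired lines above show the commit's DatanodeID change: the old
    // 4-argument constructor is replaced by a 6-argument form that, judging
    // from these call sites, adds a separate hostname and an extra port; the
    // exact parameter names are not visible in this diff.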
    syncList.add(record1);
    syncList.add(record2);

@ -402,7 +402,7 @@ public void testRWRReplicas() throws IOException {
  private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
    Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
    DatanodeInfo mockOtherDN = new DatanodeInfo(
        new DatanodeID("127.0.0.1", "storage-1234", 0, 0));
        new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0));
    DatanodeInfo[] locs = new DatanodeInfo[] {
        new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
        mockOtherDN };

@ -162,16 +162,16 @@ public void testBlockReplacement() throws IOException, TimeoutException {

    // start to replace the block
    // case 1: proxySource does not contain the block
    LOG.info("Testcase 1: Proxy " + newNode.getName()
    LOG.info("Testcase 1: Proxy " + newNode
        + " does not contain the block " + b);
    assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
    // case 2: destination already contains the block
    LOG.info("Testcase 2: Destination " + proxies.get(1).getName()
    LOG.info("Testcase 2: Destination " + proxies.get(1)
        + " contains the block " + b);
    assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
    // case 3: correct case
    LOG.info("Testcase 3: Source=" + source.getName() + " Proxy=" +
        proxies.get(0).getName() + " Destination=" + newNode.getName() );
    LOG.info("Testcase 3: Source=" + source + " Proxy=" +
        proxies.get(0) + " Destination=" + newNode );
    assertTrue(replaceBlock(b, source, proxies.get(0), newNode));
    // after cluster has time to resolve the over-replication,
    // block locations should contain two proxies and newNode

@ -181,7 +181,7 @@ public void testBlockReplacement() throws IOException, TimeoutException {
        DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
    // case 4: proxies.get(0) is not a valid del hint
    // expect either source or newNode replica to be deleted instead
    LOG.info("Testcase 4: invalid del hint " + proxies.get(0).getName() );
    LOG.info("Testcase 4: invalid del hint " + proxies.get(0) );
    assertTrue(replaceBlock(b, proxies.get(0), proxies.get(1), source));
    // after cluster has time to resolve the over-replication,
    // block locations should contain two proxies,

@ -222,7 +222,7 @@ private void checkBlocks(DatanodeInfo[] includeNodes, String fileName,
      for (DatanodeInfo node : includeNodes) {
        if (!nodeLocations.contains(node) ) {
          notDone=true;
          LOG.info("Block is not located at " + node.getName() );
          LOG.info("Block is not located at " + node );
          break;
        }
      }

@ -231,9 +231,9 @@ private void checkBlocks(DatanodeInfo[] includeNodes, String fileName,
        String expectedNodesList = "";
        String currentNodesList = "";
        for (DatanodeInfo dn : includeNodes)
          expectedNodesList += dn.getName() + ", ";
          expectedNodesList += dn + ", ";
        for (DatanodeInfo dn : nodes)
          currentNodesList += dn.getName() + ", ";
          currentNodesList += dn + ", ";
        LOG.info("Expected replica nodes are: " + expectedNodesList);
        LOG.info("Current actual replica nodes are: " + currentNodesList);
        throw new TimeoutException(

@ -254,7 +254,7 @@ private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
      DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
    Socket sock = new Socket();
    sock.connect(NetUtils.createSocketAddr(
        destination.getName()), HdfsServerConstants.READ_TIMEOUT);
        destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT);
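    // Throughout these hunks the pattern is the same: DatanodeID/DatanodeInfo
    // call sites stop using getName() and rely on the object's toString() or
    // getXferAddr() instead, matching the widened DatanodeID shown above.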
    sock.setKeepAlive(true);
    // sendRequest
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
Some files were not shown because too many files have changed in this diff.