HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter is failing in trunk (Contributed by Surendra Singh Lilhore)

Vinayakumar B 2015-09-15 17:19:59 +05:30
parent 5468baa80a
commit a440567491
7 changed files with 126 additions and 24 deletions

MetricsConfig.java (org.apache.hadoop.metrics2.impl)

@@ -58,6 +58,9 @@ class MetricsConfig extends SubsetConfiguration {
   static final String PERIOD_KEY = "period";
   static final int PERIOD_DEFAULT = 10; // seconds

+  // For testing, this will have the priority.
+  static final String PERIOD_MILLIS_KEY = "periodMillis";
+
   static final String QUEUE_CAPACITY_KEY = "queue.capacity";
   static final int QUEUE_CAPACITY_DEFAULT = 1;
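For context, the new "periodMillis" key takes priority over "period" when both are set (see configureSinks below). A minimal, self-contained sketch of that precedence, using java.util.Properties as a stand-in for MetricsConfig; the class and method names here are illustrative, not Hadoop code:

import java.util.Properties;

public class PeriodLookupSketch {
  static final String PERIOD_KEY = "period";
  static final int PERIOD_DEFAULT = 10; // seconds
  static final String PERIOD_MILLIS_KEY = "periodMillis";

  // Effective snapshot period in milliseconds:
  // "periodMillis", if set, wins; otherwise "period" (seconds) * 1000.
  static long effectivePeriodMillis(Properties conf) {
    int periodSec = Integer.parseInt(
        conf.getProperty(PERIOD_KEY, String.valueOf(PERIOD_DEFAULT)));
    return Long.parseLong(conf.getProperty(
        PERIOD_MILLIS_KEY, String.valueOf(periodSec * 1000L)));
  }

  public static void main(String[] args) {
    Properties conf = new Properties();
    conf.setProperty(PERIOD_KEY, "10");         // production-style: 10 s
    conf.setProperty(PERIOD_MILLIS_KEY, "100"); // test override: 100 ms
    System.out.println(effectivePeriodMillis(conf)); // prints 100
  }
}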

MetricsSystemImpl.java (org.apache.hadoop.metrics2.impl)

@@ -105,7 +105,7 @@ enum InitMode { NORMAL, STANDBY }
   private Map<String, MetricsConfig> sourceConfigs, sinkConfigs;
   private boolean monitoring = false;
   private Timer timer;
-  private int period; // seconds
+  private long period; // milliseconds
   private long logicalTime; // number of timer invocations * period
   private ObjectName mbeanName;
   private boolean publishSelfMetrics = true;
@@ -262,7 +262,7 @@ void registerSource(String name, String desc, MetricsSource source) {
     checkNotNull(config, "config");
     MetricsConfig conf = sourceConfigs.get(name);
     MetricsSourceAdapter sa = new MetricsSourceAdapter(prefix, name, desc,
-        source, injectedTags, period * 1000L, conf != null ? conf
+        source, injectedTags, period, conf != null ? conf
         : config.subset(SOURCE_KEY));
     sources.put(name, sa);
     sa.start();
@@ -359,7 +359,7 @@ private synchronized void startTimer() {
       return;
     }
     logicalTime = 0;
-    long millis = period * 1000L;
+    long millis = period;
     timer = new Timer("Timer for '"+ prefix +"' metrics system", true);
     timer.scheduleAtFixedRate(new TimerTask() {
       @Override
@@ -371,7 +371,7 @@ public void run() {
         }
       }
     }, millis, millis);
-    LOG.info("Scheduled snapshot period at "+ period +" second(s).");
+    LOG.info("Scheduled snapshot period at "+ (period/1000) +" second(s).");
   }

   synchronized void onTimerEvent() {
@@ -485,12 +485,15 @@ private synchronized void configureSystem() {
   private synchronized void configureSinks() {
     sinkConfigs = config.getInstanceConfigs(SINK_KEY);
-    int confPeriod = 0;
+    long confPeriodMillis = 0;
     for (Entry<String, MetricsConfig> entry : sinkConfigs.entrySet()) {
       MetricsConfig conf = entry.getValue();
       int sinkPeriod = conf.getInt(PERIOD_KEY, PERIOD_DEFAULT);
-      confPeriod = confPeriod == 0 ? sinkPeriod
-          : ArithmeticUtils.gcd(confPeriod, sinkPeriod);
+      // Support configuring periodMillis for testing.
+      long sinkPeriodMillis =
+          conf.getLong(PERIOD_MILLIS_KEY, sinkPeriod * 1000);
+      confPeriodMillis = confPeriodMillis == 0 ? sinkPeriodMillis
+          : ArithmeticUtils.gcd(confPeriodMillis, sinkPeriodMillis);
       String clsName = conf.getClassName("");
       if (clsName == null) continue; // sink can be registered later on
       String sinkName = entry.getKey();
@@ -503,8 +506,9 @@ private synchronized void configureSinks() {
         LOG.warn("Error creating sink '"+ sinkName +"'", e);
       }
     }
-    period = confPeriod > 0 ? confPeriod
-        : config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
+    long periodSec = config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
+    period = confPeriodMillis > 0 ? confPeriodMillis
+        : config.getLong(PERIOD_MILLIS_KEY, periodSec * 1000);
   }

   static MetricsSinkAdapter newSink(String name, String desc, MetricsSink sink,
@@ -550,7 +554,7 @@ static String getHostname() {
   private void registerSystemSource() {
     MetricsConfig sysConf = sourceConfigs.get(MS_NAME);
     sysSource = new MetricsSourceAdapter(prefix, MS_STATS_NAME, MS_STATS_DESC,
-        MetricsAnnotations.makeSource(this), injectedTags, period * 1000L,
+        MetricsAnnotations.makeSource(this), injectedTags, period,
         sysConf == null ? config.subset(SOURCE_KEY) : sysConf);
     sysSource.start();
   }
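Apart from the unit change (seconds to milliseconds), the scheduling logic is untouched: the timer still fires at the GCD of all configured sink periods, so each sink is serviced on some multiple of the tick. A standalone sketch of that derivation, assuming the commons-math3 ArithmeticUtils that the code above already calls; the sample periods are made up:

import org.apache.commons.math3.util.ArithmeticUtils;

public class PeriodGcdSketch {
  public static void main(String[] args) {
    // e.g. a 10 s file sink, an 8 s ganglia sink, and a 100 ms test sink
    long[] sinkPeriodsMillis = {10_000L, 8_000L, 100L};
    long confPeriodMillis = 0;
    for (long sinkPeriodMillis : sinkPeriodsMillis) {
      confPeriodMillis = confPeriodMillis == 0 ? sinkPeriodMillis
          : ArithmeticUtils.gcd(confPeriodMillis, sinkPeriodMillis);
    }
    // gcd(10000, 8000, 100) = 100: the timer ticks every 100 ms, and
    // logicalTime counts ticks so each sink fires on its own multiple.
    System.out.println("Timer period: " + confPeriodMillis + " ms");
  }
}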

CHANGES.txt (hadoop-hdfs)

@@ -1338,6 +1338,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9069. TestNameNodeMetricsLogger failing -port in use.
     (stevel)

+    HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter
+    is failing in trunk (Surendra Singh Lilhore via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED

   INCOMPATIBLE CHANGES

DFSTestUtil.java (org.apache.hadoop.hdfs)

@@ -68,6 +68,7 @@
 import java.util.concurrent.atomic.AtomicBoolean;

 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.UnhandledException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -133,6 +134,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.hdfs.tools.JMXGet;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.NetUtils;
@@ -1856,4 +1858,21 @@ public static void fillExpectedBuf(LocatedBlocks lbs, byte[] expected) {
       }
     }
   }
+
+  public static void waitForMetric(final JMXGet jmx, final String metricName,
+      final int expectedValue) throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          final int currentValue = Integer.parseInt(jmx.getValue(metricName));
+          LOG.info("Waiting for " + metricName +
+              " to reach value " + expectedValue +
+              ", current value = " + currentValue);
+          return currentValue == expectedValue;
+        } catch (Exception e) {
+          throw new UnhandledException("Test failed due to unexpected exception", e);
+        }
+      }
+    }, 1000, Integer.MAX_VALUE);
+  }
 }
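A hedged usage sketch for the new helper: the JMXGet setup below mirrors how LazyPersistTestCase obtains its jmx handle (setService/init), and assumes a DataNode exposing JMX metrics is already running; the metric name and target value are illustrative:

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.tools.JMXGet;

public class WaitForMetricExample {
  public static void main(String[] args) throws Exception {
    JMXGet jmx = new JMXGet();
    jmx.setService("DataNode"); // query the DataNode's metrics MBeans
    jmx.init();                 // connect to the JMX server

    // Polls every 1000 ms, effectively without timeout (Integer.MAX_VALUE),
    // until RamDiskBlocksEvicted reaches 1.
    DFSTestUtil.waitForMetric(jmx, "RamDiskBlocksEvicted", 1);
  }
}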

LazyPersistTestCase.java (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl)

@@ -510,20 +510,7 @@ private void printRamDiskJMXMetrics() {
   protected void waitForMetric(final String metricName, final int expectedValue)
       throws TimeoutException, InterruptedException {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        try {
-          final int currentValue = Integer.parseInt(jmx.getValue(metricName));
-          LOG.info("Waiting for " + metricName +
-              " to reach value " + expectedValue +
-              ", current value = " + currentValue);
-          return currentValue == expectedValue;
-        } catch (Exception e) {
-          throw new UnhandledException("Test failed due to unexpected exception", e);
-        }
-      }
-    }, 1000, Integer.MAX_VALUE);
+    DFSTestUtil.waitForMetric(jmx, metricName, expectedValue);
   }

   protected void triggerEviction(DataNode dn) {

TestLazyWriter.java (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl)

@@ -72,6 +72,7 @@ public void testSynchronousEviction() throws Exception {
     // for the previous one.
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
     makeTestFile(path2, BLOCK_SIZE, true);
+    waitForMetric("RamDiskBlocksEvicted", 1);
     verifyRamDiskJMXMetric("RamDiskBlocksEvicted", 1);
     verifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 1);
   }

hadoop-metrics2.properties (new file)

@@ -0,0 +1,85 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# syntax: [prefix].[source|sink].[instance].[options]
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
# default sampling period, in seconds
*.period=10
*.periodMillis=100
# The namenode-metrics.out will contain metrics from all contexts
#namenode.sink.file.filename=namenode-metrics.out
# Specifying a special sampling period for namenode:
#namenode.sink.*.period=8
#datanode.sink.file.filename=datanode-metrics.out
#resourcemanager.sink.file.filename=resourcemanager-metrics.out
#nodemanager.sink.file.filename=nodemanager-metrics.out
#mrappmaster.sink.file.filename=mrappmaster-metrics.out
#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
# The following example splits metrics of different
# contexts to different sinks (in this case files)
#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
#nodemanager.sink.file_jvm.context=jvm
#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
#nodemanager.sink.file_mapred.context=mapred
#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
#
# Below are for sending metrics to Ganglia
#
# for Ganglia 3.0 support
# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
#
# for Ganglia 3.1 support
# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
# *.sink.ganglia.period=10
# default for supportsparse is false
# *.sink.ganglia.supportsparse=true
#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
# Tag values to use for the ganglia prefix. If not defined, no tags are used.
# If '*', all tags are used. When specifying multiple tags, separate them with
# commas. Note that the last segment of the property name is the context name.
#
#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
#*.sink.ganglia.tagsForPrefix.dfs=
#*.sink.ganglia.tagsForPrefix.rpc=
#*.sink.ganglia.tagsForPrefix.mapred=
#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
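For reference, a hedged sketch of how a daemon or test picks this file up: DefaultMetricsSystem.initialize loads hadoop-metrics2-<prefix>.properties from the classpath, falling back to the shared hadoop-metrics2.properties, so placing the file above on a test classpath makes every sink snapshot at the 100 ms periodMillis override instead of the 10 s default:

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class MetricsInitExample {
  public static void main(String[] args) {
    // "datanode" selects hadoop-metrics2-datanode.properties if present,
    // otherwise the shared hadoop-metrics2.properties shown above.
    MetricsSystem ms = DefaultMetricsSystem.initialize("datanode");
    // ... run workload; with *.periodMillis=100 sinks flush every 100 ms ...
    DefaultMetricsSystem.shutdown();
  }
}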