Replace use of Guava's Stopwatch with Hadoop's StopWatch. (ozawa)

Tsuyoshi Ozawa 2015-01-08 14:21:09 +09:00
parent ef237bd52f
commit 2eba7eb9af
15 changed files with 234 additions and 49 deletions
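
In short, every call site swaps the import com.google.common.base.Stopwatch for org.apache.hadoop.util.StopWatch (the new class added below) and maps Guava's elapsedMillis()/elapsedTime(unit) onto the new now(TimeUnit) method. A minimal sketch of the call-site migration, with the old Guava calls shown in comments (class name hypothetical; mappings taken from the hunks below):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchMigrationSketch {
  public static void main(String[] args) throws InterruptedException {
    // before: Stopwatch sw = new Stopwatch().start();  (com.google.common.base.Stopwatch)
    StopWatch sw = new StopWatch().start();
    Thread.sleep(50);  // stand-in for the timed work
    sw.stop();
    // before: sw.elapsedMillis()
    long millis = sw.now(TimeUnit.MILLISECONDS);
    // before: sw.elapsedTime(TimeUnit.MICROSECONDS)
    long micros = sw.now(TimeUnit.MICROSECONDS);
    System.out.println(millis + "ms (" + micros + "us)");
  }
}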

View File

@@ -146,6 +146,9 @@ Trunk (Unreleased)
HADOOP-11058. Missing HADOOP_CONF_DIR generates strange results
(Masatake Iwasaki via aw)
HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
(ozawa)
BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled.

View File

@@ -22,6 +22,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -30,7 +31,6 @@
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
@@ -172,7 +172,7 @@ public String toString() {
private class Monitor implements Runnable {
@Override
public void run() {
Stopwatch sw = new Stopwatch();
StopWatch sw = new StopWatch();
Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
while (shouldRun) {
sw.reset().start();
@@ -181,7 +181,7 @@ public void run() {
} catch (InterruptedException ie) {
return;
}
long extraSleepTime = sw.elapsedMillis() - SLEEP_INTERVAL_MS;
long extraSleepTime = sw.now(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();
if (extraSleepTime > warnThresholdMs) {
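
The monitor loop above re-arms the watch each iteration via the fluent reset().start() chain; reset() deliberately never throws, so the chain is safe even when the watch is still running. A small self-contained sketch of that pattern (interval and iteration count hypothetical):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class PauseLoopSketch {
  public static void main(String[] args) throws InterruptedException {
    final long sleepIntervalMs = 100;  // stand-in for SLEEP_INTERVAL_MS
    StopWatch sw = new StopWatch();
    for (int i = 0; i < 3; i++) {
      sw.reset().start();              // re-arm the watch for this iteration
      Thread.sleep(sleepIntervalMs);
      // anything beyond the requested sleep is time the JVM was paused
      long extraSleepTime = sw.now(TimeUnit.MILLISECONDS) - sleepIntervalMs;
      System.out.println("extra sleep time: " + extraSleepTime + "ms");
    }
  }
}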

View File

@@ -0,0 +1,108 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;

import java.io.Closeable;
import java.util.concurrent.TimeUnit;

/**
 * A simplified StopWatch implementation that measures elapsed time in
 * nanoseconds.
 */
public class StopWatch implements Closeable {
  private boolean isStarted;
  private long startNanos;
  private long currentElapsedNanos;

  public StopWatch() {
  }

  /**
   * Tells whether the StopWatch is running.
   * @return true if the StopWatch is running.
   */
  public boolean isRunning() {
    return isStarted;
  }

  /**
   * Start measuring elapsed time and set the stopwatch state to running.
   * @return this instance of StopWatch.
   */
  public StopWatch start() {
    if (isStarted) {
      throw new IllegalStateException("StopWatch is already running");
    }
    isStarted = true;
    startNanos = System.nanoTime();
    return this;
  }

  /**
   * Stop measuring elapsed time and set the stopwatch state to stopped.
   * @return this instance of StopWatch.
   */
  public StopWatch stop() {
    if (!isStarted) {
      throw new IllegalStateException("StopWatch is already stopped");
    }
    long now = System.nanoTime();
    isStarted = false;
    currentElapsedNanos += now - startNanos;
    return this;
  }

  /**
   * Reset the elapsed time to zero and set the stopwatch state to stopped.
   * @return this instance of StopWatch.
   */
  public StopWatch reset() {
    currentElapsedNanos = 0;
    isStarted = false;
    return this;
  }

  /**
   * @return the current elapsed time in the specified time unit.
   */
  public long now(TimeUnit timeUnit) {
    return timeUnit.convert(now(), TimeUnit.NANOSECONDS);
  }

  /**
   * @return the current elapsed time in nanoseconds.
   */
  public long now() {
    return isStarted ?
        System.nanoTime() - startNanos + currentElapsedNanos :
        currentElapsedNanos;
  }

  @Override
  public String toString() {
    return String.valueOf(now());
  }

  @Override
  public void close() {
    if (isStarted) {
      stop();
    }
  }
}
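
A brief usage sketch of the new class (the timed block is hypothetical; the try-with-resources form works because close() stops a running watch, as implemented above):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.StopWatch;

public class StopWatchUsageSketch {
  public static void main(String[] args) throws InterruptedException {
    try (StopWatch sw = new StopWatch().start()) {
      Thread.sleep(25);  // stand-in for the work being timed
      // now() can be read while the watch is still running; it adds the
      // in-flight interval to the accumulated total
      System.out.println("elapsed: " + sw.now(TimeUnit.MILLISECONDS) + "ms");
    } // close() stops the watch even if the body exits exceptionally
  }
}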

View File

@@ -21,12 +21,11 @@
import java.util.ArrayList;
import java.util.Iterator;
import java.util.concurrent.TimeUnit;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.base.Stopwatch;
public class TestChunkedArrayList {
@Test
@@ -71,24 +70,24 @@ public void testPerformance() {
System.gc();
{
ArrayList<String> arrayList = new ArrayList<String>();
Stopwatch sw = new Stopwatch();
StopWatch sw = new StopWatch();
sw.start();
for (int i = 0; i < numElems; i++) {
arrayList.add(obj);
}
System.out.println(" ArrayList " + sw.elapsedMillis());
System.out.println(" ArrayList " + sw.now(TimeUnit.MILLISECONDS));
}
// test ChunkedArrayList
System.gc();
{
ChunkedArrayList<String> chunkedList = new ChunkedArrayList<String>();
Stopwatch sw = new Stopwatch();
StopWatch sw = new StopWatch();
sw.start();
for (int i = 0; i < numElems; i++) {
chunkedList.add(obj);
}
System.out.println("ChunkedArrayList " + sw.elapsedMillis());
System.out.println("ChunkedArrayList " + sw.now(TimeUnit.MILLISECONDS));
}
}
}

View File

@@ -21,8 +21,6 @@
import java.util.Random;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import org.apache.hadoop.fs.ChecksumException;
import org.junit.Test;
@@ -147,19 +145,19 @@ public void commonUsagePerfTest() throws Exception {
Harness h = new Harness(checksum, dataLength, true);
for (int i = 0; i < NUM_RUNS; i++) {
Stopwatch s = new Stopwatch().start();
StopWatch s = new StopWatch().start();
// calculate real checksum, make sure it passes
checksum.calculateChunkedSums(h.dataBuf, h.checksumBuf);
s.stop();
System.err.println("Calculate run #" + i + ": " +
s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
s.now(TimeUnit.MICROSECONDS) + "us");
s = new Stopwatch().start();
s = new StopWatch().start();
// verify the checksum, make sure it passes
checksum.verifyChunkedSums(h.dataBuf, h.checksumBuf, "fake file", 0);
s.stop();
System.err.println("Verify run #" + i + ": " +
s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
s.now(TimeUnit.MICROSECONDS) + "us");
}
}

View File

@@ -0,0 +1,62 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;

import org.junit.Assert;
import org.junit.Test;

public class TestStopWatch {

  @Test
  public void testStartAndStop() throws Exception {
    try (StopWatch sw = new StopWatch()) {
      Assert.assertFalse(sw.isRunning());
      sw.start();
      Assert.assertTrue(sw.isRunning());
      sw.stop();
      Assert.assertFalse(sw.isRunning());
    }
  }

  @Test
  public void testStopInTryWithResource() throws Exception {
    try (StopWatch sw = new StopWatch()) {
      // make sure that no exception is thrown.
    }
  }

  @Test
  public void testExceptions() throws Exception {
    StopWatch sw = new StopWatch();
    try {
      sw.stop();
      Assert.fail("stop() on an unstarted StopWatch should throw");
    } catch (Exception e) {
      Assert.assertTrue("IllegalStateException is expected",
          e instanceof IllegalStateException);
    }
    sw.reset();
    sw.start();
    try {
      sw.start();
      Assert.fail("start() on a running StopWatch should throw");
    } catch (Exception e) {
      Assert.assertTrue("IllegalStateException is expected",
          e instanceof IllegalStateException);
    }
  }
}
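
Where only a single failure mode matters per test, JUnit 4's expected attribute expresses the same contract more tightly; a hedged alternative sketch (class and method names hypothetical):

import org.apache.hadoop.util.StopWatch;
import org.junit.Test;

public class TestStopWatchContractSketch {
  @Test(expected = IllegalStateException.class)
  public void stopBeforeStartThrows() {
    new StopWatch().stop();  // must throw: the watch was never started
  }

  @Test(expected = IllegalStateException.class)
  public void doubleStartThrows() {
    new StopWatch().start().start();  // the second start() must throw
  }
}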

View File

@@ -479,6 +479,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7484. Make FSDirectory#addINode take existing INodes as its parameter.
(jing9)
HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
(ozawa)
OPTIMIZATIONS
HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

View File

@@ -52,10 +52,10 @@
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.StopWatch;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
import com.google.common.net.InetAddresses;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
@@ -143,7 +143,7 @@ public class IPCLoggerChannel implements AsyncLogger {
/**
* Stopwatch which starts counting on each heartbeat that is sent
*/
private final Stopwatch lastHeartbeatStopwatch = new Stopwatch();
private final StopWatch lastHeartbeatStopwatch = new StopWatch();
private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;
@@ -463,8 +463,8 @@ private void throwIfOutOfSync()
* written.
*/
private void heartbeatIfNecessary() throws IOException {
if (lastHeartbeatStopwatch.elapsedMillis() > HEARTBEAT_INTERVAL_MILLIS ||
!lastHeartbeatStopwatch.isRunning()) {
if (lastHeartbeatStopwatch.now(TimeUnit.MILLISECONDS)
> HEARTBEAT_INTERVAL_MILLIS || !lastHeartbeatStopwatch.isRunning()) {
try {
getProxy().heartbeat(createReqInfo());
} finally {

View File

@@ -65,11 +65,11 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Range;
import com.google.common.collect.Ranges;
import com.google.protobuf.TextFormat;
import org.apache.hadoop.util.StopWatch;
/**
* A JournalNode can manage journals for several clusters at once.
@@ -374,15 +374,20 @@ synchronized void journal(RequestInfo reqInfo,
curSegment.writeRaw(records, 0, records.length);
curSegment.setReadyToFlush();
Stopwatch sw = new Stopwatch();
StopWatch sw = new StopWatch();
sw.start();
curSegment.flush(shouldFsync);
sw.stop();
metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS));
if (sw.elapsedTime(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
long nanoSeconds = sw.now();
metrics.addSync(
TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS));
long milliSeconds = TimeUnit.MILLISECONDS.convert(
nanoSeconds, TimeUnit.NANOSECONDS);
if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) {
LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
" took " + sw.elapsedTime(TimeUnit.MILLISECONDS) + "ms");
" took " + milliSeconds + "ms");
}
if (isLagging) {
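
The rewritten sync-timing code above reads the clock once (sw.now()) and derives both the microsecond metric and the millisecond warning check from that single nanosecond value, so both reflect the same instant. A tiny sketch of the conversions involved (input value hypothetical):

import java.util.concurrent.TimeUnit;

public class NanoConversionSketch {
  public static void main(String[] args) {
    long nanoSeconds = 3_141_592_653L;  // one hypothetical sw.now() reading
    long micros = TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS);
    long millis = TimeUnit.MILLISECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS);
    System.out.println(micros + "us, " + millis + "ms");  // 3141592us, 3141ms
  }
}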

View File

@@ -32,12 +32,11 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.metrics2.util.Quantile;
import org.apache.hadoop.metrics2.util.SampleQuantiles;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;
import com.google.common.base.Stopwatch;
/**
* This class tests hflushing concurrently from many threads.
*/
@@ -100,10 +99,10 @@ public void run() {
}
private void doAWrite() throws IOException {
Stopwatch sw = new Stopwatch().start();
StopWatch sw = new StopWatch().start();
stm.write(toWrite);
stm.hflush();
long micros = sw.elapsedTime(TimeUnit.MICROSECONDS);
long micros = sw.now(TimeUnit.MICROSECONDS);
quantiles.insert(micros);
}
}
@@ -276,12 +275,12 @@ public int run(String args[]) throws Exception {
int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT);
Stopwatch sw = new Stopwatch().start();
StopWatch sw = new StopWatch().start();
test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
replication);
sw.stop();
System.out.println("Finished in " + sw.elapsedMillis() + "ms");
System.out.println("Finished in " + sw.now(TimeUnit.MILLISECONDS) + "ms");
System.out.println("Latency quantiles (in microseconds):\n" +
test.quantiles);
return 0;

View File

@@ -27,6 +27,7 @@
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
@@ -38,8 +39,6 @@
import org.apache.hadoop.hdfs.qjournal.client.IPCLoggerChannel;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
import org.apache.hadoop.hdfs.qjournal.server.Journal;
import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -48,12 +47,12 @@
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StopWatch;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.base.Stopwatch;
import com.google.common.primitives.Bytes;
import com.google.common.primitives.Ints;
@@ -320,11 +319,11 @@ private void doPerfTest(int editsSize, int numEdits) throws Exception {
ch.setEpoch(1);
ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
Stopwatch sw = new Stopwatch().start();
StopWatch sw = new StopWatch().start();
for (int i = 1; i < numEdits; i++) {
ch.sendEdits(1L, i, 1, data).get();
}
long time = sw.elapsedMillis();
long time = sw.now(TimeUnit.MILLISECONDS);
System.err.println("Wrote " + numEdits + " batches of " + editsSize +
" bytes in " + time + "ms");

View File

@@ -92,6 +92,9 @@ Trunk (Unreleased)
MAPREDUCE-6013. [post-HADOOP-9902] mapred version is missing (Akira AJISAKA
via aw)
HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
(ozawa)
BUG FIXES
MAPREDUCE-6191. Improve clearing stale state of Java serialization

View File

@@ -28,6 +28,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -45,9 +46,9 @@
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Iterables;
/**
@@ -223,7 +224,7 @@ protected FileStatus[] listStatus(JobConf job) throws IOException {
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
Stopwatch sw = new Stopwatch().start();
StopWatch sw = new StopWatch().start();
if (numThreads == 1) {
List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
@@ -242,7 +243,8 @@ protected FileStatus[] listStatus(JobConf job) throws IOException {
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
LOG.debug("Time taken to get FileStatuses: "
+ sw.now(TimeUnit.MILLISECONDS));
}
LOG.info("Total input paths to process : " + result.length);
return result;
@@ -309,7 +311,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
* they're too big.*/
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
Stopwatch sw = new Stopwatch().start();
StopWatch sw = new StopWatch().start();
FileStatus[] files = listStatus(job);
// Save the number of input files for metrics/loadgen
@@ -371,7 +373,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits)
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+ ", TimeTaken: " + sw.elapsedMillis());
+ ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits.toArray(new FileSplit[splits.size()]);
}

View File

@@ -22,6 +22,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -43,9 +44,9 @@
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Lists;
/**
@@ -259,7 +260,7 @@ protected List<FileStatus> listStatus(JobContext job
int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
DEFAULT_LIST_STATUS_NUM_THREADS);
Stopwatch sw = new Stopwatch().start();
StopWatch sw = new StopWatch().start();
if (numThreads == 1) {
result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
} else {
@@ -276,7 +277,8 @@ protected List<FileStatus> listStatus(JobContext job
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
LOG.debug("Time taken to get FileStatuses: "
+ sw.now(TimeUnit.MILLISECONDS));
}
LOG.info("Total input paths to process : " + result.size());
return result;
@@ -376,7 +378,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
* @throws IOException
*/
public List<InputSplit> getSplits(JobContext job) throws IOException {
Stopwatch sw = new Stopwatch().start();
StopWatch sw = new StopWatch().start();
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
long maxSize = getMaxSplitSize(job);
@@ -427,7 +429,7 @@ public List<InputSplit> getSplits(JobContext job) throws IOException {
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+ ", TimeTaken: " + sw.elapsedMillis());
+ ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits;
}

View File

@@ -18,9 +18,9 @@
package org.apache.hadoop.mapred.nativetask.kvtest;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.zip.CRC32;
import com.google.common.base.Stopwatch;
import com.google.common.primitives.Longs;
import org.apache.commons.logging.Log;
@@ -36,6 +36,7 @@
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StopWatch;
public class KVJob {
public static final String INPUTPATH = "nativetask.kvtest.inputfile.path";
@@ -93,9 +94,10 @@ public KVJob(String jobname, Configuration conf,
final TestInputFile testfile = new TestInputFile(Integer.valueOf(conf.get(
TestConstants.FILESIZE_KEY, "1000")),
keyclass.getName(), valueclass.getName(), conf);
Stopwatch sw = new Stopwatch().start();
StopWatch sw = new StopWatch().start();
testfile.createSequenceTestFile(inputpath);
LOG.info("Created test file " + inputpath + " in " + sw.elapsedMillis() + "ms");
LOG.info("Created test file " + inputpath + " in "
+ sw.now(TimeUnit.MILLISECONDS) + "ms");
}
job.setInputFormatClass(SequenceFileInputFormat.class);
FileInputFormat.addInputPath(job, new Path(inputpath));