From 2eba7eb9aff5f7a1bf63ff1ebbe28d21fd37065b Mon Sep 17 00:00:00 2001
From: Tsuyoshi Ozawa
Date: Thu, 8 Jan 2015 14:21:09 +0900
Subject: [PATCH] Replace use of Guava's Stopwatch with Hadoop's StopWatch.
 (ozawa)

---
 .../hadoop-common/CHANGES.txt                 |   3 +
 .../apache/hadoop/util/JvmPauseMonitor.java   |   6 +-
 .../org/apache/hadoop/util/StopWatch.java     | 108 ++++++++++++++++++
 .../hadoop/util/TestChunkedArrayList.java     |  11 +-
 .../apache/hadoop/util/TestDataChecksum.java  |  10 +-
 .../org/apache/hadoop/util/TestStopWatch.java |  62 ++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   3 +
 .../qjournal/client/IPCLoggerChannel.java     |   8 +-
 .../hadoop/hdfs/qjournal/server/Journal.java  |  17 ++-
 .../hadoop/hdfs/TestMultiThreadedHflush.java  |  11 +-
 .../hdfs/qjournal/server/TestJournalNode.java |   9 +-
 hadoop-mapreduce-project/CHANGES.txt          |   3 +
 .../apache/hadoop/mapred/FileInputFormat.java |  12 +-
 .../mapreduce/lib/input/FileInputFormat.java  |  12 +-
 .../mapred/nativetask/kvtest/KVJob.java       |   8 +-
 15 files changed, 234 insertions(+), 49 deletions(-)
 create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
 create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 23bd42888b..3d095e7655 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -146,6 +146,9 @@ Trunk (Unreleased)
     HADOOP-11058. Missing HADOOP_CONF_DIR generates strange results (Masatake
     Iwasaki via aw)
 
+    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
+    (ozawa)
+
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
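For reviewers: the change is mechanical across all fifteen files and boils down to the method mapping sketched below. This snippet is illustrative only, not part of the patch; the class name is hypothetical, and the "before" calls assume the Guava 14-era Stopwatch API that the deleted imports refer to.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.util.StopWatch;

    public class StopWatchMigrationExample {
      public static void main(String[] args) throws InterruptedException {
        // Before:  Stopwatch sw = new Stopwatch().start();   (com.google.common.base)
        // After:
        StopWatch sw = new StopWatch().start();
        Thread.sleep(50);  // stand-in for timed work
        long millis = sw.now(TimeUnit.MILLISECONDS);  // was sw.elapsedMillis()
        long micros = sw.now(TimeUnit.MICROSECONDS);  // was sw.elapsedTime(TimeUnit.MICROSECONDS)
        System.out.println(millis + "ms / " + micros + "us");
      }
    }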
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
index e8af45e746..1fe7796451 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
@@ -22,6 +22,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -30,7 +31,6 @@
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -172,7 +172,7 @@ public String toString() {
   private class Monitor implements Runnable {
     @Override
     public void run() {
-      Stopwatch sw = new Stopwatch();
+      StopWatch sw = new StopWatch();
       Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
       while (shouldRun) {
         sw.reset().start();
@@ -181,7 +181,7 @@ public void run() {
         } catch (InterruptedException ie) {
           return;
         }
-        long extraSleepTime = sw.elapsedMillis() - SLEEP_INTERVAL_MS;
+        long extraSleepTime = sw.now(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
         Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();
 
         if (extraSleepTime > warnThresholdMs) {
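The sw.reset().start() chain in the loop above keeps compiling because, like Guava's Stopwatch, the new class returns this from both methods. A runnable sketch of the same pause-detection pattern follows; the class name and the interval value are hypothetical.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.util.StopWatch;

    public class PauseDetectorSketch {
      private static final long SLEEP_INTERVAL_MS = 500;  // assumed interval

      public static void main(String[] args) throws InterruptedException {
        StopWatch sw = new StopWatch();
        for (int i = 0; i < 5; i++) {
          sw.reset().start();  // both methods return this, so the chain compiles
          Thread.sleep(SLEEP_INTERVAL_MS);
          // Anything beyond the requested sleep is time the thread was not
          // scheduled, e.g. a GC pause.
          long extraSleepTime = sw.now(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
          System.out.println("extra sleep time: " + extraSleepTime + "ms");
        }
      }
    }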
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
new file mode 100644
index 0000000000..b9d0d0b664
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.Closeable;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A simplified StopWatch implementation which can measure times in
+ * nanoseconds.
+ */
+public class StopWatch implements Closeable {
+  private boolean isStarted;
+  private long startNanos;
+  private long currentElapsedNanos;
+
+  public StopWatch() {
+  }
+
+  /**
+   * Check whether the stopwatch is currently running.
+   * @return true if the stopwatch is running.
+   */
+  public boolean isRunning() {
+    return isStarted;
+  }
+
+  /**
+   * Start measuring elapsed time and mark the stopwatch as running.
+   * @return this instance of StopWatch.
+   */
+  public StopWatch start() {
+    if (isStarted) {
+      throw new IllegalStateException("StopWatch is already running");
+    }
+    isStarted = true;
+    startNanos = System.nanoTime();
+    return this;
+  }
+
+  /**
+   * Stop the stopwatch and accumulate the just-measured interval.
+   * @return this instance of StopWatch.
+   */
+  public StopWatch stop() {
+    if (!isStarted) {
+      throw new IllegalStateException("StopWatch is already stopped");
+    }
+    long now = System.nanoTime();
+    isStarted = false;
+    currentElapsedNanos += now - startNanos;
+    return this;
+  }
+
+  /**
+   * Reset the accumulated elapsed time to zero and stop the stopwatch.
+   * @return this instance of StopWatch.
+   */
+  public StopWatch reset() {
+    currentElapsedNanos = 0;
+    isStarted = false;
+    return this;
+  }
+
+  /**
+   * @return the current elapsed time in the specified time unit.
+   */
+  public long now(TimeUnit timeUnit) {
+    return timeUnit.convert(now(), TimeUnit.NANOSECONDS);
+  }
+
+  /**
+   * @return the current elapsed time in nanoseconds.
+   */
+  public long now() {
+    return isStarted ?
+        System.nanoTime() - startNanos + currentElapsedNanos :
+        currentElapsedNanos;
+  }
+
+  @Override
+  public String toString() {
+    return String.valueOf(now());
+  }
+
+  @Override
+  public void close() {
+    if (isStarted) {
+      stop();
+    }
+  }
+}
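A minimal usage sketch of the new class (illustrative only, not part of the patch; the class name and doWork() helper are hypothetical). Elapsed time accumulates across start()/stop() cycles until reset(), and because StopWatch implements Closeable with a close() that stops a still-running watch, it also works with try-with-resources.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.util.StopWatch;

    public class StopWatchUsageSketch {
      public static void main(String[] args) throws Exception {
        // Plain start/stop: elapsed time accumulates across cycles.
        StopWatch sw = new StopWatch().start();
        doWork();
        sw.stop();
        System.out.println("first leg: " + sw.now(TimeUnit.MILLISECONDS) + "ms");
        sw.start();  // resume; nanoseconds keep accumulating
        doWork();
        System.out.println("both legs: " + sw.now(TimeUnit.MILLISECONDS) + "ms");

        // try-with-resources: close() stops a running watch, so an early
        // return or an exception cannot leave it running.
        try (StopWatch sw2 = new StopWatch().start()) {
          doWork();
          System.out.println("in-block:  " + sw2.now(TimeUnit.MICROSECONDS) + "us");
        }
      }

      private static void doWork() throws InterruptedException {
        Thread.sleep(10);  // stand-in for real work
      }
    }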
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
index f8a2d49c58..a007f85c24 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
@@ -21,12 +21,11 @@
 
 import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.concurrent.TimeUnit;
 
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.google.common.base.Stopwatch;
-
 public class TestChunkedArrayList {
 
   @Test
@@ -71,24 +70,24 @@ public void testPerformance() {
       System.gc();
       {
         ArrayList<String> arrayList = new ArrayList<String>();
-        Stopwatch sw = new Stopwatch();
+        StopWatch sw = new StopWatch();
         sw.start();
         for (int i = 0; i < numElems; i++) {
          arrayList.add(obj);
        }
-        System.out.println("       ArrayList " + sw.elapsedMillis());
+        System.out.println("       ArrayList " + sw.now(TimeUnit.MILLISECONDS));
       }
 
       // test ChunkedArrayList
       System.gc();
       {
         ChunkedArrayList<String> chunkedList = new ChunkedArrayList<String>();
-        Stopwatch sw = new Stopwatch();
+        StopWatch sw = new StopWatch();
         sw.start();
         for (int i = 0; i < numElems; i++) {
           chunkedList.add(obj);
         }
-        System.out.println("ChunkedArrayList " + sw.elapsedMillis());
+        System.out.println("ChunkedArrayList " + sw.now(TimeUnit.MILLISECONDS));
       }
     }
   }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
index 34fc32aa08..73fd25a7fa 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
@@ -21,8 +21,6 @@
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.base.Stopwatch;
-
 import org.apache.hadoop.fs.ChecksumException;
 import org.junit.Test;
 
@@ -147,19 +145,19 @@ public void commonUsagePerfTest() throws Exception {
     Harness h = new Harness(checksum, dataLength, true);
 
     for (int i = 0; i < NUM_RUNS; i++) {
-      Stopwatch s = new Stopwatch().start();
+      StopWatch s = new StopWatch().start();
       // calculate real checksum, make sure it passes
       checksum.calculateChunkedSums(h.dataBuf, h.checksumBuf);
       s.stop();
       System.err.println("Calculate run #" + i + ": " +
-                         s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
+                         s.now(TimeUnit.MICROSECONDS) + "us");
 
-      s = new Stopwatch().start();
+      s = new StopWatch().start();
       // calculate real checksum, make sure it passes
       checksum.verifyChunkedSums(h.dataBuf, h.checksumBuf, "fake file", 0);
       s.stop();
       System.err.println("Verify run #" + i + ": " +
-                         s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
+                         s.now(TimeUnit.MICROSECONDS) + "us");
     }
   }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java
new file mode 100644
index 0000000000..6f577b08d2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestStopWatch {
+
+  @Test
+  public void testStartAndStop() throws Exception {
+    try (StopWatch sw = new StopWatch()) {
+      Assert.assertFalse(sw.isRunning());
+      sw.start();
+      Assert.assertTrue(sw.isRunning());
+      sw.stop();
+      Assert.assertFalse(sw.isRunning());
+    }
+  }
+
+  @Test
+  public void testStopInTryWithResource() throws Exception {
+    try (StopWatch sw = new StopWatch()) {
+      // make sure that no exception is thrown.
+    }
+  }
+
+  @Test
+  public void testExceptions() throws Exception {
+    StopWatch sw = new StopWatch();
+    try {
+      sw.stop();
+    } catch (Exception e) {
+      Assert.assertTrue("IllegalStateException is expected",
+          e instanceof IllegalStateException);
+    }
+    sw.reset();
+    sw.start();
+    try {
+      sw.start();
+    } catch (Exception e) {
+      Assert.assertTrue("IllegalStateException is expected",
+          e instanceof IllegalStateException);
+    }
+  }
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a8966df15c..9f48c7686b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -479,6 +479,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7484. Make FSDirectory#addINode take existing INodes as its parameter.
     (jing9)
 
+    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
+    (ozawa)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
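One note on testExceptions above: because the assertions sit inside catch blocks, the test still passes when stop() or start() does not throw at all. If stricter behavior is wanted, a variant along these lines would fail in that case (a sketch in the same JUnit 4 style; the class and method names are hypothetical and it assumes the same org.apache.hadoop.util package).

    package org.apache.hadoop.util;

    import org.junit.Assert;
    import org.junit.Test;

    public class TestStopWatchStrict {

      @Test
      public void testStopThrowsWhenNotStarted() {
        StopWatch sw = new StopWatch();
        try {
          sw.stop();
          Assert.fail("stop() on a never-started StopWatch should throw");
        } catch (IllegalStateException expected) {
          // expected: the watch was never started
        }
      }

      @Test
      public void testStartThrowsWhenAlreadyRunning() {
        StopWatch sw = new StopWatch().start();
        try {
          sw.start();
          Assert.fail("start() on a running StopWatch should throw");
        } catch (IllegalStateException expected) {
          // expected: the watch is already running
        }
      }
    }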
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index e37869c797..6938f571e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -52,10 +52,10 @@
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.StopWatch;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import com.google.common.net.InetAddresses;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
@@ -143,7 +143,7 @@ public class IPCLoggerChannel implements AsyncLogger {
   /**
    * Stopwatch which starts counting on each heartbeat that is sent
    */
-  private final Stopwatch lastHeartbeatStopwatch = new Stopwatch();
+  private final StopWatch lastHeartbeatStopwatch = new StopWatch();
 
   private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;
 
@@ -463,8 +463,8 @@ private void throwIfOutOfSync()
    * written.
    */
   private void heartbeatIfNecessary() throws IOException {
-    if (lastHeartbeatStopwatch.elapsedMillis() > HEARTBEAT_INTERVAL_MILLIS ||
-        !lastHeartbeatStopwatch.isRunning()) {
+    if (lastHeartbeatStopwatch.now(TimeUnit.MILLISECONDS)
+        > HEARTBEAT_INTERVAL_MILLIS || !lastHeartbeatStopwatch.isRunning()) {
       try {
         getProxy().heartbeat(createReqInfo());
       } finally {
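The guard above reads the watch in milliseconds and treats a never-started watch (!isRunning()) as "send now", so the very first call always heartbeats (now() returns 0 on a fresh watch). A standalone sketch of the pattern follows; the class, field, and sendHeartbeat() method are hypothetical, and the reset().start() restart after a send is an assumption about what the elided finally block in the diff does.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.util.StopWatch;

    public class HeartbeatThrottleSketch {
      private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;
      private final StopWatch lastHeartbeat = new StopWatch();

      public void heartbeatIfNecessary() {
        // !isRunning() covers the first call, before the watch was ever started.
        if (lastHeartbeat.now(TimeUnit.MILLISECONDS) > HEARTBEAT_INTERVAL_MILLIS
            || !lastHeartbeat.isRunning()) {
          sendHeartbeat();
          lastHeartbeat.reset().start();  // restart the interval clock
        }
      }

      private void sendHeartbeat() {
        System.out.println("heartbeat at " + System.currentTimeMillis());
      }
    }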
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 9be56b822d..7cac5c9bc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -65,11 +65,11 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Range;
 import com.google.common.collect.Ranges;
 import com.google.protobuf.TextFormat;
+import org.apache.hadoop.util.StopWatch;
 
 /**
  * A JournalNode can manage journals for several clusters at once.
@@ -374,15 +374,20 @@ synchronized void journal(RequestInfo reqInfo,
     curSegment.writeRaw(records, 0, records.length);
     curSegment.setReadyToFlush();
-    Stopwatch sw = new Stopwatch();
+    StopWatch sw = new StopWatch();
     sw.start();
     curSegment.flush(shouldFsync);
     sw.stop();
-
-    metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS));
-    if (sw.elapsedTime(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
+
+    long nanoSeconds = sw.now();
+    metrics.addSync(
+        TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS));
+    long milliSeconds = TimeUnit.MILLISECONDS.convert(
+        nanoSeconds, TimeUnit.NANOSECONDS);
+
+    if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) {
       LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
-          " took " + sw.elapsedTime(TimeUnit.MILLISECONDS) + "ms");
+          " took " + milliSeconds + "ms");
     }
 
     if (isLagging) {
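The rewrite above also tightens the measurement: the old code called elapsedTime() twice (two clock reads), while the new code reads sw.now() once and derives both the microsecond metric and the millisecond warning from the same nanosecond value. A sketch of that pattern (illustrative only; the class name, sleep stand-in, and threshold value are assumptions):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.util.StopWatch;

    public class SyncTimingSketch {
      private static final long WARN_SYNC_MILLIS_THRESHOLD = 1000;  // assumed value

      public static void main(String[] args) throws InterruptedException {
        StopWatch sw = new StopWatch().start();
        Thread.sleep(25);  // stand-in for curSegment.flush(shouldFsync)
        sw.stop();

        // One clock read; both derived values are consistent with each other.
        long nanos = sw.now();
        long micros = TimeUnit.MICROSECONDS.convert(nanos, TimeUnit.NANOSECONDS);
        long millis = TimeUnit.MILLISECONDS.convert(nanos, TimeUnit.NANOSECONDS);

        System.out.println("sync took " + micros + "us");
        if (millis > WARN_SYNC_MILLIS_THRESHOLD) {
          System.out.println("WARN: sync took " + millis + "ms");
        }
        // Note: convert() truncates toward zero, e.g. 1,999,999ns -> 1ms,
        // which is acceptable for metrics and threshold checks.
      }
    }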
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
index 92c7672f04..a839d85823 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
@@ -32,12 +32,11 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.metrics2.util.Quantile;
 import org.apache.hadoop.metrics2.util.SampleQuantiles;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
-import com.google.common.base.Stopwatch;
-
 /**
  * This class tests hflushing concurrently from many threads.
  */
@@ -100,10 +99,10 @@ public void run() {
     }
 
     private void doAWrite() throws IOException {
-      Stopwatch sw = new Stopwatch().start();
+      StopWatch sw = new StopWatch().start();
       stm.write(toWrite);
       stm.hflush();
-      long micros = sw.elapsedTime(TimeUnit.MICROSECONDS);
+      long micros = sw.now(TimeUnit.MICROSECONDS);
       quantiles.insert(micros);
     }
   }
@@ -276,12 +275,12 @@ public int run(String args[]) throws Exception {
     int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
         DFSConfigKeys.DFS_REPLICATION_DEFAULT);
 
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
         replication);
     sw.stop();
 
-    System.out.println("Finished in " + sw.elapsedMillis() + "ms");
+    System.out.println("Finished in " + sw.now(TimeUnit.MILLISECONDS) + "ms");
     System.out.println("Latency quantiles (in microseconds):\n" +
         test.quantiles);
     return 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 1bf3add16d..5b5c450be3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -27,6 +27,7 @@
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
@@ -38,8 +39,6 @@
 import org.apache.hadoop.hdfs.qjournal.client.IPCLoggerChannel;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
-import org.apache.hadoop.hdfs.qjournal.server.Journal;
-import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -48,12 +47,12 @@
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StopWatch;
 
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.base.Charsets;
-import com.google.common.base.Stopwatch;
 import com.google.common.primitives.Bytes;
 import com.google.common.primitives.Ints;
@@ -320,11 +319,11 @@ private void doPerfTest(int editsSize, int numEdits) throws Exception {
     ch.setEpoch(1);
     ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
 
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     for (int i = 1; i < numEdits; i++) {
       ch.sendEdits(1L, i, 1, data).get();
     }
-    long time = sw.elapsedMillis();
+    long time = sw.now(TimeUnit.MILLISECONDS);
 
     System.err.println("Wrote " + numEdits + " batches of " + editsSize +
         " bytes in " + time + "ms");
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 3bf34d81ec..976b2e7c22 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -92,6 +92,9 @@ Trunk (Unreleased)
     MAPREDUCE-6013. [post-HADOOP-9902] mapred version is missing (Akira
     AJISAKA via aw)
 
+    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
+    (ozawa)
+
   BUG FIXES
 
     MAPREDUCE-6191. Improve clearing stale state of Java serialization
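Both FileInputFormat changes below follow the same debug-guarded timing pattern: time the whole operation, but only build the log message when debug logging is enabled. A standalone sketch of that pattern (illustrative only; the class name and doListing() helper are hypothetical, using commons-logging as the surrounding code does):

    import java.util.concurrent.TimeUnit;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.util.StopWatch;

    public class TimedListingSketch {
      private static final Log LOG = LogFactory.getLog(TimedListingSketch.class);

      public static int listAndTime() throws InterruptedException {
        StopWatch sw = new StopWatch().start();
        int results = doListing();  // stand-in for the real listing work
        sw.stop();
        if (LOG.isDebugEnabled()) {
          // Only pay for string concatenation when debug logging is on.
          LOG.debug("Time taken to get FileStatuses: "
              + sw.now(TimeUnit.MILLISECONDS));
        }
        return results;
      }

      private static int doListing() throws InterruptedException {
        Thread.sleep(5);
        return 42;
      }
    }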
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index 0ae56717ab..5e45b49e9e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -28,6 +28,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -45,9 +46,9 @@
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.base.Stopwatch;
 import com.google.common.collect.Iterables;
 
 /**
@@ -223,7 +224,7 @@ protected FileStatus[] listStatus(JobConf job) throws IOException {
         org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
         org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
 
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     if (numThreads == 1) {
       List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
       result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
@@ -242,7 +243,8 @@ protected FileStatus[] listStatus(JobConf job) throws IOException {
 
     sw.stop();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+      LOG.debug("Time taken to get FileStatuses: "
+          + sw.now(TimeUnit.MILLISECONDS));
     }
     LOG.info("Total input paths to process : " + result.length);
     return result;
@@ -309,7 +311,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
    * they're too big.*/
   public InputSplit[] getSplits(JobConf job, int numSplits)
       throws IOException {
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     FileStatus[] files = listStatus(job);
 
     // Save the number of input files for metrics/loadgen
@@ -371,7 +373,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits)
     sw.stop();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Total # of splits generated by getSplits: " + splits.size()
-          + ", TimeTaken: " + sw.elapsedMillis());
+          + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
     }
     return splits.toArray(new FileSplit[splits.size()]);
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index 56fb9fcdf1..a3ffe019c8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -22,6 +22,7 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -43,9 +44,9 @@
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.base.Stopwatch;
 import com.google.common.collect.Lists;
 
 /**
@@ -259,7 +260,7 @@ protected List<FileStatus> listStatus(JobContext job
     int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
         DEFAULT_LIST_STATUS_NUM_THREADS);
 
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     if (numThreads == 1) {
       result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
     } else {
@@ -276,7 +277,8 @@ protected List<FileStatus> listStatus(JobContext job
 
     sw.stop();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+      LOG.debug("Time taken to get FileStatuses: "
+          + sw.now(TimeUnit.MILLISECONDS));
     }
     LOG.info("Total input paths to process : " + result.size());
     return result;
@@ -376,7 +378,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
    * @throws IOException
    */
   public List<InputSplit> getSplits(JobContext job) throws IOException {
-    Stopwatch sw = new Stopwatch().start();
+    StopWatch sw = new StopWatch().start();
     long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
     long maxSize = getMaxSplitSize(job);
 
@@ -427,7 +429,7 @@ public List<InputSplit> getSplits(JobContext job) throws IOException {
     sw.stop();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Total # of splits generated by getSplits: " + splits.size()
-          + ", TimeTaken: " + sw.elapsedMillis());
+          + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
     }
     return splits;
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java
index 2d4515f636..3b4c9c004f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java
@@ -18,9 +18,9 @@
 package org.apache.hadoop.mapred.nativetask.kvtest;
 
 import java.io.IOException;
+import java.util.concurrent.TimeUnit;
 import java.util.zip.CRC32;
 
-import com.google.common.base.Stopwatch;
 import com.google.common.primitives.Longs;
 
 import org.apache.commons.logging.Log;
@@ -36,6 +36,7 @@
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.StopWatch;
 
 public class KVJob {
   public static final String INPUTPATH = "nativetask.kvtest.inputfile.path";
@@ -93,9 +94,10 @@ public KVJob(String jobname, Configuration conf,
       final TestInputFile testfile = new TestInputFile(Integer.valueOf(conf.get(
           TestConstants.FILESIZE_KEY, "1000")),
           keyclass.getName(), valueclass.getName(), conf);
-      Stopwatch sw = new Stopwatch().start();
+      StopWatch sw = new StopWatch().start();
       testfile.createSequenceTestFile(inputpath);
-      LOG.info("Created test file " + inputpath + " in " + sw.elapsedMillis() + "ms");
+      LOG.info("Created test file " + inputpath + " in " +
+          sw.now(TimeUnit.MILLISECONDS) + "ms");
     }
     job.setInputFormatClass(SequenceFileInputFormat.class);
     FileInputFormat.addInputPath(job, new Path(inputpath));