From 988639640024933448f822b48119dc832ebd2af3 Mon Sep 17 00:00:00 2001
From: Tsuyoshi Ozawa
Date: Thu, 8 Jan 2015 14:35:08 +0900
Subject: [PATCH] Revert "Replace use of Guava's Stopwatch with Hadoop's
 StopWatch. (ozawa)" because of a missing JIRA number.

This reverts commit 2eba7eb9aff5f7a1bf63ff1ebbe28d21fd37065b.
---
 .../hadoop-common/CHANGES.txt                 |   3 -
 .../apache/hadoop/util/JvmPauseMonitor.java   |   6 +-
 .../org/apache/hadoop/util/StopWatch.java     | 108 ------------------
 .../hadoop/util/TestChunkedArrayList.java     |  11 +-
 .../apache/hadoop/util/TestDataChecksum.java  |  10 +-
 .../org/apache/hadoop/util/TestStopWatch.java |  62 ----------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   3 -
 .../qjournal/client/IPCLoggerChannel.java     |   8 +-
 .../hadoop/hdfs/qjournal/server/Journal.java  |  17 +--
 .../hadoop/hdfs/TestMultiThreadedHflush.java  |  11 +-
 .../hdfs/qjournal/server/TestJournalNode.java |   9 +-
 hadoop-mapreduce-project/CHANGES.txt          |   3 -
 .../apache/hadoop/mapred/FileInputFormat.java |  12 +-
 .../mapreduce/lib/input/FileInputFormat.java  |  12 +-
 .../mapred/nativetask/kvtest/KVJob.java       |   8 +-
 15 files changed, 49 insertions(+), 234 deletions(-)
 delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
 delete mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3d095e7655..23bd42888b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -146,9 +146,6 @@ Trunk (Unreleased)
     HADOOP-11058. Missing HADOOP_CONF_DIR generates strange results (Masatake
     Iwasaki via aw)
 
-    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
-    (ozawa)
-
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
index 1fe7796451..e8af45e746 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
@@ -22,7 +22,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,6 +30,7 @@
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
@@ -172,7 +172,7 @@ public String toString() {
   private class Monitor implements Runnable {
     @Override
     public void run() {
-      StopWatch sw = new StopWatch();
+      Stopwatch sw = new Stopwatch();
       Map gcTimesBeforeSleep = getGcTimes();
       while (shouldRun) {
         sw.reset().start();
@@ -181,7 +181,7 @@ public void run() {
       } catch (InterruptedException ie) {
         return;
       }
-      long extraSleepTime = sw.now(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
+      long extraSleepTime = sw.elapsedMillis() - SLEEP_INTERVAL_MS;
       Map gcTimesAfterSleep = getGcTimes();
 
       if (extraSleepTime > warnThresholdMs) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
deleted file mode 100644
index b9d0d0b664..0000000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StopWatch.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.util;
-
-import java.io.Closeable;
-import java.util.concurrent.TimeUnit;
-
-/**
- * A simplified StopWatch implementation which can measure times in nanoseconds.
- */
-public class StopWatch implements Closeable {
-  private boolean isStarted;
-  private long startNanos;
-  private long currentElapsedNanos;
-
-  public StopWatch() {
-  }
-
-  /**
-   * The method is used to find out if the StopWatch is started.
-   * @return boolean If the StopWatch is started.
-   */
-  public boolean isRunning() {
-    return isStarted;
-  }
-
-  /**
-   * Start to measure times and make the state of stopwatch running.
-   * @return this instance of StopWatch.
-   */
-  public StopWatch start() {
-    if (isStarted) {
-      throw new IllegalStateException("StopWatch is already running");
-    }
-    isStarted = true;
-    startNanos = System.nanoTime();
-    return this;
-  }
-
-  /**
-   * Stop elapsed time and make the state of stopwatch stop.
-   * @return this instance of StopWatch.
-   */
-  public StopWatch stop() {
-    if (!isStarted) {
-      throw new IllegalStateException("StopWatch is already stopped");
-    }
-    long now = System.nanoTime();
-    isStarted = false;
-    currentElapsedNanos += now - startNanos;
-    return this;
-  }
-
-  /**
-   * Reset elapsed time to zero and make the state of stopwatch stop.
-   * @return this instance of StopWatch.
-   */
-  public StopWatch reset() {
-    currentElapsedNanos = 0;
-    isStarted = false;
-    return this;
-  }
-
-  /**
-   * @return current elapsed time in specified timeunit.
-   */
-  public long now(TimeUnit timeUnit) {
-    return timeUnit.convert(now(), TimeUnit.NANOSECONDS);
-
-  }
-
-  /**
-   * @return current elapsed time in nanosecond.
-   */
-  public long now() {
-    return isStarted ?
-        System.nanoTime() - startNanos + currentElapsedNanos :
-        currentElapsedNanos;
-  }
-
-  @Override
-  public String toString() {
-    return String.valueOf(now());
-  }
-
-  @Override
-  public void close() {
-    if (isStarted) {
-      stop();
-    }
-  }
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
index a007f85c24..f8a2d49c58 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestChunkedArrayList.java
@@ -21,11 +21,12 @@
 
 import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.concurrent.TimeUnit;
 
 import org.junit.Assert;
 import org.junit.Test;
 
+import com.google.common.base.Stopwatch;
+
 public class TestChunkedArrayList {
 
   @Test
@@ -70,24 +71,24 @@ public void testPerformance() {
       System.gc();
       {
         ArrayList arrayList = new ArrayList();
-        StopWatch sw = new StopWatch();
+        Stopwatch sw = new Stopwatch();
         sw.start();
         for (int i = 0; i < numElems; i++) {
           arrayList.add(obj);
         }
-        System.out.println("       ArrayList " + sw.now(TimeUnit.MILLISECONDS));
+        System.out.println("       ArrayList " + sw.elapsedMillis());
       }
 
       // test ChunkedArrayList
       System.gc();
       {
         ChunkedArrayList chunkedList = new ChunkedArrayList();
-        StopWatch sw = new StopWatch();
+        Stopwatch sw = new Stopwatch();
         sw.start();
         for (int i = 0; i < numElems; i++) {
           chunkedList.add(obj);
         }
-        System.out.println("ChunkedArrayList " + sw.now(TimeUnit.MILLISECONDS));
+        System.out.println("ChunkedArrayList " + sw.elapsedMillis());
       }
     }
   }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
index 73fd25a7fa..34fc32aa08 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDataChecksum.java
@@ -21,6 +21,8 @@
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.base.Stopwatch;
+
 import org.apache.hadoop.fs.ChecksumException;
 import org.junit.Test;
 
@@ -145,19 +147,19 @@ public void commonUsagePerfTest() throws Exception {
     Harness h = new Harness(checksum, dataLength, true);
 
     for (int i = 0; i < NUM_RUNS; i++) {
-      StopWatch s = new StopWatch().start();
+      Stopwatch s = new Stopwatch().start();
       // calculate real checksum, make sure it passes
       checksum.calculateChunkedSums(h.dataBuf, h.checksumBuf);
       s.stop();
       System.err.println("Calculate run #" + i + ": " +
-          s.now(TimeUnit.MICROSECONDS) + "us");
+          s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
 
-      s = new StopWatch().start();
+      s = new Stopwatch().start();
       // calculate real checksum, make sure it passes
       checksum.verifyChunkedSums(h.dataBuf, h.checksumBuf, "fake file", 0);
       s.stop();
       System.err.println("Verify run #" + i + ": " +
-          s.now(TimeUnit.MICROSECONDS) + "us");
+          s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
     }
   }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java
deleted file mode 100644
index 6f577b08d2..0000000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStopWatch.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.util;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-public class TestStopWatch {
-
-  @Test
-  public void testStartAndStop() throws Exception {
-    try (StopWatch sw = new StopWatch()) {
-      Assert.assertFalse(sw.isRunning());
-      sw.start();
-      Assert.assertTrue(sw.isRunning());
-      sw.stop();
-      Assert.assertFalse(sw.isRunning());
-    }
-  }
-
-  @Test
-  public void testStopInTryWithResource() throws Exception {
-    try (StopWatch sw = new StopWatch()) {
-      // make sure that no exception is thrown.
-    }
-  }
-
-  @Test
-  public void testExceptions() throws Exception {
-    StopWatch sw = new StopWatch();
-    try {
-      sw.stop();
-    } catch (Exception e) {
-      Assert.assertTrue("IllegalStateException is expected",
-          e instanceof IllegalStateException);
-    }
-    sw.reset();
-    sw.start();
-    try {
-      sw.start();
-    } catch (Exception e) {
-      Assert.assertTrue("IllegalStateException is expected",
-          e instanceof IllegalStateException);
-    }
-  }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9f48c7686b..a8966df15c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -479,9 +479,6 @@ Release 2.7.0 - UNRELEASED
     HDFS-7484. Make FSDirectory#addINode take existing INodes as its
     parameter. (jing9)
 
-    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
-    (ozawa)
-
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
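
Note: the hadoop-common hunks above delete org.apache.hadoop.util.StopWatch together with its test. For readers skimming the revert, here is a minimal sketch of the API being removed, pieced together from the deleted class itself; the class name StopWatchDemo and the Thread.sleep stand-in for timed work are illustrative only, and it only compiles while the deleted class is still on the classpath:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.util.StopWatch;

    public class StopWatchDemo {
      public static void main(String[] args) throws Exception {
        // StopWatch implements Closeable, and close() calls stop() when the
        // watch is still running, so try-with-resources cannot leak a running
        // watch (this is what testStopInTryWithResource above exercised).
        try (StopWatch sw = new StopWatch().start()) {
          Thread.sleep(50); // stand-in for the work being timed
          // now(TimeUnit) may be read while the watch is running; it converts
          // the accumulated nanoseconds to the requested unit.
          System.out.println(sw.now(TimeUnit.MILLISECONDS) + "ms");
        }
      }
    }
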
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 6938f571e5..e37869c797 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -52,10 +52,10 @@
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.util.StopWatch;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
 import com.google.common.net.InetAddresses;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
@@ -143,7 +143,7 @@ public class IPCLoggerChannel implements AsyncLogger {
   /**
    * Stopwatch which starts counting on each heartbeat that is sent
    */
-  private final StopWatch lastHeartbeatStopwatch = new StopWatch();
+  private final Stopwatch lastHeartbeatStopwatch = new Stopwatch();
 
   private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;
 
@@ -463,8 +463,8 @@ private void throwIfOutOfSync()
    * written.
    */
   private void heartbeatIfNecessary() throws IOException {
-    if (lastHeartbeatStopwatch.now(TimeUnit.MILLISECONDS)
-        > HEARTBEAT_INTERVAL_MILLIS || !lastHeartbeatStopwatch.isRunning()) {
+    if (lastHeartbeatStopwatch.elapsedMillis() > HEARTBEAT_INTERVAL_MILLIS ||
+        !lastHeartbeatStopwatch.isRunning()) {
       try {
         getProxy().heartbeat(createReqInfo());
       } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 7cac5c9bc7..9be56b822d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -65,11 +65,11 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Range;
 import com.google.common.collect.Ranges;
 import com.google.protobuf.TextFormat;
-import org.apache.hadoop.util.StopWatch;
 
 /**
  * A JournalNode can manage journals for several clusters at once.
@@ -374,20 +374,15 @@ synchronized void journal(RequestInfo reqInfo,
     curSegment.writeRaw(records, 0, records.length);
     curSegment.setReadyToFlush();
-    StopWatch sw = new StopWatch();
+    Stopwatch sw = new Stopwatch();
     sw.start();
     curSegment.flush(shouldFsync);
     sw.stop();
-
-    long nanoSeconds = sw.now();
-    metrics.addSync(
-        TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS));
-    long milliSeconds = TimeUnit.MILLISECONDS.convert(
-        nanoSeconds, TimeUnit.NANOSECONDS);
-
-    if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) {
+
+    metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS));
+    if (sw.elapsedTime(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
       LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
-               " took " + milliSeconds + "ms");
+               " took " + sw.elapsedTime(TimeUnit.MILLISECONDS) + "ms");
     }
 
     if (isLagging) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
index a839d85823..92c7672f04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
@@ -32,11 +32,12 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.metrics2.util.Quantile;
 import org.apache.hadoop.metrics2.util.SampleQuantiles;
-import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
+import com.google.common.base.Stopwatch;
+
 /**
  * This class tests hflushing concurrently from many threads.
  */
@@ -99,10 +100,10 @@ public void run() {
     }
 
     private void doAWrite() throws IOException {
-      StopWatch sw = new StopWatch().start();
+      Stopwatch sw = new Stopwatch().start();
       stm.write(toWrite);
       stm.hflush();
-      long micros = sw.now(TimeUnit.MICROSECONDS);
+      long micros = sw.elapsedTime(TimeUnit.MICROSECONDS);
       quantiles.insert(micros);
     }
   }
@@ -275,12 +276,12 @@ public int run(String args[]) throws Exception {
     int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
         DFSConfigKeys.DFS_REPLICATION_DEFAULT);
 
-    StopWatch sw = new StopWatch().start();
+    Stopwatch sw = new Stopwatch().start();
     test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
         replication);
     sw.stop();
 
-    System.out.println("Finished in " + sw.now(TimeUnit.MILLISECONDS) + "ms");
+    System.out.println("Finished in " + sw.elapsedMillis() + "ms");
     System.out.println("Latency quantiles (in microseconds):\n" +
         test.quantiles);
     return 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 5b5c450be3..1bf3add16d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -27,7 +27,6 @@
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
@@ -39,6 +38,8 @@
 import org.apache.hadoop.hdfs.qjournal.client.IPCLoggerChannel;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
+import org.apache.hadoop.hdfs.qjournal.server.Journal;
+import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -47,12 +48,12 @@
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.StopWatch;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.base.Charsets;
+import com.google.common.base.Stopwatch;
 import com.google.common.primitives.Bytes;
 import com.google.common.primitives.Ints;
 
@@ -319,11 +320,11 @@ private void doPerfTest(int editsSize, int numEdits) throws Exception {
     ch.setEpoch(1);
     ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
 
-    StopWatch sw = new StopWatch().start();
+    Stopwatch sw = new Stopwatch().start();
     for (int i = 1; i < numEdits; i++) {
       ch.sendEdits(1L, i, 1, data).get();
     }
-    long time = sw.now(TimeUnit.MILLISECONDS);
+    long time = sw.elapsedMillis();
 
     System.err.println("Wrote " + numEdits + " batches of " + editsSize +
         " bytes in " + time + "ms");
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 976b2e7c22..3bf34d81ec 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -92,9 +92,6 @@ Trunk (Unreleased)
     MAPREDUCE-6013. [post-HADOOP-9902] mapred version is missing (Akira
     AJISAKA via aw)
 
-    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
-    (ozawa)
-
   BUG FIXES
 
     MAPREDUCE-6191. Improve clearing stale state of Java serialization
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index 5e45b49e9e..0ae56717ab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -28,7 +28,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -46,9 +45,9 @@
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.base.Stopwatch;
 import com.google.common.collect.Iterables;
 
 /**
@@ -224,7 +223,7 @@ protected FileStatus[] listStatus(JobConf job) throws IOException {
         org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
         org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
 
-    StopWatch sw = new StopWatch().start();
+    Stopwatch sw = new Stopwatch().start();
     if (numThreads == 1) {
       List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
       result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
@@ -243,8 +242,7 @@ protected FileStatus[] listStatus(JobConf job) throws IOException {
 
     sw.stop();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Time taken to get FileStatuses: "
-          + sw.now(TimeUnit.MILLISECONDS));
+      LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
     }
     LOG.info("Total input paths to process : " + result.length);
     return result;
@@ -311,7 +309,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
    *  they're too big.*/
   public InputSplit[] getSplits(JobConf job, int numSplits)
     throws IOException {
-    StopWatch sw = new StopWatch().start();
+    Stopwatch sw = new Stopwatch().start();
     FileStatus[] files = listStatus(job);
 
     // Save the number of input files for metrics/loadgen
@@ -373,7 +371,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits)
     sw.stop();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Total # of splits generated by getSplits: " + splits.size()
-          + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
+          + ", TimeTaken: " + sw.elapsedMillis());
     }
     return splits.toArray(new FileSplit[splits.size()]);
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index a3ffe019c8..56fb9fcdf1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -22,7 +22,6 @@
 import java.util.ArrayList;
 import java.util.List;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -44,9 +43,9 @@
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.base.Stopwatch;
 import com.google.common.collect.Lists;
 
 /**
@@ -260,7 +259,7 @@ protected List<FileStatus> listStatus(JobContext job
     int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
         DEFAULT_LIST_STATUS_NUM_THREADS);
 
-    StopWatch sw = new StopWatch().start();
+    Stopwatch sw = new Stopwatch().start();
     if (numThreads == 1) {
       result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
     } else {
@@ -277,8 +276,7 @@ protected List<FileStatus> listStatus(JobContext job
 
     sw.stop();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Time taken to get FileStatuses: "
-          + sw.now(TimeUnit.MILLISECONDS));
+      LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
     }
     LOG.info("Total input paths to process : " + result.size());
     return result;
@@ -378,7 +376,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
    * @throws IOException
    */
   public List<InputSplit> getSplits(JobContext job) throws IOException {
-    StopWatch sw = new StopWatch().start();
+    Stopwatch sw = new Stopwatch().start();
     long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
     long maxSize = getMaxSplitSize(job);
 
@@ -429,7 +427,7 @@ public List<InputSplit> getSplits(JobContext job) throws IOException {
     sw.stop();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Total # of splits generated by getSplits: " + splits.size()
-          + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
+          + ", TimeTaken: " + sw.elapsedMillis());
     }
     return splits;
   }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java
index 3b4c9c004f..2d4515f636 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/kvtest/KVJob.java
@@ -18,9 +18,9 @@
 package org.apache.hadoop.mapred.nativetask.kvtest;
 
 import java.io.IOException;
-import java.util.concurrent.TimeUnit;
 import java.util.zip.CRC32;
 
+import com.google.common.base.Stopwatch;
 import com.google.common.primitives.Longs;
 
 import org.apache.commons.logging.Log;
@@ -36,7 +36,6 @@
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.util.StopWatch;
 
 public class KVJob {
   public static final String INPUTPATH = "nativetask.kvtest.inputfile.path";
@@ -94,10 +93,9 @@ public KVJob(String jobname, Configuration conf,
       final TestInputFile testfile = new TestInputFile(Integer.valueOf(conf.get(
           TestConstants.FILESIZE_KEY, "1000")),
          keyclass.getName(), valueclass.getName(), conf);
-      StopWatch sw = new StopWatch().start();
+      Stopwatch sw = new Stopwatch().start();
       testfile.createSequenceTestFile(inputpath);
-      LOG.info("Created test file " + inputpath + " in "
-          + sw.now(TimeUnit.MILLISECONDS) + "ms");
+      LOG.info("Created test file " + inputpath + " in " + sw.elapsedMillis() + "ms");
     }
     job.setInputFormatClass(SequenceFileInputFormat.class);
     FileInputFormat.addInputPath(job, new Path(inputpath));
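
For contrast, here is a minimal sketch of the Guava Stopwatch usage this revert restores across the files above. It assumes a Guava release old enough (Hadoop depended on Guava 11.x at the time) that the public constructor, elapsedMillis(), and elapsedTime(TimeUnit) still exist; later Guava releases deprecate and then remove all three, which is presumably why HADOOP-11032 introduced Hadoop's own StopWatch in the first place. The class name GuavaStopwatchDemo and the Thread.sleep stand-in are illustrative only:

    import java.util.concurrent.TimeUnit;

    import com.google.common.base.Stopwatch;

    public class GuavaStopwatchDemo {
      public static void main(String[] args) throws Exception {
        Stopwatch sw = new Stopwatch().start();
        Thread.sleep(50);   // stand-in for the work being timed
        sw.stop();          // Journal#journal also stops before reading
        System.out.println(sw.elapsedMillis() + "ms");
        System.out.println(sw.elapsedTime(TimeUnit.MICROSECONDS) + "us");
        sw.reset().start(); // reuse pattern, as in JvmPauseMonitor's loop
      }
    }
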