HDFS-6236. ImageServlet should use Time#monotonicNow to measure latency. Contributed by Chris Nauroth.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1586902 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in: parent 9274626a11, commit 193f11a7ab
@@ -387,6 +387,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6232. OfflineEditsViewer throws a NPE on edits containing ACL
     modifications (ajisakaa via cmccabe)

+    HDFS-6236. ImageServlet should use Time#monotonicNow to measure latency.
+    (cnauroth)
+
 Release 2.4.0 - 2014-04-07

   INCOMPATIBLE CHANGES
|
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

-import static org.apache.hadoop.util.Time.now;
+import static org.apache.hadoop.util.Time.monotonicNow;

 import java.net.HttpURLConnection;
 import java.security.PrivilegedExceptionAction;
@@ -116,11 +116,11 @@ public Void run() throws Exception {
           throw new IOException(errorMessage);
         }
         CheckpointFaultInjector.getInstance().beforeGetImageSetsHeaders();
-        long start = now();
+        long start = monotonicNow();
         serveFile(imageFile);

         if (metrics != null) { // Metrics non-null only when used inside name node
-          long elapsed = now() - start;
+          long elapsed = monotonicNow() - start;
           metrics.addGetImage(elapsed);
         }
       } else if (parsedParams.isGetEdit()) {
@@ -129,11 +129,11 @@ public Void run() throws Exception {

         File editFile = nnImage.getStorage()
             .findFinalizedEditsFile(startTxId, endTxId);
-        long start = now();
+        long start = monotonicNow();
         serveFile(editFile);

         if (metrics != null) { // Metrics non-null only when used inside name node
-          long elapsed = now() - start;
+          long elapsed = monotonicNow() - start;
           metrics.addGetEdit(elapsed);
         }
       }
@@ -469,7 +469,7 @@ public Void run() throws Exception {

       InputStream stream = request.getInputStream();
       try {
-        long start = now();
+        long start = monotonicNow();
         MD5Hash downloadImageDigest = TransferFsImage
             .handleUploadImageRequest(request, txid,
                 nnImage.getStorage(), stream,
@@ -478,7 +478,7 @@ public Void run() throws Exception {
             downloadImageDigest);
         // Metrics non-null only when used inside name node
         if (metrics != null) {
-          long elapsed = now() - start;
+          long elapsed = monotonicNow() - start;
           metrics.addPutImage(elapsed);
         }
         // Now that we have a new checkpoint, we might be able to
|
@@ -1897,7 +1897,12 @@ public void testReformatNNBetweenCheckpoints() throws IOException {
        .format(true).build();
     int origPort = cluster.getNameNodePort();
     int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
-    secondary = startSecondaryNameNode(conf);
+    Configuration snnConf = new Configuration(conf);
+    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
+        "namesecondary");
+    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+        checkpointDir.getAbsolutePath());
+    secondary = startSecondaryNameNode(snnConf);

     // secondary checkpoints once
     secondary.doCheckpoint();
Loading…
Reference in New Issue
Block a user