From f6f2a3f1c73266bfedd802eacde60d8b19b81015 Mon Sep 17 00:00:00 2001
From: Brandon Li
Date: Thu, 11 Dec 2014 15:40:45 -0800
Subject: [PATCH] HDFS-7449. Add metrics to NFS gateway. Contributed by Brandon Li

---
 .../hadoop/hdfs/nfs/conf/NfsConfigKeys.java   |   3 +
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java |   3 +-
 .../hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java     | 220 ++++++++++++++++++
 .../hadoop/hdfs/nfs/nfs3/Nfs3Utils.java       |   4 +
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java     |  17 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java  |  49 +++-
 .../apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java |   4 +-
 .../hadoop/hdfs/nfs/nfs3/WriteManager.java    |   6 +-
 .../hdfs/nfs/nfs3/TestNfs3HttpServer.java     |   4 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   2 +
 10 files changed, 297 insertions(+), 15 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 7566791b06..9e4aaf538f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -70,4 +70,7 @@ public class NfsConfigKeys {
   public static final int NFS_HTTPS_PORT_DEFAULT = 50579;
   public static final String NFS_HTTPS_ADDRESS_KEY = "nfs.https.address";
   public static final String NFS_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + NFS_HTTPS_PORT_DEFAULT;
+
+  public static final String NFS_METRICS_PERCENTILES_INTERVALS_KEY = "nfs.metrics.percentiles.intervals";
+  public static final String NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
index 3daf7bb68d..ac9abf8b02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
@@ -42,7 +42,8 @@ public Nfs3(NfsConfiguration conf) throws IOException {
 
   public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket,
       boolean allowInsecurePorts) throws IOException {
-    super(new RpcProgramNfs3(conf, registrationSocket, allowInsecurePorts), conf);
+    super(RpcProgramNfs3.createRpcProgramNfs3(conf, registrationSocket,
+        allowInsecurePorts), conf);
     mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
new file mode 100644
index 0000000000..d36ea732f0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableQuantiles;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+
+/**
+ * This class is for maintaining the various NFS gateway activity statistics
+ * and publishing them through the metrics interfaces.
+ */
+@InterfaceAudience.Private
+@Metrics(about = "Nfs3 metrics", context = "dfs")
+public class Nfs3Metrics {
+  // All mutable rates are in nanoseconds
+  // No metric for nullProcedure;
+  @Metric MutableRate getattr;
+  @Metric MutableRate setattr;
+  @Metric MutableRate lookup;
+  @Metric MutableRate access;
+  @Metric MutableRate readlink;
+  @Metric MutableRate read;
+  final MutableQuantiles[] readNanosQuantiles;
+  @Metric MutableRate write;
+  final MutableQuantiles[] writeNanosQuantiles;
+  @Metric MutableRate create;
+  @Metric MutableRate mkdir;
+  @Metric MutableRate symlink;
+  @Metric MutableRate mknod;
+  @Metric MutableRate remove;
+  @Metric MutableRate rmdir;
+  @Metric MutableRate rename;
+  @Metric MutableRate link;
+  @Metric MutableRate readdir;
+  @Metric MutableRate readdirplus;
+  @Metric MutableRate fsstat;
+  @Metric MutableRate fsinfo;
+  @Metric MutableRate pathconf;
+  @Metric MutableRate commit;
+  final MutableQuantiles[] commitNanosQuantiles;
+
+  @Metric MutableCounterLong bytesWritten;
+  @Metric MutableCounterLong bytesRead;
+
+  final MetricsRegistry registry = new MetricsRegistry("nfs3");
+  final String name;
+  JvmMetrics jvmMetrics = null;
+
+  public Nfs3Metrics(String name, String sessionId, int[] intervals,
+      final JvmMetrics jvmMetrics) {
+    this.name = name;
+    this.jvmMetrics = jvmMetrics;
+    registry.tag(SessionId, sessionId);
+
+    final int len = intervals.length;
+    readNanosQuantiles = new MutableQuantiles[len];
+    writeNanosQuantiles = new MutableQuantiles[len];
+    commitNanosQuantiles = new MutableQuantiles[len];
+
+    for (int i = 0; i < len; i++) {
+      int interval = intervals[i];
+      readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
+          + interval + "s", "Read process in ns", "ops", "latency", interval);
+      writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
+          + interval + "s", "Write process in ns", "ops", "latency", interval);
+      commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
+          + interval + "s", "Commit process in ns", "ops", "latency", interval);
+    }
+  }
+
+  public static Nfs3Metrics create(Configuration conf, String gatewayName) {
+    String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);
+
+    // Percentile measurement is [50th, 75th, 90th, 95th, 99th] by default;
+    // no quantiles are emitted unless intervals (in seconds) are configured
+    int[] intervals = conf
+        .getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
+    return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
+  }
+
+  public String name() {
+    return name;
+  }
+
+  public JvmMetrics getJvmMetrics() {
+    return jvmMetrics;
+  }
+
+  public void incrBytesWritten(long bytes) {
+    bytesWritten.incr(bytes);
+  }
+
+  public void incrBytesRead(long bytes) {
+    bytesRead.incr(bytes);
+  }
+
+  public void addGetattr(long latencyNanos) {
+    getattr.add(latencyNanos);
+  }
+
+  public void addSetattr(long latencyNanos) {
+    setattr.add(latencyNanos);
+  }
+
+  public void addLookup(long latencyNanos) {
+    lookup.add(latencyNanos);
+  }
+
+  public void addAccess(long latencyNanos) {
+    access.add(latencyNanos);
+  }
+
+  public void addReadlink(long latencyNanos) {
+    readlink.add(latencyNanos);
+  }
+
+  public void addRead(long latencyNanos) {
+    read.add(latencyNanos);
+    for (MutableQuantiles q : readNanosQuantiles) {
+      q.add(latencyNanos);
+    }
+  }
+
+  public void addWrite(long latencyNanos) {
+    write.add(latencyNanos);
+    for (MutableQuantiles q : writeNanosQuantiles) {
+      q.add(latencyNanos);
+    }
+  }
+
+  public void addCreate(long latencyNanos) {
+    create.add(latencyNanos);
+  }
+
+  public void addMkdir(long latencyNanos) {
+    mkdir.add(latencyNanos);
+  }
+
+  public void addSymlink(long latencyNanos) {
+    symlink.add(latencyNanos);
+  }
+
+  public void addMknod(long latencyNanos) {
+    mknod.add(latencyNanos);
+  }
+
+  public void addRemove(long latencyNanos) {
+    remove.add(latencyNanos);
+  }
+
+  public void addRmdir(long latencyNanos) {
+    rmdir.add(latencyNanos);
+  }
+
+  public void addRename(long latencyNanos) {
+    rename.add(latencyNanos);
+  }
+
+  public void addLink(long latencyNanos) {
+    link.add(latencyNanos);
+  }
+
+  public void addReaddir(long latencyNanos) {
+    readdir.add(latencyNanos);
+  }
+
+  public void addReaddirplus(long latencyNanos) {
+    readdirplus.add(latencyNanos);
+  }
+
+  public void addFsstat(long latencyNanos) {
+    fsstat.add(latencyNanos);
+  }
+
+  public void addFsinfo(long latencyNanos) {
+    fsinfo.add(latencyNanos);
+  }
+
+  public void addPathconf(long latencyNanos) {
+    pathconf.add(latencyNanos);
+  }
+
+  public void addCommit(long latencyNanos) {
+    commit.add(latencyNanos);
+    for (MutableQuantiles q : commitNanosQuantiles) {
+      q.add(latencyNanos);
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
index 50e83ed4fa..cc17394197 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
@@ -213,4 +213,8 @@ public static byte[] longToByte(long v) {
     data[7] = (byte) (v >>> 0);
     return data;
   }
+
+  public static long getElapsedTime(long startTimeNano) {
+    return System.nanoTime() - startTimeNano;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index b31baf58f5..a06d1c5c02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -129,9 +129,8 @@ static class CommitCtx {
     private final Channel channel;
     private final int xid;
     private final Nfs3FileAttributes preOpAttr;
-
-    // Remember time for debug purpose
-    private final long startTime;
+
+    public final long startTime;
 
     long getOffset() {
       return offset;
@@ -159,7 +158,7 @@ long getStartTime() {
       this.channel = channel;
       this.xid = xid;
       this.preOpAttr = preOpAttr;
-      this.startTime = Time.monotonicNow();
+      this.startTime = System.nanoTime();
     }
 
     @Override
@@ -687,6 +686,8 @@ private void receivedNewWriteInternal(DFSClient dfsClient,
       WccData fileWcc = new WccData(preOpAttr, latestAttr);
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
           fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
+      RpcProgramNfs3.metrics.addWrite(Nfs3Utils
+          .getElapsedTime(writeCtx.startTime));
       Nfs3Utils
           .writeChannel(channel, response.serialize(new XDR(), xid,
               new VerifierNone()), xid);
@@ -1131,14 +1132,16 @@ private void processCommits(long offset) {
 
       COMMIT3Response response = new COMMIT3Response(status, wccData,
           Nfs3Constant.WRITE_COMMIT_VERF);
+      RpcProgramNfs3.metrics.addCommit(Nfs3Utils
+          .getElapsedTime(commit.startTime));
       Nfs3Utils.writeChannelCommit(commit.getChannel(), response
          .serialize(new XDR(), commit.getXid(), new VerifierNone()),
          commit.getXid());
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("FileId: " + latestAttr.getFileId() + " Service time:"
-            + (Time.monotonicNow() - commit.getStartTime())
-            + "ms. Sent response for commit:" + commit);
+            + Nfs3Utils.getElapsedTime(commit.startTime)
+            + "ns. Sent response for commit:" + commit);
       }
       entry = pendingCommits.firstEntry();
     }
@@ -1162,6 +1165,7 @@ private void doSingleWrite(final WriteCtx writeCtx) {
       // The write is not protected by lock. asyncState is used to make sure
       // there is one thread doing write back at any time
       writeCtx.writeData(fos);
+      RpcProgramNfs3.metrics.incrBytesWritten(writeCtx.getCount());
 
       long flushedOffset = getFlushedOffset();
       if (flushedOffset != (offset + count)) {
@@ -1213,6 +1217,7 @@ private void doSingleWrite(final WriteCtx writeCtx) {
       }
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
           fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
+      RpcProgramNfs3.metrics.addWrite(Nfs3Utils.getElapsedTime(writeCtx.startTime));
       Nfs3Utils.writeChannel(channel, response.serialize(
           new XDR(), xid, new VerifierNone()), xid);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index aaac797b53..148d4f7630 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -48,6 +48,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.nfs.AccessPrivilege;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.NfsFileType;
@@ -164,6 +166,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   private final RpcCallCache rpcCallCache;
   private JvmPauseMonitor pauseMonitor;
   private Nfs3HttpServer infoServer = null;
+  static Nfs3Metrics metrics;
 
   public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket,
       boolean allowInsecurePorts) throws IOException {
@@ -209,6 +212,17 @@ public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket
     infoServer = new Nfs3HttpServer(config);
   }
 
+  public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
+      DatagramSocket registrationSocket, boolean allowInsecurePorts)
+      throws IOException {
+    DefaultMetricsSystem.initialize("Nfs3");
+    String displayName = DNS.getDefaultHost("default", "default")
+        + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
+            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
+    metrics = Nfs3Metrics.create(config, displayName);
+    return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
+  }
+
   private void clearDirectory(String writeDumpDir) throws IOException {
     File dumpDir = new File(writeDumpDir);
     if (dumpDir.exists()) {
@@ -225,10 +239,11 @@ private void clearDirectory(String writeDumpDir) throws IOException {
   }
 
   @Override
-  public void startDaemons() {
+  public void startDaemons() {
     if (pauseMonitor == null) {
       pauseMonitor = new JvmPauseMonitor(config);
       pauseMonitor.start();
+      metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
     }
     writeManager.startAsyncDataSerivce();
     try {
@@ -770,6 +785,7 @@ READ3Response read(XDR xdr, SecurityHandler securityHandler,
 
     try {
       readCount = fis.read(offset, readbuffer, 0, count);
+      metrics.incrBytesRead(readCount);
     } catch (IOException e) {
       // TODO: A cleaner way is to throw a new type of exception
       // which requires incompatible changes.
@@ -2049,8 +2065,8 @@ COMMIT3Response commit(XDR xdr, Channel channel, int xid,
         : (request.getOffset() + request.getCount());
 
     // Insert commit as an async request
-    writeManager.handleCommit(dfsClient, handle, commitOffset,
-        channel, xid, preOpAttr);
+    writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid,
+        preOpAttr);
     return null;
   } catch (IOException e) {
     LOG.warn("Exception ", e);
@@ -2132,20 +2148,29 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
         }
       }
     }
-
+
+    // Since write and commit could be async, they use their own startTime and
+    // only record success requests.
+    final long startTime = System.nanoTime();
+
     NFS3Response response = null;
     if (nfsproc3 == NFSPROC3.NULL) {
       response = nullProcedure();
     } else if (nfsproc3 == NFSPROC3.GETATTR) {
       response = getattr(xdr, info);
+      metrics.addGetattr(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.SETATTR) {
       response = setattr(xdr, info);
+      metrics.addSetattr(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.LOOKUP) {
       response = lookup(xdr, info);
+      metrics.addLookup(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.ACCESS) {
       response = access(xdr, info);
+      metrics.addAccess(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READLINK) {
       response = readlink(xdr, info);
+      metrics.addReadlink(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READ) {
       if (LOG.isDebugEnabled()) {
         LOG.debug(Nfs3Utils.READ_RPC_START + xid);
@@ -2154,6 +2179,7 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
       if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) {
         LOG.debug(Nfs3Utils.READ_RPC_END + xid);
       }
+      metrics.addRead(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.WRITE) {
       if (LOG.isDebugEnabled()) {
         LOG.debug(Nfs3Utils.WRITE_RPC_START + xid);
@@ -2162,30 +2188,43 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
       // Write end debug trace is in Nfs3Utils.writeChannel
     } else if (nfsproc3 == NFSPROC3.CREATE) {
       response = create(xdr, info);
+      metrics.addCreate(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.MKDIR) {
       response = mkdir(xdr, info);
+      metrics.addMkdir(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.SYMLINK) {
       response = symlink(xdr, info);
+      metrics.addSymlink(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.MKNOD) {
       response = mknod(xdr, info);
+      metrics.addMknod(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.REMOVE) {
       response = remove(xdr, info);
+      metrics.addRemove(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.RMDIR) {
       response = rmdir(xdr, info);
+      metrics.addRmdir(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.RENAME) {
       response = rename(xdr, info);
+      metrics.addRename(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.LINK) {
       response = link(xdr, info);
+      metrics.addLink(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READDIR) {
       response = readdir(xdr, info);
+      metrics.addReaddir(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.READDIRPLUS) {
       response = readdirplus(xdr, info);
+      metrics.addReaddirplus(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.FSSTAT) {
       response = fsstat(xdr, info);
+      metrics.addFsstat(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.FSINFO) {
       response = fsinfo(xdr, info);
+      metrics.addFsinfo(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.PATHCONF) {
-      response = pathconf(xdr,info);
+      response = pathconf(xdr, info);
+      metrics.addPathconf(Nfs3Utils.getElapsedTime(startTime));
     } else if (nfsproc3 == NFSPROC3.COMMIT) {
       response = commit(xdr, info);
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
index 758fd3998b..82c826fda1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
@@ -84,7 +84,8 @@ public int getOriginalCount() {
   private long dumpFileOffset;
 
   private volatile DataState dataState;
-
+  public final long startTime;
+
   public DataState getDataState() {
     return dataState;
   }
@@ -235,6 +236,7 @@ void setReplied(boolean replied) {
     this.replied = replied;
     this.dataState = dataState;
     raf = null;
+    this.startTime = System.nanoTime();
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
index e71eaa5148..df02e04fb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
@@ -224,6 +224,7 @@ int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle,
       status = Nfs3Status.NFS3_OK;
     } else {
+      // commit request triggered by read won't create pending commit obj
       COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset,
           null, 0, null, true);
       switch (ret) {
@@ -260,6 +261,7 @@ int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle,
 
   void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
       long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
+    long startTime = System.nanoTime();
     int status;
     OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
 
@@ -306,9 +308,9 @@ void handleCommit(DFSClient dfsClient, FileHandle fileHandle,
     WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
     COMMIT3Response response = new COMMIT3Response(status, fileWcc,
         Nfs3Constant.WRITE_COMMIT_VERF);
+    RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(startTime));
     Nfs3Utils.writeChannelCommit(channel,
-        response.serialize(new XDR(), xid, new VerifierNone()),
-        xid);
+        response.serialize(new XDR(), xid, new VerifierNone()), xid);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
index d44e9abe68..46dbd42f4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
@@ -48,6 +48,10 @@ public static void setUp() throws Exception {
         HttpConfig.Policy.HTTP_AND_HTTPS.name());
     conf.set(NfsConfigKeys.NFS_HTTP_ADDRESS_KEY, "localhost:0");
     conf.set(NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY, "localhost:0");
+    // Use ephemeral port in case tests are running in parallel
+    conf.setInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0);
+    conf.setInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0);
+
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);
     base.mkdirs();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e2db1f68f4..5e75424063 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -273,6 +273,8 @@ Release 2.7.0 - UNRELEASED
       (Maysam Yabandeh via wang)
 
     HDFS-7424. Add web UI for NFS gateway (brandonli)
+
+    HDFS-7449. Add metrics to NFS gateway (brandonli)
 
   IMPROVEMENTS
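
Usage sketch (illustrative, not part of the patch): the latency quantiles added here
(readProcessNanos<interval>s, writeProcessNanos<interval>s, commitProcessNanos<interval>s)
stay off until nfs.metrics.percentiles.intervals lists one or more rollover intervals in
seconds, since Configuration.getInts() yields an empty array for the empty default. The
snippet below is a hypothetical, standalone example of how the new key and Nfs3Metrics
class could be exercised; in the gateway itself this wiring happens inside
RpcProgramNfs3.createRpcProgramNfs3().

    import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
    import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
    import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Metrics;

    public class Nfs3MetricsExample {
      public static void main(String[] args) {
        // Hypothetical example; only the key, class, and method names come from the patch.
        NfsConfiguration conf = new NfsConfiguration();
        // Publish 60-second and 10-minute percentile windows for read/write/commit latency.
        conf.set(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY, "60,600");
        // Build and register the metrics source, as the gateway does at startup.
        Nfs3Metrics metrics = Nfs3Metrics.create(conf, "Nfs3Example");
        metrics.addRead(1_000_000L); // record a 1 ms READ, latency is in nanoseconds
        metrics.incrBytesRead(4096);
      }
    }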