diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 6953c3bbc9..0e0fc091cf 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -191,6 +191,7 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
 | `GetImageAvgTime` | Average fsimage download time in milliseconds |
 | `PutImageNumOps` | Total number of fsimage uploads to SecondaryNameNode |
 | `PutImageAvgTime` | Average fsimage upload time in milliseconds |
+| `TotalFileOps` | Total number of file operations performed |
 
 FSNamesystem
 ------------
@@ -314,6 +315,10 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `SendDataPacketBlockedOnNetworkNanosAvgTime` | Average waiting time of sending packets in nanoseconds |
 | `SendDataPacketTransferNanosNumOps` | Total number of sending packets |
 | `SendDataPacketTransferNanosAvgTime` | Average transfer time of sending packets in nanoseconds |
+| `TotalWriteTime` | Total number of milliseconds spent on write operations |
+| `TotalReadTime` | Total number of milliseconds spent on read operations |
+| `RemoteBytesRead` | Number of bytes read by remote clients |
+| `RemoteBytesWritten` | Number of bytes written by remote clients |
 
 yarn context
 ============
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7d9d0eaf9c..5c472a8a0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -663,6 +663,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7772. Document hdfs balancer -exclude/-include option in
     HDFSCommands.html (Xiaoyu Yao via cnauroth)
 
+    HDFS-7773. Additional metrics in HDFS to be accessed via jmx.
+    (Anu Engineer via cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
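The counters documented above are exported through the standard Hadoop metrics system, so they can be spot-checked from a daemon's HTTP `/jmx` servlet. The sketch below is illustrative and not part of this patch: the host name is a placeholder, 50075 is assumed as the default DataNode web port for this release line, and the bean name is matched with a wildcard because `DataNodeActivity` MBean names embed the host and port.

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class JmxMetricsProbe {
  public static void main(String[] args) throws Exception {
    // Assumed host and port; 50075 is the default DataNode web UI port in 2.x.
    URL url = new URL("http://datanode-host:50075/jmx"
        + "?qry=Hadoop:service=DataNode,name=DataNodeActivity*");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      // The servlet returns JSON; the new counters appear as fields such as
      // "TotalWriteTime", "TotalReadTime", "RemoteBytesRead", "RemoteBytesWritten".
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}
```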
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 368d80daff..1db2c7885e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -658,6 +658,7 @@ private int receivePacket() throws IOException {
         replicaInfo.setLastChecksumAndDataLen(offsetInBlock, lastCrc);
 
         datanode.metrics.incrBytesWritten(len);
+        datanode.metrics.incrTotalWriteTime(duration);
 
         manageWriterOsCache(offsetInBlock);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index bb5323af3c..704993acdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -86,6 +86,7 @@
 
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
+import org.apache.hadoop.util.Time;
 
 
 /**
@@ -480,7 +481,7 @@ public void readBlock(final ExtendedBlock block,
       final boolean sendChecksum,
       final CachingStrategy cachingStrategy) throws IOException {
     previousOpClientName = clientName;
-
+    long read = 0;
     OutputStream baseStream = getOutputStream();
     DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
         baseStream, HdfsConstants.SMALL_BUFFER_SIZE));
@@ -515,8 +516,9 @@ public void readBlock(final ExtendedBlock block,
       // send op status
       writeSuccessWithChecksumInfo(blockSender,
           new DataOutputStream(getOutputStream()));
-      long read = blockSender.sendBlock(out, baseStream, null); // send data
-
+      long beginRead = Time.monotonicNow();
+      read = blockSender.sendBlock(out, baseStream, null); // send data
+      long duration = Time.monotonicNow() - beginRead;
       if (blockSender.didSendEntireByteRange()) {
         // If we sent the entire range, then we should expect the client
         // to respond with a Status enum.
@@ -539,6 +541,7 @@ public void readBlock(final ExtendedBlock block,
       }
       datanode.metrics.incrBytesRead((int) read);
       datanode.metrics.incrBlocksRead();
+      datanode.metrics.incrTotalReadTime(duration);
     } catch ( SocketException ignored ) {
       if (LOG.isTraceEnabled()) {
         LOG.trace(dnR + ":Ignoring exception while serving " + block + " to " +
@@ -563,7 +566,7 @@ public void readBlock(final ExtendedBlock block,
 
     //update metrics
     datanode.metrics.addReadBlockOp(elapsed());
-    datanode.metrics.incrReadsFromClient(peer.isLocal());
+    datanode.metrics.incrReadsFromClient(peer.isLocal(), read);
   }
 
   @Override
@@ -590,7 +593,7 @@ public void writeBlock(final ExtendedBlock block,
     final boolean isClient = !isDatanode;
     final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW
         || stage == BlockConstructionStage.TRANSFER_FINALIZED;
-
+    long size = 0;
     // check single target for transfer-RBW/Finalized
     if (isTransfer && targets.length > 0) {
       throw new IOException(stage + " does not support multiple targets "
@@ -796,7 +799,9 @@ public void writeBlock(final ExtendedBlock block,
             + localAddress + " of size " + block.getNumBytes());
       }
 
-
+      if(isClient) {
+        size = block.getNumBytes();
+      }
     } catch (IOException ioe) {
       LOG.info("opWriteBlock " + block + " received exception " + ioe);
       incrDatanodeNetworkErrors();
@@ -813,7 +818,7 @@ public void writeBlock(final ExtendedBlock block,
 
     //update metrics
     datanode.metrics.addWriteBlockOp(elapsed());
-    datanode.metrics.incrWritesFromClient(peer.isLocal());
+    datanode.metrics.incrWritesFromClient(peer.isLocal(), size);
   }
 
   @Override
@@ -993,12 +998,15 @@ public void copyBlock(final ExtendedBlock block,
       // send status first
       writeSuccessWithChecksumInfo(blockSender, reply);
 
-      // send block content to the target
-      long read = blockSender.sendBlock(reply, baseStream,
-          dataXceiverServer.balanceThrottler);
+      long beginRead = Time.monotonicNow();
+      // send block content to the target
+      long read = blockSender.sendBlock(reply, baseStream,
+          dataXceiverServer.balanceThrottler);
+      long duration = Time.monotonicNow() - beginRead;
       datanode.metrics.incrBytesRead((int) read);
       datanode.metrics.incrBlocksRead();
+      datanode.metrics.incrTotalReadTime(duration);
 
       LOG.info("Copied " + block + " to " + peer.getRemoteAddressString());
     } catch (IOException ioe) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index 09ad3da642..0fbc2ee10e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -50,7 +50,11 @@ public class DataNodeMetrics {
 
   @Metric MutableCounterLong bytesWritten;
+  @Metric("Milliseconds spent writing")
+  MutableCounterLong totalWriteTime;
   @Metric MutableCounterLong bytesRead;
+  @Metric("Milliseconds spent reading")
+  MutableCounterLong totalReadTime;
   @Metric MutableCounterLong blocksWritten;
   @Metric MutableCounterLong blocksRead;
   @Metric MutableCounterLong blocksReplicated;
@@ -64,6 +68,10 @@ public class DataNodeMetrics {
   @Metric MutableCounterLong writesFromLocalClient;
   @Metric MutableCounterLong writesFromRemoteClient;
   @Metric MutableCounterLong blocksGetLocalPathInfo;
+  @Metric("Bytes read by remote client")
+  MutableCounterLong remoteBytesRead;
+  @Metric("Bytes written by remote client")
+  MutableCounterLong remoteBytesWritten;
 
   // RamDisk metrics on read/write
   @Metric MutableCounterLong ramDiskBlocksWrite;
@@ -262,6 +270,15 @@ public void incrFsyncCount() {
     fsyncCount.incr();
   }
 
+  public void incrTotalWriteTime(long timeTaken) {
+    totalWriteTime.incr(timeTaken);
+  }
+
+  public void incrTotalReadTime(long timeTaken) {
+    totalReadTime.incr(timeTaken);
+  }
+
+
   public void addPacketAckRoundTripTimeNanos(long latencyNanos) {
     packetAckRoundTripTimeNanos.add(latencyNanos);
     for (MutableQuantiles q : packetAckRoundTripTimeNanosQuantiles) {
@@ -287,12 +304,23 @@ public void shutdown() {
     DefaultMetricsSystem.shutdown();
   }
 
-  public void incrWritesFromClient(boolean local) {
-    (local ? writesFromLocalClient : writesFromRemoteClient).incr();
+  public void incrWritesFromClient(boolean local, long size) {
+    if(local) {
+      writesFromLocalClient.incr();
+    } else {
+      writesFromRemoteClient.incr();
+      remoteBytesWritten.incr(size);
+    }
   }
 
-  public void incrReadsFromClient(boolean local) {
-    (local ? readsFromLocalClient : readsFromRemoteClient).incr();
+  public void incrReadsFromClient(boolean local, long size) {
+
+    if (local) {
+      readsFromLocalClient.incr();
+    } else {
+      readsFromRemoteClient.incr();
+      remoteBytesRead.incr(size);
+    }
   }
 
   public void incrVolumeFailures() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
index 94e845ba3c..31bc16479b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
@@ -77,6 +77,31 @@ public class NameNodeMetrics {
   @Metric("Number of blockReports from individual storages")
   MutableCounterLong storageBlockReportOps;
 
+  @Metric("Number of file system operations")
+  public long totalFileOps(){
+    return
+      getBlockLocations.value() +
+      createFileOps.value() +
+      filesAppended.value() +
+      addBlockOps.value() +
+      getAdditionalDatanodeOps.value() +
+      filesRenamed.value() +
+      filesTruncated.value() +
+      deleteFileOps.value() +
+      getListingOps.value() +
+      fileInfoOps.value() +
+      getLinkTargetOps.value() +
+      createSnapshotOps.value() +
+      deleteSnapshotOps.value() +
+      allowSnapshotOps.value() +
+      disallowSnapshotOps.value() +
+      renameSnapshotOps.value() +
+      listSnapshottableDirOps.value() +
+      createSymlinkOps.value() +
+      snapshotDiffReportOps.value();
+  }
+
   @Metric("Journal transactions") MutableRate transactions;
   @Metric("Journal syncs") MutableRate syncs;
   final MutableQuantiles[] syncsQuantiles;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 0b85d35b0d..8a2bacf90f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.util.Time;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -246,4 +247,48 @@ public void testTimeoutMetric() throws Exception {
       DataNodeFaultInjector.instance = new DataNodeFaultInjector();
     }
   }
+
+  /**
+   * This test ensures that writing causes TotalWriteTime to increment
+   * and reading causes TotalReadTime to increment.
+   * @throws Exception
+   */
+  @Test
+  public void testDataNodeTimeSpend() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    SimulatedFSDataset.setFactory(conf);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      List<DataNode> datanodes = cluster.getDataNodes();
+      assertEquals(1, datanodes.size());
+      DataNode datanode = datanodes.get(0);
+      MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
+      final long LONG_FILE_LEN = 1024 * 1024 * 10;
+
+      long startWriteValue = getLongCounter("TotalWriteTime", rb);
+      long startReadValue = getLongCounter("TotalReadTime", rb);
+
+      for (int x = 0; x < 50; x++) {
+        DFSTestUtil.createFile(fs, new Path("/time.txt." + x),
+            LONG_FILE_LEN, (short) 1, Time.monotonicNow());
+      }
+
+      for (int x = 0; x < 50; x++) {
+        String s = DFSTestUtil.readFile(fs, new Path("/time.txt." + x));
+      }
+
+      MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
+      long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
+      long endReadValue = getLongCounter("TotalReadTime", rbNew);
+
+      assertTrue(endReadValue > startReadValue);
+      assertTrue(endWriteValue > startWriteValue);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 6c378226e6..6771ad8284 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -455,4 +455,24 @@ public void testSyncAndBlockReportMetric() throws Exception {
     assertQuantileGauges("Syncs1s", rb);
     assertQuantileGauges("BlockReport1s", rb);
   }
+
+  /**
+   * Test NN ReadOps Count and WriteOps Count
+   */
+  @Test
+  public void testReadWriteOps() throws Exception {
+    MetricsRecordBuilder rb = getMetrics(NN_METRICS);
+    long startWriteCounter = MetricsAsserts.getLongCounter("TransactionsNumOps",
+        rb);
+    Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");
+
+    // Perform create file operation
+    createFile(file1_Path, 1024 * 1024, (short) 2);
+
+    // Perform read file operation on earlier created file
+    readFile(fs, file1_Path);
+    MetricsRecordBuilder rbNew = getMetrics(NN_METRICS);
+    assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", rbNew) >
+        startWriteCounter);
+  }
 }
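For completeness, the same counters are reachable over the JMX remote API, which is what the CHANGES.txt entry refers to. The following is a minimal sketch under stated assumptions, not part of this patch: the service URL only works if the NameNode JVM was started with `com.sun.management.jmxremote.port` enabled (8004 here is arbitrary), and the `NameNodeActivity` bean, which carries the new `TotalFileOps` value computed in `NameNodeMetrics.totalFileOps()`, is matched by pattern rather than hard-coded.

```java
import java.util.Set;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class TotalFileOpsProbe {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint; requires the NameNode JVM to expose a JMX remote port.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://namenode-host:8004/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      // Match the NameNode activity bean by pattern rather than hard-coding it.
      ObjectName pattern = new ObjectName(
          "Hadoop:service=NameNode,name=NameNodeActivity*");
      Set<ObjectName> names = mbs.queryNames(pattern, null);
      for (ObjectName name : names) {
        // TotalFileOps is computed on read from the individual op counters,
        // as in the NameNodeMetrics change above.
        System.out.println(name + " TotalFileOps = "
            + mbs.getAttribute(name, "TotalFileOps"));
      }
    }
  }
}
```

The same attribute-by-pattern approach works for the DataNode's `DataNodeActivity` bean to read `TotalWriteTime`, `TotalReadTime`, `RemoteBytesRead`, and `RemoteBytesWritten`.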