From 37d65822235fe8285d10232589aba39ededd3be1 Mon Sep 17 00:00:00 2001
From: Ayush Saxena
Date: Fri, 17 Apr 2020 00:11:02 +0530
Subject: [PATCH] HDFS-15266. Add missing DFSOps Statistics in WebHDFS.
 Contributed by Ayush Saxena.

---
 .../hadoop/hdfs/DistributedFileSystem.java    | 13 +++-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    |  8 +++
 .../hdfs/TestDistributedFileSystem.java       | 21 ++++++-
 .../apache/hadoop/hdfs/web/TestWebHDFS.java   | 60 +++++++++++++++++++
 4 files changed, 96 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index d1babe3280..a6d475891b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -813,6 +813,8 @@ public BlockStoragePolicySpi next(final FileSystem fs, final Path p)
   @Override
   public Collection<BlockStoragePolicy> getAllStoragePolicies()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
     return Arrays.asList(dfs.getStoragePolicies());
   }
 
@@ -834,9 +836,7 @@ public long getBytesWithFutureGenerationStamps() throws IOException {
    */
   @Deprecated
   public BlockStoragePolicy[] getStoragePolicies() throws IOException {
-    statistics.incrementReadOps(1);
-    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
-    return dfs.getStoragePolicies();
+    return getAllStoragePolicies().toArray(new BlockStoragePolicy[0]);
   }
 
   /**
@@ -2123,6 +2123,9 @@ public Void next(final FileSystem fs, final Path p)
    */
   public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics
+        .incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
     return dfs.getSnapshottableDirListing();
   }
 
@@ -2295,6 +2298,8 @@ private SnapshotDiffReport getSnapshotDiffReportInternal(
    */
   public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
       final String fromSnapshot, final String toSnapshot) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
     Path absF = fixRelativePart(snapshotDir);
     return new FileSystemLinkResolver<SnapshotDiffReport>() {
       @Override
@@ -3243,6 +3248,8 @@ public ECTopologyVerifierResult getECTopologyResultForPolicies(
    */
   @Override
   public Path getTrashRoot(Path path) {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOT);
     try {
       if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
         return super.getTrashRoot(path);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index d0b10cbbcf..4caa0e91fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -1331,6 +1331,8 @@ public void allowSnapshot(final Path p) throws IOException {
 
   @Override
   public void satisfyStoragePolicy(final Path p) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SATISFY_STORAGE_POLICY);
     final HttpOpParam.Op op = PutOpParam.Op.SATISFYSTORAGEPOLICY;
     new FsPathRunner(op, p).run();
   }
@@ -1420,6 +1422,7 @@ public void renameSnapshot(final Path path, final String snapshotOldName,
   public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
       final String fromSnapshot, final String toSnapshot)
       throws IOException {
+    statistics.incrementReadOps(1);
     storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
     final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTDIFF;
     return new FsPathResponseRunner<SnapshotDiffReport>(op, snapshotDir,
@@ -1434,6 +1437,7 @@ SnapshotDiffReport decodeResponse(Map<?, ?> json) {
 
   public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
       throws IOException {
+    statistics.incrementReadOps(1);
     storageStatistics
         .incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
     final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTTABLEDIRECTORYLIST;
@@ -1995,6 +1999,8 @@ public void setStoragePolicy(Path p, String policyName) throws IOException {
   @Override
   public Collection<BlockStoragePolicy> getAllStoragePolicies()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
     final HttpOpParam.Op op = GetOpParam.Op.GETALLSTORAGEPOLICY;
     return new FsPathResponseRunner<Collection<BlockStoragePolicy>>(op, null) {
       @Override
@@ -2007,6 +2013,8 @@ Collection<BlockStoragePolicy> decodeResponse(Map<?, ?> json)
 
   @Override
   public BlockStoragePolicy getStoragePolicy(Path src) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICY);
     final HttpOpParam.Op op = GetOpParam.Op.GETSTORAGEPOLICY;
     return new FsPathResponseRunner<BlockStoragePolicy>(op, src) {
       @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 94830d753f..6353e194ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -918,6 +918,21 @@ public void testStatistics2() throws IOException, NoSuchAlgorithmException {
       dfs.getEZForPath(dir);
       checkStatistics(dfs, ++readOps, writeOps, 0);
       checkOpStatistics(OpType.GET_ENCRYPTION_ZONE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+      dfs.getSnapshottableDirListing();
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_STORAGE_POLICIES);
+      dfs.getAllStoragePolicies();
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_STORAGE_POLICIES, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_TRASH_ROOT);
+      dfs.getTrashRoot(dir);
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_TRASH_ROOT, opCount + 1);
     }
   }
 
@@ -1058,7 +1073,7 @@ public void run() {
   }
 
   /** Checks statistics. -1 indicates do not check for the operations */
-  private void checkStatistics(FileSystem fs, int readOps, int writeOps,
+  public static void checkStatistics(FileSystem fs, int readOps, int writeOps,
       int largeReadOps) {
     assertEquals(readOps, DFSTestUtil.getStatistics(fs).getReadOps());
     assertEquals(writeOps, DFSTestUtil.getStatistics(fs).getWriteOps());
@@ -1164,12 +1179,12 @@ private void testReadFileSystemStatistics(int expectedDistance,
     }
   }
 
-  private static void checkOpStatistics(OpType op, long count) {
+  public static void checkOpStatistics(OpType op, long count) {
     assertEquals("Op " + op.getSymbol() + " has unexpected count!",
         count, getOpStatistics(op));
   }
 
-  private static long getOpStatistics(OpType op) {
+  public static long getOpStatistics(OpType op) {
     return GlobalStorageStatistics.INSTANCE.get(
         DFSOpsCountStatistics.NAME)
         .getLong(op.getSymbol());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 3ee7fcbd61..69a0e600ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -24,6 +24,9 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkOpStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.getOpStatistics;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.junit.Assert.assertEquals;
@@ -57,6 +60,7 @@
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -2012,6 +2016,62 @@ public void testECPolicyInFileStatus() throws Exception {
         ecpolicyForECfile, ecPolicyName);
   }
 
+  @Test
+  public void testStatistics() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.EXTERNAL.toString());
+    StoragePolicySatisfier sps = new StoragePolicySatisfier(conf);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).storageTypes(
+          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE}})
+          .storagesPerDatanode(2).numDataNodes(1).build();
+      cluster.waitActive();
+      sps.init(new ExternalSPSContext(sps, DFSTestUtil
+          .getNameNodeConnector(conf, HdfsServerConstants.MOVER_ID_PATH, 1,
+              false)));
+      sps.start(StoragePolicySatisfierMode.EXTERNAL);
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
+          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      Path dir = new Path("/test");
+      webHdfs.mkdirs(dir);
+      int readOps = 0;
+      int writeOps = 0;
+      FileSystem.clearStatistics();
+
+      long opCount =
+          getOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICY);
+      webHdfs.getStoragePolicy(dir);
+      checkStatistics(webHdfs, ++readOps, writeOps, 0);
+      checkOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICY,
+          opCount + 1);
+
+      opCount =
+          getOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICIES);
+      webHdfs.getAllStoragePolicies();
+      checkStatistics(webHdfs, ++readOps, writeOps, 0);
+      checkOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICIES,
+          opCount + 1);
+
+      opCount =
+          getOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY);
+      webHdfs.satisfyStoragePolicy(dir);
+      checkStatistics(webHdfs, readOps, ++writeOps, 0);
+      checkOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY,
+          opCount + 1);
+
+      opCount = getOpStatistics(
+          DFSOpsCountStatistics.OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+      webHdfs.getSnapshottableDirectoryList();
+      checkStatistics(webHdfs, ++readOps, writeOps, 0);
+      checkOpStatistics(
+          DFSOpsCountStatistics.OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST,
+          opCount + 1);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Get FileStatus JSONObject from ListStatus response.
    */
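
Note (not part of the patch): the test helpers above read the per-op counters through the global storage statistics registry, and the same registry is reachable from any client code in the JVM, which is what makes these WebHDFS counters useful beyond the tests. Below is a minimal sketch of that read path under stated assumptions: it relies only on GlobalStorageStatistics, DFSOpsCountStatistics, and OpType.getSymbol() as used in the patch, and the class name OpStatsProbe is hypothetical.

import org.apache.hadoop.fs.GlobalStorageStatistics;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;

public class OpStatsProbe {
  public static void main(String[] args) {
    // DFSOpsCountStatistics is registered under DFSOpsCountStatistics.NAME
    // once an HDFS or WebHDFS client has been initialized in this JVM.
    StorageStatistics stats =
        GlobalStorageStatistics.INSTANCE.get(DFSOpsCountStatistics.NAME);
    if (stats == null) {
      System.out.println("No DFS client has run in this JVM yet.");
      return;
    }
    // getLong(...) looks a counter up by the op's symbol, mirroring the
    // getOpStatistics() helper that the patch makes public in
    // TestDistributedFileSystem.
    Long diffCount = stats.getLong(OpType.GET_SNAPSHOT_DIFF.getSymbol());
    System.out.println("getSnapshotDiff ops: " + diffCount);
  }
}

After this change, the WebHDFS counters (e.g. SATISFY_STORAGE_POLICY, GET_STORAGE_POLICY) show up through this path exactly as the DistributedFileSystem ones already did.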