HDFS-15266. Add missing DFSOps Statistics in WebHDFS. Contributed by Ayush Saxena.

commit 5187bd37ae (parent 2b207ea402)
Author: Ayush Saxena, 2020-04-17 00:11:02 +05:30
Committer: Wei-Chiu Chuang
(cherry picked from commit 37d6582223)

4 changed files with 96 additions and 6 deletions
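The patch closes gaps in the HDFS client's per-operation metrics: several DistributedFileSystem and WebHdfsFileSystem calls were not counted in DFSOpsCountStatistics. As a reading aid, here is a minimal sketch (not part of the diff; it uses only the public names that appear in the hunks below) of how those counters are read back through the global registry, which is exactly what the tests' getOpStatistics helper does:

// A sketch, not part of the commit: read one per-op counter back.
import org.apache.hadoop.fs.GlobalStorageStatistics;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;

public class OpCountProbe {
  /** Returns the global count for one DFS op, or 0 if none recorded yet. */
  public static long opCount(OpType op) {
    // DFSOpsCountStatistics is registered under DFSOpsCountStatistics.NAME
    // when an HDFS client file system is initialized.
    StorageStatistics stats =
        GlobalStorageStatistics.INSTANCE.get(DFSOpsCountStatistics.NAME);
    if (stats == null) {
      return 0;
    }
    Long value = stats.getLong(op.getSymbol());
    return value == null ? 0 : value;
  }
}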

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -819,6 +819,8 @@ public BlockStoragePolicySpi next(final FileSystem fs, final Path p)
   @Override
   public Collection<BlockStoragePolicy> getAllStoragePolicies()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
     return Arrays.asList(dfs.getStoragePolicies());
   }
@@ -840,9 +842,7 @@ public long getBytesWithFutureGenerationStamps() throws IOException {
    */
   @Deprecated
   public BlockStoragePolicy[] getStoragePolicies() throws IOException {
-    statistics.incrementReadOps(1);
-    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
-    return dfs.getStoragePolicies();
+    return getAllStoragePolicies().toArray(new BlockStoragePolicy[0]);
   }
 
   /**
@@ -2162,6 +2162,9 @@ public Void next(final FileSystem fs, final Path p)
    */
   public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics
+        .incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
     return dfs.getSnapshottableDirListing();
   }
@@ -2334,6 +2337,8 @@ private SnapshotDiffReport getSnapshotDiffReportInternal(
    */
   public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
       final String fromSnapshot, final String toSnapshot) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
     Path absF = fixRelativePart(snapshotDir);
     return new FileSystemLinkResolver<SnapshotDiffReport>() {
       @Override
@@ -3282,6 +3287,8 @@ public ECTopologyVerifierResult getECTopologyResultForPolicies(
    */
   @Override
   public Path getTrashRoot(Path path) {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOT);
     try {
       if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
         return super.getTrashRoot(path);
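Every hunk above applies the same idiom: bump the aggregate FileSystem.Statistics read counter and the operation's own DFSOpsCountStatistics counter before delegating to the DFSClient. The deprecated getStoragePolicies() now funnels through getAllStoragePolicies(), so a call is counted exactly once whichever entry point is used. A hypothetical caller-side sketch (the path is illustrative, and the default file system is assumed to be HDFS) of what becomes observable:

// Hypothetical usage sketch; assumes fs.defaultFS points at an HDFS
// cluster, so fs is a DistributedFileSystem underneath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TrashRootProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Before this patch getTrashRoot() updated neither counter on
    // DistributedFileSystem; now one call is one read op plus one
    // GET_TRASH_ROOT op.
    Path trash = fs.getTrashRoot(new Path("/user/alice"));
    System.out.println("trash root: " + trash);
    System.out.println("aggregate read ops: "
        + FileSystem.getStatistics(fs.getUri().getScheme(), fs.getClass())
            .getReadOps());
  }
}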

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -1341,6 +1341,8 @@ public void allowSnapshot(final Path p) throws IOException {
   @Override
   public void satisfyStoragePolicy(final Path p) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SATISFY_STORAGE_POLICY);
     final HttpOpParam.Op op = PutOpParam.Op.SATISFYSTORAGEPOLICY;
     new FsPathRunner(op, p).run();
   }
@@ -1430,6 +1432,7 @@ public void renameSnapshot(final Path path, final String snapshotOldName,
   public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
       final String fromSnapshot, final String toSnapshot) throws IOException {
     statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
     final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTDIFF;
     return new FsPathResponseRunner<SnapshotDiffReport>(op, snapshotDir,
@@ -1444,6 +1447,7 @@ SnapshotDiffReport decodeResponse(Map<?, ?> json) {
   public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
       throws IOException {
     statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
     final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTTABLEDIRECTORYLIST;
@@ -2005,6 +2009,8 @@ public void setStoragePolicy(Path p, String policyName) throws IOException {
   @Override
   public Collection<BlockStoragePolicy> getAllStoragePolicies()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
     final HttpOpParam.Op op = GetOpParam.Op.GETALLSTORAGEPOLICY;
     return new FsPathResponseRunner<Collection<BlockStoragePolicy>>(op, null) {
       @Override
@@ -2017,6 +2023,8 @@ Collection<BlockStoragePolicy> decodeResponse(Map<?, ?> json)
   @Override
   public BlockStoragePolicy getStoragePolicy(Path src) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICY);
     final HttpOpParam.Op op = GetOpParam.Op.GETSTORAGEPOLICY;
     return new FsPathResponseRunner<BlockStoragePolicy>(op, src) {
       @Override
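The WebHDFS hunks mirror the RPC side: the same OpType keys move whether the namenode is reached over hdfs:// or webhdfs://, so anything keyed on DFSOpsCountStatistics sees a uniform view of both clients. A sketch of that parity (the endpoint host and port are placeholders, not from the patch):

// Sketch with a hypothetical endpoint; nothing here is from the patch.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class WebHdfsParity {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The WebHDFS client is selected by the webhdfs:// scheme; replace
    // namenode:9870 with a real HTTP namenode address.
    FileSystem webFs =
        FileSystem.get(URI.create("webhdfs://namenode:9870"), conf);
    // After this patch the call below bumps GET_STORAGE_POLICIES just as
    // the RPC-based DistributedFileSystem does.
    webFs.getAllStoragePolicies();
  }
}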

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -920,6 +920,21 @@ public void testStatistics2() throws IOException, NoSuchAlgorithmException {
       dfs.getEZForPath(dir);
       checkStatistics(dfs, ++readOps, writeOps, 0);
       checkOpStatistics(OpType.GET_ENCRYPTION_ZONE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+      dfs.getSnapshottableDirListing();
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_STORAGE_POLICIES);
+      dfs.getAllStoragePolicies();
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_STORAGE_POLICIES, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_TRASH_ROOT);
+      dfs.getTrashRoot(dir);
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_TRASH_ROOT, opCount + 1);
     }
   }
@@ -1060,7 +1075,7 @@ public void run() {
   }
 
   /** Checks statistics. -1 indicates do not check for the operations */
-  private void checkStatistics(FileSystem fs, int readOps, int writeOps,
+  public static void checkStatistics(FileSystem fs, int readOps, int writeOps,
       int largeReadOps) {
     assertEquals(readOps, DFSTestUtil.getStatistics(fs).getReadOps());
     assertEquals(writeOps, DFSTestUtil.getStatistics(fs).getWriteOps());
@@ -1166,12 +1181,12 @@ private void testReadFileSystemStatistics(int expectedDistance,
     }
   }
 
-  private static void checkOpStatistics(OpType op, long count) {
+  public static void checkOpStatistics(OpType op, long count) {
     assertEquals("Op " + op.getSymbol() + " has unexpected count!",
         count, getOpStatistics(op));
   }
 
-  private static long getOpStatistics(OpType op) {
+  public static long getOpStatistics(OpType op) {
     return GlobalStorageStatistics.INSTANCE.get(
         DFSOpsCountStatistics.NAME)
         .getLong(op.getSymbol());
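The only change to this class is visibility: the three helpers become public static so that TestWebHDFS (next file) can share them through static imports instead of duplicating the assertions. Roughly what a reusing call site looks like (a sketch; the wrapper class is hypothetical, the static imports are the ones the next file actually adds):

// Sketch of the reuse this enables; the wrapper class is hypothetical.
import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkOpStatistics;
import static org.apache.hadoop.hdfs.TestDistributedFileSystem.getOpStatistics;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;

public final class OpCountAssertions {
  /** Asserts that one getTrashRoot() call bumps GET_TRASH_ROOT by one. */
  public static void assertTrashRootCounted(FileSystem fs, Path p) {
    long before = getOpStatistics(OpType.GET_TRASH_ROOT);
    fs.getTrashRoot(p);
    checkOpStatistics(OpType.GET_TRASH_ROOT, before + 1);
  }
}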

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -24,6 +24,9 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkOpStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.getOpStatistics;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.junit.Assert.assertEquals;
@@ -58,6 +61,7 @@
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -2013,6 +2017,62 @@ public void testECPolicyInFileStatus() throws Exception {
         ecpolicyForECfile, ecPolicyName);
   }
 
+  @Test
+  public void testStatistics() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.EXTERNAL.toString());
+    StoragePolicySatisfier sps = new StoragePolicySatisfier(conf);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).storageTypes(
+          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE}})
+          .storagesPerDatanode(2).numDataNodes(1).build();
+      cluster.waitActive();
+      sps.init(new ExternalSPSContext(sps, DFSTestUtil
+          .getNameNodeConnector(conf, HdfsServerConstants.MOVER_ID_PATH, 1,
+              false)));
+      sps.start(StoragePolicySatisfierMode.EXTERNAL);
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
+          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      Path dir = new Path("/test");
+      webHdfs.mkdirs(dir);
+      int readOps = 0;
+      int writeOps = 0;
+      FileSystem.clearStatistics();
+
+      long opCount =
+          getOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICY);
+      webHdfs.getStoragePolicy(dir);
+      checkStatistics(webHdfs, ++readOps, writeOps, 0);
+      checkOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICY,
+          opCount + 1);
+
+      opCount =
+          getOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICIES);
+      webHdfs.getAllStoragePolicies();
+      checkStatistics(webHdfs, ++readOps, writeOps, 0);
+      checkOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICIES,
+          opCount + 1);
+
+      opCount =
+          getOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY);
+      webHdfs.satisfyStoragePolicy(dir);
+      checkStatistics(webHdfs, readOps, ++writeOps, 0);
+      checkOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY,
+          opCount + 1);
+
+      opCount = getOpStatistics(
+          DFSOpsCountStatistics.OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+      webHdfs.getSnapshottableDirectoryList();
+      checkStatistics(webHdfs, ++readOps, writeOps, 0);
+      checkOpStatistics(
+          DFSOpsCountStatistics.OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST,
+          opCount + 1);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Get FileStatus JSONObject from ListStatus response.
    */
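The new test exercises each instrumented WebHDFS call once against a MiniDFSCluster, pairing read-only calls with ++readOps and the mutating satisfyStoragePolicy with ++writeOps after a FileSystem.clearStatistics() baseline. Condensed into a single helper, the assertion discipline looks like this (a sketch; assumes webHdfs, the running totals, and the statically imported helpers from TestDistributedFileSystem are in scope):

// Condensed sketch of the test's counting discipline (hypothetical helper;
// webHdfs, dir and the running totals come from the surrounding test).
private void verifySatisfyCounted(WebHdfsFileSystem webHdfs, Path dir,
    int readOps, int writeOps) throws IOException {
  long opCount =
      getOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY);
  webHdfs.satisfyStoragePolicy(dir);
  // A mutating call advances the write-op aggregate, not the read-op one,
  // which is why the test writes ++writeOps here but ++readOps elsewhere.
  checkStatistics(webHdfs, readOps, writeOps + 1, 0);
  checkOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY,
      opCount + 1);
}

To run the test in isolation, a typical Surefire invocation such as mvn -Dtest=TestWebHDFS#testStatistics test from the hadoop-hdfs module should pick it up; adjust to your build setup.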