HDFS-15266. Add missing DFSOps Statistics in WebHDFS. Contributed by Ayush Saxena.
parent 3481895f8a
commit 37d6582223
@@ -813,6 +813,8 @@ public BlockStoragePolicySpi next(final FileSystem fs, final Path p)
   @Override
   public Collection<BlockStoragePolicy> getAllStoragePolicies()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
     return Arrays.asList(dfs.getStoragePolicies());
   }
 
@@ -834,9 +836,7 @@ public long getBytesWithFutureGenerationStamps() throws IOException {
    */
   @Deprecated
   public BlockStoragePolicy[] getStoragePolicies() throws IOException {
-    statistics.incrementReadOps(1);
-    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
-    return dfs.getStoragePolicies();
+    return getAllStoragePolicies().toArray(new BlockStoragePolicy[0]);
   }
 
   /**
@@ -2123,6 +2123,9 @@ public Void next(final FileSystem fs, final Path p)
    */
   public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics
+        .incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
     return dfs.getSnapshottableDirListing();
   }
 
@@ -2295,6 +2298,8 @@ private SnapshotDiffReport getSnapshotDiffReportInternal(
    */
   public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
       final String fromSnapshot, final String toSnapshot) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
     Path absF = fixRelativePart(snapshotDir);
     return new FileSystemLinkResolver<SnapshotDiffReport>() {
       @Override
@@ -3243,6 +3248,8 @@ public ECTopologyVerifierResult getECTopologyResultForPolicies(
    */
   @Override
   public Path getTrashRoot(Path path) {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_TRASH_ROOT);
     try {
       if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
         return super.getTrashRoot(path);
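The five hunks above appear to be from DistributedFileSystem.java: each instrumented method now bumps both the generic FileSystem read-op counter and the matching per-operation counter in DFSOpsCountStatistics. A minimal sketch of reading those counters back through the filesystem's StorageStatistics (class and variable names here are illustrative, not part of the patch; assumes fs.defaultFS points at an HDFS cluster):

    // Sketch only: reads back one of the per-op counters these hunks feed.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.StorageStatistics;
    import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class OpStatsProbe {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        dfs.getSnapshottableDirListing();  // now counted per-op as well
        StorageStatistics stats = dfs.getStorageStatistics();
        String symbol = OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST.getSymbol();
        System.out.println(symbol + " = " + stats.getLong(symbol));
      }
    }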
@@ -1331,6 +1331,8 @@ public void allowSnapshot(final Path p) throws IOException {
 
   @Override
   public void satisfyStoragePolicy(final Path p) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SATISFY_STORAGE_POLICY);
     final HttpOpParam.Op op = PutOpParam.Op.SATISFYSTORAGEPOLICY;
     new FsPathRunner(op, p).run();
   }
@@ -1420,6 +1422,7 @@ public void renameSnapshot(final Path path, final String snapshotOldName,
 
   public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir,
       final String fromSnapshot, final String toSnapshot) throws IOException {
     statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_SNAPSHOT_DIFF);
     final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTDIFF;
     return new FsPathResponseRunner<SnapshotDiffReport>(op, snapshotDir,
@@ -1434,6 +1437,7 @@ SnapshotDiffReport decodeResponse(Map<?, ?> json) {
 
   public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
       throws IOException {
     statistics.incrementReadOps(1);
+    storageStatistics
+        .incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
     final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTTABLEDIRECTORYLIST;
@@ -1995,6 +1999,8 @@ public void setStoragePolicy(Path p, String policyName) throws IOException {
   @Override
   public Collection<BlockStoragePolicy> getAllStoragePolicies()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICIES);
     final HttpOpParam.Op op = GetOpParam.Op.GETALLSTORAGEPOLICY;
     return new FsPathResponseRunner<Collection<BlockStoragePolicy>>(op, null) {
       @Override
@@ -2007,6 +2013,8 @@ Collection<BlockStoragePolicy> decodeResponse(Map<?, ?> json)
 
   @Override
   public BlockStoragePolicy getStoragePolicy(Path src) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STORAGE_POLICY);
     final HttpOpParam.Op op = GetOpParam.Op.GETSTORAGEPOLICY;
     return new FsPathResponseRunner<BlockStoragePolicy>(op, src) {
       @Override
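The five hunks above mirror the change on the WebHDFS client side (they appear to be from WebHdfsFileSystem.java). Because WebHdfsFileSystem registers its op counts under the same DFSOpsCountStatistics name, the counters surface in the shared global registry. A hedged sketch, assuming a reachable WebHDFS endpoint (the URI and path below are hypothetical):

    // Sketch only: endpoint URI and path are hypothetical, not from the patch.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.GlobalStorageStatistics;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StorageStatistics;
    import org.apache.hadoop.hdfs.DFSOpsCountStatistics;

    public class WebHdfsOpStatsProbe {
      public static void main(String[] args) throws Exception {
        FileSystem webHdfs = FileSystem.get(
            URI.create("webhdfs://localhost:9870"), new Configuration());
        webHdfs.satisfyStoragePolicy(new Path("/test"));  // counted as a write op

        StorageStatistics stats =
            GlobalStorageStatistics.INSTANCE.get(DFSOpsCountStatistics.NAME);
        String symbol =
            DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY.getSymbol();
        System.out.println(symbol + " = " + stats.getLong(symbol));
      }
    }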
@@ -918,6 +918,21 @@ public void testStatistics2() throws IOException, NoSuchAlgorithmException {
       dfs.getEZForPath(dir);
       checkStatistics(dfs, ++readOps, writeOps, 0);
       checkOpStatistics(OpType.GET_ENCRYPTION_ZONE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+      dfs.getSnapshottableDirListing();
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_STORAGE_POLICIES);
+      dfs.getAllStoragePolicies();
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_STORAGE_POLICIES, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_TRASH_ROOT);
+      dfs.getTrashRoot(dir);
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_TRASH_ROOT, opCount + 1);
     }
   }
 
@@ -1058,7 +1073,7 @@ public void run() {
   }
 
   /** Checks statistics. -1 indicates do not check for the operations */
-  private void checkStatistics(FileSystem fs, int readOps, int writeOps,
+  public static void checkStatistics(FileSystem fs, int readOps, int writeOps,
       int largeReadOps) {
     assertEquals(readOps, DFSTestUtil.getStatistics(fs).getReadOps());
     assertEquals(writeOps, DFSTestUtil.getStatistics(fs).getWriteOps());
@@ -1164,12 +1179,12 @@ private void testReadFileSystemStatistics(int expectedDistance,
     }
   }
 
-  private static void checkOpStatistics(OpType op, long count) {
+  public static void checkOpStatistics(OpType op, long count) {
     assertEquals("Op " + op.getSymbol() + " has unexpected count!",
         count, getOpStatistics(op));
   }
 
-  private static long getOpStatistics(OpType op) {
+  public static long getOpStatistics(OpType op) {
     return GlobalStorageStatistics.INSTANCE.get(
         DFSOpsCountStatistics.NAME)
         .getLong(op.getSymbol());
@@ -24,6 +24,9 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkOpStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkStatistics;
+import static org.apache.hadoop.hdfs.TestDistributedFileSystem.getOpStatistics;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.junit.Assert.assertEquals;
@@ -57,6 +60,7 @@
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -2012,6 +2016,62 @@ public void testECPolicyInFileStatus() throws Exception {
         ecpolicyForECfile, ecPolicyName);
   }
 
+  @Test
+  public void testStatistics() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+        StoragePolicySatisfierMode.EXTERNAL.toString());
+    StoragePolicySatisfier sps = new StoragePolicySatisfier(conf);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).storageTypes(
+          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE}})
+          .storagesPerDatanode(2).numDataNodes(1).build();
+      cluster.waitActive();
+      sps.init(new ExternalSPSContext(sps, DFSTestUtil
+          .getNameNodeConnector(conf, HdfsServerConstants.MOVER_ID_PATH, 1,
+              false)));
+      sps.start(StoragePolicySatisfierMode.EXTERNAL);
+      sps.start(StoragePolicySatisfierMode.EXTERNAL);
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
+          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      Path dir = new Path("/test");
+      webHdfs.mkdirs(dir);
+      int readOps = 0;
+      int writeOps = 0;
+      FileSystem.clearStatistics();
+
+      long opCount =
+          getOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICY);
+      webHdfs.getStoragePolicy(dir);
+      checkStatistics(webHdfs, ++readOps, writeOps, 0);
+      checkOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICY,
+          opCount + 1);
+
+      opCount =
+          getOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICIES);
+      webHdfs.getAllStoragePolicies();
+      checkStatistics(webHdfs, ++readOps, writeOps, 0);
+      checkOpStatistics(DFSOpsCountStatistics.OpType.GET_STORAGE_POLICIES,
+          opCount + 1);
+
+      opCount =
+          getOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY);
+      webHdfs.satisfyStoragePolicy(dir);
+      checkStatistics(webHdfs, readOps, ++writeOps, 0);
+      checkOpStatistics(DFSOpsCountStatistics.OpType.SATISFY_STORAGE_POLICY,
+          opCount + 1);
+
+      opCount = getOpStatistics(
+          DFSOpsCountStatistics.OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+      webHdfs.getSnapshottableDirectoryList();
+      checkStatistics(webHdfs, ++readOps, writeOps, 0);
+      checkOpStatistics(
+          DFSOpsCountStatistics.OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST,
+          opCount + 1);
+    } finally {
+      cluster.shutdown();
+    }
+  }
   /**
    * Get FileStatus JSONObject from ListStatus response.
    */
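The test hunks above (from TestDistributedFileSystem.java and TestWebHDFS.java, by all appearances) widen the formerly private helpers to public static so TestWebHDFS can reuse them, then exercise each newly counted operation against both clients. Outside the tests, the same registry the helpers consult can be dumped wholesale; a small sketch, assuming at least one HDFS or WebHDFS filesystem has already been instantiated in the JVM:

    // Sketch only: enumerates every DFSOps counter currently registered.
    import java.util.Iterator;
    import org.apache.hadoop.fs.GlobalStorageStatistics;
    import org.apache.hadoop.fs.StorageStatistics;
    import org.apache.hadoop.hdfs.DFSOpsCountStatistics;

    public class DumpDfsOpCounters {
      public static void main(String[] args) {
        StorageStatistics stats =
            GlobalStorageStatistics.INSTANCE.get(DFSOpsCountStatistics.NAME);
        if (stats == null) {
          return;  // nothing registered yet in this JVM
        }
        Iterator<StorageStatistics.LongStatistic> it = stats.getLongStatistics();
        while (it.hasNext()) {
          StorageStatistics.LongStatistic s = it.next();
          System.out.println(s.getName() + " = " + s.getValue());
        }
      }
    }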