diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 557f335207..09fb493a06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -286,6 +286,7 @@ public enum Operation {
     GETFILELINKSTATUS(HTTP_GET),
     GETSTATUS(HTTP_GET),
     GETECPOLICIES(HTTP_GET),
+    GETECCODECS(HTTP_GET),
     GET_BLOCK_LOCATIONS(HTTP_GET);

     private String httpMethod;
@@ -1786,6 +1787,21 @@ public Collection<ErasureCodingPolicyInfo> getAllErasureCodingPolicies() throws
     return JsonUtilClient.getAllErasureCodingPolicies(json);
   }
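
+  /**
+   * Get a map of all erasure coding codecs and coders supported by
+   * this file system, using the HttpFS GETECCODECS operation.
+   */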
+  public Map<String, String> getAllErasureCodingCodecs() throws IOException {
+    Map<String, String> params = new HashMap<>();
+    params.put(OP_PARAM, Operation.GETECCODECS.toString());
+    Path path = new Path(getUri().toString(), "/");
+    HttpURLConnection conn =
+        getConnection(Operation.GETECCODECS.getMethod(), params, path, false);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return JsonUtilClient.getErasureCodeCodecs(json);
+  }
+
   @VisibleForTesting
   static BlockLocation[] toBlockLocations(JSONObject json) throws IOException {
     ObjectMapper mapper = new ObjectMapper();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index f495a85a23..7261504820 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -65,6 +65,7 @@
 import java.io.OutputStream;
 import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -2369,4 +2370,30 @@ public String execute(FileSystem fs) throws IOException {
       return JsonUtil.toJsonString(ecPolicyInfos.stream().toArray(ErasureCodingPolicyInfo[]::new));
     }
   }
+
+  /**
+   * Executor that performs a FSGetErasureCodingCodecs operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSGetErasureCodingCodecs
+      implements FileSystemAccess.FileSystemExecutor<Map> {
+
+    public FSGetErasureCodingCodecs() {
+    }
+
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      Map<String, Map<String, String>> ecCodecs = new HashMap<>();
+      if (fs instanceof DistributedFileSystem) {
+        DistributedFileSystem dfs = (DistributedFileSystem) fs;
+        ecCodecs.put("ErasureCodingCodecs", dfs.getAllErasureCodingCodecs());
+      } else {
+        throw new UnsupportedOperationException("getErasureCodeCodecs is "
+            + "not supported for HttpFs on " + fs.getClass()
+            + ". Please check your fs.defaultFS configuration");
+      }
+      HttpFSServerWebApp.get().getMetrics().incrOpsECCodecs();
+      return ecCodecs;
+    }
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 3477a6fef6..87f2d4d3da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -131,6 +131,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.GETFILELINKSTATUS, new Class[]{});
     PARAMS_DEF.put(Operation.GETSTATUS, new Class[]{});
     PARAMS_DEF.put(Operation.GETECPOLICIES, new Class[]{});
+    PARAMS_DEF.put(Operation.GETECCODECS, new Class[]{});
     PARAMS_DEF.put(Operation.GET_BLOCK_LOCATIONS,
         new Class[] {OffsetParam.class, LenParam.class});
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 196dc44ec5..c9276aae95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -534,6 +534,14 @@ public InputStream run() throws Exception {
       response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETECCODECS: {
+      FSOperations.FSGetErasureCodingCodecs command =
+          new FSOperations.FSGetErasureCodingCodecs();
+      Map json = fsExecute(user, command);
+      AUDIT_LOG.info("[{}]", path);
+      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     case GET_BLOCK_LOCATIONS: {
       long offset = 0;
       long len = Long.MAX_VALUE;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
index d65208fdbb..6e01c5d279 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
@@ -66,6 +66,7 @@ public class HttpFSServerMetrics {
   private @Metric MutableCounterLong opsCheckAccess;
   private @Metric MutableCounterLong opsStatus;
   private @Metric MutableCounterLong opsAllECPolicies;
+  private @Metric MutableCounterLong opsECCodecs;

   private final MetricsRegistry registry = new MetricsRegistry("httpfsserver");
   private final String name;
@@ -170,4 +171,11 @@ public void incrOpsStatus() {
   public void incrOpsAllECPolicies() {
     opsAllECPolicies.incr();
   }
+
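+  /**
+   * Increments the counter of GETECCODECS operations.
+   */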
+  public void incrOpsECCodecs() {
+    opsECCodecs.incr();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index da3faf1066..0283d1d4ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -1218,9 +1218,9 @@ protected enum Operation {
     FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST,
     GET_SNAPSHOT_LIST, GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY,
     SATISFYSTORAGEPOLICY, GET_SNAPSHOT_DIFF_LISTING, GETFILEBLOCKLOCATIONS,
-    GETFILELINKSTATUS, GETSTATUS, GETECPOLICIES
+    GETFILELINKSTATUS, GETSTATUS, GETECPOLICIES, GETECCODECS
   }
-
+  @SuppressWarnings("methodlength")
   private void operation(Operation op) throws Exception {
     switch (op) {
       case GET:
@@ -1370,6 +1370,9 @@ private void operation(Operation op) throws Exception {
     case GETECPOLICIES:
       testGetAllEEPolicies();
       break;
+    case GETECCODECS:
+      testGetECCodecs();
+      break;
     }
   }

@@ -2149,6 +2152,47 @@ private void testGetAllEEPolicies() throws Exception {
     }
   }

+  private void testGetECCodecs() throws Exception {
+    if (isLocalFS()) {
+      // do not test getAllErasureCodingCodecs for local FS.
+      return;
+    }
+    final Path path = new Path("/foo");
+
+    FileSystem fs = FileSystem.get(path.toUri(), this.getProxiedFSConf());
+    Assert.assertTrue(fs.getClass().getSimpleName()
+        + " is not of type DistributedFileSystem.",
+        fs instanceof DistributedFileSystem);
+
+    DistributedFileSystem dfs =
+        (DistributedFileSystem) FileSystem.get(path.toUri(), this.getProxiedFSConf());
+    FileSystem httpFs = this.getHttpFSFileSystem();
+
+    Map<String, String> dfsErasureCodingCodecs = dfs.getAllErasureCodingCodecs();
+
+    Map<String, String> httpFsErasureCodingCodecs;
+    if (httpFs instanceof HttpFSFileSystem) {
+      HttpFSFileSystem httpFSFileSystem = (HttpFSFileSystem) httpFs;
+      httpFsErasureCodingCodecs = httpFSFileSystem.getAllErasureCodingCodecs();
+    } else if (httpFs instanceof WebHdfsFileSystem) {
+      WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) httpFs;
+      httpFsErasureCodingCodecs = webHdfsFileSystem.getAllErasureCodingCodecs();
+    } else {
+      throw new AssertionError(httpFs.getClass().getSimpleName()
+          + " is not of type HttpFSFileSystem or WebHdfsFileSystem");
+    }
+
+    // Validate that the codecs returned over HttpFS match those from
+    // DistributedFileSystem.
+    Assert.assertEquals(dfsErasureCodingCodecs.size(), httpFsErasureCodingCodecs.size());
+    for (Map.Entry<String, String> entry : dfsErasureCodingCodecs.entrySet()) {
+      String key = entry.getKey();
+      String value = entry.getValue();
+      Assert.assertTrue(httpFsErasureCodingCodecs.containsKey(key));
+      Assert.assertEquals(value, httpFsErasureCodingCodecs.get(key));
+    }
+  }
+
   private void assertHttpFsReportListingWithDfsClient(SnapshotDiffReportListing diffReportListing,
       SnapshotDiffReportListing dfsDiffReportListing) {
     Assert.assertEquals(diffReportListing.getCreateList().size(),