HDFS-17043. HttpFS implementation for getAllErasureCodingPolicies (#5734). Contributed by Hualong Zhang.

Reviewed-by: Shilun Fan <slfan1989@apache.org>
Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
zhtttylz 2023-06-17 01:06:47 +08:00 committed by GitHub
parent 427366b73b
commit 9a7d1b49e2
6 changed files with 94 additions and 2 deletions


@@ -23,6 +23,7 @@
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.MapType;
@@ -284,6 +285,7 @@ public enum Operation {
HTTP_POST), SATISFYSTORAGEPOLICY(HTTP_PUT), GETSNAPSHOTDIFFLISTING(HTTP_GET),
GETFILELINKSTATUS(HTTP_GET),
GETSTATUS(HTTP_GET),
GETECPOLICIES(HTTP_GET),
GET_BLOCK_LOCATIONS(HTTP_GET);
private String httpMethod;
@@ -1773,6 +1775,17 @@ public FsStatus getStatus(final Path path) throws IOException {
return JsonUtilClient.toFsStatus(json);
}
public Collection<ErasureCodingPolicyInfo> getAllErasureCodingPolicies() throws IOException {
Map<String, String> params = new HashMap<>();
params.put(OP_PARAM, Operation.GETECPOLICIES.toString());
Path path = new Path(getUri().toString(), "/");
HttpURLConnection conn =
getConnection(Operation.GETECPOLICIES.getMethod(), params, path, false);
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
return JsonUtilClient.getAllErasureCodingPolicies(json);
}
@VisibleForTesting
static BlockLocation[] toBlockLocations(JSONObject json) throws IOException {
ObjectMapper mapper = new ObjectMapper();
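A minimal client-side usage sketch for the new getAllErasureCodingPolicies method (not part of the commit). It assumes a placeholder HttpFS endpoint at webhdfs://httpfs-host:14000 with pseudo authentication, and routes the webhdfs:// scheme to HttpFSFileSystem via fs.webhdfs.impl, the same wiring the HttpFS tests use:

import java.net.URI;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;

public class ListEcPoliciesViaHttpFS {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Route the webhdfs:// scheme to the HttpFS client implementation.
    conf.set("fs.webhdfs.impl", HttpFSFileSystem.class.getName());
    // "httpfs-host:14000" is a placeholder HttpFS gateway address.
    try (FileSystem fs = FileSystem.get(new URI("webhdfs://httpfs-host:14000"), conf)) {
      Collection<ErasureCodingPolicyInfo> policies =
          ((HttpFSFileSystem) fs).getAllErasureCodingPolicies();
      for (ErasureCodingPolicyInfo info : policies) {
        System.out.println(info.getPolicy().getName() + " -> " + info.getState());
      }
    }
  }
}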


@@ -44,6 +44,7 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -2342,4 +2343,30 @@ public Map execute(FileSystem fs) throws IOException {
return toJson(fsStatus);
}
}
/**
* Executor that performs a FSGetErasureCodingPolicies operation.
*/
@InterfaceAudience.Private
public static class FSGetErasureCodingPolicies
implements FileSystemAccess.FileSystemExecutor<String> {
public FSGetErasureCodingPolicies() {
}
@Override
public String execute(FileSystem fs) throws IOException {
Collection<ErasureCodingPolicyInfo> ecPolicyInfos = null;
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem dfs = (DistributedFileSystem) fs;
ecPolicyInfos = dfs.getAllErasureCodingPolicies();
} else {
throw new UnsupportedOperationException("getErasureCodingPolicies is " +
"not supported for HttpFs on " + fs.getClass() +
". Please check your fs.defaultFS configuration");
}
HttpFSServerWebApp.get().getMetrics().incrOpsAllECPolicies();
return JsonUtil.toJsonString(ecPolicyInfos.stream().toArray(ErasureCodingPolicyInfo[]::new));
}
}
}
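The server-side change follows HttpFS's existing FileSystemExecutor pattern: each REST operation is a small command object whose execute(FileSystem) runs against the FileSystem that HttpFS opens for the proxied user, and HttpFSServer invokes it via fsExecute(user, command). A hedged illustration of the same template for a hypothetical operation (FSGetDefaultBlockSize is not part of HttpFS):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.service.FileSystemAccess;

public class FSGetDefaultBlockSize
    implements FileSystemAccess.FileSystemExecutor<Long> {
  private final Path path;

  public FSGetDefaultBlockSize(String path) {
    this.path = new Path(path);
  }

  @Override
  public Long execute(FileSystem fs) throws IOException {
    // Runs with the FileSystem instance HttpFS opened on behalf of the requesting user.
    return fs.getDefaultBlockSize(path);
  }
}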


@@ -130,6 +130,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
PARAMS_DEF.put(Operation.SATISFYSTORAGEPOLICY, new Class[] {});
PARAMS_DEF.put(Operation.GETFILELINKSTATUS, new Class[]{});
PARAMS_DEF.put(Operation.GETSTATUS, new Class[]{});
PARAMS_DEF.put(Operation.GETECPOLICIES, new Class[]{});
PARAMS_DEF.put(Operation.GET_BLOCK_LOCATIONS, new Class[] {OffsetParam.class, LenParam.class});
}
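GETECPOLICIES registers no operation-specific parameters, so a raw request only needs the op query parameter plus authentication (user.name for pseudo auth). A rough sketch against a placeholder endpoint, not taken from the commit:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class GetEcPoliciesRawRequest {
  public static void main(String[] args) throws Exception {
    // Placeholder host/port and user; adjust for a real HttpFS deployment.
    URL url = new URL(
        "http://httpfs-host:14000/webhdfs/v1/?op=GETECPOLICIES&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    System.out.println("HTTP " + conn.getResponseCode());
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      for (String line; (line = reader.readLine()) != null; ) {
        System.out.println(line);  // JSON body with the erasure coding policies
      }
    }
  }
}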


@@ -526,6 +526,14 @@ public InputStream run() throws Exception {
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETECPOLICIES: {
FSOperations.FSGetErasureCodingPolicies command =
new FSOperations.FSGetErasureCodingPolicies();
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GET_BLOCK_LOCATIONS: {
long offset = 0;
long len = Long.MAX_VALUE;


@@ -65,6 +65,7 @@ public class HttpFSServerMetrics {
private @Metric MutableCounterLong opsStat;
private @Metric MutableCounterLong opsCheckAccess;
private @Metric MutableCounterLong opsStatus;
private @Metric MutableCounterLong opsAllECPolicies;
private final MetricsRegistry registry = new MetricsRegistry("httpfsserver");
private final String name;
@@ -165,4 +166,8 @@ public long getOpsStat() {
public void incrOpsStatus() {
opsStatus.incr();
}
public void incrOpsAllECPolicies() {
opsAllECPolicies.incr();
}
}
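opsAllECPolicies is a standard @Metric MutableCounterLong registered in the same "httpfsserver" registry as the other operation counters, and incrOpsAllECPolicies() bumps it once per GETECPOLICIES request. A small self-contained illustration (not from the commit) of how such a counter behaves:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

public class EcPoliciesCounterSketch {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("httpfsserver");
    MutableCounterLong opsAllECPolicies =
        registry.newCounter("opsAllECPolicies", "GETECPOLICIES calls", 0L);
    opsAllECPolicies.incr();                       // what incrOpsAllECPolicies() does per request
    System.out.println(opsAllECPolicies.value());  // prints 1
  }
}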


@@ -52,6 +52,7 @@
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -1217,7 +1218,7 @@ protected enum Operation {
FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST,
GET_SNAPSHOT_LIST, GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY,
SATISFYSTORAGEPOLICY, GET_SNAPSHOT_DIFF_LISTING, GETFILEBLOCKLOCATIONS,
GETFILELINKSTATUS, GETSTATUS, GETECPOLICIES
}
private void operation(Operation op) throws Exception {
@@ -1366,8 +1367,10 @@ private void operation(Operation op) throws Exception {
case GETSTATUS:
testGetStatus();
break;
case GETECPOLICIES:
testGetAllEEPolicies();
break;
}
}
@Parameterized.Parameters
@@ -2111,6 +2114,41 @@ private void testGetStatus() throws Exception {
}
}
private void testGetAllEEPolicies() throws Exception {
if (isLocalFS()) {
// do not test the getAllEEPolicies for local FS.
return;
}
final Path path = new Path("/foo");
FileSystem fs = FileSystem.get(path.toUri(), this.getProxiedFSConf());
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem dfs =
(DistributedFileSystem) FileSystem.get(path.toUri(), this.getProxiedFSConf());
FileSystem httpFs = this.getHttpFSFileSystem();
Collection<ErasureCodingPolicyInfo> dfsAllErasureCodingPolicies =
dfs.getAllErasureCodingPolicies();
Collection<ErasureCodingPolicyInfo> diffErasureCodingPolicies = null;
if (httpFs instanceof HttpFSFileSystem) {
HttpFSFileSystem httpFS = (HttpFSFileSystem) httpFs;
diffErasureCodingPolicies = httpFS.getAllErasureCodingPolicies();
} else if (httpFs instanceof WebHdfsFileSystem) {
WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) httpFs;
diffErasureCodingPolicies = webHdfsFileSystem.getAllErasureCodingPolicies();
} else {
Assert.fail(httpFs.getClass().getSimpleName() +
" is not of type HttpFSFileSystem or WebHdfsFileSystem");
}
// Validate that the erasure coding policies are the same as DistributedFileSystem's
assertEquals(dfsAllErasureCodingPolicies.size(), diffErasureCodingPolicies.size());
assertTrue(dfsAllErasureCodingPolicies.containsAll(diffErasureCodingPolicies));
} else {
Assert.fail(fs.getClass().getSimpleName() + " is not of type DistributedFileSystem.");
}
}
private void assertHttpFsReportListingWithDfsClient(SnapshotDiffReportListing diffReportListing,
SnapshotDiffReportListing dfsDiffReportListing) {
Assert.assertEquals(diffReportListing.getCreateList().size(),