HDFS-13512. WebHdfs getFileStatus doesn't return ecPolicy. Contributed by Ajay Kumar.

Arpit Agarwal 2018-05-16 11:28:39 -07:00
parent 55d5549087
commit 0fc988e6a3
3 changed files with 82 additions and 1 deletion

JsonUtilClient.java

@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
@@ -48,6 +49,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -143,6 +145,19 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json,
       f.add(HdfsFileStatus.Flags.HAS_EC);
     }
+    Map<String, Object> ecPolicyObj = (Map) m.get("ecPolicyObj");
+    ErasureCodingPolicy ecPolicy = null;
+    if (ecPolicyObj != null) {
+      Map<String, String> extraOptions = (Map) ecPolicyObj.get("extraOptions");
+      ECSchema ecSchema = new ECSchema((String) ecPolicyObj.get("codecName"),
+          (int) ecPolicyObj.get("numDataUnits"),
+          (int) ecPolicyObj.get("numParityUnits"), extraOptions);
+      ecPolicy = new ErasureCodingPolicy((String) ecPolicyObj.get("name"),
+          ecSchema, (int) ecPolicyObj.get("cellSize"),
+          (byte) (int) ecPolicyObj.get("id"));
+    }
+
     final long aTime = ((Number) m.get("accessTime")).longValue();
     final long mTime = ((Number) m.get("modificationTime")).longValue();
     final long blockSize = ((Number) m.get("blockSize")).longValue();
@@ -170,6 +185,7 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json,
         .fileId(fileId)
         .children(childrenNum)
         .storagePolicy(storagePolicy)
+        .ecPolicy(ecPolicy)
         .build();
   }
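For context, the parser above consumes a FileStatus JSON payload shaped roughly as below. The key names match what getEcPolicyAsMap in JsonUtil.java (second file) writes; the concrete values are illustrative only, loosely modeled on the built-in RS-6-3-1024k policy. Jackson materializes JSON numbers in a Map as Integer, which is why the code casts through (int) and narrows the policy id with (byte) (int).

    {
      "FileStatus": {
        "type": "FILE",
        "ecBit": true,
        "ecPolicy": "RS-6-3-1024k",
        "ecPolicyObj": {
          "name": "RS-6-3-1024k",
          "cellSize": 1048576,
          "numDataUnits": 6,
          "numParityUnits": 3,
          "codecName": "rs",
          "id": 1,
          "extraOptions": {}
        }
      }
    }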

JsonUtil.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
@@ -135,7 +136,10 @@ private static Map<String, Object> toJsonMap(HdfsFileStatus status) {
     if (status.isErasureCoded()) {
       m.put("ecBit", true);
       if (status.getErasureCodingPolicy() != null) {
+        // to maintain backward compatibility
         m.put("ecPolicy", status.getErasureCodingPolicy().getName());
+        // to re-construct HdfsFileStatus object via WebHdfs
+        m.put("ecPolicyObj", getEcPolicyAsMap(status.getErasureCodingPolicy()));
       }
     }
     if (status.isSnapshotEnabled()) {
@@ -152,6 +156,21 @@ private static Map<String, Object> toJsonMap(HdfsFileStatus status) {
     return m;
   }
 
+  /** Convert an ErasureCodingPolicy to a map. */
+  private static Map<String, Object> getEcPolicyAsMap(
+      final ErasureCodingPolicy ecPolicy) {
+    ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
+    builder.put("name", ecPolicy.getName())
+        .put("cellSize", ecPolicy.getCellSize())
+        .put("numDataUnits", ecPolicy.getNumDataUnits())
+        .put("numParityUnits", ecPolicy.getNumParityUnits())
+        .put("codecName", ecPolicy.getCodecName())
+        .put("id", ecPolicy.getId())
+        .put("extraOptions", ecPolicy.getSchema().getExtraOptions());
+    return builder.build();
+  }
+
   /** Convert an ExtendedBlock to a Json map. */
   private static Map<String, Object> toJsonMap(final ExtendedBlock extendedblock) {
     if (extendedblock == null) {

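As a usage sketch, once both sides are in place, an erasure-coded path's status fetched over WebHDFS carries the new ecPolicyObj alongside the existing ecPolicy name, and JsonUtilClient.toFileStatus can rebuild the full ErasureCodingPolicy from it. The host and path below are placeholders (9870 is the default NameNode HTTP port in Hadoop 3.x):

    curl -i "http://<namenode>:9870/webhdfs/v1/<ec-dir>/file1?op=GETFILESTATUS"

Before this change the response carried only the policy name, so an HdfsFileStatus reconstructed on the client from WebHDFS JSON reported a null erasure coding policy.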
TestJsonUtil.java

@@ -23,6 +23,7 @@
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -40,8 +41,11 @@
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus.Flags;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
@@ -66,9 +70,48 @@ static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
   }
 
   @Test
-  public void testHdfsFileStatus() throws IOException {
+  public void testHdfsFileStatusWithEcPolicy() throws IOException {
     final long now = Time.now();
     final String parent = "/dir";
+    ErasureCodingPolicy dummyEcPolicy = new ErasureCodingPolicy("ecPolicy1",
+        new ECSchema("EcSchema", 1, 1), 1024 * 2, (byte) 1);
+    final HdfsFileStatus status = new HdfsFileStatus.Builder()
+        .length(1001L)
+        .replication(3)
+        .blocksize(1L << 26)
+        .mtime(now)
+        .atime(now + 10)
+        .perm(new FsPermission((short) 0644))
+        .owner("user")
+        .group("group")
+        .symlink(DFSUtil.string2Bytes("bar"))
+        .path(DFSUtil.string2Bytes("foo"))
+        .fileId(HdfsConstants.GRANDFATHER_INODE_ID)
+        .ecPolicy(dummyEcPolicy)
+        .flags(EnumSet.allOf(Flags.class))
+        .build();
+    final FileStatus fstatus = toFileStatus(status, parent);
+    System.out.println("status = " + status);
+    System.out.println("fstatus = " + fstatus);
+    final String json = JsonUtil.toJsonString(status, true);
+    System.out.println("json = " + json.replace(",", ",\n "));
+    final HdfsFileStatus s2 =
+        JsonUtilClient.toFileStatus((Map<?, ?>) READER.readValue(json), true);
+    final FileStatus fs2 = toFileStatus(s2, parent);
+    System.out.println("s2 = " + s2);
+    System.out.println("fs2 = " + fs2);
+    Assert.assertEquals(status.getErasureCodingPolicy(),
+        s2.getErasureCodingPolicy());
+    Assert.assertEquals(fstatus, fs2);
+  }
+
+  @Test
+  public void testHdfsFileStatusWithoutEcPolicy() throws IOException {
+    final long now = Time.now();
+    final String parent = "/dir";
+    ErasureCodingPolicy dummyEcPolicy = new ErasureCodingPolicy("ecPolicy1",
+        new ECSchema("EcSchema", 1, 1), 1024 * 2, (byte) 1);
     final HdfsFileStatus status = new HdfsFileStatus.Builder()
         .length(1001L)
         .replication(3)
@@ -82,6 +125,8 @@ public void testHdfsFileStatus() throws IOException {
         .path(DFSUtil.string2Bytes("foo"))
         .fileId(HdfsConstants.GRANDFATHER_INODE_ID)
         .build();
+    Assert.assertTrue(status.getErasureCodingPolicy() == null);
+
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status = " + status);
     System.out.println("fstatus = " + fstatus);
@@ -92,6 +137,7 @@ public void testHdfsFileStatus() throws IOException {
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2 = " + s2);
     System.out.println("fs2 = " + fs2);
+    Assert.assertEquals(fstatus, fs2);
   }