HDFS-13141. WebHDFS: Add support for getting snapshottable directory list. Contributed by Lokesh Jain.

Xiaoyu Yao 2018-03-12 16:37:29 -07:00
parent 45d1b0fdcc
commit 0355ec20eb
8 changed files with 184 additions and 5 deletions
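As context for the diff below: this patch adds a GETSNAPSHOTTABLEDIRECTORYLIST operation to WebHDFS and a matching WebHdfsFileSystem#getSnapshottableDirectoryList() client method. A minimal usage sketch follows; it is not part of the patch, and the NameNode host/port in it are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class ListSnapshottableDirs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder endpoint; point this at a real NameNode HTTP address.
    WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(
        URI.create("webhdfs://namenode.example.com:9870"), conf);
    // The new API returns one entry per snapshottable directory.
    for (SnapshottableDirectoryStatus s : fs.getSnapshottableDirectoryList()) {
      System.out.println(s.getFullPath() + " snapshots="
          + s.getSnapshotNumber() + " quota=" + s.getSnapshotQuota());
    }
  }
}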

View File

@@ -88,6 +88,7 @@ public enum OpType {
  SET_TIMES(CommonStatisticNames.OP_SET_TIMES),
  SET_XATTR("op_set_xattr"),
  GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
  GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
  TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
  UNSET_STORAGE_POLICY("op_unset_storage_policy");

View File

@@ -50,7 +50,32 @@ enum Flags {
  HAS_ACL,
  HAS_CRYPT,
  HAS_EC,
  SNAPSHOT_ENABLED;

  /**
   * Generates an enum set of Flags from a set of attr flags.
   * @param attr Set of attr flags
   * @return EnumSet of Flags
   */
  public static EnumSet<Flags> convert(Set<AttrFlags> attr) {
    if (attr.isEmpty()) {
      return EnumSet.noneOf(Flags.class);
    }
    EnumSet<Flags> flags = EnumSet.noneOf(Flags.class);
    if (attr.contains(AttrFlags.HAS_ACL)) {
      flags.add(Flags.HAS_ACL);
    }
    if (attr.contains(AttrFlags.HAS_EC)) {
      flags.add(Flags.HAS_EC);
    }
    if (attr.contains(AttrFlags.HAS_CRYPT)) {
      flags.add(Flags.HAS_CRYPT);
    }
    if (attr.contains(AttrFlags.SNAPSHOT_ENABLED)) {
      flags.add(Flags.SNAPSHOT_ENABLED);
    }
    return flags;
  }
}

/**

View File

@@ -38,6 +38,7 @@
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -64,6 +65,7 @@
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

class JsonUtilClient {
  static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
@@ -742,4 +744,44 @@ private static byte[] toByteArray(String str) {
    }
    return DFSUtilClient.string2Bytes(str);
  }

  public static SnapshottableDirectoryStatus[] toSnapshottableDirectoryList(
      final Map<?, ?> json) {
    if (json == null) {
      return null;
    }
    List<?> list = (List<?>) json.get("SnapshottableDirectoryList");
    if (list == null) {
      return null;
    }
    SnapshottableDirectoryStatus[] statuses =
        new SnapshottableDirectoryStatus[list.size()];
    for (int i = 0; i < list.size(); i++) {
      statuses[i] = toSnapshottableDirectoryStatus((Map<?, ?>) list.get(i));
    }
    return statuses;
  }

  private static SnapshottableDirectoryStatus toSnapshottableDirectoryStatus(
      Map<?, ?> json) {
    if (json == null) {
      return null;
    }
    int snapshotNumber = getInt(json, "snapshotNumber", 0);
    int snapshotQuota = getInt(json, "snapshotQuota", 0);
    byte[] parentFullPath = toByteArray((String) json.get("parentFullPath"));
    HdfsFileStatus dirStatus =
        toFileStatus((Map<?, ?>) json.get("dirStatus"), false);
    Set<FileStatus.AttrFlags> attrFlags = FileStatus
        .attributes(dirStatus.hasAcl(), dirStatus.isEncrypted(),
            dirStatus.isErasureCoded(), dirStatus.isSnapshotEnabled());
    SnapshottableDirectoryStatus snapshottableDirectoryStatus =
        new SnapshottableDirectoryStatus(dirStatus.getModificationTime(),
            dirStatus.getAccessTime(), dirStatus.getPermission(),
            HdfsFileStatus.Flags.convert(attrFlags), dirStatus.getOwner(),
            dirStatus.getGroup(), dirStatus.getLocalNameInBytes(),
            dirStatus.getFileId(), dirStatus.getChildrenNum(), snapshotNumber,
            snapshotQuota, parentFullPath);
    return snapshottableDirectoryStatus;
  }
}

View File

@@ -99,6 +99,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -1358,6 +1359,19 @@ SnapshotDiffReport decodeResponse(Map<?, ?> json) {
    }.run();
  }

  public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
      throws IOException {
    storageStatistics
        .incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
    final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTTABLEDIRECTORYLIST;
    return new FsPathResponseRunner<SnapshottableDirectoryStatus[]>(op, null) {
      @Override
      SnapshottableDirectoryStatus[] decodeResponse(Map<?, ?> json) {
        return JsonUtilClient.toSnapshottableDirectoryList(json);
      }
    }.run();
  }

  @Override
  public boolean setReplication(final Path p, final short replication
      ) throws IOException {

View File

@@ -48,7 +48,8 @@ public enum Op implements HttpOpParam.Op {
  CHECKACCESS(false, HttpURLConnection.HTTP_OK),
  LISTSTATUS_BATCH(false, HttpURLConnection.HTTP_OK),
  GETSERVERDEFAULTS(false, HttpURLConnection.HTTP_OK),
  GETSNAPSHOTDIFF(false, HttpURLConnection.HTTP_OK),
  GETSNAPSHOTTABLEDIRECTORYLIST(false, HttpURLConnection.HTTP_OK);

  final boolean redirect;
  final int expectedHttpResponseCode;

View File

@@ -80,6 +80,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
@@ -1207,6 +1208,12 @@ private Response get(
      final String js = JsonUtil.toJsonString(diffReport);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETSNAPSHOTTABLEDIRECTORYLIST: {
      SnapshottableDirectoryStatus[] snapshottableDirectoryList =
          cp.getSnapshottableDirListing();
      final String js = JsonUtil.toJsonString(snapshottableDirectoryList);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
    }

View File

@@ -530,4 +530,24 @@ private static Object toJsonMap(
    }
    return m;
  }

  public static String toJsonString(
      SnapshottableDirectoryStatus[] snapshottableDirectoryList) {
    Object[] a = new Object[snapshottableDirectoryList.length];
    for (int i = 0; i < snapshottableDirectoryList.length; i++) {
      a[i] = toJsonMap(snapshottableDirectoryList[i]);
    }
    return toJsonString("SnapshottableDirectoryList", a);
  }

  private static Object toJsonMap(
      SnapshottableDirectoryStatus snapshottableDirectoryStatus) {
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("snapshotNumber", snapshottableDirectoryStatus.getSnapshotNumber());
    m.put("snapshotQuota", snapshottableDirectoryStatus.getSnapshotQuota());
    m.put("parentFullPath", DFSUtilClient
        .bytes2String(snapshottableDirectoryStatus.getParentFullPath()));
    m.put("dirStatus", toJsonMap(snapshottableDirectoryStatus.getDirStatus()));
    return m;
  }
}
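Taken together, the JsonUtil serializer above and the JsonUtilClient parser earlier define the wire format: a JSON object with a top-level "SnapshottableDirectoryList" array whose entries carry snapshotNumber, snapshotQuota, parentFullPath, and a nested dirStatus map. As a rough sketch (not part of the patch), the new op can also be driven directly over HTTP; the host, port, and user below are placeholders, and the URL follows the usual WebHDFS ?op= query pattern.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class RawSnapshottableDirList {
  public static void main(String[] args) throws Exception {
    // Placeholder NameNode HTTP address and user name.
    URL url = new URL("http://namenode.example.com:9870/webhdfs/v1/"
        + "?op=GETSNAPSHOTTABLEDIRECTORYLIST&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      // Prints the raw JSON body, e.g. {"SnapshottableDirectoryList":[...]}.
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}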

View File

@@ -628,7 +628,7 @@ public void testWebHdfsCreateSnapshot() throws Exception {
  }

  /**
   * Test snapshot deletion through WebHdfs.
   */
  @Test
  public void testWebHdfsDeleteSnapshot() throws Exception {
@@ -673,7 +673,7 @@ public void testWebHdfsDeleteSnapshot() throws Exception {
  }

  /**
   * Test snapshot diff through WebHdfs.
   */
  @Test
  public void testWebHdfsSnapshotDiff() throws Exception {
@@ -738,6 +738,75 @@ public void testWebHdfsSnapshotDiff() throws Exception {
    }
  }
  /**
   * Test snapshottable directory list through WebHdfs.
   */
  @Test
  public void testWebHdfsSnapshottableDirectoryList() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      final DistributedFileSystem dfs = cluster.getFileSystem();
      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
      final Path foo = new Path("/foo");
      final Path bar = new Path("/bar");
      dfs.mkdirs(foo);
      dfs.mkdirs(bar);
      dfs.allowSnapshot(foo);
      dfs.allowSnapshot(bar);
      Path file0 = new Path(foo, "file0");
      DFSTestUtil.createFile(dfs, file0, 100, (short) 1, 0);
      Path file1 = new Path(bar, "file1");
      DFSTestUtil.createFile(dfs, file1, 100, (short) 1, 0);
      SnapshottableDirectoryStatus[] statuses =
          webHdfs.getSnapshottableDirectoryList();
      SnapshottableDirectoryStatus[] dfsStatuses =
          dfs.getSnapshottableDirListing();
      for (int i = 0; i < dfsStatuses.length; i++) {
        Assert.assertEquals(statuses[i].getSnapshotNumber(),
            dfsStatuses[i].getSnapshotNumber());
        Assert.assertEquals(statuses[i].getSnapshotQuota(),
            dfsStatuses[i].getSnapshotQuota());
        Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
            dfsStatuses[i].getParentFullPath()));
        Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
            statuses[i].getDirStatus().getChildrenNum());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
            statuses[i].getDirStatus().getModificationTime());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
            statuses[i].getDirStatus().isDir());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
            statuses[i].getDirStatus().getAccessTime());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
            statuses[i].getDirStatus().getPermission());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
            statuses[i].getDirStatus().getOwner());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
            statuses[i].getDirStatus().getGroup());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
            statuses[i].getDirStatus().getPath());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
            statuses[i].getDirStatus().getFileId());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
            statuses[i].getDirStatus().hasAcl());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
            statuses[i].getDirStatus().isEncrypted());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
            statuses[i].getDirStatus().isErasureCoded());
        Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
            statuses[i].getDirStatus().isSnapshotEnabled());
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
  @Test
  public void testWebHdfsCreateNonRecursive() throws IOException, URISyntaxException {
    MiniDFSCluster cluster = null;
@@ -763,7 +832,7 @@ public void testWebHdfsCreateNonRecursive() throws IOException, URISyntaxExcepti
    }
  }

  /**
   * Test snapshot rename through WebHdfs.
   */
  @Test
  public void testWebHdfsRenameSnapshot() throws Exception {