HDFS-13878. HttpFS: Implement GETSNAPSHOTTABLEDIRECTORYLIST. Contributed by Siyao Meng.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
Authored by Siyao Meng on 2018-10-11 15:01:50 -07:00; committed by Wei-Chiu Chuang
parent 3532aa3886
commit 6dcfef79af
6 changed files with 165 additions and 2 deletions

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtilClient;
 import org.apache.hadoop.lib.wsrs.EnumSetParam;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -234,7 +235,8 @@ public enum Operation {
     SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST),
     ALLOWSNAPSHOT(HTTP_PUT), DISALLOWSNAPSHOT(HTTP_PUT),
     CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
-    RENAMESNAPSHOT(HTTP_PUT), GETSNAPSHOTDIFF(HTTP_GET);
+    RENAMESNAPSHOT(HTTP_PUT), GETSNAPSHOTDIFF(HTTP_GET),
+    GETSNAPSHOTTABLEDIRECTORYLIST(HTTP_GET);
 
     private String httpMethod;
@@ -1482,4 +1484,16 @@ public SnapshotDiffReport getSnapshotDiffReport(Path path,
     return JsonUtilClient.toSnapshotDiffReport(json);
   }
 
+  public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
+      throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.GETSNAPSHOTTABLEDIRECTORYLIST.toString());
+    HttpURLConnection conn = getConnection(
+        Operation.GETSNAPSHOTTABLEDIRECTORYLIST.getMethod(),
+        params, new Path(getUri().toString(), "/"), true);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return JsonUtilClient.toSnapshottableDirectoryList(json);
+  }
+
 }
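As an aside for readers, a minimal client-side sketch of the new API (not part of this commit). The host, port, and simple-auth setup are illustrative assumptions; 14000 is the HttpFS default port.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public class SnapshottableDirListClient {
  public static void main(String[] args) throws Exception {
    HttpFSFileSystem fs = new HttpFSFileSystem();
    // HttpFS serves the WebHDFS REST API, hence the webhdfs:// scheme.
    fs.initialize(URI.create("webhdfs://localhost:14000"), new Configuration());
    // May be null: the underlying getSnapshottableDirListing returns null
    // when no directory is snapshottable.
    SnapshottableDirectoryStatus[] dirs = fs.getSnapshottableDirectoryList();
    if (dirs != null) {
      for (SnapshottableDirectoryStatus d : dirs) {
        System.out.println(d.getFullPath() + " snapshots="
            + d.getSnapshotNumber() + " quota=" + d.getSnapshotQuota());
      }
    }
    fs.close();
  }
}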

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java

@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.lib.service.FileSystemAccess;
@@ -1701,4 +1702,38 @@ public String execute(FileSystem fs) throws IOException {
     }
   }
 
+  /**
+   * Executor that performs a getSnapshottableDirListing operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSGetSnapshottableDirListing implements
+      FileSystemAccess.FileSystemExecutor<String> {
+
+    /**
+     * Creates a getSnapshottableDirListing executor.
+     */
+    public FSGetSnapshottableDirListing() {
+    }
+
+    /**
+     * Executes the filesystem operation.
+     * @param fs filesystem instance to use.
+     * @return A JSON string of all snapshottable directories.
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public String execute(FileSystem fs) throws IOException {
+      SnapshottableDirectoryStatus[] sds = null;
+      if (fs instanceof DistributedFileSystem) {
+        DistributedFileSystem dfs = (DistributedFileSystem) fs;
+        sds = dfs.getSnapshottableDirListing();
+      } else {
+        throw new UnsupportedOperationException("getSnapshottableDirListing is "
+            + "not supported for HttpFs on " + fs.getClass()
+            + ". Please check your fs.defaultFS configuration");
+      }
+      return JsonUtil.toJsonString(sds);
+    }
+  }
+
 }
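For orientation (an illustration, not part of the diff): JsonUtil.toJsonString(SnapshottableDirectoryStatus[]) wraps the array under a SnapshottableDirectoryList key, following the WebHDFS schema, so execute() returns a one-line string shaped roughly like the following. The values here are made up and the dirStatus object is abbreviated.

{"SnapshottableDirectoryList": [
  {
    "dirStatus": {"pathSuffix": "tmp-snap-dirlist-test-1", "type": "DIRECTORY",
                  "permission": "700", "owner": "hdfs", "group": "supergroup", ...},
    "parentFullPath": "/tmp",
    "snapshotNumber": 0,
    "snapshotQuota": 65536
  }
]}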

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java

@@ -113,6 +113,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.GETSNAPSHOTDIFF,
         new Class[] {OldSnapshotNameParam.class,
             SnapshotNameParam.class});
+    PARAMS_DEF.put(Operation.GETSNAPSHOTTABLEDIRECTORYLIST, new Class[] {});
   }
 
   public HttpFSParametersProvider() {
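Registering the operation with an empty Class[] means it accepts no operation-specific query parameters; only the common ones apply (op itself, plus authentication parameters such as user.name under simple auth). An illustrative request URL, assuming HttpFS's default port of 14000:

http://<httpfs-host>:14000/webhdfs/v1/?op=GETSNAPSHOTTABLEDIRECTORYLIST&user.name=<user>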

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java

@@ -379,6 +379,14 @@ public InputStream run() throws Exception {
       response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETSNAPSHOTTABLEDIRECTORYLIST: {
+      FSOperations.FSGetSnapshottableDirListing command =
+          new FSOperations.FSGetSnapshottableDirListing();
+      String js = fsExecute(user, command);
+      AUDIT_LOG.info("[{}]", "/");
+      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     default: {
       throw new IOException(
           MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java

@@ -41,6 +41,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1071,7 +1073,7 @@ protected enum Operation {
     GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING,
     CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT,
     ALLOW_SNAPSHOT, DISALLOW_SNAPSHOT, DISALLOW_SNAPSHOT_EXCEPTION,
-    FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF
+    FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST
   }
 
   private void operation(Operation op) throws Exception {
@@ -1185,6 +1187,9 @@ private void operation(Operation op) throws Exception {
       testGetSnapshotDiff();
       testGetSnapshotDiffIllegalParam();
       break;
+    case GET_SNAPSHOTTABLE_DIRECTORY_LIST:
+      testGetSnapshottableDirListing();
+      break;
     }
   }
@@ -1529,4 +1534,51 @@ private void testGetSnapshotDiffIllegalParam() throws Exception {
       fs.delete(path, true);
     }
   }
 
+  private void verifyGetSnapshottableDirListing(
+      FileSystem fs, DistributedFileSystem dfs) throws Exception {
+    // Get snapshottable directory list
+    SnapshottableDirectoryStatus[] sds = null;
+    if (fs instanceof HttpFSFileSystem) {
+      HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
+      sds = httpFS.getSnapshottableDirectoryList();
+    } else if (fs instanceof WebHdfsFileSystem) {
+      WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
+      sds = webHdfsFileSystem.getSnapshottableDirectoryList();
+    } else {
+      Assert.fail(fs.getClass().getSimpleName() +
+          " doesn't support getSnapshottableDirListing");
+    }
+    // Verify result with DFS
+    SnapshottableDirectoryStatus[] dfssds = dfs.getSnapshottableDirListing();
+    Assert.assertEquals(JsonUtil.toJsonString(sds),
+        JsonUtil.toJsonString(dfssds));
+  }
+
+  private void testGetSnapshottableDirListing() throws Exception {
+    if (!this.isLocalFS()) {
+      FileSystem fs = this.getHttpFSFileSystem();
+      // Create directories with snapshot allowed
+      Path path1 = new Path("/tmp/tmp-snap-dirlist-test-1");
+      DistributedFileSystem dfs = (DistributedFileSystem)
+          FileSystem.get(path1.toUri(), this.getProxiedFSConf());
+      // Verify response when there is no snapshottable directory
+      verifyGetSnapshottableDirListing(fs, dfs);
+      createSnapshotTestsPreconditions(path1);
+      Assert.assertTrue(fs.getFileStatus(path1).isSnapshotEnabled());
+      // Verify response when there is one snapshottable directory
+      verifyGetSnapshottableDirListing(fs, dfs);
+      Path path2 = new Path("/tmp/tmp-snap-dirlist-test-2");
+      createSnapshotTestsPreconditions(path2);
+      Assert.assertTrue(fs.getFileStatus(path2).isSnapshotEnabled());
+      // Verify response when there are two snapshottable directories
+      verifyGetSnapshottableDirListing(fs, dfs);
+      // Clean up and verify
+      fs.delete(path2, true);
+      verifyGetSnapshottableDirListing(fs, dfs);
+      fs.delete(path1, true);
+      verifyGetSnapshottableDirListing(fs, dfs);
+    }
+  }
 }

hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java

@@ -20,6 +20,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
@@ -1400,4 +1401,56 @@ public void testGetSnapshotDiffIllegalParam() throws Exception {
     // Clean up
     dfs.delete(path, true);
   }
 
+  private void verifyGetSnapshottableDirectoryList(DistributedFileSystem dfs)
+      throws Exception {
+    // Send a request
+    HttpURLConnection conn = sendRequestToHttpFSServer("/",
+        "GETSNAPSHOTTABLEDIRECTORYLIST", "");
+    // Should return HTTP_OK
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    // Verify the response
+    BufferedReader reader =
+        new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    // The response should be a one-line JSON string.
+    String dirLst = reader.readLine();
+    // Verify the content of the directory list with the DFS API.
+    SnapshottableDirectoryStatus[] dfsDirLst = dfs.getSnapshottableDirListing();
+    Assert.assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testGetSnapshottableDirectoryList() throws Exception {
+    createHttpFSServer(false, false);
+    // Create test directories
+    String pathStr1 = "/tmp/tmp-snap-dirlist-test-1";
+    createDirWithHttp(pathStr1, "700", null);
+    Path path1 = new Path(pathStr1);
+    String pathStr2 = "/tmp/tmp-snap-dirlist-test-2";
+    createDirWithHttp(pathStr2, "700", null);
+    Path path2 = new Path(pathStr2);
+    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
+        path1.toUri(), TestHdfsHelper.getHdfsConf());
+    // Verify response when there is no snapshottable directory
+    verifyGetSnapshottableDirectoryList(dfs);
+    // Enable snapshot for path1
+    dfs.allowSnapshot(path1);
+    Assert.assertTrue(dfs.getFileStatus(path1).isSnapshotEnabled());
+    // Verify response when there is one snapshottable directory
+    verifyGetSnapshottableDirectoryList(dfs);
+    // Enable snapshot for path2
+    dfs.allowSnapshot(path2);
+    Assert.assertTrue(dfs.getFileStatus(path2).isSnapshotEnabled());
+    // Verify response when there are two snapshottable directories
+    verifyGetSnapshottableDirectoryList(dfs);
+    // Clean up and verify
+    dfs.delete(path2, true);
+    verifyGetSnapshottableDirectoryList(dfs);
+    dfs.delete(path1, true);
+    verifyGetSnapshottableDirectoryList(dfs);
+  }
 }
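To exercise the new server-side test locally, a typical invocation (assuming a source checkout root; the module path and surefire's -Dtest filter are standard Hadoop development practice, not part of this diff) would be:

cd hadoop-hdfs-project/hadoop-hdfs-httpfs
mvn test -Dtest=TestHttpFSServer

The client-side coverage above runs through the concrete test subclasses of BaseTestHttpFSWith in the same module.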