HDFS-12117. HttpFS does not seem to support SNAPSHOT related methods for WebHDFS REST Interface. Contributed by Wellington Chevreuil.
parent 9a3c2379ef
commit 8a4bff02c1
@@ -124,6 +124,8 @@ public class HttpFSFileSystem extends FileSystem
   public static final String POLICY_NAME_PARAM = "storagepolicy";
   public static final String OFFSET_PARAM = "offset";
   public static final String LENGTH_PARAM = "length";
+  public static final String SNAPSHOT_NAME_PARAM = "snapshotname";
+  public static final String OLD_SNAPSHOT_NAME_PARAM = "oldsnapshotname";
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final String ACLSPEC_DEFAULT = "";
@@ -144,6 +146,8 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream";
 
+  public static final String SNAPSHOT_JSON = "Path";
+
   public enum FILE_TYPE {
     FILE, DIRECTORY, SYMLINK;
 
@@ -229,7 +233,9 @@ public enum Operation {
     DELETE(HTTP_DELETE), SETXATTR(HTTP_PUT), GETXATTRS(HTTP_GET),
     REMOVEXATTR(HTTP_PUT), LISTXATTRS(HTTP_GET), LISTSTATUS_BATCH(HTTP_GET),
     GETALLSTORAGEPOLICY(HTTP_GET), GETSTORAGEPOLICY(HTTP_GET),
-    SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST);
+    SETSTORAGEPOLICY(HTTP_PUT), UNSETSTORAGEPOLICY(HTTP_POST),
+    CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
+    RENAMESNAPSHOT(HTTP_PUT);
 
     private String httpMethod;
 
@@ -1434,4 +1440,43 @@ public void unsetStoragePolicy(Path src) throws IOException {
         Operation.UNSETSTORAGEPOLICY.getMethod(), params, src, true);
     HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
   }
+
+  @Override
+  public final Path createSnapshot(Path path, String snapshotName)
+      throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.CREATESNAPSHOT.toString());
+    if (snapshotName != null) {
+      params.put(SNAPSHOT_NAME_PARAM, snapshotName);
+    }
+    HttpURLConnection conn = getConnection(Operation.CREATESNAPSHOT.getMethod(),
+        params, path, true);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return new Path((String) json.get(SNAPSHOT_JSON));
+  }
+
+  @Override
+  public void renameSnapshot(Path path, String snapshotOldName,
+      String snapshotNewName) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.RENAMESNAPSHOT.toString());
+    params.put(SNAPSHOT_NAME_PARAM, snapshotNewName);
+    params.put(OLD_SNAPSHOT_NAME_PARAM, snapshotOldName);
+    HttpURLConnection conn = getConnection(Operation.RENAMESNAPSHOT.getMethod(),
+        params, path, true);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  @Override
+  public void deleteSnapshot(Path path, String snapshotName)
+      throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.DELETESNAPSHOT.toString());
+    params.put(SNAPSHOT_NAME_PARAM, snapshotName);
+    HttpURLConnection conn = getConnection(Operation.DELETESNAPSHOT.getMethod(),
+        params, path, true);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
 }
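With this change, snapshot management becomes available through the standard FileSystem API when the client is backed by HttpFS. A minimal client-side sketch, assuming an HttpFS endpoint at webhdfs://httpfs-host:14000 and a directory /tmp/snap-dir that an admin has already made snapshottable (both placeholders, not part of this patch):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HttpFSSnapshotClientExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: this URI resolves to an HttpFS-backed FileSystem.
    FileSystem fs = FileSystem.get(new URI("webhdfs://httpfs-host:14000"), conf);

    Path dir = new Path("/tmp/snap-dir");

    // Maps to PUT ...?op=CREATESNAPSHOT&snapshotname=snap-1
    Path snapshot = fs.createSnapshot(dir, "snap-1");
    System.out.println("Created snapshot at " + snapshot);

    // Maps to PUT ...?op=RENAMESNAPSHOT&oldsnapshotname=snap-1&snapshotname=snap-2
    fs.renameSnapshot(dir, "snap-1", "snap-2");

    // Maps to DELETE ...?op=DELETESNAPSHOT&snapshotname=snap-2
    fs.deleteSnapshot(dir, "snap-2");

    fs.close();
  }
}
```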
@@ -1492,4 +1492,109 @@ public Map execute(FileSystem fs) throws IOException {
       return JsonUtil.toJsonMap(locations);
     }
   }
+
+  /**
+   * Executor that performs a createSnapshot FileSystemAccess operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSCreateSnapshot implements
+      FileSystemAccess.FileSystemExecutor<String> {
+
+    private Path path;
+    private String snapshotName;
+
+    /**
+     * Creates a createSnapshot executor.
+     * @param path directory path to be snapshotted.
+     * @param snapshotName the snapshot name.
+     */
+    public FSCreateSnapshot(String path, String snapshotName) {
+      this.path = new Path(path);
+      this.snapshotName = snapshotName;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     * @param fs filesystem instance to use.
+     * @return <code>Path</code> the complete path for newly created snapshot
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public String execute(FileSystem fs) throws IOException {
+      Path snapshotPath = fs.createSnapshot(path, snapshotName);
+      JSONObject json = toJSON(HttpFSFileSystem.HOME_DIR_JSON,
+          snapshotPath.toString());
+      return json.toJSONString().replaceAll("\\\\", "");
+    }
+  }
+
+  /**
+   * Executor that performs a deleteSnapshot FileSystemAccess operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSDeleteSnapshot implements
+      FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private String snapshotName;
+
+    /**
+     * Creates a deleteSnapshot executor.
+     * @param path path for the snapshot to be deleted.
+     * @param snapshotName snapshot name.
+     */
+    public FSDeleteSnapshot(String path, String snapshotName) {
+      this.path = new Path(path);
+      this.snapshotName = snapshotName;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     * @param fs filesystem instance to use.
+     * @return void
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.deleteSnapshot(path, snapshotName);
+      return null;
+    }
+  }
+
+  /**
+   * Executor that performs a renameSnapshot FileSystemAccess operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSRenameSnapshot implements
+      FileSystemAccess.FileSystemExecutor<Void> {
+    private Path path;
+    private String oldSnapshotName;
+    private String snapshotName;
+
+    /**
+     * Creates a renameSnapshot executor.
+     * @param path directory path of the snapshot to be renamed.
+     * @param oldSnapshotName current snapshot name.
+     * @param snapshotName new snapshot name to be set.
+     */
+    public FSRenameSnapshot(String path, String oldSnapshotName,
+        String snapshotName) {
+      this.path = new Path(path);
+      this.oldSnapshotName = oldSnapshotName;
+      this.snapshotName = snapshotName;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     * @param fs filesystem instance to use.
+     * @return void
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.renameSnapshot(path, oldSnapshotName, snapshotName);
+      return null;
+    }
+  }
+
 }
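Each executor above wraps a single FileSystem call so that HttpFSServer can run it as the proxied user through fsExecute(user, command). A rough sketch of that flow for CREATESNAPSHOT, assuming a FileSystem instance is already available (in the real server it is supplied by FileSystemAccess, and the path and snapshot name come from the request parameters):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.server.FSOperations;

// Illustrative only: in the patch the executor is handed to
// fsExecute(user, command) rather than executed directly.
final class SnapshotExecutorSketch {
  private SnapshotExecutorSketch() {
  }

  static String createSnapshotJson(FileSystem fs) throws IOException {
    // Placeholder request parameters as the REST layer would deliver them.
    String path = "/tmp/snap-dir";
    String snapshotName = "snap-1";

    FSOperations.FSCreateSnapshot command =
        new FSOperations.FSCreateSnapshot(path, snapshotName);
    // Calls fs.createSnapshot(path, snapshotName) and returns a JSON string
    // of the form {"Path":"/tmp/snap-dir/.snapshot/snap-1"}.
    return command.execute(fs);
  }
}
```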
@@ -100,6 +100,13 @@ public class HttpFSParametersProvider extends ParametersProvider
     PARAMS_DEF.put(Operation.SETSTORAGEPOLICY,
         new Class[] {PolicyNameParam.class});
     PARAMS_DEF.put(Operation.UNSETSTORAGEPOLICY, new Class[] {});
+    PARAMS_DEF.put(Operation.CREATESNAPSHOT,
+        new Class[] {SnapshotNameParam.class});
+    PARAMS_DEF.put(Operation.DELETESNAPSHOT,
+        new Class[] {SnapshotNameParam.class});
+    PARAMS_DEF.put(Operation.RENAMESNAPSHOT,
+        new Class[] {OldSnapshotNameParam.class,
+            SnapshotNameParam.class});
   }
 
   public HttpFSParametersProvider() {
@@ -565,4 +572,42 @@ public PolicyNameParam() {
       super(NAME, null);
     }
   }
+
+  /**
+   * Class for SnapshotName parameter.
+   */
+  public static class SnapshotNameParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.SNAPSHOT_NAME_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public SnapshotNameParam() {
+      super(NAME, null);
+    }
+
+  }
+
+  /**
+   * Class for OldSnapshotName parameter.
+   */
+  public static class OldSnapshotNameParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.OLD_SNAPSHOT_NAME_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public OldSnapshotNameParam() {
+      super(NAME, null);
+    }
+  }
+
 }
@@ -37,6 +37,7 @@
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.NewLengthParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OldSnapshotNameParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
@@ -45,6 +46,7 @@
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SourcesParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.SnapshotNameParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrEncodingParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrNameParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.XAttrSetFlagParam;
@@ -430,6 +432,16 @@ public Response delete(@PathParam("path") String path,
       response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case DELETESNAPSHOT: {
+      String snapshotName = params.get(SnapshotNameParam.NAME,
+          SnapshotNameParam.class);
+      FSOperations.FSDeleteSnapshot command =
+          new FSOperations.FSDeleteSnapshot(path, snapshotName);
+      fsExecute(user, command);
+      AUDIT_LOG.info("[{}] deleted snapshot [{}]", path, snapshotName);
+      response = Response.ok().build();
+      break;
+    }
     default: {
       throw new IOException(
         MessageFormat.format("Invalid HTTP DELETE operation [{0}]",
@@ -602,6 +614,16 @@ public Response put(InputStream is,
       }
       break;
     }
+    case CREATESNAPSHOT: {
+      String snapshotName = params.get(SnapshotNameParam.NAME,
+          SnapshotNameParam.class);
+      FSOperations.FSCreateSnapshot command =
+          new FSOperations.FSCreateSnapshot(path, snapshotName);
+      String json = fsExecute(user, command);
+      AUDIT_LOG.info("[{}] snapshot created as [{}]", path, snapshotName);
+      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     case SETXATTR: {
       String xattrName = params.get(XAttrNameParam.NAME,
           XAttrNameParam.class);
@@ -617,6 +639,20 @@ public Response put(InputStream is,
       response = Response.ok().build();
       break;
     }
+    case RENAMESNAPSHOT: {
+      String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
+          OldSnapshotNameParam.class);
+      String snapshotName = params.get(SnapshotNameParam.NAME,
+          SnapshotNameParam.class);
+      FSOperations.FSRenameSnapshot command =
+          new FSOperations.FSRenameSnapshot(path, oldSnapshotName,
+              snapshotName);
+      fsExecute(user, command);
+      AUDIT_LOG.info("[{}] renamed snapshot [{}] to [{}]", path,
+          oldSnapshotName, snapshotName);
+      response = Response.ok().build();
+      break;
+    }
     case REMOVEXATTR: {
       String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
       FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr(
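At the REST level, the handlers above make the snapshot operations reachable with WebHDFS-style query parameters, the same shape the new tests exercise further down. A minimal sketch of the corresponding HTTP calls, assuming an HttpFS server on its default port 14000 and a snapshottable directory /tmp/snap-dir (host, user, and paths are placeholders):

```java
import java.net.HttpURLConnection;
import java.net.URL;

public class SnapshotRestCallsExample {
  // Placeholder endpoint; HttpFS listens on port 14000 by default.
  private static final String BASE =
      "http://httpfs-host:14000/webhdfs/v1/tmp/snap-dir?user.name=hdfs";

  private static int call(String method, String opAndParams) throws Exception {
    URL url = new URL(BASE + "&" + opAndParams);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod(method);
    conn.connect();
    return conn.getResponseCode();
  }

  public static void main(String[] args) throws Exception {
    // PUT ?op=CREATESNAPSHOT[&snapshotname=<name>] -> 200 OK with {"Path":"..."}
    call("PUT", "op=CREATESNAPSHOT&snapshotname=snap-1");
    // PUT ?op=RENAMESNAPSHOT&oldsnapshotname=<old>&snapshotname=<new> -> 200 OK
    call("PUT", "op=RENAMESNAPSHOT&oldsnapshotname=snap-1&snapshotname=snap-2");
    // DELETE ?op=DELETESNAPSHOT&snapshotname=<name> -> 200 OK
    call("DELETE", "op=DELETESNAPSHOT&snapshotname=snap-2");
  }
}
```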
|
@ -38,6 +38,7 @@
|
|||||||
import org.apache.hadoop.hdfs.AppendTestUtil;
|
import org.apache.hadoop.hdfs.AppendTestUtil;
|
||||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||||
|
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||||
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
|
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
|
||||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||||
import org.apache.hadoop.security.UserGroupInformation;
|
import org.apache.hadoop.security.UserGroupInformation;
|
||||||
@@ -74,6 +75,7 @@
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.regex.Pattern;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -1038,7 +1040,8 @@ protected enum Operation {
     WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
     SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
     GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION, LIST_STATUS_BATCH,
-    GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING, GETFILEBLOCKLOCATIONS
+    GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING, GETFILEBLOCKLOCATIONS,
+    CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT
   }
 
   private void operation(Operation op) throws Exception {
@@ -1130,6 +1133,15 @@ private void operation(Operation op) throws Exception {
     case GETFILEBLOCKLOCATIONS:
       testGetFileBlockLocations();
      break;
+    case CREATE_SNAPSHOT:
+      testCreateSnapshot();
+      break;
+    case RENAME_SNAPSHOT:
+      testRenameSnapshot();
+      break;
+    case DELETE_SNAPSHOT:
+      testDeleteSnapshot();
+      break;
     }
   }
 
@@ -1257,4 +1269,98 @@ private void verifyBlockLocations(BlockLocation[] locations1,
           location2.getTopologyPaths());
     }
   }
+
+  private void testCreateSnapshot(String snapshotName) throws Exception {
+    if (!this.isLocalFS()) {
+      Path snapshottablePath = new Path("/tmp/tmp-snap-test");
+      createSnapshotTestsPreconditions(snapshottablePath);
+      //Now get the FileSystem instance that's being tested
+      FileSystem fs = this.getHttpFSFileSystem();
+      if (snapshotName == null) {
+        fs.createSnapshot(snapshottablePath);
+      } else {
+        fs.createSnapshot(snapshottablePath, snapshotName);
+      }
+      Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
+      FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
+      assertTrue("Should have exactly one snapshot.",
+          snapshotItems.length == 1);
+      String resultingSnapName = snapshotItems[0].getPath().getName();
+      if (snapshotName == null) {
+        assertTrue("Snapshot auto generated name not matching pattern",
+            Pattern.matches("(s)(\\d{8})(-)(\\d{6})(\\.)(\\d{3})",
+                resultingSnapName));
+      } else {
+        assertTrue("Snapshot name is not same as passed name.",
+            snapshotName.equals(resultingSnapName));
+      }
+      cleanSnapshotTests(snapshottablePath, resultingSnapName);
+    }
+  }
+
+  private void testCreateSnapshot() throws Exception {
+    testCreateSnapshot(null);
+    testCreateSnapshot("snap-with-name");
+  }
+
+  private void createSnapshotTestsPreconditions(Path snapshottablePath)
+      throws Exception {
+    //Needed to get a DistributedFileSystem instance, in order to
+    //call allowSnapshot on the newly created directory
+    DistributedFileSystem distributedFs = (DistributedFileSystem)
+        FileSystem.get(snapshottablePath.toUri(), this.getProxiedFSConf());
+    distributedFs.mkdirs(snapshottablePath);
+    distributedFs.allowSnapshot(snapshottablePath);
+    Path subdirPath = new Path("/tmp/tmp-snap-test/subdir");
+    distributedFs.mkdirs(subdirPath);
+
+  }
+
+  private void cleanSnapshotTests(Path snapshottablePath,
+      String resultingSnapName) throws Exception {
+    DistributedFileSystem distributedFs = (DistributedFileSystem)
+        FileSystem.get(snapshottablePath.toUri(), this.getProxiedFSConf());
+    distributedFs.deleteSnapshot(snapshottablePath, resultingSnapName);
+    distributedFs.delete(snapshottablePath, true);
+  }
+
+  private void testRenameSnapshot() throws Exception {
+    if (!this.isLocalFS()) {
+      Path snapshottablePath = new Path("/tmp/tmp-snap-test");
+      createSnapshotTestsPreconditions(snapshottablePath);
+      //Now get the FileSystem instance that's being tested
+      FileSystem fs = this.getHttpFSFileSystem();
+      fs.createSnapshot(snapshottablePath, "snap-to-rename");
+      fs.renameSnapshot(snapshottablePath, "snap-to-rename",
+          "snap-new-name");
+      Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
+      FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
+      assertTrue("Should have exactly one snapshot.",
+          snapshotItems.length == 1);
+      String resultingSnapName = snapshotItems[0].getPath().getName();
+      assertTrue("Snapshot name is not same as passed name.",
+          "snap-new-name".equals(resultingSnapName));
+      cleanSnapshotTests(snapshottablePath, resultingSnapName);
+    }
+  }
+
+  private void testDeleteSnapshot() throws Exception {
+    if (!this.isLocalFS()) {
+      Path snapshottablePath = new Path("/tmp/tmp-snap-test");
+      createSnapshotTestsPreconditions(snapshottablePath);
+      //Now get the FileSystem instance that's being tested
+      FileSystem fs = this.getHttpFSFileSystem();
+      fs.createSnapshot(snapshottablePath, "snap-to-delete");
+      Path snapshotsDir = new Path("/tmp/tmp-snap-test/.snapshot");
+      FileStatus[] snapshotItems = fs.listStatus(snapshotsDir);
+      assertTrue("Should have exactly one snapshot.",
+          snapshotItems.length == 1);
+      fs.deleteSnapshot(snapshottablePath, "snap-to-delete");
+      snapshotItems = fs.listStatus(snapshotsDir);
+      assertTrue("There should be no snapshot anymore.",
+          snapshotItems.length == 0);
+      fs.delete(snapshottablePath, true);
+    }
+  }
+
 }
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.http.server;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
@@ -71,6 +72,7 @@
 
 import com.google.common.collect.Maps;
 import java.util.Properties;
+import java.util.regex.Pattern;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 
 /**
@@ -465,6 +467,20 @@ private String getStatus(String filename, String command)
    */
   private void putCmd(String filename, String command,
                       String params) throws Exception {
+    Assert.assertEquals(HttpURLConnection.HTTP_OK,
+        putCmdWithReturn(filename, command, params).getResponseCode());
+  }
+
+  /**
+   * General-purpose http PUT command to the httpfs server,
+   * which returns the related HttpURLConnection instance.
+   * @param filename The file to operate upon
+   * @param command The command to perform (SETACL, etc)
+   * @param params Parameters, like "aclspec=..."
+   * @return HttpURLConnection the HttpURLConnection instance for the given PUT
+   */
+  private HttpURLConnection putCmdWithReturn(String filename, String command,
+      String params) throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
     // Remove leading / from filename
     if (filename.charAt(0) == '/') {
@@ -478,7 +494,7 @@ private void putCmd(String filename, String command,
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     conn.setRequestMethod("PUT");
     conn.connect();
-    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    return conn;
   }
 
   /**
@@ -882,6 +898,108 @@ public void testDelegationTokenOperations() throws Exception {
     delegationTokenCommonTests(false);
   }
 
+  private HttpURLConnection snapshotTestPreconditions(String httpMethod,
+                                                      String snapOperation,
+                                                      String additionalParams)
+      throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
+        "/webhdfs/v1/tmp/tmp-snap-test/subdir?user.name={0}&op=MKDIRS",
+        user));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    conn.connect();
+
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+
+    //needed to make the given dir snapshottable
+    Path snapshottablePath = new Path("/tmp/tmp-snap-test");
+    DistributedFileSystem dfs =
+        (DistributedFileSystem) FileSystem.get(snapshottablePath.toUri(),
+            TestHdfsHelper.getHdfsConf());
+    dfs.allowSnapshot(snapshottablePath);
+
+    //Try to create snapshot passing snapshot name
+    url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
+        "/webhdfs/v1/tmp/tmp-snap-test?user.name={0}&op={1}&{2}", user,
+        snapOperation, additionalParams));
+    conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod(httpMethod);
+    conn.connect();
+    return conn;
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testCreateSnapshot() throws Exception {
+    createHttpFSServer(false, false);
+    final HttpURLConnection conn = snapshotTestPreconditions("PUT",
+        "CREATESNAPSHOT",
+        "snapshotname=snap-with-name");
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    final BufferedReader reader =
+        new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    String result = reader.readLine();
+    //Validates if the content format is correct
+    Assert.assertTrue(result.
+        equals("{\"Path\":\"/tmp/tmp-snap-test/.snapshot/snap-with-name\"}"));
+    //Validates if the snapshot is properly created under .snapshot folder
+    result = getStatus("/tmp/tmp-snap-test/.snapshot",
+        "LISTSTATUS");
+    Assert.assertTrue(result.contains("snap-with-name"));
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testCreateSnapshotNoSnapshotName() throws Exception {
+    createHttpFSServer(false, false);
+    final HttpURLConnection conn = snapshotTestPreconditions("PUT",
+        "CREATESNAPSHOT",
+        "");
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    final BufferedReader reader = new BufferedReader(
+        new InputStreamReader(conn.getInputStream()));
+    String result = reader.readLine();
+    //Validates if the content format is correct
+    Assert.assertTrue(Pattern.matches(
+        "(\\{\\\"Path\\\"\\:\\\"/tmp/tmp-snap-test/.snapshot/s)" +
+        "(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\"\\})", result));
+    //Validates if the snapshot is properly created under .snapshot folder
+    result = getStatus("/tmp/tmp-snap-test/.snapshot",
+        "LISTSTATUS");
+
+    Assert.assertTrue(Pattern.matches("(.+)(\\\"pathSuffix\\\":\\\"s)" +
+        "(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\")(.+)",
+        result));
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testRenameSnapshot() throws Exception {
+    createHttpFSServer(false, false);
+    HttpURLConnection conn = snapshotTestPreconditions("PUT",
+        "CREATESNAPSHOT",
+        "snapshotname=snap-to-rename");
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    conn = snapshotTestPreconditions("PUT",
+        "RENAMESNAPSHOT",
+        "oldsnapshotname=snap-to-rename" +
+        "&snapshotname=snap-renamed");
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    //Validates the snapshot is properly renamed under .snapshot folder
+    String result = getStatus("/tmp/tmp-snap-test/.snapshot",
+        "LISTSTATUS");
+    Assert.assertTrue(result.contains("snap-renamed"));
+    //There should be no snapshot named snap-to-rename now
+    Assert.assertFalse(result.contains("snap-to-rename"));
+  }
+
   @Test
   @TestDir
   @TestJetty
@@ -890,4 +1008,24 @@ public void testDelegationTokenOperationsSsl() throws Exception {
     createHttpFSServer(true, true);
     delegationTokenCommonTests(true);
   }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDeleteSnapshot() throws Exception {
+    createHttpFSServer(false, false);
+    HttpURLConnection conn = snapshotTestPreconditions("PUT",
+        "CREATESNAPSHOT",
+        "snapshotname=snap-to-delete");
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    conn = snapshotTestPreconditions("DELETE",
+        "DELETESNAPSHOT",
+        "snapshotname=snap-to-delete");
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    //Validates the snapshot is not under .snapshot folder anymore
+    String result = getStatus("/tmp/tmp-snap-test/.snapshot",
+        "LISTSTATUS");
+    Assert.assertFalse(result.contains("snap-to-delete"));
+  }
 }