HDFS-16503. Should verify whether the path name is valid in the WebHDFS (#4067). Contributed by tomscut.

Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
litao 2022-03-21 15:10:51 +08:00 committed by GitHub
parent 62d59e516e
commit e5549a2a68
5 changed files with 68 additions and 1 deletion

WebHdfsFileSystem.java

@@ -478,7 +478,8 @@ private Path makeAbsolute(Path f) {
     return f.isAbsolute()? f: new Path(workingDir, f);
   }
 
-  static Map<?, ?> jsonParse(final HttpURLConnection c,
+  @VisibleForTesting
+  public static Map<?, ?> jsonParse(final HttpURLConnection c,
       final boolean useErrorStream) throws IOException {
     if (c.getContentLength() == 0) {
       return null;
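Making jsonParse public (annotated @VisibleForTesting to signal the widened visibility exists for tests) lets the new tests below decode the JSON error body of a failed WebHDFS call. A minimal sketch of that usage, assuming `urlString` points at some WebHDFS operation URL expected to fail (the class name and endpoint here are hypothetical):

import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Map;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class JsonParseSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical WebHDFS URL, e.g. http://host:port/webhdfs/v1//tmp//file?op=CREATE
    String urlString = args[0];
    HttpURLConnection conn = (HttpURLConnection) new URL(urlString).openConnection();
    conn.setRequestMethod("PUT");
    if (conn.getResponseCode() >= 400) {
      // useErrorStream=true makes the helper read the error stream.
      Map<?, ?> json = WebHdfsFileSystem.jsonParse(conn, true);
      if (json != null) {
        System.out.println(json.get("RemoteException"));
      }
    }
    conn.disconnect();
  }
}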

RouterWebHdfsMethods.java

@@ -19,6 +19,8 @@
 import static org.apache.hadoop.util.StringUtils.getTrimmedStringCollection;
 
+import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -419,6 +421,9 @@ private URI redirectURI(final Router router, final UserGroupInformation ugi,
       final DoAsParam doAsUser, final String path, final HttpOpParam.Op op,
       final long openOffset, final String excludeDatanodes,
       final Param<?, ?>... parameters) throws URISyntaxException, IOException {
+    if (!DFSUtil.isValidName(path)) {
+      throw new InvalidPathException(path);
+    }
     final DatanodeInfo dn =
         chooseDatanode(router, path, op, openOffset, excludeDatanodes);
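The guard delegates to DFSUtil.isValidName. A small sketch of the semantics the change relies on; the expected value for the duplicated-slash case follows from the tests added in this commit:

import org.apache.hadoop.hdfs.DFSUtil;

public class IsValidNameSketch {
  public static void main(String[] args) {
    // A well-formed absolute path name is accepted.
    System.out.println(DFSUtil.isValidName("/tmp/file"));   // true
    // Duplicated slashes produce an empty path component and are rejected;
    // this is the exact case the new tests exercise.
    System.out.println(DFSUtil.isValidName("//tmp//file")); // false
    // Relative path names are rejected outright.
    System.out.println(DFSUtil.isValidName("tmp/file"));    // false
  }
}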

TestRouterWebHdfsMethods.java

@@ -23,9 +23,12 @@
 import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -34,6 +37,7 @@
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -145,4 +149,24 @@ public void testGetNsFromDataNodeNetworkLocation() {
     assertEquals("", RouterWebHdfsMethods
         .getNsFromDataNodeNetworkLocation("whatever-rack-info1"));
   }
+
+  @Test
+  public void testWebHdfsCreateWithInvalidPath() throws Exception {
+    // A path name that includes duplicated slashes.
+    String path = "//tmp//file";
+    assertResponse(path);
+  }
+
+  private void assertResponse(String path) throws IOException {
+    URL url = new URL(getUri(path));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    // Assert the response code.
+    assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode());
+    // Assert the exception type reported by the server.
+    Map<?, ?> response = WebHdfsFileSystem.jsonParse(conn, true);
+    assertEquals("InvalidPathException",
+        ((LinkedHashMap) response.get("RemoteException")).get("exception"));
+    conn.disconnect();
+  }
 }

NamenodeWebHdfsMethods.java

@@ -55,6 +55,7 @@
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
 
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -401,6 +402,9 @@ private URI redirectURI(ResponseBuilder rb, final NameNode namenode,
       final String path, final HttpOpParam.Op op, final long openOffset,
       final long blocksize, final String excludeDatanodes,
       final Param<?, ?>... parameters) throws URISyntaxException, IOException {
+    if (!DFSUtil.isValidName(path)) {
+      throw new InvalidPathException(path);
+    }
     final DatanodeInfo dn;
     final NamenodeProtocols np = getRPCServer(namenode);
     HdfsFileStatus status = null;
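InvalidPathException extends HadoopIllegalArgumentException, which in turn extends IllegalArgumentException, and the WebHDFS exception handler translates IllegalArgumentException into HTTP 400 Bad Request with a JSON RemoteException body; that is what lets the tests below assert HTTP_BAD_REQUEST. A paraphrased sketch of that translation (an illustration of the idea, not the actual org.apache.hadoop.hdfs.web.resources.ExceptionHandler source):

import java.io.FileNotFoundException;
import java.net.HttpURLConnection;

public class StatusMappingSketch {
  // Condensed illustration of how server-side exceptions become
  // WebHDFS HTTP status codes.
  static int statusFor(Exception e) {
    if (e instanceof FileNotFoundException) {
      return HttpURLConnection.HTTP_NOT_FOUND;    // 404
    } else if (e instanceof IllegalArgumentException) {
      // InvalidPathException lands here via HadoopIllegalArgumentException.
      return HttpURLConnection.HTTP_BAD_REQUEST;  // 400
    }
    return HttpURLConnection.HTTP_INTERNAL_ERROR; // 500
  }
}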

TestWebHDFS.java

@@ -55,6 +55,7 @@
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Random;
@@ -522,6 +523,38 @@ public void testCreateWithNoDN() throws Exception {
     }
   }
 
+  @Test
+  public void testWebHdfsCreateWithInvalidPath() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    // A path name that includes duplicated slashes.
+    String path = "//tmp//file";
+    assertResponse(path);
+  }
+
+  private String getUri(String path) {
+    final String user = System.getProperty("user.name");
+    final StringBuilder uri = new StringBuilder(cluster.getHttpUri(0));
+    uri.append("/webhdfs/v1").
+        append(path).
+        append("?op=CREATE").
+        append("&user.name=" + user);
+    return uri.toString();
+  }
+
+  private void assertResponse(String path) throws IOException {
+    URL url = new URL(getUri(path));
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    // Assert the response code.
+    assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode());
+    // Assert the exception type reported by the server.
+    Map<?, ?> response = WebHdfsFileSystem.jsonParse(conn, true);
+    assertEquals("InvalidPathException",
+        ((LinkedHashMap) response.get("RemoteException")).get("exception"));
+    conn.disconnect();
+  }
+
   /**
    * Test allow and disallow snapshot through WebHdfs. Verifying webhdfs with
    * Distributed filesystem methods.