Revert "HDFS-11156. Add new op GETFILEBLOCKLOCATIONS to WebHDFS REST API. Contributed by Weiwei Yang"

This reverts commit c7ff34f8dc.
commit 08a7253bc0
parent b2a3d6c519
Author: Andrew Wang
Date:   2016-12-05 23:08:49 -08:00

6 changed files with 4 additions and 151 deletions
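For context, the request surface being reverted is the public block-locations op that the deleted test further down exercises. A minimal sketch of that request URL, with an illustrative host and port (the test reads the real address from its MiniDFSCluster):

    import java.net.URL;

    public class GetFileBlockLocationsUrlSketch {
      public static void main(String[] args) throws Exception {
        // "/webhdfs/v1" is WebHdfsFileSystem.PATH_PREFIX; host and port are made up.
        URL url = new URL("http", "namenode.example.com", 50070,
            "/webhdfs/v1/foo?op=GETFILEBLOCKLOCATIONS");
        System.out.println(url); // the endpoint this revert removes
      }
    }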

View File

@@ -22,7 +22,6 @@
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.ContentSummary.Builder;
import org.apache.hadoop.fs.FileChecksum;
@@ -589,35 +588,4 @@ static LocatedBlocks toLocatedBlocks(
lastLocatedBlock, isLastBlockComplete, null, null);
}
/** Convert a JSON map to a BlockLocation. */
static BlockLocation toBlockLocation(Map<String, Object> m)
throws IOException {
long length = ((Number) m.get("length")).longValue();
long offset = ((Number) m.get("offset")).longValue();
boolean corrupt = Boolean.
getBoolean(m.get("corrupt").toString());
String[] storageIds = toStringArray(getList(m, "storageIds"));
String[] cachedHosts = toStringArray(getList(m, "cachedHosts"));
String[] hosts = toStringArray(getList(m, "hosts"));
String[] names = toStringArray(getList(m, "names"));
String[] topologyPaths = toStringArray(getList(m, "topologyPaths"));
StorageType[] storageTypes = toStorageTypeArray(
getList(m, "storageTypes"));
return new BlockLocation(names, hosts, cachedHosts,
topologyPaths, storageIds, storageTypes,
offset, length, corrupt);
}
static String[] toStringArray(List<?> list) {
if (list == null) {
return null;
} else {
final String[] array = new String[list.size()];
int i = 0;
for (Object object : list) {
array[i++] = object.toString();
}
return array;
}
}
}
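One detail worth flagging in the removed decoder above: Boolean.getBoolean(String) reads the JVM system property of that name rather than parsing the text, so a "corrupt": true field would decode as false. A minimal sketch of the intended parse, with a hypothetical helper name:

    import java.util.Map;

    final class CorruptFlagSketch {
      // Boolean.parseBoolean parses the string itself; the deleted code's
      // Boolean.getBoolean("true") would look up a system property named "true".
      static boolean isCorrupt(Map<String, Object> m) {
        return Boolean.parseBoolean(m.get("corrupt").toString());
      }
    }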

View File

@@ -1610,20 +1610,13 @@ public BlockLocation[] getFileBlockLocations(final Path p,
     statistics.incrementReadOps(1);
     storageStatistics.incrementOpCounter(OpType.GET_FILE_BLOCK_LOCATIONS);
-    final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
+    final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
     return new FsPathResponseRunner<BlockLocation[]>(op, p,
         new OffsetParam(offset), new LengthParam(length)) {
       @Override
       @SuppressWarnings("unchecked")
       BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
-        List<?> list = JsonUtilClient.getList(json, "BlockLocations");
-        BlockLocation[] locations = new BlockLocation[list.size()];
-        for(int i=0; i<locations.length; i++) {
-          BlockLocation bl = JsonUtilClient.
-              toBlockLocation((Map<String, Object>) list.get(i));
-          locations[i] = bl;
-        }
-        return locations;
+        return DFSUtilClient.locatedBlocks2Locations(
+            JsonUtilClient.toLocatedBlocks(json));
       }
     }.run();
   }
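Whichever op is used on the wire, callers reach it through the same FileSystem API. A small sketch of the client-side call this method backs, with an illustrative URI:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockLocationsClientSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:50070"),
            new Configuration());
        // Ask for every block of /foo; WebHdfsFileSystem turns this into the
        // GET op shown in the hunk above.
        for (BlockLocation loc :
            fs.getFileBlockLocations(new Path("/foo"), 0, Long.MAX_VALUE)) {
          System.out.println(loc);
        }
      }
    }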

View File

@@ -33,18 +33,8 @@ public enum Op implements HttpOpParam.Op {
   GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
   GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
-  /**
-   * GET_BLOCK_LOCATIONS is a private/stable API op. It returns a
-   * {@link org.apache.hadoop.hdfs.protocol.LocatedBlocks}
-   * json object.
-   */
+  /** GET_BLOCK_LOCATIONS is a private unstable op. */
   GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
-  /**
-   * GETFILEBLOCKLOCATIONS is the public op that complies with
-   * {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations}
-   * interface.
-   */
-  GETFILEBLOCKLOCATIONS(false, HttpURLConnection.HTTP_OK),
   GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
   GETXATTRS(false, HttpURLConnection.HTTP_OK),
   GETTRASHROOT(false, HttpURLConnection.HTTP_OK),
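Reading the surrounding constants, each Op appears to carry a redirect flag and the HTTP status it expects, with an optional require-auth flag as on GETDELEGATIONTOKEN. A reduced sketch of that pattern; the field names are mine, not the real GetOpParam source:

    import java.net.HttpURLConnection;

    enum GetOpSketch {
      GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK);

      final boolean redirect;               // assumed: true for ops served by a datanode
      final int expectedHttpResponseCode;   // status a successful call should return

      GetOpSketch(boolean redirect, int expectedHttpResponseCode) {
        this.redirect = redirect;
        this.expectedHttpResponseCode = expectedHttpResponseCode;
      }
    }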

View File

@@ -54,7 +54,6 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -976,22 +975,6 @@ private Response get(
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
}
case GETFILEBLOCKLOCATIONS:
{
final long offsetValue = offset.getValue();
final Long lengthValue = length.getValue();
try (final FileSystem fs = FileSystem.get(conf != null ?
conf : new Configuration())) {
BlockLocation[] locations = fs.getFileBlockLocations(
new org.apache.hadoop.fs.Path(fullpath),
offsetValue,
lengthValue != null? lengthValue: Long.MAX_VALUE);
final String js = JsonUtil.toJsonString("BlockLocations",
JsonUtil.toJsonArray(locations));
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
}
case GET_BLOCK_LOCATIONS:
{
final long offsetValue = offset.getValue();
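The deleted GETFILEBLOCKLOCATIONS branch above wraps JsonUtil.toJsonArray(locations) under a "BlockLocations" key, so a response would have looked roughly like the sketch below; the values are illustrative, the key set comes from JsonUtil.toJsonMap later in this diff:

    public class BlockLocationsResponseSketch {
      public static void main(String[] args) {
        // Approximate body of the removed endpoint's response.
        String exampleResponse =
            "{ \"BlockLocations\": [ {"
            + " \"length\": 134217728, \"offset\": 0, \"corrupt\": false,"
            + " \"hosts\": [\"dn1.example.com\"],"
            + " \"names\": [\"dn1.example.com:50010\"],"
            + " \"cachedHosts\": [], \"storageIds\": [\"DS-1\"],"
            + " \"storageTypes\": [\"DISK\"],"
            + " \"topologyPaths\": [\"/default-rack/dn1.example.com:50010\"] } ] }";
        System.out.println(exampleResponse);
      }
    }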

View File

@@ -436,34 +436,4 @@ public static String toJsonString(Object obj) throws IOException {
return MAPPER.writeValueAsString(obj);
}
public static Object[] toJsonArray(BlockLocation[] locations)
throws IOException {
if(locations == null) {
return null;
}
Object[] blockLocations = new Object[locations.length];
for(int i=0; i<locations.length; i++) {
blockLocations[i] = toJsonMap(locations[i]);
}
return blockLocations;
}
public static Map<String, Object> toJsonMap(
final BlockLocation blockLocation) throws IOException {
if (blockLocation == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("length", blockLocation.getLength());
m.put("offset", blockLocation.getOffset());
m.put("corrupt", blockLocation.isCorrupt());
m.put("storageTypes", toJsonArray(blockLocation.getStorageTypes()));
m.put("storageIds", blockLocation.getStorageIds());
m.put("cachedHosts", blockLocation.getCachedHosts());
m.put("hosts", blockLocation.getHosts());
m.put("names", blockLocation.getNames());
m.put("topologyPaths", blockLocation.getTopologyPaths());
return m;
}
}
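A minimal sketch of the removed serialization path, rebuilding toJsonMap by hand with Jackson; storageTypes is omitted because the deleted code routes it through a separate toJsonArray overload:

    import java.util.Map;
    import java.util.TreeMap;

    import org.apache.hadoop.fs.BlockLocation;

    import com.fasterxml.jackson.databind.ObjectMapper;

    public class BlockLocationJsonSketch {
      public static void main(String[] args) throws Exception {
        // names and hosts are illustrative.
        BlockLocation loc = new BlockLocation(
            new String[] {"dn1.example.com:50010"},  // names
            new String[] {"dn1.example.com"},        // hosts
            0L, 134217728L);                         // offset, length
        Map<String, Object> m = new TreeMap<>();
        m.put("length", loc.getLength());
        m.put("offset", loc.getOffset());
        m.put("corrupt", loc.isCorrupt());
        m.put("cachedHosts", loc.getCachedHosts());
        m.put("hosts", loc.getHosts());
        m.put("names", loc.getNames());
        m.put("storageIds", loc.getStorageIds());
        m.put("topologyPaths", loc.getTopologyPaths());
        System.out.println(new ObjectMapper().writeValueAsString(m));
      }
    }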

View File

@@ -37,7 +37,6 @@
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Map;
import java.util.Random;
import org.apache.commons.io.IOUtils;
@@ -89,8 +88,6 @@
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.MapType;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.doReturn;
@@ -855,46 +852,6 @@ public void testWebHdfsGetBlockLocationsWithStorageType() throws Exception {
Assert.assertTrue(storageTypes != null && storageTypes.length > 0 &&
storageTypes[0] == StorageType.DISK);
}
// Query webhdfs REST API to get block locations
InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
URL url = new URL("http", addr.getHostString(), addr.getPort(),
WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS");
LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url);
String response = getResponse(url, "GET");
LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response);
// Expected output from the REST API
// { "BlockLocations" : [{Block_Location_Json}, ...] }
ObjectMapper mapper = new ObjectMapper();
MapType jsonType = mapper.getTypeFactory().constructMapType(
Map.class,
String.class,
BlockLocation[].class);
Map<String, BlockLocation[]> jsonMap = mapper.readValue(response,
jsonType);
BlockLocation[] array = jsonMap.get("BlockLocations");
for(int i=0; i<locations.length; i++) {
BlockLocation raw = locations[i];
BlockLocation rest = array[i];
Assert.assertEquals(raw.getLength(),
rest.getLength());
Assert.assertEquals(raw.getOffset(),
rest.getOffset());
Assert.assertArrayEquals(raw.getCachedHosts(),
rest.getCachedHosts());
Assert.assertArrayEquals(raw.getHosts(),
rest.getHosts());
Assert.assertArrayEquals(raw.getNames(),
rest.getNames());
Assert.assertArrayEquals(raw.getStorageIds(),
rest.getStorageIds());
Assert.assertArrayEquals(raw.getTopologyPaths(),
rest.getTopologyPaths());
Assert.assertArrayEquals(raw.getStorageTypes(),
rest.getStorageTypes());
}
} finally {
if (cluster != null) {
cluster.shutdown();
@@ -902,14 +859,6 @@ public void testWebHdfsGetBlockLocationsWithStorageType() throws Exception {
}
}
private static String getResponse(URL url, String httpRequestType)
throws IOException {
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(httpRequestType);
conn.setInstanceFollowRedirects(false);
return IOUtils.toString(conn.getInputStream());
}
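Two notes on the deleted helper: IOUtils here is the commons-io class imported near the top of this test, and redirects are disabled, which matches the false flag GETFILEBLOCKLOCATIONS was declared with earlier in this diff (the NameNode answers directly, if I read that flag correctly). A hypothetical invocation, URL illustrative:

    String json = getResponse(
        new URL("http://namenode.example.com:50070/webhdfs/v1/foo"
            + "?op=GETFILEBLOCKLOCATIONS"),
        "GET");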
private WebHdfsFileSystem createWebHDFSAsTestUser(final Configuration conf,
final URI uri, final String userName) throws Exception {