HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by Haohui Mai.

Haohui Mai 2015-04-23 17:33:05 -07:00
parent 0b3f8957a8
commit bcf89ddc7d
22 changed files with 198 additions and 163 deletions

View File

@@ -60,7 +60,7 @@ public static RetryPolicy getDefaultRetryPolicy(
       boolean defaultRetryPolicyEnabled,
       String retryPolicySpecKey,
       String defaultRetryPolicySpec,
-      final Class<? extends Exception> remoteExceptionToRetry
+      final String remoteExceptionToRetry
       ) {
     final RetryPolicy multipleLinearRandomRetry =
@@ -94,7 +94,7 @@ public RetryAction shouldRetry(Exception e, int retries, int failovers,
         final RetryPolicy p;
         if (e instanceof RemoteException) {
           final RemoteException re = (RemoteException)e;
-          p = remoteExceptionToRetry.getName().equals(re.getClassName())?
+          p = remoteExceptionToRetry.equals(re.getClassName())?
               multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
         } else if (e instanceof IOException || e instanceof ServiceException) {
           p = multipleLinearRandomRetry;
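
The change above swaps a Class token for a plain class-name string, so the retry policy can match a server-side exception that is no longer on the client's classpath. A minimal sketch of the matching logic, assuming only Hadoop's RemoteException API (the exception name and message below are illustrative):

import org.apache.hadoop.ipc.RemoteException;

public class RemoteExceptionNameMatch {
  // Same comparison the patched shouldRetry() performs: match the
  // server-side exception by name rather than by a loaded Class object.
  static boolean matches(RemoteException re, String remoteExceptionToRetry) {
    return remoteExceptionToRetry.equals(re.getClassName());
  }

  public static void main(String[] args) {
    // Illustrative RemoteException as a client would receive it over RPC.
    RemoteException re = new RemoteException(
        "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
        "Name node is in safe mode.");
    System.out.println(matches(re,
        "org.apache.hadoop.hdfs.server.namenode.SafeModeException")); // true
  }
}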

View File

@@ -19,10 +19,17 @@
 import com.google.common.base.Joiner;
 import com.google.common.collect.Maps;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -31,6 +38,7 @@
 import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;

 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
@@ -39,6 +47,13 @@
 public class DFSUtilClient {
   private static final Logger LOG = LoggerFactory.getLogger(
       DFSUtilClient.class);
+
+  /**
+   * Converts a string to a byte array using UTF8 encoding.
+   */
+  public static byte[] string2Bytes(String str) {
+    return str.getBytes(Charsets.UTF_8);
+  }
+
   /**
    * Converts a byte array to a string using UTF8 encoding.
    */
@@ -113,6 +128,62 @@ public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresse
     }
   }

+  /**
+   * Convert a LocatedBlocks to BlockLocations[]
+   * @param blocks a LocatedBlocks
+   * @return an array of BlockLocations
+   */
+  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
+    if (blocks == null) {
+      return new BlockLocation[0];
+    }
+    return locatedBlocks2Locations(blocks.getLocatedBlocks());
+  }
+
+  /**
+   * Convert a List<LocatedBlock> to BlockLocation[]
+   * @param blocks A List<LocatedBlock> to be converted
+   * @return converted array of BlockLocation
+   */
+  public static BlockLocation[] locatedBlocks2Locations(
+      List<LocatedBlock> blocks) {
+    if (blocks == null) {
+      return new BlockLocation[0];
+    }
+    int nrBlocks = blocks.size();
+    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
+    if (nrBlocks == 0) {
+      return blkLocations;
+    }
+    int idx = 0;
+    for (LocatedBlock blk : blocks) {
+      assert idx < nrBlocks : "Incorrect index";
+      DatanodeInfo[] locations = blk.getLocations();
+      String[] hosts = new String[locations.length];
+      String[] xferAddrs = new String[locations.length];
+      String[] racks = new String[locations.length];
+      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
+        hosts[hCnt] = locations[hCnt].getHostName();
+        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
+        NodeBase node = new NodeBase(xferAddrs[hCnt],
+            locations[hCnt].getNetworkLocation());
+        racks[hCnt] = node.toString();
+      }
+      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
+      String[] cachedHosts = new String[cachedLocations.length];
+      for (int i = 0; i < cachedLocations.length; i++) {
+        cachedHosts[i] = cachedLocations[i].getHostName();
+      }
+      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
+          racks,
+          blk.getStartOffset(),
+          blk.getBlockSize(),
+          blk.isCorrupt());
+      idx++;
+    }
+    return blkLocations;
+  }
+
   /**
    * Decode a specific range of bytes of the given byte array to a string
    * using UTF8.
@@ -234,4 +305,42 @@ private static String getConfValue(String defaultValue, String keySuffix,
     }
     return value;
   }
+
+  /**
+   * Whether the pathname is valid. Currently prohibits relative paths,
+   * names which contain a ":" or "//", or other non-canonical paths.
+   */
+  public static boolean isValidName(String src) {
+    // Path must be absolute.
+    if (!src.startsWith(Path.SEPARATOR)) {
+      return false;
+    }
+
+    // Check for ".." "." ":" "/"
+    String[] components = StringUtils.split(src, '/');
+    for (int i = 0; i < components.length; i++) {
+      String element = components[i];
+      if (element.equals(".") ||
+          (element.contains(":")) ||
+          (element.contains("/"))) {
+        return false;
+      }
+      // ".." is allowed in path starting with /.reserved/.inodes
+      if (element.equals("..")) {
+        if (components.length > 4
+            && components[1].equals(".reserved")
+            && components[2].equals(".inodes")) {
+          continue;
+        }
+        return false;
+      }
+      // The string may start or end with a /, but not have
+      // "//" in the middle.
+      if (element.isEmpty() && i != components.length - 1 &&
+          i != 0) {
+        return false;
+      }
+    }
+    return true;
+  }
 }
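
Since isValidName is copied into the client verbatim, a quick behavioral sketch may help; the paths are illustrative and the expected results follow from the rules in the method above:

import org.apache.hadoop.hdfs.DFSUtilClient;

public class IsValidNameDemo {
  public static void main(String[] args) {
    System.out.println(DFSUtilClient.isValidName("/user/alice/data")); // true: absolute, canonical
    System.out.println(DFSUtilClient.isValidName("relative/path"));    // false: not absolute
    System.out.println(DFSUtilClient.isValidName("/a//b"));            // false: "//" in the middle
    System.out.println(DFSUtilClient.isValidName("/a/b:c"));           // false: ":" in a component
    // ".." is only tolerated under the /.reserved/.inodes prefix:
    System.out.println(DFSUtilClient.isValidName("/.reserved/.inodes/16386/..")); // true
  }
}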

View File

@@ -26,6 +26,7 @@ public interface HdfsClientConfigKeys {
   long DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024;
   String DFS_REPLICATION_KEY = "dfs.replication";
   short DFS_REPLICATION_DEFAULT = 3;
+  String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
   String DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";

View File

@@ -38,4 +38,8 @@ public interface HdfsConstantsClient {
    * URI.
    */
   String HA_DT_SERVICE_PREFIX = "ha-";
+  // The name of the SafeModeException. FileSystem should retry if it sees
+  // the below exception in RPC
+  String SAFEMODE_EXCEPTION_CLASS_NAME = "org.apache.hadoop.hdfs.server" +
+      ".namenode.SafeModeException";
 }

View File

@@ -29,7 +29,7 @@
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@@ -110,7 +110,7 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
     final String localName = (String) m.get("pathSuffix");
     final WebHdfsConstants.PathType type = WebHdfsConstants.PathType.valueOf((String) m.get("type"));
     final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK? null
-        : DFSUtil.string2Bytes((String) m.get("symlink"));
+        : DFSUtilClient.string2Bytes((String) m.get("symlink"));

     final long len = ((Number) m.get("length")).longValue();
     final String owner = (String) m.get("owner");
@@ -130,7 +130,8 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
         HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
     return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
+        symlink, DFSUtilClient.string2Bytes(localName),
+        fileId, childrenNum, null,
         storagePolicy);
   }

View File

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web;

-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.io.Text;

 public class SWebHdfsFileSystem extends WebHdfsFileSystem {
@@ -39,6 +39,6 @@ protected Text getTokenKind() {
   @Override
   protected int getDefaultPort() {
-    return DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   }
 }

View File

@@ -56,14 +56,12 @@
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op;
 import org.apache.hadoop.io.Text;
@@ -145,8 +143,8 @@ public synchronized void initialize(URI uri, Configuration conf
     setConf(conf);
     /** set user pattern based on configuration file */
     UserParam.setUserPattern(conf.get(
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

     connectionFactory = URLConnectionFactory
         .newDefaultURLConnectionFactory(conf);
@@ -173,7 +171,7 @@ public synchronized void initialize(URI uri, Configuration conf
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT,
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY,
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT,
-          SafeModeException.class);
+          HdfsConstantsClient.SAFEMODE_EXCEPTION_CLASS_NAME);
     } else {
       int maxFailoverAttempts = conf.getInt(
@@ -256,7 +254,7 @@ synchronized boolean replaceExpiredDelegationToken() throws IOException {
   @Override
   protected int getDefaultPort() {
-    return DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
   }

   @Override
@@ -287,7 +285,7 @@ public synchronized Path getWorkingDirectory() {
   @Override
   public synchronized void setWorkingDirectory(final Path dir) {
     String result = makeAbsolute(dir).toUri().getPath();
-    if (!DFSUtil.isValidName(result)) {
+    if (!DFSUtilClient.isValidName(result)) {
       throw new IllegalArgumentException("Invalid DFS directory name " +
           result);
     }
@@ -1111,14 +1109,14 @@ public void setTimes(final Path p, final long mtime, final long atime
   @Override
   public long getDefaultBlockSize() {
-    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
-        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
+    return getConf().getLongBytes(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY,
+        HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
   }

   @Override
   public short getDefaultReplication() {
-    return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
-        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+    return (short)getConf().getInt(HdfsClientConfigKeys.DFS_REPLICATION_KEY,
+        HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT);
   }

   @Override
@@ -1379,7 +1377,7 @@ public BlockLocation[] getFileBlockLocations(final Path p,
         new OffsetParam(offset), new LengthParam(length)) {
       @Override
       BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
-        return DFSUtil.locatedBlocks2Locations(
+        return DFSUtilClient.locatedBlocks2Locations(
             JsonUtilClient.toLocatedBlocks(json));
       }
     }.run();
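
With the class relocated and every DFSConfigKeys reference replaced by HdfsClientConfigKeys, WebHDFS access should need only the hadoop-hdfs-client artifact. A minimal usage sketch; the NameNode address and listed path are placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsListDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The webhdfs:// scheme selects WebHdfsFileSystem; host and port are placeholders.
    try (FileSystem fs = FileSystem.get(
        URI.create("webhdfs://nn.example.com:50070/"), conf)) {
      for (FileStatus st : fs.listStatus(new Path("/"))) {
        System.out.println(st.getPath());
      }
    }
  }
}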

View File

@@ -467,6 +467,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8215. Refactor NamenodeFsck#check method. (Takanobu Asanuma
     via szetszwo)

+    HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. (wheat9)
+
   OPTIMIZATIONS

     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@@ -48,7 +48,6 @@
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.token.Token;
-import org.apache.htrace.Sampler;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -296,7 +295,7 @@ static BlockStorageLocation[] convertToVolumeBlockLocations(
       List<LocatedBlock> blocks,
       Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
     // Construct the final return value of VolumeBlockLocation[]
-    BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
+    BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
     List<BlockStorageLocation> volumeBlockLocs =
         new ArrayList<BlockStorageLocation>(locations.length);
     for (int i = 0; i < locations.length; i++) {

View File

@@ -917,7 +917,7 @@ public BlockLocation[] getBlockLocations(String src, long start,
     TraceScope scope = getPathTraceScope("getBlockLocations", src);
     try {
       LocatedBlocks blocks = getLocatedBlocks(src, start, length);
-      BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
+      BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
       HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
       for (int i = 0; i < locations.length; i++) {
         hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));

View File

@@ -164,7 +164,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
    */
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
       "org.apache.hadoop.hdfs.web.AuthFilter".toString();
-  public static final String  DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
+  @Deprecated
+  public static final String  DFS_WEBHDFS_USER_PATTERN_KEY =
+      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY;
+  @Deprecated
   public static final String  DFS_WEBHDFS_USER_PATTERN_DEFAULT =
       HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
   public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
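
The @Deprecated aliases keep server-side code that still references DFSConfigKeys compiling, while both constants resolve to the same key string. A small sketch of the assumed equivalence:

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class KeyAliasCheck {
  public static void main(String[] args) {
    // Both constants name "dfs.webhdfs.user.provider.user.pattern".
    System.out.println(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY
        .equals(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY)); // true
  }
}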

View File

@@ -228,37 +228,7 @@ public boolean match(InetSocketAddress s) {
    * names which contain a ":" or "//", or other non-canonical paths.
    */
   public static boolean isValidName(String src) {
-    // Path must be absolute.
-    if (!src.startsWith(Path.SEPARATOR)) {
-      return false;
-    }
-
-    // Check for ".." "." ":" "/"
-    String[] components = StringUtils.split(src, '/');
-    for (int i = 0; i < components.length; i++) {
-      String element = components[i];
-      if (element.equals(".") ||
-          (element.indexOf(":") >= 0) ||
-          (element.indexOf("/") >= 0)) {
-        return false;
-      }
-      // ".." is allowed in path starting with /.reserved/.inodes
-      if (element.equals("..")) {
-        if (components.length > 4
-            && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
-            && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
-          continue;
-        }
-        return false;
-      }
-      // The string may start or end with a /, but not have
-      // "//" in the middle.
-      if (element.isEmpty() && i != components.length - 1 &&
-          i != 0) {
-        return false;
-      }
-    }
-    return true;
+    return DFSUtilClient.isValidName(src);
   }

   /**
@@ -329,7 +299,7 @@ public static String bytes2String(byte[] bytes, int offset, int length) {
    * Converts a string to a byte array using UTF8 encoding.
    */
   public static byte[] string2Bytes(String str) {
-    return str.getBytes(Charsets.UTF_8);
+    return DFSUtilClient.string2Bytes(str);
   }

   /**
@@ -476,61 +446,6 @@ public static byte[][] bytes2byteArray(byte[] bytes,
     return result;
   }

-  /**
-   * Convert a LocatedBlocks to BlockLocations[]
-   * @param blocks a LocatedBlocks
-   * @return an array of BlockLocations
-   */
-  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
-    if (blocks == null) {
-      return new BlockLocation[0];
-    }
-    return locatedBlocks2Locations(blocks.getLocatedBlocks());
-  }
-
-  /**
-   * Convert a List<LocatedBlock> to BlockLocation[]
-   * @param blocks A List<LocatedBlock> to be converted
-   * @return converted array of BlockLocation
-   */
-  public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
-    if (blocks == null) {
-      return new BlockLocation[0];
-    }
-    int nrBlocks = blocks.size();
-    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
-    if (nrBlocks == 0) {
-      return blkLocations;
-    }
-    int idx = 0;
-    for (LocatedBlock blk : blocks) {
-      assert idx < nrBlocks : "Incorrect index";
-      DatanodeInfo[] locations = blk.getLocations();
-      String[] hosts = new String[locations.length];
-      String[] xferAddrs = new String[locations.length];
-      String[] racks = new String[locations.length];
-      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
-        hosts[hCnt] = locations[hCnt].getHostName();
-        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
-        NodeBase node = new NodeBase(xferAddrs[hCnt],
-            locations[hCnt].getNetworkLocation());
-        racks[hCnt] = node.toString();
-      }
-      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
-      String[] cachedHosts = new String[cachedLocations.length];
-      for (int i=0; i<cachedLocations.length; i++) {
-        cachedHosts[i] = cachedLocations[i].getHostName();
-      }
-      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
-          racks,
-          blk.getStartOffset(),
-          blk.getBlockSize(),
-          blk.isCorrupt());
-      idx++;
-    }
-    return blkLocations;
-  }
-
   /**
    * Return configuration key of format key.suffix1.suffix2...suffixN
    */

View File

@@ -404,7 +404,7 @@ private static ClientProtocol createNNProxyWithClientProtocol(
         HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
         HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
         HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
-        SafeModeException.class);
+        SafeModeException.class.getName());

     final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
     ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(

View File

@@ -25,7 +25,7 @@
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;

 /**
  * Interface that represents the over the wire information
@@ -78,6 +78,6 @@ public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
         isSymlink() ? new Path(getSymlink()) : null,
         (getFullPath(path)).makeQualified(
             defaultUri, null), // fully-qualify path
-        DFSUtil.locatedBlocks2Locations(getBlockLocations()));
+        DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
   }
 }

View File

@@ -29,6 +29,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -69,8 +70,8 @@ public class NameNodeHttpServer {
   private void initWebHdfs(Configuration conf) throws IOException {
     // set user pattern based on configuration file
     UserParam.setUserPattern(conf.get(
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

     // add authentication filter for webhdfs
     final String className = conf.get(

View File

@@ -112,7 +112,7 @@ public void testLocatedBlocks2Locations() {
     List<LocatedBlock> ls = Arrays.asList(l1, l2);
     LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);

-    BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
+    BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);

     assertTrue("expected 2 blocks but got " + bs.length,
         bs.length == 2);
@@ -128,7 +128,7 @@ public void testLocatedBlocks2Locations() {
         corruptCount == 1);

     // test an empty location
-    bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
+    bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
     assertEquals(0, bs.length);
   }

View File

@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -282,7 +283,8 @@ public Void run() throws IOException, URISyntaxException {
   @Test(timeout=300000)
   public void testNumericalUserName() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
-    conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
+    conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9" +
+        "._-]*[$]?$");
     final MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     try {