HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. Contributed by Haohui Mai.

Author: Haohui Mai, 2015-04-23 17:33:05 -07:00
parent 0b3f8957a8
commit bcf89ddc7d
22 changed files with 198 additions and 163 deletions

View File

@@ -60,7 +60,7 @@ public static RetryPolicy getDefaultRetryPolicy(
     boolean defaultRetryPolicyEnabled,
     String retryPolicySpecKey,
     String defaultRetryPolicySpec,
-    final Class<? extends Exception> remoteExceptionToRetry
+    final String remoteExceptionToRetry
     ) {
   final RetryPolicy multipleLinearRandomRetry =
@@ -94,7 +94,7 @@ public RetryAction shouldRetry(Exception e, int retries, int failovers,
   final RetryPolicy p;
   if (e instanceof RemoteException) {
     final RemoteException re = (RemoteException)e;
-    p = remoteExceptionToRetry.getName().equals(re.getClassName())?
+    p = remoteExceptionToRetry.equals(re.getClassName())?
         multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
   } else if (e instanceof IOException || e instanceof ServiceException) {
     p = multipleLinearRandomRetry;
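With the parameter now a plain class-name String, callers in hadoop-hdfs-client can name the server-side SafeModeException without a compile-time dependency on it (see the HdfsConstantsClient and WebHdfsFileSystem hunks below, and the NameNodeProxies caller which passes SafeModeException.class.getName()). A minimal, self-contained sketch of that matching idea; ClassNameMatchSketch and RemoteStub are illustrative stand-ins, not Hadoop classes:

import java.io.IOException;

// Sketch only: shows why matching on a class-name String lets a client module
// retry on a server-side exception type it cannot reference directly.
public class ClassNameMatchSketch {

  /** Hypothetical stand-in for org.apache.hadoop.ipc.RemoteException's getClassName(). */
  static class RemoteStub extends IOException {
    private final String className;
    RemoteStub(String className, String msg) { super(msg); this.className = className; }
    String getClassName() { return className; }
  }

  static final String SAFEMODE_EXCEPTION_CLASS_NAME =
      "org.apache.hadoop.hdfs.server.namenode.SafeModeException";

  static boolean shouldRetry(RemoteStub re, String remoteExceptionToRetry) {
    // String comparison needs no compile-time dependency on the server class.
    return remoteExceptionToRetry.equals(re.getClassName());
  }

  public static void main(String[] args) {
    RemoteStub re = new RemoteStub(SAFEMODE_EXCEPTION_CLASS_NAME, "Name node is in safe mode");
    System.out.println(shouldRetry(re, SAFEMODE_EXCEPTION_CLASS_NAME)); // true
  }
}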

View File

@@ -19,10 +19,17 @@
 import com.google.common.base.Joiner;
 import com.google.common.collect.Maps;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -31,6 +38,7 @@
 import java.net.InetSocketAddress;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;

 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
@@ -39,6 +47,13 @@
 public class DFSUtilClient {
   private static final Logger LOG = LoggerFactory.getLogger(
       DFSUtilClient.class);
+
+  /**
+   * Converts a string to a byte array using UTF8 encoding.
+   */
+  public static byte[] string2Bytes(String str) {
+    return str.getBytes(Charsets.UTF_8);
+  }

   /**
    * Converts a byte array to a string using UTF8 encoding.
    */
@@ -113,6 +128,62 @@ public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresse
     }
   }

+  /**
+   * Convert a LocatedBlocks to BlockLocations[]
+   * @param blocks a LocatedBlocks
+   * @return an array of BlockLocations
+   */
+  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
+    if (blocks == null) {
+      return new BlockLocation[0];
+    }
+    return locatedBlocks2Locations(blocks.getLocatedBlocks());
+  }
+
+  /**
+   * Convert a List<LocatedBlock> to BlockLocation[]
+   * @param blocks A List<LocatedBlock> to be converted
+   * @return converted array of BlockLocation
+   */
+  public static BlockLocation[] locatedBlocks2Locations(
+      List<LocatedBlock> blocks) {
+    if (blocks == null) {
+      return new BlockLocation[0];
+    }
+    int nrBlocks = blocks.size();
+    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
+    if (nrBlocks == 0) {
+      return blkLocations;
+    }
+    int idx = 0;
+    for (LocatedBlock blk : blocks) {
+      assert idx < nrBlocks : "Incorrect index";
+      DatanodeInfo[] locations = blk.getLocations();
+      String[] hosts = new String[locations.length];
+      String[] xferAddrs = new String[locations.length];
+      String[] racks = new String[locations.length];
+      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
+        hosts[hCnt] = locations[hCnt].getHostName();
+        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
+        NodeBase node = new NodeBase(xferAddrs[hCnt],
+            locations[hCnt].getNetworkLocation());
+        racks[hCnt] = node.toString();
+      }
+      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
+      String[] cachedHosts = new String[cachedLocations.length];
+      for (int i=0; i<cachedLocations.length; i++) {
+        cachedHosts[i] = cachedLocations[i].getHostName();
+      }
+      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
+          racks,
+          blk.getStartOffset(),
+          blk.getBlockSize(),
+          blk.isCorrupt());
+      idx++;
+    }
+    return blkLocations;
+  }

   /**
    * Decode a specific range of bytes of the given byte array to a string
    * using UTF8.
@@ -234,4 +305,42 @@ private static String getConfValue(String defaultValue, String keySuffix,
     }
     return value;
   }
+
+  /**
+   * Whether the pathname is valid. Currently prohibits relative paths,
+   * names which contain a ":" or "//", or other non-canonical paths.
+   */
+  public static boolean isValidName(String src) {
+    // Path must be absolute.
+    if (!src.startsWith(Path.SEPARATOR)) {
+      return false;
+    }
+
+    // Check for ".." "." ":" "/"
+    String[] components = StringUtils.split(src, '/');
+    for (int i = 0; i < components.length; i++) {
+      String element = components[i];
+      if (element.equals(".") ||
+          (element.contains(":")) ||
+          (element.contains("/"))) {
+        return false;
+      }
+      // ".." is allowed in path starting with /.reserved/.inodes
+      if (element.equals("..")) {
+        if (components.length > 4
+            && components[1].equals(".reserved")
+            && components[2].equals(".inodes")) {
+          continue;
+        }
+        return false;
+      }
+      // The string may start or end with a /, but not have
+      // "//" in the middle.
+      if (element.isEmpty() && i != components.length - 1 &&
+          i != 0) {
+        return false;
+      }
+    }
+    return true;
+  }
 }
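A rough usage sketch of the helpers that now live in DFSUtilClient (string2Bytes, isValidName, locatedBlocks2Locations), assuming only hadoop-hdfs-client and hadoop-common on the classpath; the paths below are placeholders:

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class DfsUtilClientSketch {
  public static void main(String[] args) {
    // Path validation now lives in the client module.
    System.out.println(DFSUtilClient.isValidName("/user/alice/data"));  // true
    System.out.println(DFSUtilClient.isValidName("data:with:colons"));  // false (not absolute, contains ':')

    // UTF-8 encoding helper moved from DFSUtil.
    byte[] raw = DFSUtilClient.string2Bytes("/tmp/example");
    System.out.println(new String(raw, StandardCharsets.UTF_8));

    // An empty LocatedBlocks maps to an empty BlockLocation array,
    // matching the TestDFSUtil case shown further down in this diff.
    BlockLocation[] locs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
    System.out.println(locs.length);  // 0
  }
}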

View File

@@ -26,6 +26,7 @@ public interface HdfsClientConfigKeys {
   long DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024;
   String DFS_REPLICATION_KEY = "dfs.replication";
   short DFS_REPLICATION_DEFAULT = 3;
+  String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
   String DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";

View File

@@ -38,4 +38,8 @@ public interface HdfsConstantsClient {
    * URI.
    */
   String HA_DT_SERVICE_PREFIX = "ha-";
+  // The name of the SafeModeException. FileSystem should retry if it sees
+  // the below exception in RPC
+  String SAFEMODE_EXCEPTION_CLASS_NAME = "org.apache.hadoop.hdfs.server" +
+      ".namenode.SafeModeException";
 }

View File

@@ -29,7 +29,7 @@
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@@ -110,7 +110,7 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
   final String localName = (String) m.get("pathSuffix");
   final WebHdfsConstants.PathType type = WebHdfsConstants.PathType.valueOf((String) m.get("type"));
   final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK? null
-      : DFSUtil.string2Bytes((String) m.get("symlink"));
+      : DFSUtilClient.string2Bytes((String) m.get("symlink"));
   final long len = ((Number) m.get("length")).longValue();
   final String owner = (String) m.get("owner");
@@ -130,7 +130,8 @@ static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
       HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
   return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
       blockSize, mTime, aTime, permission, owner, group,
-      symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
+      symlink, DFSUtilClient.string2Bytes(localName),
+      fileId, childrenNum, null,
       storagePolicy);
 }

View File

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web;

-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.io.Text;

 public class SWebHdfsFileSystem extends WebHdfsFileSystem {
@@ -39,6 +39,6 @@ protected Text getTokenKind() {
   @Override
   protected int getDefaultPort() {
-    return DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   }
 }

View File

@@ -174,7 +174,7 @@ public URLConnection openConnection(URL url, boolean isSpnego)
   /**
    * Sets timeout parameters on the given URLConnection.
    *
    * @param connection
    *          URLConnection to set
    * @param socketTimeout

View File

@@ -56,14 +56,12 @@
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op;
 import org.apache.hadoop.io.Text;
@@ -145,8 +143,8 @@ public synchronized void initialize(URI uri, Configuration conf
     setConf(conf);
     /** set user pattern based on configuration file */
     UserParam.setUserPattern(conf.get(
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

     connectionFactory = URLConnectionFactory
         .newDefaultURLConnectionFactory(conf);
@@ -173,7 +171,7 @@ public synchronized void initialize(URI uri, Configuration conf
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT,
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY,
           HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT,
-          SafeModeException.class);
+          HdfsConstantsClient.SAFEMODE_EXCEPTION_CLASS_NAME);
     } else {
       int maxFailoverAttempts = conf.getInt(
@@ -224,7 +222,7 @@ protected synchronized Token<?> getDelegationToken() throws IOException {
       if(LOG.isDebugEnabled()) {
         LOG.debug("Using UGI token: " + token);
       }
       canRefreshDelegationToken = false;
     } else {
       token = getDelegationToken(null);
       if (token != null) {
@@ -256,14 +254,14 @@ synchronized boolean replaceExpiredDelegationToken() throws IOException {
   @Override
   protected int getDefaultPort() {
-    return DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
   }

   @Override
   public URI getUri() {
     return this.uri;
   }

   @Override
   protected URI canonicalizeUri(URI uri) {
     return NetUtils.getCanonicalUri(uri, getDefaultPort());
@@ -287,8 +285,8 @@ public synchronized Path getWorkingDirectory() {
   @Override
   public synchronized void setWorkingDirectory(final Path dir) {
     String result = makeAbsolute(dir).toUri().getPath();
-    if (!DFSUtil.isValidName(result)) {
+    if (!DFSUtilClient.isValidName(result)) {
       throw new IllegalArgumentException("Invalid DFS directory name " +
           result);
     }
     workingDir = makeAbsolute(dir);
@@ -367,10 +365,10 @@ private Path makeAbsolute(Path f) {
   /**
    * Covert an exception to an IOException.
    *
    * For a non-IOException, wrap it with IOException.
    * For a RemoteException, unwrap it.
    * For an IOException which is not a RemoteException, return it.
    */
   private static IOException toIOException(Exception e) {
     if (!(e instanceof IOException)) {
@@ -413,9 +411,9 @@ private URL getNamenodeURL(String path, String query) throws IOException {
     }
     return url;
   }

   Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
     List<Param<?,?>> authParams = Lists.newArrayList();
     // Skip adding delegation token for token operations because these
     // operations require authentication.
     Token<?> token = null;
@@ -494,11 +492,11 @@ public T run() throws IOException {
   /**
    * Two-step requests redirected to a DN
    *
    * Create/Append:
    * Step 1) Submit a Http request with neither auto-redirect nor data.
    * Step 2) Submit another Http request with the URL from the Location header with data.
    *
    * The reason of having two-step create/append is for preventing clients to
    * send out the data before the redirect. This issue is addressed by the
    * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
@@ -506,7 +504,7 @@ public T run() throws IOException {
    * and Java 6 http client), which do not correctly implement "Expect:
    * 100-continue". The two-step create/append is a temporary workaround for
    * the software library bugs.
    *
    * Open/Checksum
    * Also implements two-step connects for other operations redirected to
    * a DN such as open and checksum
@@ -515,7 +513,7 @@ private HttpURLConnection connect(URL url) throws IOException {
     //redirect hostname and port
     String redirectHost = null;

     // resolve redirects for a DN operation unless already resolved
     if (op.getRedirect() && !redirected) {
       final HttpOpParam.Op redirectOp =
@@ -545,7 +543,7 @@ private HttpURLConnection connect(URL url) throws IOException {
         }
       }
       throw ioe;
     }
   }

   private HttpURLConnection connect(final HttpOpParam.Op op, final URL url)
@@ -557,7 +555,7 @@ private HttpURLConnection connect(final HttpOpParam.Op op, final URL url)
     conn.setInstanceFollowRedirects(false);
     switch (op.getType()) {
     // if not sending a message body for a POST or PUT operation, need
     // to ensure the server/proxy knows this
     case POST:
     case PUT: {
       conn.setDoOutput(true);
@@ -665,21 +663,21 @@ private void shouldRetry(final IOException ioe, final int retry
   abstract class AbstractFsPathRunner<T> extends AbstractRunner<T> {
     private final Path fspath;
     private final Param<?,?>[] parameters;

     AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath,
         Param<?,?>... parameters) {
       super(op, false);
       this.fspath = fspath;
       this.parameters = parameters;
     }

     AbstractFsPathRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
         final Path fspath) {
       super(op, false);
       this.fspath = fspath;
       this.parameters = parameters;
     }

     @Override
     protected URL getUrl() throws IOException {
       if (excludeDatanodes.getValue() != null) {
@@ -700,7 +698,7 @@ class FsPathRunner extends AbstractFsPathRunner<Void> {
     FsPathRunner(Op op, Path fspath, Param<?,?>... parameters) {
       super(op, fspath, parameters);
     }

     @Override
     Void getResponse(HttpURLConnection conn) throws IOException {
       return null;
@@ -715,12 +713,12 @@ abstract class FsPathResponseRunner<T> extends AbstractFsPathRunner<T> {
         Param<?,?>... parameters) {
       super(op, fspath, parameters);
     }

     FsPathResponseRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
         final Path fspath) {
       super(op, parameters, fspath);
     }

     @Override
     final T getResponse(HttpURLConnection conn) throws IOException {
       try {
@@ -743,7 +741,7 @@ final T getResponse(HttpURLConnection conn) throws IOException {
         conn.disconnect();
       }
     }

     abstract T decodeResponse(Map<?,?> json) throws IOException;
   }
@@ -754,7 +752,7 @@ class FsPathBooleanRunner extends FsPathResponseRunner<Boolean> {
     FsPathBooleanRunner(Op op, Path fspath, Param<?,?>... parameters) {
       super(op, fspath, parameters);
     }

     @Override
     Boolean decodeResponse(Map<?,?> json) throws IOException {
       return (Boolean)json.get("boolean");
@@ -766,13 +764,13 @@ Boolean decodeResponse(Map<?,?> json) throws IOException {
    */
   class FsPathOutputStreamRunner extends AbstractFsPathRunner<FSDataOutputStream> {
     private final int bufferSize;

     FsPathOutputStreamRunner(Op op, Path fspath, int bufferSize,
         Param<?,?>... parameters) {
       super(op, fspath, parameters);
       this.bufferSize = bufferSize;
     }

     @Override
     FSDataOutputStream getResponse(final HttpURLConnection conn)
         throws IOException {
@@ -804,7 +802,7 @@ HttpURLConnection getResponse(final HttpURLConnection conn)
       return conn;
     }
   }

   /**
    * Used by open() which tracks the resolved url itself
    */
@@ -918,26 +916,26 @@ public void rename(final Path src, final Path dst,
         new RenameOptionSetParam(options)
     ).run();
   }

   @Override
   public void setXAttr(Path p, String name, byte[] value,
       EnumSet<XAttrSetFlag> flag) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.SETXATTR;
     if (value != null) {
       new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrValueParam(
           XAttrCodec.encodeValue(value, XAttrCodec.HEX)),
           new XAttrSetFlagParam(flag)).run();
     } else {
       new FsPathRunner(op, p, new XAttrNameParam(name),
           new XAttrSetFlagParam(flag)).run();
     }
   }

   @Override
   public byte[] getXAttr(Path p, final String name) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
     return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name),
         new XAttrEncodingParam(XAttrCodec.HEX)) {
       @Override
       byte[] decodeResponse(Map<?, ?> json) throws IOException {
@@ -945,11 +943,11 @@ byte[] decodeResponse(Map<?, ?> json) throws IOException {
       }
     }.run();
   }

   @Override
   public Map<String, byte[]> getXAttrs(Path p) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
     return new FsPathResponseRunner<Map<String, byte[]>>(op, p,
         new XAttrEncodingParam(XAttrCodec.HEX)) {
       @Override
       Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
@@ -957,18 +955,18 @@ Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
       }
     }.run();
   }

   @Override
   public Map<String, byte[]> getXAttrs(Path p, final List<String> names)
       throws IOException {
     Preconditions.checkArgument(names != null && !names.isEmpty(),
         "XAttr names cannot be null or empty.");
     Param<?,?>[] parameters = new Param<?,?>[names.size() + 1];
     for (int i = 0; i < parameters.length - 1; i++) {
       parameters[i] = new XAttrNameParam(names.get(i));
     }
     parameters[parameters.length - 1] = new XAttrEncodingParam(XAttrCodec.HEX);
     final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
     return new FsPathResponseRunner<Map<String, byte[]>>(op, parameters, p) {
       @Override
@@ -977,7 +975,7 @@ Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
       }
     }.run();
   }

   @Override
   public List<String> listXAttrs(Path p) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.LISTXATTRS;
@@ -1057,7 +1055,7 @@ public void setAcl(final Path p, final List<AclEntry> aclSpec)
   }

   @Override
   public Path createSnapshot(final Path path, final String snapshotName)
       throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.CREATESNAPSHOT;
@@ -1111,14 +1109,14 @@ public void setTimes(final Path p, final long mtime, final long atime
   @Override
   public long getDefaultBlockSize() {
-    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
-        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
+    return getConf().getLongBytes(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY,
+        HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
   }

   @Override
   public short getDefaultReplication() {
-    return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
-        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+    return (short)getConf().getInt(HdfsClientConfigKeys.DFS_REPLICATION_KEY,
+        HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT);
   }

   @Override
@@ -1228,7 +1226,7 @@ protected HttpURLConnection connect(final long offset,
       final URL offsetUrl = offset == 0L? url
           : new URL(url + "&" + new OffsetParam(offset));
       return new URLRunner(GetOpParam.Op.OPEN, offsetUrl, resolved).run();
     }
   }

   private static final String OFFSET_PARAM_PREFIX = OffsetParam.NAME + "=";
@@ -1359,7 +1357,7 @@ public synchronized void cancelDelegationToken(final Token<?> token
         new TokenArgumentParam(token.encodeToUrlString())
     ).run();
   }

   @Override
   public BlockLocation[] getFileBlockLocations(final FileStatus status,
       final long offset, final long length) throws IOException {
@@ -1370,7 +1368,7 @@ public BlockLocation[] getFileBlockLocations(final FileStatus status,
   }

   @Override
   public BlockLocation[] getFileBlockLocations(final Path p,
       final long offset, final long length) throws IOException {
     statistics.incrementReadOps(1);
@@ -1379,7 +1377,7 @@ public BlockLocation[] getFileBlockLocations(final Path p,
         new OffsetParam(offset), new LengthParam(length)) {
       @Override
       BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
-        return DFSUtil.locatedBlocks2Locations(
+        return DFSUtilClient.locatedBlocks2Locations(
             JsonUtilClient.toLocatedBlocks(json));
       }
     }.run();
@@ -1408,7 +1406,7 @@ ContentSummary decodeResponse(Map<?,?> json) {
   public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
       ) throws IOException {
     statistics.incrementReadOps(1);
     final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
     return new FsPathResponseRunner<MD5MD5CRC32FileChecksum>(op, p) {
       @Override
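Moving the class into hadoop-hdfs-client means a WebHDFS-only consumer no longer needs the server-side hadoop-hdfs jar. A rough end-to-end sketch through the generic FileSystem API; the namenode host, port, and paths are placeholders, not values taken from this patch:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A webhdfs:// URI resolves to WebHdfsFileSystem; its default port now
    // comes from HdfsClientConfigKeys rather than DFSConfigKeys.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode.example.com:50070"), conf);
    FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt"));
    // Served by the getFileBlockLocations override shown above, which decodes
    // the JSON response via DFSUtilClient.locatedBlocks2Locations.
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation loc : locations) {
      System.out.println(loc);
    }
    fs.close();
  }
}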

View File

@@ -467,6 +467,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8215. Refactor NamenodeFsck#check method. (Takanobu Asanuma
     via szetszwo)

+    HDFS-8052. Move WebHdfsFileSystem into hadoop-hdfs-client. (wheat9)
+
   OPTIMIZATIONS

     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

View File

@@ -48,7 +48,6 @@
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.token.Token;
-import org.apache.htrace.Sampler;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -296,7 +295,7 @@ static BlockStorageLocation[] convertToVolumeBlockLocations(
       List<LocatedBlock> blocks,
       Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
     // Construct the final return value of VolumeBlockLocation[]
-    BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
+    BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
     List<BlockStorageLocation> volumeBlockLocs =
         new ArrayList<BlockStorageLocation>(locations.length);
     for (int i = 0; i < locations.length; i++) {

View File

@@ -917,7 +917,7 @@ public BlockLocation[] getBlockLocations(String src, long start,
     TraceScope scope = getPathTraceScope("getBlockLocations", src);
     try {
       LocatedBlocks blocks = getLocatedBlocks(src, start, length);
-      BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
+      BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
       HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
       for (int i = 0; i < locations.length; i++) {
         hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));

View File

@@ -164,7 +164,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   */
  public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
      "org.apache.hadoop.hdfs.web.AuthFilter".toString();
- public static final String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
+ @Deprecated
+ public static final String DFS_WEBHDFS_USER_PATTERN_KEY =
+     HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY;
+ @Deprecated
  public static final String DFS_WEBHDFS_USER_PATTERN_DEFAULT =
      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
  public static final String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
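The deprecated aliases keep existing server-side references compiling while pointing at the client-side definition. A small sketch under the assumption that both modules are on the classpath (the first reference will only raise a deprecation warning):

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class DeprecatedKeySketch {
  public static void main(String[] args) {
    // Both constants name the same configuration property, so old code keeps
    // working while new code targets the hadoop-hdfs-client key.
    System.out.println(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY);
    System.out.println(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY
        .equals(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY)); // true
  }
}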

View File

@@ -228,37 +228,7 @@ public boolean match(InetSocketAddress s) {
    * names which contain a ":" or "//", or other non-canonical paths.
    */
   public static boolean isValidName(String src) {
-    // Path must be absolute.
-    if (!src.startsWith(Path.SEPARATOR)) {
-      return false;
-    }
-
-    // Check for ".." "." ":" "/"
-    String[] components = StringUtils.split(src, '/');
-    for (int i = 0; i < components.length; i++) {
-      String element = components[i];
-      if (element.equals(".") ||
-          (element.indexOf(":") >= 0) ||
-          (element.indexOf("/") >= 0)) {
-        return false;
-      }
-      // ".." is allowed in path starting with /.reserved/.inodes
-      if (element.equals("..")) {
-        if (components.length > 4
-            && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
-            && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
-          continue;
-        }
-        return false;
-      }
-      // The string may start or end with a /, but not have
-      // "//" in the middle.
-      if (element.isEmpty() && i != components.length - 1 &&
-          i != 0) {
-        return false;
-      }
-    }
-    return true;
+    return DFSUtilClient.isValidName(src);
   }

   /**
@@ -329,7 +299,7 @@ public static String bytes2String(byte[] bytes, int offset, int length) {
   /**
    * Converts a string to a byte array using UTF8 encoding.
    */
   public static byte[] string2Bytes(String str) {
-    return str.getBytes(Charsets.UTF_8);
+    return DFSUtilClient.string2Bytes(str);
   }

   /**
@@ -475,61 +445,6 @@ public static byte[][] bytes2byteArray(byte[] bytes,
       }
     }
     return result;
   }
-
-  /**
-   * Convert a LocatedBlocks to BlockLocations[]
-   * @param blocks a LocatedBlocks
-   * @return an array of BlockLocations
-   */
-  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
-    if (blocks == null) {
-      return new BlockLocation[0];
-    }
-    return locatedBlocks2Locations(blocks.getLocatedBlocks());
-  }
-
-  /**
-   * Convert a List<LocatedBlock> to BlockLocation[]
-   * @param blocks A List<LocatedBlock> to be converted
-   * @return converted array of BlockLocation
-   */
-  public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
-    if (blocks == null) {
-      return new BlockLocation[0];
-    }
-    int nrBlocks = blocks.size();
-    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
-    if (nrBlocks == 0) {
-      return blkLocations;
-    }
-    int idx = 0;
-    for (LocatedBlock blk : blocks) {
-      assert idx < nrBlocks : "Incorrect index";
-      DatanodeInfo[] locations = blk.getLocations();
-      String[] hosts = new String[locations.length];
-      String[] xferAddrs = new String[locations.length];
-      String[] racks = new String[locations.length];
-      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
-        hosts[hCnt] = locations[hCnt].getHostName();
-        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
-        NodeBase node = new NodeBase(xferAddrs[hCnt],
-            locations[hCnt].getNetworkLocation());
-        racks[hCnt] = node.toString();
-      }
-      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
-      String[] cachedHosts = new String[cachedLocations.length];
-      for (int i=0; i<cachedLocations.length; i++) {
-        cachedHosts[i] = cachedLocations[i].getHostName();
-      }
-      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
-          racks,
-          blk.getStartOffset(),
-          blk.getBlockSize(),
-          blk.isCorrupt());
-      idx++;
-    }
-    return blkLocations;
-  }

   /**
    * Return configuration key of format key.suffix1.suffix2...suffixN

View File

@@ -404,7 +404,7 @@ private static ClientProtocol createNNProxyWithClientProtocol(
         HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
         HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
         HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
-        SafeModeException.class);
+        SafeModeException.class.getName());

     final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
     ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(

View File

@@ -25,7 +25,7 @@
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;

 /**
  * Interface that represents the over the wire information
@@ -78,6 +78,6 @@ public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
         isSymlink() ? new Path(getSymlink()) : null,
         (getFullPath(path)).makeQualified(
             defaultUri, null), // fully-qualify path
-        DFSUtil.locatedBlocks2Locations(getBlockLocations()));
+        DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
   }
 }

View File

@@ -29,6 +29,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -69,8 +70,8 @@ public class NameNodeHttpServer {
   private void initWebHdfs(Configuration conf) throws IOException {
     // set user pattern based on configuration file
     UserParam.setUserPattern(conf.get(
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
-        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));

     // add authentication filter for webhdfs
     final String className = conf.get(

View File

@@ -112,7 +112,7 @@ public void testLocatedBlocks2Locations() {
     List<LocatedBlock> ls = Arrays.asList(l1, l2);
     LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);

-    BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
+    BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);

     assertTrue("expected 2 blocks but got " + bs.length,
         bs.length == 2);
@@ -128,7 +128,7 @@ public void testLocatedBlocks2Locations() {
         corruptCount == 1);

     // test an empty location
-    bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
+    bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
     assertEquals(0, bs.length);
   }

View File

@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -282,7 +283,8 @@ public Void run() throws IOException, URISyntaxException {
   @Test(timeout=300000)
   public void testNumericalUserName() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
-    conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
+    conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9" +
+        "._-]*[$]?$");
     final MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     try {