HDFS-3263. HttpFS should read HDFS config from Hadoop site.xml files (tucu)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1327627 13f79535-47bb-0310-9956-ffa450edef68
parent 32d511065a
commit 8bda086d04
@@ -170,7 +170,7 @@ private <T> T fsExecute(Principal user, String doAs, FileSystemAccess.FileSystem
     throws IOException, FileSystemAccessException {
     String hadoopUser = getEffectiveUser(user, doAs);
     FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
-    Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration();
+    Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
     return fsAccess.execute(hadoopUser, conf, executor);
   }

@@ -194,7 +194,7 @@ private <T> T fsExecute(Principal user, String doAs, FileSystemAccess.FileSystem
   private FileSystem createFileSystem(Principal user, String doAs) throws IOException, FileSystemAccessException {
     String hadoopUser = getEffectiveUser(user, doAs);
     FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
-    Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getDefaultConfiguration();
+    Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
     FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf);
     FileSystemReleaseFilter.setFileSystem(fs);
     return fs;
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.http.server;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.lib.server.ServerException;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.lib.servlet.ServerWebApp;

@@ -29,8 +30,9 @@

 /**
  * Bootstrap class that manages the initialization and destruction of the
- * HttpFSServer server, it is a <code>javax.servlet.ServletContextListener</code>
- * implementation that is wired in HttpFSServer's WAR <code>WEB-INF/web.xml</code>.
+ * HttpFSServer server, it is a <code>javax.servlet.ServletContextListener
+ * </code> implementation that is wired in HttpFSServer's WAR
+ * <code>WEB-INF/web.xml</code>.
  * <p/>
  * It provides acces to the server context via the singleton {@link #get}.
  * <p/>

@@ -38,7 +40,8 @@
  * with <code>httpfs.</code>.
  */
 public class HttpFSServerWebApp extends ServerWebApp {
-  private static final Logger LOG = LoggerFactory.getLogger(HttpFSServerWebApp.class);
+  private static final Logger LOG =
+    LoggerFactory.getLogger(HttpFSServerWebApp.class);

   /**
    * Server name and prefix for all configuration properties.

@@ -67,8 +70,8 @@ public HttpFSServerWebApp() throws IOException {
   /**
    * Constructor used for testing purposes.
    */
-  protected HttpFSServerWebApp(String homeDir, String configDir, String logDir, String tempDir,
-                               Configuration config) {
+  protected HttpFSServerWebApp(String homeDir, String configDir, String logDir,
+                               String tempDir, Configuration config) {
     super(NAME, homeDir, configDir, logDir, tempDir, config);
   }

@@ -80,9 +83,11 @@ public HttpFSServerWebApp(String homeDir, Configuration config) {
   }

   /**
-   * Initializes the HttpFSServer server, loads configuration and required services.
+   * Initializes the HttpFSServer server, loads configuration and required
+   * services.
    *
-   * @throws ServerException thrown if HttpFSServer server could not be initialized.
+   * @throws ServerException thrown if HttpFSServer server could not be
+   * initialized.
    */
   @Override
   public void init() throws ServerException {

@@ -93,7 +98,8 @@ public void init() throws ServerException {
     SERVER = this;
     adminGroup = getConfig().get(getPrefixedName(CONF_ADMIN_GROUP), "admin");
     LOG.info("Connects to Namenode [{}]",
-             get().get(FileSystemAccess.class).getDefaultConfiguration().get("fs.default.name"));
+             get().get(FileSystemAccess.class).getFileSystemConfiguration().
+               get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
   }

   /**

@@ -106,7 +112,8 @@ public void destroy() {
   }

   /**
-   * Returns HttpFSServer server singleton, configuration and services are accessible through it.
+   * Returns HttpFSServer server singleton, configuration and services are
+   * accessible through it.
    *
    * @return the HttpFSServer server singleton.
    */
@@ -37,6 +37,6 @@ public <T> T execute(String user, Configuration conf, FileSystemExecutor<T> exec

   public void releaseFileSystem(FileSystem fs) throws IOException;

-  public Configuration getDefaultConfiguration();
+  public Configuration getFileSystemConfiguration();

 }
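The interface rename above is the contract change the rest of this patch implements: callers must obtain the configuration from getFileSystemConfiguration() rather than assembling their own, because the service now rejects configurations it did not create itself (error H04 below). A minimal caller-side sketch, assuming an already-initialized FileSystemAccess service; the mkdirs operation and path are illustrative, not part of the patch:

// Caller-side sketch of the renamed API. The configuration must come from
// the service; a hand-assembled Configuration is now rejected with H04.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;

public class FileSystemAccessUsageSketch {
  public static void mkdirsExample(FileSystemAccess fsAccess, String user)
      throws IOException, FileSystemAccessException {
    // Obtain the service-created configuration instead of building one.
    Configuration conf = fsAccess.getFileSystemConfiguration();
    fsAccess.execute(user, conf, new FileSystemAccess.FileSystemExecutor<Void>() {
      @Override
      public Void execute(FileSystem fs) throws IOException {
        fs.mkdirs(new Path("/tmp/example"));  // illustrative operation
        return null;
      }
    });
  }
}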
@@ -26,12 +26,14 @@ public enum ERROR implements XException.ERROR {
     H01("Service property [{0}] not defined"),
     H02("Kerberos initialization failed, {0}"),
     H03("FileSystemExecutor error, {0}"),
-    H04("JobClientExecutor error, {0}"),
+    H04("Invalid configuration, it has not be created by the FileSystemAccessService"),
     H05("[{0}] validation failed, {1}"),
     H06("Property [{0}] not defined in configuration object"),
     H07("[{0}] not healthy, {1}"),
-    H08(""),
-    H09("Invalid FileSystemAccess security mode [{0}]");
+    H08("{0}"),
+    H09("Invalid FileSystemAccess security mode [{0}]"),
+    H10("Hadoop config directory not found [{0}]"),
+    H11("Could not load Hadoop config files, {0}");

     private String template;
@@ -19,7 +19,9 @@
 package org.apache.hadoop.lib.service.hadoop;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.lib.server.BaseService;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.FileSystemAccess;

@@ -32,6 +34,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;

@@ -54,9 +57,11 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc

   public static final String NAME_NODE_WHITELIST = "name.node.whitelist";

-  private static final String HADOOP_CONF_PREFIX = "conf:";
+  public static final String HADOOP_CONF_DIR = "config.dir";

-  private static final String NAME_NODE_PROPERTY = "fs.default.name";
+  private static final String[] HADOOP_CONF_FILES = {"core-site.xml", "hdfs-site.xml"};
+
+  private static final String FILE_SYSTEM_SERVICE_CREATED = "FileSystemAccessService.created";

   public FileSystemAccessService() {
     super(PREFIX);

@@ -102,26 +107,40 @@ protected void init() throws ServiceException {
       throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
     }

-    serviceHadoopConf = new Configuration(false);
-    for (Map.Entry entry : getServiceConfig()) {
-      String name = (String) entry.getKey();
-      if (name.startsWith(HADOOP_CONF_PREFIX)) {
-        name = name.substring(HADOOP_CONF_PREFIX.length());
-        String value = (String) entry.getValue();
-        serviceHadoopConf.set(name, value);
-      }
+    String hadoopConfDirProp = getServiceConfig().get(HADOOP_CONF_DIR, getServer().getConfigDir());
+    File hadoopConfDir = new File(hadoopConfDirProp).getAbsoluteFile();
+    if (hadoopConfDir == null) {
+      hadoopConfDir = new File(getServer().getConfigDir()).getAbsoluteFile();
+    }
+    if (!hadoopConfDir.exists()) {
+      throw new ServiceException(FileSystemAccessException.ERROR.H10, hadoopConfDir);
     }
-    setRequiredServiceHadoopConf(serviceHadoopConf);
+    try {
+      serviceHadoopConf = loadHadoopConf(hadoopConfDir);
+    } catch (IOException ex) {
+      throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex);
+    }

-    LOG.debug("FileSystemAccess default configuration:");
+    LOG.debug("FileSystemAccess FileSystem configuration:");
     for (Map.Entry entry : serviceHadoopConf) {
       LOG.debug("  {} = {}", entry.getKey(), entry.getValue());
     }
+    setRequiredServiceHadoopConf(serviceHadoopConf);

     nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
   }

+  private Configuration loadHadoopConf(File dir) throws IOException {
+    Configuration hadoopConf = new Configuration(false);
+    for (String file : HADOOP_CONF_FILES) {
+      File f = new File(dir, file);
+      if (f.exists()) {
+        hadoopConf.addResource(new Path(f.getAbsolutePath()));
+      }
+    }
+    return hadoopConf;
+  }
+
   @Override
   public void postInit() throws ServiceException {
     super.postInit();
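The loadHadoopConf method added above is the heart of the change: instead of building a Hadoop configuration out of conf:-prefixed service properties, the service now overlays whatever core-site.xml and hdfs-site.xml it finds in the configured directory. A self-contained sketch of the same loading pattern using only stock Configuration APIs; the directory path in main is an illustrative assumption:

// Standalone sketch of directory-based loading: overlay core-site.xml and
// hdfs-site.xml from a directory onto an empty Configuration.
import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class LoadHadoopConfSketch {
  private static final String[] HADOOP_CONF_FILES =
    {"core-site.xml", "hdfs-site.xml"};

  public static Configuration load(File dir) {
    // 'false' skips Hadoop's built-in default resources, so the result
    // contains only what the site files define.
    Configuration hadoopConf = new Configuration(false);
    for (String file : HADOOP_CONF_FILES) {
      File f = new File(dir, file);
      if (f.exists()) {
        // Resources added later override earlier ones key by key.
        hadoopConf.addResource(new Path(f.getAbsolutePath()));
      }
    }
    return hadoopConf;
  }

  public static void main(String[] args) {
    Configuration conf = load(new File("/etc/hadoop/conf"));  // illustrative path
    System.out.println(conf.get("fs.default.name"));
  }
}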
@@ -166,17 +185,6 @@ protected void setRequiredServiceHadoopConf(Configuration conf) {
     conf.set("fs.hdfs.impl.disable.cache", "true");
   }

-  protected Configuration createHadoopConf(Configuration conf) {
-    Configuration hadoopConf = new Configuration();
-    ConfigurationUtils.copy(serviceHadoopConf, hadoopConf);
-    ConfigurationUtils.copy(conf, hadoopConf);
-    return hadoopConf;
-  }
-
-  protected Configuration createNameNodeConf(Configuration conf) {
-    return createHadoopConf(conf);
-  }
-
   protected FileSystem createFileSystem(Configuration namenodeConf) throws IOException {
     return FileSystem.get(namenodeConf);
   }

@@ -202,16 +210,22 @@ public <T> T execute(String user, final Configuration conf, final FileSystemExec
     Check.notEmpty(user, "user");
     Check.notNull(conf, "conf");
     Check.notNull(executor, "executor");
-    if (conf.get(NAME_NODE_PROPERTY) == null || conf.getTrimmed(NAME_NODE_PROPERTY).length() == 0) {
-      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06, NAME_NODE_PROPERTY);
+    if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
+      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
+    }
+    if (conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) == null ||
+        conf.getTrimmed(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY).length() == 0) {
+      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H06,
+                                          CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
     }
     try {
-      validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
+      validateNamenode(
+        new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).
+          getAuthority());
       UserGroupInformation ugi = getUGI(user);
       return ugi.doAs(new PrivilegedExceptionAction<T>() {
         public T run() throws Exception {
-          Configuration namenodeConf = createNameNodeConf(conf);
-          FileSystem fs = createFileSystem(namenodeConf);
+          FileSystem fs = createFileSystem(conf);
           Instrumentation instrumentation = getServer().get(Instrumentation.class);
           Instrumentation.Cron cron = instrumentation.createCron();
           try {

@@ -236,13 +250,16 @@ public FileSystem createFileSystemInternal(String user, final Configuration conf
     throws IOException, FileSystemAccessException {
     Check.notEmpty(user, "user");
     Check.notNull(conf, "conf");
+    if (!conf.getBoolean(FILE_SYSTEM_SERVICE_CREATED, false)) {
+      throw new FileSystemAccessException(FileSystemAccessException.ERROR.H04);
+    }
     try {
-      validateNamenode(new URI(conf.get(NAME_NODE_PROPERTY)).getAuthority());
+      validateNamenode(
+        new URI(conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)).getAuthority());
       UserGroupInformation ugi = getUGI(user);
       return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
         public FileSystem run() throws Exception {
-          Configuration namenodeConf = createNameNodeConf(conf);
-          return createFileSystem(namenodeConf);
+          return createFileSystem(conf);
         }
       });
     } catch (IOException ex) {

@@ -267,11 +284,11 @@ public void releaseFileSystem(FileSystem fs) throws IOException {
     closeFileSystem(fs);
   }

-
   @Override
-  public Configuration getDefaultConfiguration() {
-    Configuration conf = new Configuration(false);
+  public Configuration getFileSystemConfiguration() {
+    Configuration conf = new Configuration(true);
     ConfigurationUtils.copy(serviceHadoopConf, conf);
+    conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true);
     return conf;
   }
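getFileSystemConfiguration() above stamps every configuration it hands out with the FILE_SYSTEM_SERVICE_CREATED marker, which execute() and createFileSystemInternal() check before doing any work. A minimal sketch of this marker-property pattern in isolation; the class and property names here are illustrative, while the real service uses "FileSystemAccessService.created" as shown above:

// Minimal sketch of the marker-property pattern: the service stamps
// configurations it creates, and entry points reject untagged ones.
import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class TaggedConfSketch {
  // Illustrative name; the patch uses "FileSystemAccessService.created".
  private static final String CREATED_MARKER = "example.service.created";

  private final Configuration serviceConf;

  public TaggedConfSketch(Configuration serviceConf) {
    this.serviceConf = serviceConf;
  }

  public Configuration getFileSystemConfiguration() {
    Configuration conf = new Configuration(true);
    for (Map.Entry<String, String> entry : serviceConf) {
      conf.set(entry.getKey(), entry.getValue());  // copy service settings
    }
    conf.setBoolean(CREATED_MARKER, true);  // stamp the configuration
    return conf;
  }

  public void checkCreatedByService(Configuration conf) {
    if (!conf.getBoolean(CREATED_MARKER, false)) {
      // Mirrors the intent of error H04 in the patch.
      throw new IllegalArgumentException(
          "configuration was not created by this service");
    }
  }
}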
@@ -153,29 +153,6 @@
     </description>
   </property>

-  <!-- FileSystemAccess Namenode Configuration -->
-
-  <property>
-    <name>namenode.hostname</name>
-    <value>localhost</value>
-    <description>
-      The HDFS Namenode host the httpfs server connects to perform file
-      system operations.
-
-      This property is only used to resolve other properties within this
-      configuration file.
-    </description>
-  </property>
-
-  <property>
-    <name>httpfs.hadoop.conf:fs.default.name</name>
-    <value>hdfs://${namenode.hostname}:8020</value>
-    <description>
-      The HDFS Namenode URI the httpfs server connects to perform file
-      system operations.
-    </description>
-  </property>
-
   <!-- FileSystemAccess Namenode Security Configuration -->

   <property>

@@ -206,12 +183,4 @@
     </description>
   </property>

-  <property>
-    <name>httpfs.hadoop.conf:dfs.namenode.kerberos.principal</name>
-    <value>hdfs/${namenode.hostname}@${kerberos.realm}</value>
-    <description>
-      The HDFS Namenode Kerberos principal.
-    </description>
-  </property>
-
 </configuration>
@@ -37,13 +37,13 @@ Hadoop HDFS over HTTP ${project.version} - Server Setup

 * Configure HttpFS

-  Edit the <<<httpfs-${project.version}/conf/httpfs-site.xml>>> file and
-  set the <<<httpfs.fsAccess.conf:fs.default.name>>> property to the HDFS
-  Namenode URI. For example:
+  By default, HttpFS assumes that Hadoop configuration files
+  (<<<core-site.xml & hdfs-site.xml>>>) are in the HttpFS
+  configuration directory.

-+---+
-httpfs.fsAccess.conf:fs.default.name=hdfs://localhost:8021
-+---+
+  If this is not the case, add to the <<<httpfs-site.xml>>> file the
+  <<<httpfs.hadoop.config.dir>>> property set to the location
+  of the Hadoop configuration directory.

 * Configure Hadoop
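The tests in this patch stage exactly that layout programmatically. A hedged sketch of generating an httpfs-site.xml that points HttpFS at a non-default Hadoop configuration directory, mirroring what the tests below do; all paths are illustrative:

// Sketch: write an httpfs-site.xml that sets the new property introduced
// by this change. Paths are assumptions for the example.
import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;

public class WriteHttpfsSiteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false);
    // Where core-site.xml and hdfs-site.xml live, if not in the HttpFS
    // configuration directory.
    conf.set("httpfs.hadoop.config.dir", "/etc/hadoop/conf");  // illustrative path
    File httpfsSite = new File("conf", "httpfs-site.xml");     // illustrative location
    OutputStream os = new FileOutputStream(httpfsSite);
    conf.writeXml(os);
    os.close();
  }
}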
@@ -53,11 +53,11 @@ httpfs.fsAccess.conf:fs.default.name=hdfs://localhost:8021
 +---+
   ...
   <property>
-    <name>fsAccess.proxyuser.#HTTPFSUSER#.hosts</name>
+    <name>hadoop.proxyuser.#HTTPFSUSER#.hosts</name>
     <value>httpfs-host.foo.com</value>
   </property>
   <property>
-    <name>fsAccess.proxyuser.#HTTPFSUSER#.groups</name>
+    <name>hadoop.proxyuser.#HTTPFSUSER#.groups</name>
     <value>*</value>
   </property>
   ...
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.http.client;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;

@@ -70,16 +71,24 @@ private void createHttpFSServer() throws Exception {
     w.write("secret");
     w.close();

-    String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name");
+    //HDFS configuration
+    String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
     Configuration conf = new Configuration(false);
-    conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName);
-    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", HadoopUsersConfTestHelper
-      .getHadoopProxyUserGroups());
-    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", HadoopUsersConfTestHelper
-      .getHadoopProxyUserHosts());
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+    File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml");
+    OutputStream os = new FileOutputStream(hdfsSite);
+    conf.writeXml(os);
+    os.close();
+
+    //HTTPFS configuration
+    conf = new Configuration(false);
+    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
+             HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
+    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
+             HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
     conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
-    File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
-    OutputStream os = new FileOutputStream(hoopSite);
+    File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
+    os = new FileOutputStream(httpfsSite);
     conf.writeXml(os);
     os.close();
@@ -20,10 +20,12 @@

 import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.lib.service.security.DummyGroupMapping;
+import org.apache.hadoop.lib.server.Service;
+import org.apache.hadoop.lib.server.ServiceException;
+import org.apache.hadoop.lib.service.Groups;
 import org.apache.hadoop.test.HFSTestCase;
 import org.apache.hadoop.test.HadoopUsersConfTestHelper;
 import org.apache.hadoop.test.TestDir;

@@ -40,12 +42,15 @@
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileWriter;
+import java.io.IOException;
 import java.io.InputStreamReader;
+import java.io.OutputStream;
 import java.io.Writer;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.text.MessageFormat;
+import java.util.Arrays;
+import java.util.List;

 public class TestHttpFSServer extends HFSTestCase {
@@ -54,12 +59,48 @@ public class TestHttpFSServer extends HFSTestCase {
   @TestJetty
   public void server() throws Exception {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
-    Configuration hoopConf = new Configuration(false);
-    HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, hoopConf);
+
+    Configuration httpfsConf = new Configuration(false);
+    HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
     server.init();
     server.destroy();
   }

+  public static class MockGroups implements Service,Groups {
+
+    @Override
+    public void init(org.apache.hadoop.lib.server.Server server) throws ServiceException {
+    }
+
+    @Override
+    public void postInit() throws ServiceException {
+    }
+
+    @Override
+    public void destroy() {
+    }
+
+    @Override
+    public Class[] getServiceDependencies() {
+      return new Class[0];
+    }
+
+    @Override
+    public Class getInterface() {
+      return Groups.class;
+    }
+
+    @Override
+    public void serverStatusChange(org.apache.hadoop.lib.server.Server.Status oldStatus,
+                                   org.apache.hadoop.lib.server.Server.Status newStatus) throws ServiceException {
+    }
+
+    @Override
+    public List<String> getGroups(String user) throws IOException {
+      return Arrays.asList(HadoopUsersConfTestHelper.getHadoopUserGroups(user));
+    }
+
+  }
+
   private void createHttpFSServer() throws Exception {
     File homeDir = TestDirHelper.getTestDir();
     Assert.assertTrue(new File(homeDir, "conf").mkdir());
@@ -72,13 +113,29 @@ private void createHttpFSServer() throws Exception {
     w.write("secret");
     w.close();

-    String fsDefaultName = TestHdfsHelper.getHdfsConf().get("fs.default.name");
+    //HDFS configuration
+    File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
+    hadoopConfDir.mkdirs();
+    String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
     Configuration conf = new Configuration(false);
-    conf.set("httpfs.hadoop.conf:fs.default.name", fsDefaultName);
-    conf.set("httpfs.groups." + CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, DummyGroupMapping.class.getName());
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+    File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
+    OutputStream os = new FileOutputStream(hdfsSite);
+    conf.writeXml(os);
+    os.close();
+
+    //HTTPFS configuration
+    conf = new Configuration(false);
+    conf.set("httpfs.services.ext", MockGroups.class.getName());
+    conf.set("httpfs.admin.group", HadoopUsersConfTestHelper.
+      getHadoopUserGroups(HadoopUsersConfTestHelper.getHadoopUsers()[0])[0]);
+    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
+             HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
+    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
+             HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
     conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
-    File hoopSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
-    OutputStream os = new FileOutputStream(hoopSite);
+    File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
+    os = new FileOutputStream(httpfsSite);
     conf.writeXml(os);
     os.close();
@@ -103,7 +160,8 @@ public void instrumentation() throws Exception {
     Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);

     url = new URL(TestJettyHelper.getJettyURL(),
-                  MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "root"));
+                  MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
+                                       HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
     Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
     BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));

@@ -112,7 +170,8 @@ public void instrumentation() throws Exception {
     Assert.assertTrue(line.contains("\"counters\":{"));

     url = new URL(TestJettyHelper.getJettyURL(),
-                  MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation", "root"));
+                  MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
+                                       HadoopUsersConfTestHelper.getHadoopUsers()[0]));
     conn = (HttpURLConnection) url.openConnection();
     Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
   }
@@ -20,6 +20,7 @@

 import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.lib.server.Server;

@@ -34,13 +35,32 @@
 import org.apache.hadoop.test.TestHdfs;
 import org.apache.hadoop.test.TestHdfsHelper;
 import org.apache.hadoop.util.StringUtils;
+import org.junit.Before;
 import org.junit.Test;

 import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.util.Arrays;

 public class TestFileSystemAccessService extends HFSTestCase {

+  private void createHadoopConf(Configuration hadoopConf) throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+    File hdfsSite = new File(dir, "hdfs-site.xml");
+    OutputStream os = new FileOutputStream(hdfsSite);
+    hadoopConf.writeXml(os);
+    os.close();
+  }
+
+  @Before
+  public void createHadoopConf() throws Exception {
+    Configuration hadoopConf = new Configuration(false);
+    hadoopConf.set("foo", "FOO");
+    createHadoopConf(hadoopConf);
+  }
+
   @Test
   @TestDir
   public void simpleSecurity() throws Exception {
@@ -124,7 +144,7 @@ public void serviceHadoopConf() throws Exception {
                                                      FileSystemAccessService.class.getName()));
     Configuration conf = new Configuration(false);
     conf.set("server.services", services);
-    conf.set("server.hadoop.conf:foo", "FOO");
+
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
@@ -132,6 +152,32 @@ public void serviceHadoopConf() throws Exception {
     server.destroy();
   }

+  @Test
+  @TestDir
+  public void serviceHadoopConfCustomDir() throws Exception {
+    String dir = TestDirHelper.getTestDir().getAbsolutePath();
+    String hadoopConfDir = new File(dir, "confx").getAbsolutePath();
+    new File(hadoopConfDir).mkdirs();
+    String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
+                                                          FileSystemAccessService.class.getName()));
+    Configuration conf = new Configuration(false);
+    conf.set("server.services", services);
+    conf.set("server.hadoop.config.dir", hadoopConfDir);
+
+    File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
+    OutputStream os = new FileOutputStream(hdfsSite);
+    Configuration hadoopConf = new Configuration(false);
+    hadoopConf.set("foo", "BAR");
+    hadoopConf.writeXml(os);
+    os.close();
+
+    Server server = new Server("server", dir, dir, dir, dir, conf);
+    server.init();
+    FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
+    Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "BAR");
+    server.destroy();
+  }
+
   @Test
   @TestDir
   public void inWhitelists() throws Exception {
@@ -188,12 +234,17 @@ public void createFileSystem() throws Exception {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
     String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
                                                           FileSystemAccessService.class.getName()));
+
+    Configuration hadoopConf = new Configuration(false);
+    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+    createHadoopConf(hadoopConf);
+
     Configuration conf = new Configuration(false);
     conf.set("server.services", services);
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     FileSystemAccess hadoop = server.get(FileSystemAccess.class);
-    FileSystem fs = hadoop.createFileSystem("u", TestHdfsHelper.getHdfsConf());
+    FileSystem fs = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
     Assert.assertNotNull(fs);
     fs.mkdirs(new Path("/tmp/foo"));
     hadoop.releaseFileSystem(fs);
@@ -214,6 +265,11 @@ public void fileSystemExecutor() throws Exception {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
     String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
                                                           FileSystemAccessService.class.getName()));
+
+    Configuration hadoopConf = new Configuration(false);
+    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+    createHadoopConf(hadoopConf);
+
     Configuration conf = new Configuration(false);
     conf.set("server.services", services);
     Server server = new Server("server", dir, dir, dir, dir, conf);
@@ -222,7 +278,7 @@ public void fileSystemExecutor() throws Exception {

     final FileSystem fsa[] = new FileSystem[1];

-    hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor<Void>() {
+    hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor<Void>() {
       @Override
       public Void execute(FileSystem fs) throws IOException {
         fs.mkdirs(new Path("/tmp/foo"));
@@ -248,14 +304,18 @@ public void fileSystemExecutorNoNameNode() throws Exception {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
     String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
                                                           FileSystemAccessService.class.getName()));
+    Configuration hadoopConf = new Configuration(false);
+    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+    createHadoopConf(hadoopConf);
+
     Configuration conf = new Configuration(false);
     conf.set("server.services", services);
     Server server = new Server("server", dir, dir, dir, dir, conf);
     server.init();
     FileSystemAccess fsAccess = server.get(FileSystemAccess.class);

-    Configuration hdfsConf = TestHdfsHelper.getHdfsConf();
-    hdfsConf.set("fs.default.name", "");
+    Configuration hdfsConf = fsAccess.getFileSystemConfiguration();
+    hdfsConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "");
     fsAccess.execute("u", hdfsConf, new FileSystemAccess.FileSystemExecutor<Void>() {
       @Override
       public Void execute(FileSystem fs) throws IOException {
@@ -271,6 +331,11 @@ public void fileSystemExecutorException() throws Exception {
     String dir = TestDirHelper.getTestDir().getAbsolutePath();
     String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(),
                                                           FileSystemAccessService.class.getName()));
+
+    Configuration hadoopConf = new Configuration(false);
+    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
+    createHadoopConf(hadoopConf);
+
     Configuration conf = new Configuration(false);
     conf.set("server.services", services);
     Server server = new Server("server", dir, dir, dir, dir, conf);
@@ -279,7 +344,7 @@ public void fileSystemExecutorException() throws Exception {

     final FileSystem fsa[] = new FileSystem[1];
     try {
-      hadoop.execute("u", TestHdfsHelper.getHdfsConf(), new FileSystemAccess.FileSystemExecutor<Void>() {
+      hadoop.execute("u", hadoop.getFileSystemConfiguration(), new FileSystemAccess.FileSystemExecutor<Void>() {
         @Override
         public Void execute(FileSystem fs) throws IOException {
           fsa[0] = fs;
@@ -145,7 +145,12 @@ public static String[] getHadoopUsers() {
    */
   public static String[] getHadoopUserGroups(String user) {
     if (getHadoopUsers() == DEFAULT_USERS) {
+      for (String defaultUser : DEFAULT_USERS) {
+        if (defaultUser.equals(user)) {
           return DEFAULT_USERS_GROUP;
+        }
+      }
+      return new String[0];
     } else {
       String groups = System.getProperty(HADOOP_USER_PREFIX + user);
       return (groups != null) ? groups.split(",") : new String[0];
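The helper now resolves groups only for users actually present in DEFAULT_USERS; any other name yields an empty array instead of the default group. A small sketch of the resulting behavior; "stranger" is an illustrative user name, not one of the helper's defaults:

// Sketch of the behavior change above, assuming the built-in default user
// set is active (no test.hadoop.* system properties set).
import org.apache.hadoop.test.HadoopUsersConfTestHelper;

public class UserGroupsSketch {
  public static void main(String[] args) {
    String[] groups = HadoopUsersConfTestHelper.getHadoopUserGroups("stranger");
    System.out.println(groups.length);  // expected: 0 for an unknown user
  }
}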
@@ -385,6 +385,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3294. Fix code indentation in NamenodeWebHdfsMethods and
     DatanodeWebHdfsMethods. (szetszwo)

+    HDFS-3263. HttpFS should read HDFS config from Hadoop site.xml files (tucu)
+
   OPTIMIZATIONS

     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)