HDFS-7985. WebHDFS should be always enabled. Contributed by Li Lu.
This commit is contained in:
parent 53a28afe29
commit 80278a5f85
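Summary of the change: the `dfs.webhdfs.enabled` switch (DFS_WEBHDFS_ENABLED_KEY / DFS_WEBHDFS_ENABLED_DEFAULT) is removed from DFSConfigKeys, `WebHdfsFileSystem.isEnabled()` is deleted, NameNodeHttpServer registers the WebHDFS authentication filter and Jersey resources unconditionally, and the many test setups that opted in to WebHDFS drop their `setBoolean` calls. Below is a minimal, hypothetical sketch (not part of this commit) of what that means for client/test code, assuming a MiniDFSCluster environment with hadoop-hdfs on the classpath; class and path names in the sketch are illustrative only.

```java
import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WebHdfsAlwaysOnSketch {  // hypothetical class, for illustration only
  public static void main(String[] args) throws Exception {
    // After HDFS-7985 there is no need for conf.setBoolean("dfs.webhdfs.enabled", true):
    // the NameNode HTTP server always serves /webhdfs/v1.
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // Point a webhdfs:// FileSystem at the NameNode's HTTP address.
      InetSocketAddress http = cluster.getNameNode().getHttpAddress();
      FileSystem webhdfs = FileSystem.get(
          URI.create("webhdfs://" + http.getHostName() + ":" + http.getPort()), conf);
      webhdfs.mkdirs(new Path("/sketch"));
      System.out.println(webhdfs.exists(new Path("/sketch")));  // expected: true
    } finally {
      cluster.shutdown();
    }
  }
}
```

Since both the key and the code that read it are removed by this commit, setting `dfs.webhdfs.enabled` no longer controls anything, so the sketch simply omits it.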
@@ -18,6 +18,8 @@ Trunk (Unreleased)
     option since it may incorrectly finalize an ongoing rolling upgrade.
     (Kai Sasaki via szetszwo)
 
+    HDFS-7985. WebHDFS should be always enabled. (Li Lu via wheat9)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@@ -227,8 +227,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter";
   public static final String DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName();
-  public static final String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
-  public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
   public static final String DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
   public static final String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
@@ -67,30 +67,28 @@ public class NameNodeHttpServer {
   }
 
   private void initWebHdfs(Configuration conf) throws IOException {
-    if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
-      // set user pattern based on configuration file
-      UserParam.setUserPattern(conf.get(
-          DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
-          DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+    // set user pattern based on configuration file
+    UserParam.setUserPattern(conf.get(
+        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
 
-      // add authentication filter for webhdfs
-      final String className = conf.get(
-          DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
-          DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
-      final String name = className;
+    // add authentication filter for webhdfs
+    final String className = conf.get(
+        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
+        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
+    final String name = className;
 
-      final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
-      Map<String, String> params = getAuthFilterParams(conf);
-      HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
-          params, new String[] { pathSpec });
-      HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
-          + ")");
+    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+    Map<String, String> params = getAuthFilterParams(conf);
+    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
+        params, new String[] { pathSpec });
+    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
+        + ")");
 
-      // add webhdfs packages
-      httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
-          .getPackage().getName() + ";" + Param.class.getPackage().getName(),
-          pathSpec);
-    }
+    // add webhdfs packages
+    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
+        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
+        pathSpec);
   }
 
   /**
@@ -211,13 +211,6 @@ public URI getCanonicalUri() {
     return super.getCanonicalUri();
   }
 
-  /** Is WebHDFS enabled in conf? */
-  public static boolean isEnabled(final Configuration conf, final Log log) {
-    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
-        DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
-    return b;
-  }
-
   TokenSelector<DelegationTokenIdentifier> tokenSelector =
       new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(getTokenKind()){};
 
@@ -128,22 +128,6 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
                               "nmdCreate: new Configuration");
         goto error;
     }
-    if (conf->webhdfsEnabled) {
-        jthr = newJavaStr(env, DFS_WEBHDFS_ENABLED_KEY, &jconfStr);
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                  "nmdCreate: new String");
-            goto error;
-        }
-        jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
-                            "setBoolean", "(Ljava/lang/String;Z)V",
-                            jconfStr, conf->webhdfsEnabled);
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                  "nmdCreate: Configuration::setBoolean");
-            goto error;
-        }
-    }
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
             "nmdCreate: Configuration::setBoolean");
@ -85,7 +85,6 @@ protected IOException unwrapException(IOException e) {
|
||||
@BeforeClass
|
||||
public static void beforeClassSetup() throws Exception {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
|
||||
conf.set(FsPermission.UMASK_LABEL, "000");
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
|
||||
cluster = new MiniDFSCluster.Builder(conf).build();
|
||||
|
@@ -503,7 +503,6 @@ public void testFileChecksum() throws Exception {
     RAN.setSeed(seed);
 
     final Configuration conf = getTestConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
 
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem hdfs = cluster.getFileSystem();
@@ -851,8 +851,7 @@ public void testBlockAllocationAdjustsUsageConservatively()
     Configuration conf = new HdfsConfiguration();
     final int BLOCK_SIZE = 6 * 1024;
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-    MiniDFSCluster cluster =
+    MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
@@ -913,7 +912,6 @@ public void testMultipleFilesSmallerThanOneBlock() throws Exception {
     Configuration conf = new HdfsConfiguration();
     final int BLOCK_SIZE = 6 * 1024;
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     // Make it relinquish locks. When run serially, the result should
     // be identical.
     conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
@@ -67,7 +67,6 @@ public class TestDelegationToken {
   @Before
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
-    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
     config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
@@ -97,7 +97,6 @@ private static void configureSuperUserIPAddresses(Configuration conf,
   @BeforeClass
   public static void setUp() throws Exception {
     config = new HdfsConfiguration();
-    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(
@@ -114,7 +114,6 @@ public void setupCluster() throws Exception {
     final long precision = 1L;
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
     util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
         setNumFiles(20).build();
@@ -193,7 +193,6 @@ public void testHttpBindHostKey() throws IOException {
 
   private static void setupSsl() throws Exception {
     Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
@@ -71,7 +71,6 @@ protected FileSystem createFileSystem() throws Exception {
   @BeforeClass
   public static void setupCluster() {
     final Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
@@ -52,7 +52,6 @@ public class TestHttpsFileSystem {
   @BeforeClass
   public static void setUp() throws Exception {
     conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
@@ -332,18 +332,6 @@ public void testCreateWithNoDN() throws Exception {
       }
     }
   }
-
-  /**
-   * WebHdfs should be enabled by default after HDFS-5532
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testWebHdfsEnabledByDefault() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    Assert.assertTrue(conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
-        false));
-  }
 
   /**
    * Test snapshot creation through WebHdfs
@@ -60,7 +60,6 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
   private UserGroupInformation ugi;
 
   static {
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       cluster.waitActive();
@@ -205,7 +205,6 @@ public void testLazyTokenFetchForSWebhdfs() throws Exception {
     String keystoresDir;
     String sslConfDir;
 
-    clusterConf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     clusterConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
     clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
@@ -71,8 +71,6 @@ private static void setupCluster(final int nNameNodes, final int nDataNodes)
       throws Exception {
     LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);
 
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
         .numDataNodes(nDataNodes)
@@ -42,7 +42,6 @@ public class WebHdfsTestUtil {
 
   public static Configuration createConf() {
     final Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     return conf;
   }
 