HDFS-2604. Add a log message to show if WebHDFS is enabled and a configuration section in the forrest doc.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1208140 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2011-11-29 23:38:59 +00:00
parent 03e8ec7faf
commit 0398a9e88d
7 changed files with 56 additions and 8 deletions

View File

@@ -158,6 +158,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2587. Add apt doc for WebHDFS REST API. (szetszwo)
HDFS-2604. Add a log message to show if WebHDFS is enabled and a
configuration section in the forrest doc. (szetszwo)
OPTIMIZATIONS
HDFS-2130. Switch default checksum to CRC32C. (todd)

View File

@@ -138,6 +138,28 @@
http://<HOST>:<HTTP_PORT>/webhdfs/v1/<PATH>?op=...
</source>
</section>
<!-- ***************************************************************************** -->
<section>
<title>HDFS Configuration Options</title>
<p>
Below are the HDFS configuration options for WebHDFS.
</p>
<table>
<tr><th>Property Name</th><th>Description</th></tr>
<tr><td><code>dfs.webhdfs.enabled</code></td>
<td>Enable/disable WebHDFS in Namenodes and Datanodes
</td></tr>
<tr><td><code>dfs.web.authentication.kerberos.principal</code></td>
<td>The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
HTTP SPNEGO specification.
</td></tr>
<tr><td><code>dfs.web.authentication.kerberos.keytab</code></td>
<td>The Kerberos keytab file with the credentials for the
HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
</td></tr>
</table>
</section>
</section>
<!-- ***************************************************************************** -->
<section id="Authentication">

View File

@@ -35,9 +35,9 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
@@ -50,8 +50,6 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
@@ -95,6 +93,7 @@
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -133,7 +132,6 @@
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeProtocolServerSideTranslatorR23;
import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeProtocolTranslatorR23;
import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeWireProtocol;
@@ -494,7 +492,7 @@ conf, new AccessControlList(conf.get(DFS_ADMIN, " ")),
this.infoServer.addServlet(null, "/blockScannerReport",
DataBlockScanner.Servlet.class);
if (conf.getBoolean(DFS_WEBHDFS_ENABLED_KEY, DFS_WEBHDFS_ENABLED_DEFAULT)) {
if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
infoServer.addJerseyResourcePackage(DatanodeWebHdfsMethods.class
.getPackage().getName() + ";" + Param.class.getPackage().getName(),
WebHdfsFileSystem.PATH_PREFIX + "/*");

View File

@@ -108,8 +108,7 @@ public HttpServer run() throws IOException, InterruptedException {
infoPort == 0, conf,
new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " "))) {
{
if (conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT)) {
if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
//add SPNEGO authentication filter for webhdfs
final String name = "SPNEGO";
final String classname = AuthFilter.class.getName();

View File

@@ -131,6 +131,14 @@ private static synchronized void addRenewAction(final WebHdfsFileSystem webhdfs)
DT_RENEWER.addRenewAction(webhdfs);
}
/** Is WebHDFS enabled in conf? */
public static boolean isEnabled(final Configuration conf, final Log log) {
final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
log.info(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY + " = " + b);
return b;
}
private final UserGroupInformation ugi;
private InetSocketAddress nnAddr;
private Token<?> delegationToken;
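
As a usage sketch (assumed class name, not part of this commit), the new isEnabled helper is meant to be called from HTTP-server setup code, as the DataNode and NameNodeHttpServer hunks above do: it reads dfs.webhdfs.enabled, logs the value (the new log message), and gates registration of the WebHDFS resources.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class WebHdfsEnabledCheck {
  // Sketch only: this class and its log usage are illustrative.
  private static final Log LOG = LogFactory.getLog(WebHdfsEnabledCheck.class);

  public static void main(String[] args) {
    final Configuration conf = new Configuration();
    // isEnabled() logs "dfs.webhdfs.enabled = <value>" and returns the value.
    if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
      // A real server would add the WebHDFS Jersey resource package here.
      LOG.info("WebHDFS is enabled; HTTP resources would be registered.");
    } else {
      LOG.info("WebHDFS is disabled.");
    }
  }
}

Centralizing the check in WebHdfsFileSystem keeps the log message identical on the Namenode and Datanode sides.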

View File

@@ -55,6 +55,7 @@
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.PostOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.security.TestDoAsEffectiveUser;
import org.apache.hadoop.security.UserGroupInformation;
@@ -198,9 +199,9 @@ public String getName() {
Assert.assertEquals("/user/" + PROXY_USER, responsePath);
}
final Path f = new Path("/testWebHdfsDoAs/a.txt");
{
//test create file with doAs
final Path f = new Path("/testWebHdfsDoAs/a.txt");
final PutOpParam.Op op = PutOpParam.Op.CREATE;
final URL url = WebHdfsTestUtil.toUrl(webhdfs, op, f, new DoAsParam(PROXY_USER));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
@@ -213,5 +214,21 @@ public String getName() {
WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
Assert.assertEquals(PROXY_USER, status.getOwner());
}
{
//test append file with doAs
final PostOpParam.Op op = PostOpParam.Op.APPEND;
final URL url = WebHdfsTestUtil.toUrl(webhdfs, op, f, new DoAsParam(PROXY_USER));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn = WebHdfsTestUtil.twoStepWrite(conn, op);
final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
out.write("\nHello again!".getBytes());
out.close();
final FileStatus status = webhdfs.getFileStatus(f);
WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
WebHdfsTestUtil.LOG.info("status.getLen() =" + status.getLen());
Assert.assertEquals(PROXY_USER, status.getOwner());
}
}
}

View File

@@ -33,6 +33,7 @@ static FileStatus toFileStatus(HdfsFileStatus f, String parent) {
return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
f.getPermission(), f.getOwner(), f.getGroup(),
f.isSymlink() ? new Path(f.getSymlink()) : null,
new Path(f.getFullName(parent)));
}