<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
-->

<!-- Do not modify this file directly.  Instead, copy entries that you -->
<!-- wish to modify from this file into core-site.xml and change them -->
<!-- there.  If core-site.xml does not already exist, create it.      -->

<configuration>
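
<!-- As noted above, individual entries are overridden by copying them into
     core-site.xml. A minimal sketch of such an override follows; the value
     shown is only an illustration and the path is a hypothetical placeholder:

     <configuration>
       <property>
         <name>hadoop.tmp.dir</name>
         <value>/var/hadoop/tmp</value>
       </property>
     </configuration>
-->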

<!-- global properties -->

<property>
  <name>hadoop.common.configuration.version</name>
  <value>3.0.0</value>
  <description>version of this configuration file</description>
</property>

<property>
  <name>hadoop.tmp.dir</name>
  <value>/tmp/hadoop-${user.name}</value>
  <description>A base for other temporary directories.</description>
</property>

<property>
  <name>io.native.lib.available</name>
  <value>true</value>
  <description>Should native hadoop libraries, if present, be used.</description>
</property>

<property>
  <name>hadoop.http.filter.initializers</name>
  <value>org.apache.hadoop.http.lib.StaticUserWebFilter</value>
  <description>A comma separated list of class names. Each class in the list
  must extend org.apache.hadoop.http.FilterInitializer. The corresponding
  Filter will be initialized. Then, the Filter will be applied to all user
  facing jsp and servlet web pages. The ordering of the list defines the
  ordering of the filters.</description>
</property>
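
<!-- For illustration only: additional filter initializers can be chained by
     listing them comma separated. For example, assuming the HTTP
     authentication properties further below are configured, Hadoop's SPNEGO
     web authentication filter could be placed in front of the static user
     filter with an override like the following:

     <property>
       <name>hadoop.http.filter.initializers</name>
       <value>org.apache.hadoop.security.AuthenticationFilterInitializer,org.apache.hadoop.http.lib.StaticUserWebFilter</value>
     </property>
-->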

<!-- security properties -->

<property>
  <name>hadoop.security.authorization</name>
  <value>false</value>
  <description>Is service-level authorization enabled?</description>
</property>

<property>
  <name>hadoop.security.instrumentation.requires.admin</name>
  <value>false</value>
  <description>
    Indicates if administrator ACLs are required to access
    instrumentation servlets (JMX, METRICS, CONF, STACKS).
  </description>
</property>

<property>
  <name>hadoop.security.authentication</name>
  <value>simple</value>
  <description>Possible values are simple (no authentication) and kerberos.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping</name>
  <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
  <description>
    Class for user to group mapping (get groups for a given user) for ACL.
  </description>
</property>

<property>
  <name>hadoop.security.groups.cache.secs</name>
  <value>300</value>
  <description>
    This is the config controlling the validity of the entries in the cache
    containing the user->group mapping. When this duration has expired,
    then the implementation of the group mapping provider is invoked to get
    the groups of the user and then cached back.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.url</name>
  <value></value>
  <description>
    The URL of the LDAP server to use for resolving user groups when using
    the LdapGroupsMapping user to group mapping.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.ssl</name>
  <value>false</value>
  <description>
    Whether or not to use SSL when connecting to the LDAP server.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.ssl.keystore</name>
  <value></value>
  <description>
    File path to the SSL keystore that contains the SSL certificate required
    by the LDAP server.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.ssl.keystore.password.file</name>
  <value></value>
  <description>
    The path to a file containing the password of the LDAP SSL keystore.

    IMPORTANT: This file should be readable only by the Unix user running
    the daemons.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.bind.user</name>
  <value></value>
  <description>
    The distinguished name of the user to bind as when connecting to the LDAP
    server. This may be left blank if the LDAP server supports anonymous binds.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.bind.password.file</name>
  <value></value>
  <description>
    The path to a file containing the password of the bind user.

    IMPORTANT: This file should be readable only by the Unix user running
    the daemons.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.base</name>
  <value></value>
  <description>
    The search base for the LDAP connection. This is a distinguished name,
    and will typically be the root of the LDAP directory.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.search.filter.user</name>
  <value>(&amp;(objectClass=user)(sAMAccountName={0}))</value>
  <description>
    An additional filter to use when searching for LDAP users. The default will
    usually be appropriate for Active Directory installations. If connecting to
    an LDAP server with a non-AD schema, this should be replaced with
    (&amp;(objectClass=inetOrgPerson)(uid={0})). {0} is a special string used to
    denote where the username fits into the filter.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.search.filter.group</name>
  <value>(objectClass=group)</value>
  <description>
    An additional filter to use when searching for LDAP groups. This should be
    changed when resolving groups against a non-Active Directory installation.
    posixGroups are currently not a supported group class.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.search.attr.member</name>
  <value>member</value>
  <description>
    The attribute of the group object that identifies the users that are
    members of the group. The default will usually be appropriate for
    any LDAP installation.
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.search.attr.group.name</name>
  <value>cn</value>
  <description>
    The attribute of the group object that identifies the group name. The
    default will usually be appropriate for all LDAP systems.
  </description>
</property>
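
<!-- A minimal sketch of switching group resolution to LDAP using the
     properties above; the host name, bind DN and search base below are
     hypothetical placeholders:

     <property>
       <name>hadoop.security.group.mapping</name>
       <value>org.apache.hadoop.security.LdapGroupsMapping</value>
     </property>
     <property>
       <name>hadoop.security.group.mapping.ldap.url</name>
       <value>ldap://ldap.example.com:389</value>
     </property>
     <property>
       <name>hadoop.security.group.mapping.ldap.bind.user</name>
       <value>cn=hadoop,ou=services,dc=example,dc=com</value>
     </property>
     <property>
       <name>hadoop.security.group.mapping.ldap.base</name>
       <value>dc=example,dc=com</value>
     </property>
-->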

<property>
  <name>hadoop.security.service.user.name.key</name>
  <value></value>
  <description>
    For those cases where the same RPC protocol is implemented by multiple
    servers, this configuration is required for specifying the principal
    name to use for the service when the client wishes to make an RPC call.
  </description>
</property>

<property>
  <name>hadoop.rpc.protection</name>
  <value>authentication</value>
  <description>This field sets the quality of protection for secured sasl
  connections. Possible values are authentication, integrity and privacy.
  authentication means authentication only and no integrity or privacy;
  integrity implies authentication and integrity are enabled; and privacy
  implies all of authentication, integrity and privacy are enabled.
  </description>
</property>

<property>
  <name>hadoop.work.around.non.threadsafe.getpwuid</name>
  <value>false</value>
  <description>Some operating systems or authentication modules are known to
  have broken implementations of getpwuid_r and getpwgid_r, such that these
  calls are not thread-safe. Symptoms of this problem include JVM crashes
  with a stack trace inside these functions. If your system exhibits this
  issue, enable this configuration parameter to include a lock around the
  calls as a workaround.

  An incomplete list of some systems known to have this issue is available
  at http://wiki.apache.org/hadoop/KnownBrokenPwuidImplementations
  </description>
</property>

<property>
  <name>hadoop.kerberos.kinit.command</name>
  <value>kinit</value>
  <description>Used to periodically renew Kerberos credentials when provided
  to Hadoop. The default setting assumes that kinit is in the PATH of users
  running the Hadoop client. Change this to the absolute path to kinit if this
  is not the case.
  </description>
</property>

<property>
  <name>hadoop.security.auth_to_local</name>
  <value></value>
  <description>Maps kerberos principals to local user names</description>
</property>
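
<!-- For illustration: one common mapping strips the realm from two-component
     service principals such as nn/host@EXAMPLE.COM, while DEFAULT handles
     everything else. EXAMPLE.COM is a placeholder realm:

     <property>
       <name>hadoop.security.auth_to_local</name>
       <value>
         RULE:[2:$1@$0](.*@EXAMPLE\.COM)s/@.*//
         DEFAULT
       </value>
     </property>
-->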

<!-- i/o properties -->

<property>
  <name>io.file.buffer.size</name>
  <value>4096</value>
  <description>The size of buffer for use in sequence files.
  The size of this buffer should probably be a multiple of hardware
  page size (4096 on Intel x86), and it determines how much data is
  buffered during read and write operations.</description>
</property>

<property>
  <name>io.bytes.per.checksum</name>
  <value>512</value>
  <description>The number of bytes per checksum. Must not be larger than
  io.file.buffer.size.</description>
</property>

<property>
  <name>io.skip.checksum.errors</name>
  <value>false</value>
  <description>If true, when a checksum error is encountered while
  reading a sequence file, entries are skipped, instead of throwing an
  exception.</description>
</property>

<property>
  <name>io.compression.codecs</name>
  <value></value>
  <description>A comma-separated list of the compression codec classes that can
  be used for compression/decompression. In addition to any classes specified
  with this property (which take precedence), codec classes on the classpath
  are discovered using a Java ServiceLoader.</description>
</property>
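
<!-- For example, the codecs that ship with Hadoop could be listed explicitly;
     listing them is optional since they are also discovered via ServiceLoader:

     <property>
       <name>io.compression.codecs</name>
       <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value>
     </property>
-->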

<property>
  <name>io.serializations</name>
  <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
  <description>A list of serialization classes that can be used for
  obtaining serializers and deserializers.</description>
</property>

<property>
  <name>io.seqfile.local.dir</name>
  <value>${hadoop.tmp.dir}/io/local</value>
  <description>The local directory where sequence file stores intermediate
  data files during merge.  May be a comma-separated list of
  directories on different devices in order to spread disk i/o.
  Directories that do not exist are ignored.
  </description>
</property>

<property>
  <name>io.map.index.skip</name>
  <value>0</value>
  <description>Number of index entries to skip between each entry.
  Zero by default. Setting this to values larger than zero can
  facilitate opening large MapFiles using less memory.</description>
</property>

<property>
  <name>io.map.index.interval</name>
  <value>128</value>
  <description>
    A MapFile consists of two files - a data file (tuples) and an index file
    (keys). For every io.map.index.interval records written in the
    data file, an entry (record-key, data-file-position) is written
    in the index file. This is to allow for doing binary search later
    within the index file to look up records by their keys and get their
    closest positions in the data file.
  </description>
</property>

<!-- file system properties -->

<property>
  <name>fs.defaultFS</name>
  <value>file:///</value>
  <description>The name of the default file system.  A URI whose
  scheme and authority determine the FileSystem implementation.  The
  uri's scheme determines the config property (fs.SCHEME.impl) naming
  the FileSystem implementation class.  The uri's authority is used to
  determine the host, port, etc. for a filesystem.</description>
</property>
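
<!-- In a typical HDFS deployment this is overridden in core-site.xml to point
     at the NameNode; the host name below is a hypothetical placeholder:

     <property>
       <name>fs.defaultFS</name>
       <value>hdfs://namenode.example.com:8020</value>
     </property>
-->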

<property>
  <name>fs.default.name</name>
  <value>file:///</value>
  <description>Deprecated. Use the fs.defaultFS property
  instead.</description>
</property>

<property>
  <name>fs.trash.interval</name>
  <value>0</value>
  <description>Number of minutes after which the checkpoint
  gets deleted.  If zero, the trash feature is disabled.
  </description>
</property>

<property>
  <name>fs.trash.checkpoint.interval</name>
  <value>0</value>
  <description>Number of minutes between trash checkpoints.
  Should be smaller than or equal to fs.trash.interval.
  Every time the checkpointer runs it creates a new checkpoint
  out of current and removes checkpoints created more than
  fs.trash.interval minutes ago.
  </description>
</property>
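
<!-- For example, to keep deleted files in the trash for one day and create
     checkpoints hourly, core-site.xml could set (values are illustrative):

     <property>
       <name>fs.trash.interval</name>
       <value>1440</value>
     </property>
     <property>
       <name>fs.trash.checkpoint.interval</name>
       <value>60</value>
     </property>
-->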

<property>
  <name>fs.AbstractFileSystem.file.impl</name>
  <value>org.apache.hadoop.fs.local.LocalFs</value>
  <description>The AbstractFileSystem for file: uris.</description>
</property>

<property>
  <name>fs.AbstractFileSystem.hdfs.impl</name>
  <value>org.apache.hadoop.fs.Hdfs</value>
  <description>The AbstractFileSystem for hdfs: uris.</description>
</property>

<property>
  <name>fs.AbstractFileSystem.viewfs.impl</name>
  <value>org.apache.hadoop.fs.viewfs.ViewFs</value>
  <description>The AbstractFileSystem for the view file system for viewfs: uris
  (i.e. the client side mount table).</description>
</property>

<property>
  <name>fs.ftp.host</name>
  <value>0.0.0.0</value>
  <description>FTP filesystem connects to this server</description>
</property>

<property>
  <name>fs.ftp.host.port</name>
  <value>21</value>
  <description>
    FTP filesystem connects to fs.ftp.host on this port
  </description>
</property>

<property>
  <name>fs.df.interval</name>
  <value>60000</value>
  <description>Disk usage statistics refresh interval in msec.</description>
</property>

<property>
  <name>fs.s3.block.size</name>
  <value>67108864</value>
  <description>Block size to use when writing files to S3.</description>
</property>

<property>
  <name>fs.s3.buffer.dir</name>
  <value>${hadoop.tmp.dir}/s3</value>
  <description>Determines where on the local filesystem the S3 filesystem
  should store files before sending them to S3
  (or after retrieving them from S3).
  </description>
</property>

<property>
  <name>fs.s3.maxRetries</name>
  <value>4</value>
  <description>The maximum number of retries for reading or writing files to S3,
  before we signal failure to the application.
  </description>
</property>

<property>
  <name>fs.s3.sleepTimeSeconds</name>
  <value>10</value>
  <description>The number of seconds to sleep between each S3 retry.
  </description>
</property>

<property>
  <name>fs.automatic.close</name>
  <value>true</value>
  <description>By default, FileSystem instances are automatically closed at program
  exit using a JVM shutdown hook. Setting this property to false disables this
  behavior. This is an advanced option that should only be used by server applications
  requiring a more carefully orchestrated shutdown sequence.
  </description>
</property>

<property>
  <name>fs.s3n.block.size</name>
  <value>67108864</value>
  <description>Block size to use when reading files using the native S3
  filesystem (s3n: URIs).</description>
</property>

<property>
  <name>io.seqfile.compress.blocksize</name>
  <value>1000000</value>
  <description>The minimum block size for compression in block compressed
  SequenceFiles.
  </description>
</property>

<property>
  <name>io.seqfile.lazydecompress</name>
  <value>true</value>
  <description>Whether values of block-compressed SequenceFiles should be
  decompressed only when necessary.
  </description>
</property>

<property>
  <name>io.seqfile.sorter.recordlimit</name>
  <value>1000000</value>
  <description>The limit on the number of records to be kept in memory in a spill
  in SequenceFiles.Sorter
  </description>
</property>

<property>
  <name>io.mapfile.bloom.size</name>
  <value>1048576</value>
  <description>The size of BloomFilter-s used in BloomMapFile. Each time this many
  keys are appended, the next BloomFilter will be created (inside a DynamicBloomFilter).
  Larger values minimize the number of filters, which slightly increases the performance,
  but may waste too much space if the total number of keys is usually much smaller
  than this number.
  </description>
</property>

<property>
  <name>io.mapfile.bloom.error.rate</name>
  <value>0.005</value>
  <description>The rate of false positives in BloomFilter-s used in BloomMapFile.
  As this value decreases, the size of BloomFilter-s increases exponentially. This
  value is the probability of encountering false positives (default is 0.5%).
  </description>
</property>

<property>
  <name>hadoop.util.hash.type</name>
  <value>murmur</value>
  <description>The default implementation of Hash. Currently this can take one of the
  two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.
  </description>
</property>

<!-- ipc properties -->

<property>
  <name>ipc.client.idlethreshold</name>
  <value>4000</value>
  <description>Defines the threshold number of connections after which
  connections will be inspected for idleness.
  </description>
</property>

<property>
  <name>ipc.client.kill.max</name>
  <value>10</value>
  <description>Defines the maximum number of clients to disconnect in one go.
  </description>
</property>

<property>
  <name>ipc.client.connection.maxidletime</name>
  <value>10000</value>
  <description>The maximum time in msec after which a client will bring down the
  connection to the server.
  </description>
</property>

<property>
  <name>ipc.client.connect.max.retries</name>
  <value>10</value>
  <description>Indicates the number of retries a client will make to establish
  a server connection.
  </description>
</property>

<property>
  <name>ipc.client.connect.max.retries.on.timeouts</name>
  <value>45</value>
  <description>Indicates the number of retries a client will make on socket timeout
  to establish a server connection.
  </description>
</property>

<property>
  <name>ipc.server.listen.queue.size</name>
  <value>128</value>
  <description>Indicates the length of the listen queue for servers accepting
  client connections.
  </description>
</property>

<property>
  <name>ipc.server.tcpnodelay</name>
  <value>false</value>
  <description>Turn on/off Nagle's algorithm for the TCP socket connection on
  the server. Setting to true disables the algorithm and may decrease latency
  with a cost of more/smaller packets.
  </description>
</property>

<property>
  <name>ipc.client.tcpnodelay</name>
  <value>false</value>
  <description>Turn on/off Nagle's algorithm for the TCP socket connection on
  the client. Setting to true disables the algorithm and may decrease latency
  with a cost of more/smaller packets.
  </description>
</property>

<!-- Proxy Configuration -->

<property>
  <name>hadoop.rpc.socket.factory.class.default</name>
  <value>org.apache.hadoop.net.StandardSocketFactory</value>
  <description> Default SocketFactory to use. This parameter is expected to be
    formatted as "package.FactoryClassName".
  </description>
</property>

<property>
  <name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
  <value></value>
  <description> SocketFactory to use to connect to a DFS. If null or empty, use
    hadoop.rpc.socket.class.default. This socket factory is also used by
    DFSClient to create sockets to DataNodes.
  </description>
</property>

<property>
  <name>hadoop.socks.server</name>
  <value></value>
  <description> Address (host:port) of the SOCKS server to be used by the
    SocksSocketFactory.
  </description>
</property>
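
<!-- A sketch of routing client RPC traffic through a SOCKS proxy; the proxy
     address is a hypothetical placeholder:

     <property>
       <name>hadoop.rpc.socket.factory.class.default</name>
       <value>org.apache.hadoop.net.SocksSocketFactory</value>
     </property>
     <property>
       <name>hadoop.socks.server</name>
       <value>socks-gateway.example.com:1080</value>
     </property>
-->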

<!-- Topology Configuration -->

<property>
  <name>net.topology.node.switch.mapping.impl</name>
  <value>org.apache.hadoop.net.ScriptBasedMapping</value>
  <description> The default implementation of the DNSToSwitchMapping. It
    invokes a script specified in net.topology.script.file.name to resolve
    node names. If the value for net.topology.script.file.name is not set, the
    default value of DEFAULT_RACK is returned for all node names.
  </description>
</property>

<property>
  <name>net.topology.impl</name>
  <value>org.apache.hadoop.net.NetworkTopology</value>
  <description> The default implementation of NetworkTopology, which is the
    classic three-layer one.
  </description>
</property>

<property>
  <name>net.topology.script.file.name</name>
  <value></value>
  <description> The script name that should be invoked to resolve DNS names to
    NetworkTopology names. Example: the script would take host.foo.bar as an
    argument, and return /rack1 as the output.
  </description>
</property>
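
<!-- A sketch of enabling script-based rack awareness; the script path is a
     hypothetical placeholder. The script receives up to
     net.topology.script.number.args host names or IP addresses as arguments
     and must print one rack path (such as /rack1) per argument:

     <property>
       <name>net.topology.script.file.name</name>
       <value>/etc/hadoop/conf/topology.sh</value>
     </property>
-->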

<property>
  <name>net.topology.script.number.args</name>
  <value>100</value>
  <description> The max number of args that the script configured with
    net.topology.script.file.name should be run with. Each arg is an
    IP address.
  </description>
</property>

<property>
  <name>net.topology.table.file.name</name>
  <value></value>
  <description> The file name for a topology file, which is used when the
    net.topology.node.switch.mapping.impl property is set to
    org.apache.hadoop.net.TableMapping. The file format is a two column text
    file, with columns separated by whitespace. The first column is a DNS or
    IP address and the second column specifies the rack where the address maps.
    If no entry corresponding to a host in the cluster is found, then
    /default-rack is assumed.
  </description>
</property>
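
<!-- For illustration, a topology table file referenced by
     net.topology.table.file.name contains lines of the form shown below;
     the addresses, host names and rack names are placeholders:

       192.168.1.11            /rack1
       192.168.1.12            /rack1
       datanode3.example.com   /rack2
-->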

<!-- Local file system -->
<property>
  <name>file.stream-buffer-size</name>
  <value>4096</value>
  <description>The size of buffer to stream files.
  The size of this buffer should probably be a multiple of hardware
  page size (4096 on Intel x86), and it determines how much data is
  buffered during read and write operations.</description>
</property>

<property>
  <name>file.bytes-per-checksum</name>
  <value>512</value>
  <description>The number of bytes per checksum. Must not be larger than
  file.stream-buffer-size</description>
</property>

<property>
  <name>file.client-write-packet-size</name>
  <value>65536</value>
  <description>Packet size for clients to write</description>
</property>

<property>
  <name>file.blocksize</name>
  <value>67108864</value>
  <description>Block size</description>
</property>

<property>
  <name>file.replication</name>
  <value>1</value>
  <description>Replication factor</description>
</property>

<!-- s3 File System -->

<property>
  <name>s3.stream-buffer-size</name>
  <value>4096</value>
  <description>The size of buffer to stream files.
  The size of this buffer should probably be a multiple of hardware
  page size (4096 on Intel x86), and it determines how much data is
  buffered during read and write operations.</description>
</property>

<property>
  <name>s3.bytes-per-checksum</name>
  <value>512</value>
  <description>The number of bytes per checksum. Must not be larger than
  s3.stream-buffer-size</description>
</property>

<property>
  <name>s3.client-write-packet-size</name>
  <value>65536</value>
  <description>Packet size for clients to write</description>
</property>

<property>
  <name>s3.blocksize</name>
  <value>67108864</value>
  <description>Block size</description>
</property>

<property>
  <name>s3.replication</name>
  <value>3</value>
  <description>Replication factor</description>
</property>

<!-- s3native File System -->

<property>
  <name>s3native.stream-buffer-size</name>
  <value>4096</value>
  <description>The size of buffer to stream files.
  The size of this buffer should probably be a multiple of hardware
  page size (4096 on Intel x86), and it determines how much data is
  buffered during read and write operations.</description>
</property>

<property>
  <name>s3native.bytes-per-checksum</name>
  <value>512</value>
  <description>The number of bytes per checksum. Must not be larger than
  s3native.stream-buffer-size</description>
</property>

<property>
  <name>s3native.client-write-packet-size</name>
  <value>65536</value>
  <description>Packet size for clients to write</description>
</property>

<property>
  <name>s3native.blocksize</name>
  <value>67108864</value>
  <description>Block size</description>
</property>

<property>
  <name>s3native.replication</name>
  <value>3</value>
  <description>Replication factor</description>
</property>

<!-- Kosmos File System -->

<property>
  <name>kfs.stream-buffer-size</name>
  <value>4096</value>
  <description>The size of buffer to stream files.
  The size of this buffer should probably be a multiple of hardware
  page size (4096 on Intel x86), and it determines how much data is
  buffered during read and write operations.</description>
</property>

<property>
  <name>kfs.bytes-per-checksum</name>
  <value>512</value>
  <description>The number of bytes per checksum. Must not be larger than
  kfs.stream-buffer-size</description>
</property>

<property>
  <name>kfs.client-write-packet-size</name>
  <value>65536</value>
  <description>Packet size for clients to write</description>
</property>

<property>
  <name>kfs.blocksize</name>
  <value>67108864</value>
  <description>Block size</description>
</property>

<property>
  <name>kfs.replication</name>
  <value>3</value>
  <description>Replication factor</description>
</property>

<!-- FTP file system -->
<property>
  <name>ftp.stream-buffer-size</name>
  <value>4096</value>
  <description>The size of buffer to stream files.
  The size of this buffer should probably be a multiple of hardware
  page size (4096 on Intel x86), and it determines how much data is
  buffered during read and write operations.</description>
</property>

<property>
  <name>ftp.bytes-per-checksum</name>
  <value>512</value>
  <description>The number of bytes per checksum. Must not be larger than
  ftp.stream-buffer-size</description>
</property>

<property>
  <name>ftp.client-write-packet-size</name>
  <value>65536</value>
  <description>Packet size for clients to write</description>
</property>

<property>
  <name>ftp.blocksize</name>
  <value>67108864</value>
  <description>Block size</description>
</property>

<property>
  <name>ftp.replication</name>
  <value>3</value>
  <description>Replication factor</description>
</property>

<!-- Tfile -->

<property>
  <name>tfile.io.chunk.size</name>
  <value>1048576</value>
  <description>
    Value chunk size in bytes. Defaults to
    1MB. Values with a length less than the chunk size are
    guaranteed to have a known value length at read time (See also
    TFile.Reader.Scanner.Entry.isValueLengthKnown()).
  </description>
</property>

<property>
  <name>tfile.fs.output.buffer.size</name>
  <value>262144</value>
  <description>
    Buffer size used for FSDataOutputStream in bytes.
  </description>
</property>

<property>
  <name>tfile.fs.input.buffer.size</name>
  <value>262144</value>
  <description>
    Buffer size used for FSDataInputStream in bytes.
  </description>
</property>

<!-- HTTP web-consoles Authentication -->

<property>
  <name>hadoop.http.authentication.type</name>
  <value>simple</value>
  <description>
    Defines the authentication used for the Hadoop HTTP web-consoles.
    Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
  </description>
</property>

<property>
  <name>hadoop.http.authentication.token.validity</name>
  <value>36000</value>
  <description>
    Indicates how long (in seconds) an authentication token is valid before it has
    to be renewed.
  </description>
</property>

<property>
  <name>hadoop.http.authentication.signature.secret.file</name>
  <value>${user.home}/hadoop-http-auth-signature-secret</value>
  <description>
    The signature secret for signing the authentication tokens.
    If not set a random secret is generated at startup time.
    The same secret should be used for JT/NN/DN/TT configurations.
  </description>
</property>

<property>
  <name>hadoop.http.authentication.cookie.domain</name>
  <value></value>
  <description>
    The domain to use for the HTTP cookie that stores the authentication token.
    For authentication to work correctly across the web-consoles of all Hadoop
    nodes, the domain must be correctly set.
    IMPORTANT: when using IP addresses, browsers ignore cookies with domain settings.
    For this setting to work properly all nodes in the cluster must be configured
    to generate URLs with hostname.domain names on it.
  </description>
</property>

<property>
  <name>hadoop.http.authentication.simple.anonymous.allowed</name>
  <value>true</value>
  <description>
    Indicates if anonymous requests are allowed when using 'simple' authentication.
  </description>
</property>

<property>
  <name>hadoop.http.authentication.kerberos.principal</name>
  <value>HTTP/_HOST@LOCALHOST</value>
  <description>
    Indicates the Kerberos principal to be used for the HTTP endpoint.
    The principal MUST start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
  </description>
</property>

<property>
  <name>hadoop.http.authentication.kerberos.keytab</name>
  <value>${user.home}/hadoop.keytab</value>
  <description>
    Location of the keytab file with the credentials for the principal.
    The keytab must contain the key for the HTTP principal configured above.
  </description>
</property>
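
<!-- A sketch of switching the web-consoles to Kerberos SPNEGO authentication;
     the realm and keytab path are hypothetical placeholders:

     <property>
       <name>hadoop.http.authentication.type</name>
       <value>kerberos</value>
     </property>
     <property>
       <name>hadoop.http.authentication.kerberos.principal</name>
       <value>HTTP/_HOST@EXAMPLE.COM</value>
     </property>
     <property>
       <name>hadoop.http.authentication.kerberos.keytab</name>
       <value>/etc/security/keytabs/spnego.service.keytab</value>
     </property>
-->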

<property>
  <name>dfs.ha.fencing.methods</name>
  <value></value>
  <description>
    List of fencing methods to use for service fencing. May contain
    builtin methods (e.g. shell and sshfence) or user-defined methods.
  </description>
</property>

<property>
  <name>dfs.ha.fencing.ssh.connect-timeout</name>
  <value>30000</value>
  <description>
    SSH connection timeout, in milliseconds, to use with the builtin
    sshfence fencer.
  </description>
</property>

<property>
  <name>dfs.ha.fencing.ssh.private-key-files</name>
  <value></value>
  <description>
    The SSH private key files to use with the builtin sshfence fencer.
  </description>
</property>
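
<!-- An illustrative fencing configuration: try sshfence first, then fall back
     to a no-op shell command. The key path is a hypothetical placeholder and
     methods are listed one per line:

     <property>
       <name>dfs.ha.fencing.methods</name>
       <value>
         sshfence
         shell(/bin/true)
       </value>
     </property>
     <property>
       <name>dfs.ha.fencing.ssh.private-key-files</name>
       <value>/home/hdfs/.ssh/id_rsa</value>
     </property>
-->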

<property>
  <name>ha.zookeeper.quorum</name>
  <description>
    A list of ZooKeeper server addresses, separated by commas, that are
    to be used by the ZKFailoverController in automatic failover.
  </description>
</property>
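
<!-- For example (host names are hypothetical placeholders):

     <property>
       <name>ha.zookeeper.quorum</name>
       <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value>
     </property>
-->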

<property>
  <name>ha.zookeeper.session-timeout.ms</name>
  <value>5000</value>
  <description>
    The session timeout to use when the ZKFC connects to ZooKeeper.
    Setting this value to a lower value implies that server crashes
    will be detected more quickly, but risks triggering failover too
    aggressively in the case of a transient error or network blip.
  </description>
</property>

<property>
  <name>ha.zookeeper.parent-znode</name>
  <value>/hadoop-ha</value>
  <description>
    The ZooKeeper znode under which the ZK failover controller stores
    its information. Note that the nameservice ID is automatically
    appended to this znode, so it is not normally necessary to
    configure this, even in a federated environment.
  </description>
</property>

<property>
  <name>ha.zookeeper.acl</name>
  <value>world:anyone:rwcda</value>
  <description>
    A comma-separated list of ZooKeeper ACLs to apply to the znodes
    used by automatic failover. These ACLs are specified in the same
    format as used by the ZooKeeper CLI.

    If the ACL itself contains secrets, you may instead specify a
    path to a file, prefixed with the '@' symbol, and the value of
    this configuration will be loaded from within.
  </description>
</property>

<property>
  <name>ha.zookeeper.auth</name>
  <value></value>
  <description>
    A comma-separated list of ZooKeeper authentications to add when
    connecting to ZooKeeper. These are specified in the same format
    as used by the "addauth" command in the ZK CLI. It is
    important that the authentications specified here are sufficient
    to access znodes with the ACL specified in ha.zookeeper.acl.

    If the auths contain secrets, you may instead specify a
    path to a file, prefixed with the '@' symbol, and the value of
    this configuration will be loaded from within.
  </description>
</property>

<!-- Static Web User Filter properties. -->
<property>
  <description>
    The user name to filter as, on static web filters
    while rendering content. An example use is the HDFS
    web UI (user to be used for browsing files).
  </description>
  <name>hadoop.http.staticuser.user</name>
  <value>dr.who</value>
</property>

<!-- SSLFactory configuration -->

<property>
  <name>hadoop.ssl.keystores.factory.class</name>
  <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
  <description>
    The keystores factory to use for retrieving certificates.
  </description>
</property>

<property>
  <name>hadoop.ssl.require.client.cert</name>
  <value>false</value>
  <description>Whether client certificates are required</description>
</property>

<property>
  <name>hadoop.ssl.hostname.verifier</name>
  <value>DEFAULT</value>
  <description>
    The hostname verifier to provide for HttpsURLConnections.
    Valid values are: DEFAULT, STRICT, STRICT_IE6, DEFAULT_AND_LOCALHOST and
    ALLOW_ALL
  </description>
</property>

<property>
  <name>hadoop.ssl.server.conf</name>
  <value>ssl-server.xml</value>
  <description>
    Resource file from which ssl server keystore information will be extracted.
    This file is looked up in the classpath, typically it should be in Hadoop
    conf/ directory.
  </description>
</property>

<property>
  <name>hadoop.ssl.client.conf</name>
  <value>ssl-client.xml</value>
  <description>
    Resource file from which ssl client keystore information will be extracted.
    This file is looked up in the classpath, typically it should be in Hadoop
    conf/ directory.
  </description>
</property>
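
<!-- A rough sketch of the keystore entries that ssl-server.xml typically
     carries. The property names below are an assumption about the resource
     format read by FileBasedKeyStoresFactory, and the paths and password are
     placeholders; consult the ssl-server.xml example shipped with Hadoop:

     <property>
       <name>ssl.server.keystore.location</name>
       <value>/etc/security/ssl/server.jks</value>
     </property>
     <property>
       <name>ssl.server.keystore.password</name>
       <value>changeit</value>
     </property>
     <property>
       <name>ssl.server.truststore.location</name>
       <value>/etc/security/ssl/truststore.jks</value>
     </property>
-->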

</configuration>