HADOOP-18810. Document missing a lot of properties in core-default.xml. (#5912) Contributed by WangYuanben.
Reviewed-by: Shilun Fan <slfan1989@apache.org>
Signed-off-by: Shilun Fan <slfan1989@apache.org>
This commit is contained in:
parent 440698eb07
commit 1e3e246934
@@ -73,6 +73,27 @@
  <description>Is service-level authorization enabled?</description>
</property>

<property>
  <name>security.service.authorization.default.acl</name>
  <value></value>
  <description>
    Define the default ACL for the Hadoop service if the ACL of the Hadoop
    service is not defined in hadoop-policy.xml. If not set, `*` is applied,
    meaning that all users are allowed to access the service. The users and
    groups are both comma-separated lists of names, separated from each other
    by a space. Example: `user1,user2 group1,group2`.
  </description>
</property>

<property>
  <name>security.service.authorization.default.acl.blocked</name>
  <value></value>
  <description>
    This property specifies the list of users and groups who are not
    authorized to access the Hadoop service.
  </description>
</property>
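The `users groups` format described above can be exercised with Hadoop's AccessControlList class. A minimal sketch, assuming the sample users and groups are placeholders and not values from this commit:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.AccessControlList;

public class DefaultAclExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated users, a single space, then comma-separated groups.
    conf.set("security.service.authorization.default.acl",
        "user1,user2 group1,group2");
    AccessControlList acl = new AccessControlList(
        conf.get("security.service.authorization.default.acl", "*"));
    System.out.println("users:  " + acl.getUsers());
    System.out.println("groups: " + acl.getGroups());
  }
}
```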

<property>
  <name>hadoop.security.instrumentation.requires.admin</name>
  <value>false</value>
@@ -225,6 +246,17 @@
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.ctx.factory.class</name>
  <value></value>
  <description>
    Used to specify the fully qualified class name of the initial context
    factory when connecting to an LDAP server. The default value is
    "com.sun.jndi.ldap.LdapCtxFactory", but it is set to null now to avoid
    LifecycleExecutionException with JDK 11 (see HADOOP-15941).
  </description>
</property>

<property>
  <name>hadoop.security.group.mapping.ldap.connection.timeout.ms</name>
  <value>60000</value>
@@ -803,7 +835,19 @@
<property>
  <name>hadoop.token.files</name>
  <value></value>
  <description>List of token cache files that have delegation tokens for hadoop service</description>
  <description>
    A comma-separated list of token cache files that have delegation tokens
    for hadoop service.
  </description>
</property>

<property>
  <name>hadoop.tokens</name>
  <value></value>
  <description>
    A comma-separated list of delegation tokens from base64 encoding
    for hadoop service.
  </description>
</property>
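As a hedged illustration of the comma-separated format used by hadoop.token.files (the file paths below are hypothetical):

```java
import org.apache.hadoop.conf.Configuration;

public class TokenFilesExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated list of token cache files holding delegation tokens.
    conf.set("hadoop.token.files",
        "/tmp/tokens/job.token,/tmp/tokens/hdfs.token");
    for (String file : conf.getTrimmedStrings("hadoop.token.files")) {
      System.out.println("token cache file: " + file);
    }
  }
}
```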

<!-- i/o properties -->
@@ -855,6 +899,65 @@
  operate entirely in Java, specify "java-builtin".</description>
</property>

<property>
  <name>io.compression.codec.lz4.buffersize</name>
  <value>262144</value>
  <description>
    Internal buffer size for Lz4 compressor/decompressors.
  </description>
</property>

<property>
  <name>io.compression.codec.lz4.use.lz4hc</name>
  <value>false</value>
  <description>
    Enable lz4hc (slow but with a high compression ratio) for lz4 compression.
  </description>
</property>

<property>
  <name>io.compression.codec.lzo.buffersize</name>
  <value>65536</value>
  <description>
    Internal buffer size for Lzo compressor/decompressors.
  </description>
</property>

<property>
  <name>io.compression.codec.lzo.class</name>
  <value>org.apache.hadoop.io.compress.LzoCodec</value>
  <description>
    Codec class that implements the Lzo compression algorithm.
  </description>
</property>

<property>
  <name>io.compression.codec.snappy.buffersize</name>
  <value>262144</value>
  <description>
    Internal buffer size for Snappy compressor/decompressors.
  </description>
</property>

<property>
  <name>io.compression.codec.zstd.buffersize</name>
  <value>0</value>
  <description>
    Indicates the ZStandard buffer size. The default value 0 means use the
    buffer size that the zstd library recommends.
  </description>
</property>

<property>
  <name>io.compression.codec.zstd.level</name>
  <value>3</value>
  <description>
    Indicates the ZStandard compression level. The higher the compression
    level, the higher the compression ratio and memory usage, but the slower
    the compression and decompression speed.
  </description>
</property>
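A small sketch of overriding the zstd settings from client code; the level chosen here is an arbitrary example of trading speed for compression ratio, not a recommendation from this change:

```java
import org.apache.hadoop.conf.Configuration;

public class ZstdTuningExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("io.compression.codec.zstd.level", 6);      // higher ratio, slower
    conf.setInt("io.compression.codec.zstd.buffersize", 0); // 0 = library-recommended size
    System.out.println("zstd level = "
        + conf.getInt("io.compression.codec.zstd.level", 3));
  }
}
```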

<property>
  <name>io.serializations</name>
  <value>org.apache.hadoop.io.serializer.WritableSerialization, org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization, org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
@@ -1145,6 +1248,33 @@
  </description>
</property>

<property>
  <name>fs.file.impl</name>
  <value></value>
  <description>
    Specifies the implementation class used for accessing the file system. It
    is a fully qualified class name, including both the package name and the
    class name.
  </description>
</property>

<property>
  <name>fs.creation.parallel.count</name>
  <value>64</value>
  <description>
    This property sets a semaphore to throttle the number of FileSystem
    instances which can be created simultaneously. This is designed to reduce
    the impact of many threads in an application calling FileSystem#get() on
    a filesystem which takes time to instantiate - for example, an object
    store where HTTPS connections are set up during initialization. Many
    threads trying to do this may create spurious delays by conflicting for
    access to synchronized blocks, when simply limiting the parallelism
    diminishes the conflict, so speeds up all threads trying to access the
    store. If a service appears to be blocking on all threads initializing
    connections to abfs, s3a or another store, try a smaller (possibly
    significantly smaller) value.
  </description>
</property>
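For illustration only (the value 16 below is a hypothetical choice, not guidance from this commit), lowering the FileSystem creation semaphore looks like this:

```java
import org.apache.hadoop.conf.Configuration;

public class FsCreationParallelismExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow fewer simultaneous FileSystem instantiations; the default is 64.
    conf.setInt("fs.creation.parallel.count", 16);
    System.out.println("FileSystem creation permits: "
        + conf.getInt("fs.creation.parallel.count", 64));
  }
}
```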

<property>
  <name>fs.AbstractFileSystem.ftp.impl</name>
  <value>org.apache.hadoop.fs.ftp.FtpFs</value>
@@ -1231,6 +1361,22 @@
  </description>
</property>

<property>
  <name>fs.iostatistics.logging.level</name>
  <value>debug</value>
  <description>
    Logging level for IOStatistics.
  </description>
</property>

<property>
  <name>fs.iostatistics.thread.level.enabled</name>
  <value>true</value>
  <description>
    Enable IOStatisticsContext support for thread level.
  </description>
</property>

<property>
  <name>fs.s3a.access.key</name>
  <description>AWS access key ID used by S3A file system. Omit for IAM role-based or provider-based authentication.</description>
@@ -2230,6 +2376,13 @@ The switch to turn S3A auditing on or off.


<!-- ipc properties -->
<property>
  <name>ipc.client.async.calls.max</name>
  <value>100</value>
  <description>
    Define the maximum number of outstanding async calls.
  </description>
</property>

<property>
  <name>ipc.client.idlethreshold</name>
@@ -2239,6 +2392,14 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>ipc.client.connection.idle-scan-interval.ms</name>
  <value>10000</value>
  <description>
    Indicate how often the server scans for idle connections.
  </description>
</property>

<property>
  <name>ipc.client.kill.max</name>
  <value>10</value>
@@ -2286,6 +2447,14 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>ipc.client.connect.max.retries.on.sasl</name>
  <value>5</value>
  <description>
    The maximum number of retries on SASL connection failures in the RPC client.
  </description>
</property>

<property>
  <name>ipc.client.tcpnodelay</name>
  <value>true</value>
@@ -2329,6 +2498,14 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>ipc.server.tcpnodelay</name>
  <value>true</value>
  <description>
    If true then disable Nagle's Algorithm.
  </description>
</property>

<property>
  <name>ipc.server.handler.queue.size</name>
  <value>100</value>
@@ -2338,6 +2515,24 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>ipc.server.max.response.size</name>
  <value>1048576</value>
  <description>
    The maximum response size above which the large IPC handler response
    buffer is reset.
  </description>
</property>

<property>
  <name>ipc.server.metrics.update.runner.interval</name>
  <value>5000</value>
  <description>
    Configures scheduling of the server metrics update thread. This config
    indicates the initial delay and the delay between each execution of the
    metrics update runnable thread.
  </description>
</property>

<property>
  <name>ipc.server.listen.queue.size</name>
  <value>256</value>
@@ -2363,6 +2558,22 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>ipc.server.read.connection-queue.size</name>
  <value>100</value>
  <description>
    Number of pending connections that may be queued per socket reader.
  </description>
</property>

<property>
  <name>ipc.server.read.threadpool.size</name>
  <value>1</value>
  <description>
    Indicates the number of threads in the RPC server reading from the socket.
  </description>
</property>

<property>
  <name>ipc.maximum.data.length</name>
  <value>134217728</value>
@@ -2392,6 +2603,14 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>callqueue.overflow.trigger.failover</name>
  <value>false</value>
  <description>
    Enable callqueue overflow to trigger failover for stateless servers.
  </description>
</property>

<!-- FairCallQueue properties -->
<!-- See FairCallQueue documentation for a table of all properties -->

@@ -2476,6 +2695,20 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>ipc.[port_number].callqueue.capacity.weights</name>
  <value></value>
  <description>
    When FairCallQueue is enabled, users can specify the capacity allocation
    among all sub-queues via this property. The value of this config is
    a comma-separated list of positive integers, each of which specifies
    the weight associated with the sub-queue at that index. The length of
    this list should equal the number of IPC scheduler priority levels,
    defined by "scheduler.priority.levels". By default, each sub-queue is
    associated with weight 1, i.e., all sub-queues are allocated the same
    capacity.
  </description>
</property>
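A hedged sketch of the per-port form of these keys; port 8020 and the weights are made-up values, and the length of the weight list matches the configured number of priority levels as described above:

```java
import org.apache.hadoop.conf.Configuration;

public class FairCallQueueWeightsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "[port_number]" in the key is replaced by the server's listening port.
    conf.setInt("ipc.8020.scheduler.priority.levels", 4);
    // One weight per sub-queue; queue 0 receives 4/8 of the total capacity.
    conf.set("ipc.8020.callqueue.capacity.weights", "4,2,1,1");
    System.out.println("weights = "
        + conf.get("ipc.8020.callqueue.capacity.weights"));
  }
}
```

If the weights are omitted, every sub-queue keeps the default weight of 1, i.e. equal capacity.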

<property>
  <name>ipc.[port_number].scheduler.priority.levels</name>
  <value>4</value>
@@ -2744,6 +2977,24 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>net.topology.configured.node.mapping</name>
  <value></value>
  <description>
    Key to define the node mapping as a comma-delimited list of host=rack
    mappings, e.g. host1=r1,host2=r1,host3=r2. Important: spaces are not
    trimmed and are considered significant.
  </description>
</property>
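The host=rack list format from the description, shown as a minimal parsing sketch (the host and rack names are the example values given above):

```java
import org.apache.hadoop.conf.Configuration;

public class NodeMappingExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-delimited host=rack pairs; spaces are significant, so avoid them.
    conf.set("net.topology.configured.node.mapping",
        "host1=r1,host2=r1,host3=r2");
    for (String mapping :
        conf.getStrings("net.topology.configured.node.mapping")) {
      String[] parts = mapping.split("=", 2);
      System.out.println(parts[0] + " -> " + parts[1]);
    }
  }
}
```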

<property>
  <name>net.topology.dependency.script.file.name</name>
  <value></value>
  <description>
    Key to the dependency script filename.
  </description>
</property>

<!-- Local file system -->
<property>
  <name>file.stream-buffer-size</name>
@@ -3274,6 +3525,17 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>hadoop.user.group.metrics.percentiles.intervals</name>
  <value></value>
  <description>
    A comma-delimited list of integers denoting the desired rollover
    intervals (in seconds) for percentile latency metrics on the Namenode
    and Datanode for each user in the group. By default, percentile
    latency metrics are disabled.
  </description>
</property>
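A brief, hypothetical example of the comma-delimited interval format (60s and 600s are illustrative values, not defaults from this commit):

```java
import org.apache.hadoop.conf.Configuration;

public class UserGroupMetricsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Rollover intervals in seconds; leaving this empty (the default)
    // keeps percentile latency metrics disabled.
    conf.set("hadoop.user.group.metrics.percentiles.intervals", "60,600");
    for (String interval : conf.getTrimmedStrings(
        "hadoop.user.group.metrics.percentiles.intervals")) {
      System.out.println("rollover interval: " + interval + "s");
    }
  }
}
```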

<property>
  <name>rpc.metrics.quantile.enable</name>
  <value>false</value>
@@ -3532,6 +3794,24 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>hadoop.security.kms.client.failover.max.retries</name>
  <value></value>
  <description>
    The maximum number of failover retries for the KMS client. The default
    value is the number of providers specified.
  </description>
</property>

<property>
  <name>hadoop.security.kerberos.ticket.cache.path</name>
  <value></value>
  <description>
    Path to the Kerberos ticket cache. Setting this will force
    UserGroupInformation to use only this ticket cache file when
    creating a FileSystem instance.
  </description>
</property>
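A sketch of the behaviour described above; the cache path is a placeholder, and whether it exists depends on the local Kerberos setup:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class TicketCachePathExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // UserGroupInformation will read only this ticket cache when the
    // FileSystem below is created.
    conf.set("hadoop.security.kerberos.ticket.cache.path", "/tmp/krb5cc_1000");
    FileSystem fs = FileSystem.get(conf);
    System.out.println("filesystem: " + fs.getUri());
  }
}
```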

<property>
  <name>ipc.server.max.connections</name>
  <value>0</value>
@@ -3944,6 +4224,30 @@ The switch to turn S3A auditing on or off.
  </description>
</property>

<property>
  <name>hadoop.zk.server.principal</name>
  <value></value>
  <description>
    Principal name for zookeeper servers.
  </description>
</property>

<property>
  <name>hadoop.zk.kerberos.principal</name>
  <value></value>
  <description>
    Kerberos principal name for zookeeper connection.
  </description>
</property>

<property>
  <name>hadoop.zk.kerberos.keytab</name>
  <value></value>
  <description>
    Kerberos keytab for zookeeper connection.
  </description>
</property>

<property>
  <name>hadoop.zk.ssl.keystore.location</name>
  <description>
@@ -3986,7 +4290,15 @@ The switch to turn S3A auditing on or off.
  <value>YARN,HDFS,NAMENODE,DATANODE,REQUIRED,SECURITY,KERBEROS,PERFORMANCE,CLIENT
    ,SERVER,DEBUG,DEPRECATED,COMMON,OPTIONAL</value>
  <description>
    System tags to group related properties together.
    A comma-separated list of system tags to group related properties together.
  </description>
</property>

<property>
  <name>hadoop.tags.custom</name>
  <value></value>
  <description>
    A comma-separated list of custom tags to group related properties together.
  </description>
</property>


@@ -157,6 +157,7 @@ public void initializeMemberVariables() {
    xmlPropsToSkipCompare.add("ipc.[port_number].scheduler.impl");
    xmlPropsToSkipCompare.add("ipc.scheduler.impl");
    xmlPropsToSkipCompare.add("ipc.[port_number].scheduler.priority.levels");
    xmlPropsToSkipCompare.add("ipc.[port_number].callqueue.capacity.weights");
    xmlPropsToSkipCompare.add(
        "ipc.[port_number].faircallqueue.multiplexer.weights");
    xmlPropsToSkipCompare.add("ipc.[port_number].identity-provider.impl");