<?xml version="1.0"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements. See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License. You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
-->

<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Do not modify this file directly. Instead, copy entries that you -->
<!-- wish to modify from this file into mapred-site.xml and change them -->
<!-- there. If mapred-site.xml does not already exist, create it. -->

<configuration>
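
<!-- Illustrative only: overrides belong in mapred-site.xml, not here. A
     minimal mapred-site.xml that changes one value from this file might
     look like the following (the value 4 is a made-up example):

     <configuration>
       <property>
         <name>mapreduce.job.reduces</name>
         <value>4</value>
       </property>
     </configuration>
-->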

<property>
  <name>mapreduce.jobtracker.jobhistory.location</name>
  <value></value>
  <description>If the job tracker is static, the history files are stored
  in this single well-known place. If no value is set here, by default
  they are kept in the local file system at ${hadoop.log.dir}/history.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.jobhistory.task.numberprogresssplits</name>
  <value>12</value>
  <description>Every task attempt progresses from 0.0 to 1.0 [unless
  it fails or is killed]. We record, for each task attempt, certain
  statistics over each twelfth of the progress range. You can change
  the number of intervals we divide the entire range of progress into
  by setting this property. Higher values give more precision to the
  recorded data, but cost more memory in the job tracker at runtime.
  Each increment in this attribute costs 16 bytes per running task.
  </description>
</property>

<property>
  <name>mapreduce.job.userhistorylocation</name>
  <value></value>
  <description>The user can specify a location to store the history files of
  a particular job. If nothing is specified, the logs are stored in the
  output directory, under "_logs/history/". The user can stop logging by
  giving the value "none".
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.jobhistory.completed.location</name>
  <value></value>
  <description>The completed job history files are stored at this single well
  known location. If nothing is specified, the files are stored at
  ${mapreduce.jobtracker.jobhistory.location}/done.
  </description>
</property>

<property>
  <name>mapreduce.job.committer.setup.cleanup.needed</name>
  <value>true</value>
  <description>True, if the job needs job-setup and job-cleanup;
  false otherwise.
  </description>
</property>

<!-- i/o properties -->

<property>
  <name>mapreduce.task.io.sort.factor</name>
  <value>10</value>
  <description>The number of streams to merge at once while sorting
  files. This determines the number of open file handles.</description>
</property>

<property>
  <name>mapreduce.task.io.sort.mb</name>
  <value>100</value>
  <description>The total amount of buffer memory to use while sorting
  files, in megabytes. By default, gives each merge stream 1MB, which
  should minimize seeks.</description>
</property>

<property>
  <name>mapreduce.map.sort.spill.percent</name>
  <value>0.80</value>
  <description>The soft limit in the serialization buffer. Once reached, a
  thread will begin to spill the contents to disk in the background. Note that
  collection will not block if this threshold is exceeded while a spill is
  already in progress, so spills may be larger than this threshold when it is
  set to less than .5</description>
</property>

<property>
  <name>mapreduce.jobtracker.address</name>
  <value>local</value>
  <description>The host and port that the MapReduce job tracker runs
  at. If "local", then jobs are run in-process as a single map
  and reduce task.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.http.address</name>
  <value>0.0.0.0:50030</value>
  <description>
  The job tracker http server address and port the server will listen on.
  If the port is 0 then the server will start on a free port.
  </description>
</property>
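
<!-- Illustrative only: a sketch of pointing clients at a real (non-local)
     JobTracker from mapred-site.xml. The host name and port below are
     made-up examples, not defaults:

     <property>
       <name>mapreduce.jobtracker.address</name>
       <value>jobtracker.example.com:8021</value>
     </property>
-->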

<property>
  <name>mapreduce.jobtracker.handler.count</name>
  <value>10</value>
  <description>
  The number of server threads for the JobTracker. This should be roughly
  4% of the number of tasktracker nodes.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.report.address</name>
  <value>127.0.0.1:0</value>
  <description>The interface and port that task tracker server listens on.
  Since it is only connected to by the tasks, it uses the local interface.
  EXPERT ONLY. Should only be changed if your host does not have the loopback
  interface.</description>
</property>

<property>
  <name>mapreduce.cluster.local.dir</name>
  <value>${hadoop.tmp.dir}/mapred/local</value>
  <description>The local directory where MapReduce stores intermediate
  data files. May be a comma-separated list of
  directories on different devices in order to spread disk i/o.
  Directories that do not exist are ignored.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.system.dir</name>
  <value>${hadoop.tmp.dir}/mapred/system</value>
  <description>The directory where MapReduce stores control files.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.staging.root.dir</name>
  <value>${hadoop.tmp.dir}/mapred/staging</value>
  <description>The root of the staging area for users' job files.
  In practice, this should be the directory where users' home
  directories are located (usually /user).
  </description>
</property>

<property>
  <name>mapreduce.cluster.temp.dir</name>
  <value>${hadoop.tmp.dir}/mapred/temp</value>
  <description>A shared directory for temporary files.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.local.dir.minspacestart</name>
  <value>0</value>
  <description>If the space in mapreduce.cluster.local.dir drops under this,
  do not ask for more tasks.
  Value in bytes.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.local.dir.minspacekill</name>
  <value>0</value>
  <description>If the space in mapreduce.cluster.local.dir drops under this,
  do not ask for more tasks until all the current ones have finished and
  cleaned up. Also, to save the rest of the tasks we have running,
  kill one of them, to clean up some space. Start with the reduce tasks,
  then go with the ones that have finished the least.
  Value in bytes.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.expire.trackers.interval</name>
  <value>600000</value>
  <description>Expert: The time-interval, in milliseconds, after which
  a tasktracker is declared 'lost' if it doesn't send heartbeats.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.instrumentation</name>
  <value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value>
  <description>Expert: The instrumentation class to associate with each TaskTracker.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.resourcecalculatorplugin</name>
  <value></value>
  <description>
  Name of the class whose instance will be used to query resource information
  on the tasktracker.

  The class must be an instance of
  org.apache.hadoop.util.ResourceCalculatorPlugin. If the value is null, the
  tasktracker attempts to use a class appropriate to the platform.
  Currently, the only platform supported is Linux.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.taskmemorymanager.monitoringinterval</name>
  <value>5000</value>
  <description>The interval, in milliseconds, that the tasktracker waits
  between two cycles of monitoring its tasks' memory usage. Used only if
  tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.tasks.sleeptimebeforesigkill</name>
  <value>5000</value>
  <description>The time, in milliseconds, the tasktracker waits before sending
  a SIGKILL to a task, after it has been sent a SIGTERM. This is currently
  not used on WINDOWS where tasks are just sent a SIGTERM.
  </description>
</property>

<property>
  <name>mapreduce.job.maps</name>
  <value>2</value>
  <description>The default number of map tasks per job.
  Ignored when mapreduce.jobtracker.address is "local".
  </description>
</property>

<property>
  <name>mapreduce.job.reduces</name>
  <value>1</value>
  <description>The default number of reduce tasks per job. Typically set to 99%
  of the cluster's reduce capacity, so that if a node fails the reduces can
  still be executed in a single wave.
  Ignored when mapreduce.jobtracker.address is "local".
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.restart.recover</name>
  <value>false</value>
  <description>"true" to enable (job) recovery upon restart,
  "false" to start afresh
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.jobhistory.block.size</name>
  <value>3145728</value>
  <description>The block size of the job history file. Since job recovery
  uses the job history, it is important to dump the job history to disk as
  soon as possible. Note that this is an expert-level parameter.
  The default value is set to 3 MB.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.taskscheduler</name>
  <value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value>
  <description>The class responsible for scheduling the tasks.</description>
</property>

<property>
  <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
  <value>10000000</value>
  <description>The maximum permissible size of the split metainfo file.
  The JobTracker won't attempt to read split metainfo files bigger than
  the configured value.
  No limits if set to -1.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob</name>
  <value></value>
  <description>The maximum number of running tasks for a job before
  it gets preempted. No limits if undefined.
  </description>
</property>

<property>
  <name>mapreduce.map.maxattempts</name>
  <value>4</value>
  <description>Expert: The maximum number of attempts per map task.
  In other words, the framework will try to execute a map task this many
  times before giving up on it.
  </description>
</property>

<property>
  <name>mapreduce.reduce.maxattempts</name>
  <value>4</value>
  <description>Expert: The maximum number of attempts per reduce task.
  In other words, the framework will try to execute a reduce task this many
  times before giving up on it.
  </description>
</property>

<property>
  <name>mapreduce.reduce.shuffle.parallelcopies</name>
  <value>5</value>
  <description>The default number of parallel transfers run by reduce
  during the copy (shuffle) phase.
  </description>
</property>

<property>
  <name>mapreduce.reduce.shuffle.connect.timeout</name>
  <value>180000</value>
  <description>Expert: The maximum amount of time (in milliseconds) a reduce
  task spends trying to connect to a tasktracker to get map output.
  </description>
</property>

<property>
  <name>mapreduce.reduce.shuffle.read.timeout</name>
  <value>180000</value>
  <description>Expert: The maximum amount of time (in milliseconds) a reduce
  task waits for map output data to be available for reading after obtaining
  a connection.
  </description>
</property>

<property>
  <name>mapreduce.task.timeout</name>
  <value>600000</value>
  <description>The number of milliseconds before a task will be
  terminated if it neither reads an input, writes an output, nor
  updates its status string.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.map.tasks.maximum</name>
  <value>2</value>
  <description>The maximum number of map tasks that will be run
  simultaneously by a task tracker.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.reduce.tasks.maximum</name>
  <value>2</value>
  <description>The maximum number of reduce tasks that will be run
  simultaneously by a task tracker.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.retiredjobs.cache.size</name>
  <value>1000</value>
  <description>The number of retired job statuses to keep in the cache.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.outofband.heartbeat</name>
  <value>false</value>
  <description>Expert: Set this to true to let the tasktracker send an
  out-of-band heartbeat on task-completion for better latency.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.jobhistory.lru.cache.size</name>
  <value>5</value>
  <description>The number of job history files loaded in memory. The jobs are
  loaded when they are first accessed. The cache is cleared based on LRU.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.instrumentation</name>
  <value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value>
  <description>Expert: The instrumentation class to associate with each JobTracker.
  </description>
</property>

<property>
  <name>mapred.child.java.opts</name>
  <value>-Xmx200m</value>
  <description>Java opts for the task tracker child processes.
  The following symbol, if present, will be interpolated: @taskid@ is replaced
  by current TaskID. Any other occurrences of '@' will go unchanged.
  For example, to enable verbose gc logging to a file named for the taskid in
  /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
        -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc

  The configuration variable mapred.child.ulimit can be used to control the
  maximum virtual memory of the child processes.
  </description>
</property>
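
<!-- Illustrative only: a sketch of the gc-logging override described above,
     as it might appear in mapred-site.xml. The heap size and log path come
     straight from the example in the description:

     <property>
       <name>mapred.child.java.opts</name>
       <value>-Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc</value>
     </property>
-->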

<property>
  <name>mapred.child.env</name>
  <value></value>
  <description>User-added environment variables for the task tracker child
  processes. Examples:
  1) A=foo      This will set the env variable A to foo.
  2) B=$B:c     This will inherit the tasktracker's B env variable.
  </description>
</property>
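
<!-- Illustrative only: assuming multiple variables are given as a
     comma-separated list (an assumption, not stated above), the two
     examples from the description might be combined as:

     <property>
       <name>mapred.child.env</name>
       <value>A=foo,B=$B:c</value>
     </property>
-->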

<property>
  <name>mapred.child.ulimit</name>
  <value></value>
  <description>The maximum virtual memory, in KB, of a process launched by the
  Map-Reduce framework. This can be used to control both the Mapper/Reducer
  tasks and applications using Hadoop Pipes, Hadoop Streaming etc.
  By default it is left unspecified to let cluster admins control it via
  limits.conf and other such relevant mechanisms.

  Note: mapred.child.ulimit must be greater than or equal to the -Xmx passed to
  JavaVM, else the VM might not start.
  </description>
</property>

<property>
  <name>mapreduce.task.tmp.dir</name>
  <value>./tmp</value>
  <description>Sets the value of the tmp directory for map and reduce tasks.
  If the value is an absolute path, it is directly assigned. Otherwise, it is
  prepended with the task's working directory. The java tasks are executed with
  the option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
  streaming are set with the environment variable
  TMPDIR='the absolute path of the tmp dir'.
  </description>
</property>

<property>
  <name>mapreduce.map.log.level</name>
  <value>INFO</value>
  <description>The logging level for the map task. The allowed levels are:
  OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
  </description>
</property>

<property>
  <name>mapreduce.reduce.log.level</name>
  <value>INFO</value>
  <description>The logging level for the reduce task. The allowed levels are:
  OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
  </description>
</property>
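
<!-- Illustrative only: turning up map-side logging while debugging a job,
     using one of the levels listed above:

     <property>
       <name>mapreduce.map.log.level</name>
       <value>DEBUG</value>
     </property>
-->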

<property>
  <name>mapreduce.reduce.merge.inmem.threshold</name>
  <value>1000</value>
  <description>The threshold, in terms of the number of files,
  for the in-memory merge process. When we accumulate this many files,
  we initiate the in-memory merge and spill to disk. A value of 0 or less
  indicates we don't want any threshold and instead depend only on
  the ramfs's memory consumption to trigger the merge.
  </description>
</property>

<property>
  <name>mapreduce.reduce.shuffle.merge.percent</name>
  <value>0.66</value>
  <description>The usage threshold at which an in-memory merge will be
  initiated, expressed as a percentage of the total memory allocated to
  storing in-memory map outputs, as defined by
  mapreduce.reduce.shuffle.input.buffer.percent.
  </description>
</property>

<property>
  <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
  <value>0.70</value>
  <description>The percentage of memory to be allocated from the maximum heap
  size to storing map outputs during the shuffle.
  </description>
</property>

<property>
  <name>mapreduce.reduce.input.buffer.percent</name>
  <value>0.0</value>
  <description>The percentage of memory, relative to the maximum heap size, to
  retain map outputs during the reduce. When the shuffle is concluded, any
  remaining map outputs in memory must consume less than this threshold before
  the reduce can begin.
  </description>
</property>

<property>
  <name>mapreduce.reduce.markreset.buffer.percent</name>
  <value>0.0</value>
  <description>The percentage of memory, relative to the maximum heap size, to
  be used for caching values when using the mark-reset functionality.
  </description>
</property>

<property>
  <name>mapreduce.map.speculative</name>
  <value>true</value>
  <description>If true, then multiple instances of some map tasks
  may be executed in parallel.</description>
</property>

<property>
  <name>mapreduce.reduce.speculative</name>
  <value>true</value>
  <description>If true, then multiple instances of some reduce tasks
  may be executed in parallel.</description>
</property>

<property>
  <name>mapreduce.job.speculative.speculativecap</name>
  <value>0.1</value>
  <description>The max percent (0-1) of running tasks that
  can be speculatively re-executed at any time.</description>
</property>
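
<!-- Illustrative only: speculative execution can be switched off per task
     type from mapred-site.xml, e.g. for reducers whose side effects make
     duplicate attempts undesirable:

     <property>
       <name>mapreduce.reduce.speculative</name>
       <value>false</value>
     </property>
-->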

<property>
  <name>mapreduce.job.speculative.slowtaskthreshold</name>
  <value>1.0</value>
  <description>The number of standard deviations by which a task's
  average progress-rate must be lower than the average of all running tasks'
  for the task to be considered too slow.
  </description>
</property>

<property>
  <name>mapreduce.job.speculative.slownodethreshold</name>
  <value>1.0</value>
  <description>The number of standard deviations by which a Task
  Tracker's average map and reduce progress-rates (finishTime-dispatchTime)
  must be lower than the average of all successful map/reduce tasks' for
  the TT to be considered too slow to give a speculative task to.
  </description>
</property>

<property>
  <name>mapreduce.job.jvm.numtasks</name>
  <value>1</value>
  <description>How many tasks to run per jvm. If set to -1, there is
  no limit.
  </description>
</property>

<property>
  <name>mapreduce.input.fileinputformat.split.minsize</name>
  <value>0</value>
  <description>The minimum size chunk that map input should be split
  into. Note that some file formats may have minimum split sizes that
  take priority over this setting.</description>
</property>

<property>
  <name>mapreduce.jobtracker.maxtasks.perjob</name>
  <value>-1</value>
  <description>The maximum number of tasks for a single job.
  A value of -1 indicates that there is no maximum.</description>
</property>

<property>
  <name>mapreduce.client.submit.file.replication</name>
  <value>10</value>
  <description>The replication level for submitted job files. This
  should be around the square root of the number of nodes.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.dns.interface</name>
  <value>default</value>
  <description>The name of the Network Interface from which a task
  tracker should report its IP address.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.dns.nameserver</name>
  <value>default</value>
  <description>The host name or IP address of the name server (DNS)
  which a TaskTracker should use to determine the host name used by
  the JobTracker for communication and display purposes.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.http.threads</name>
  <value>40</value>
  <description>The number of worker threads for the http server. This is
  used for map output fetching.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.http.address</name>
  <value>0.0.0.0:50060</value>
  <description>
  The task tracker http server address and port.
  If the port is 0 then the server will start on a free port.
  </description>
</property>

<property>
  <name>mapreduce.task.files.preserve.failedtasks</name>
  <value>false</value>
  <description>Should the files for failed tasks be kept. This should only be
  used on jobs that are failing, because the storage is never
  reclaimed. It also prevents the map outputs from being erased
  from the reduce directory as they are consumed.</description>
</property>

<!--
<property>
  <name>mapreduce.task.files.preserve.filepattern</name>
  <value>.*_m_123456_0</value>
  <description>Keep all files from tasks whose task names match the given
  regular expression. Defaults to none.</description>
</property>
-->

<property>
  <name>mapreduce.output.fileoutputformat.compress</name>
  <value>false</value>
  <description>Should the job outputs be compressed?
  </description>
</property>

<property>
  <name>mapreduce.output.fileoutputformat.compression.type</name>
  <value>RECORD</value>
  <description>If the job outputs are to be compressed as SequenceFiles, how
  should they be compressed? Should be one of NONE, RECORD or BLOCK.
  </description>
</property>

<property>
  <name>mapreduce.output.fileoutputformat.compression.codec</name>
  <value>org.apache.hadoop.io.compress.DefaultCodec</value>
  <description>If the job outputs are compressed, how should they be compressed?
  </description>
</property>

<property>
  <name>mapreduce.map.output.compress</name>
  <value>false</value>
  <description>Should the outputs of the maps be compressed before being
  sent across the network? Uses SequenceFile compression.
  </description>
</property>

<property>
  <name>mapreduce.map.output.compress.codec</name>
  <value>org.apache.hadoop.io.compress.DefaultCodec</value>
  <description>If the map outputs are compressed, how should they be
  compressed?
  </description>
</property>
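
<!-- Illustrative only: compressing intermediate map output is a common
     tuning step; this sketch keeps the default codec named above:

     <property>
       <name>mapreduce.map.output.compress</name>
       <value>true</value>
     </property>
     <property>
       <name>mapreduce.map.output.compress.codec</name>
       <value>org.apache.hadoop.io.compress.DefaultCodec</value>
     </property>
-->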

<property>
  <name>map.sort.class</name>
  <value>org.apache.hadoop.util.QuickSort</value>
  <description>The default sort class for sorting keys.
  </description>
</property>

<property>
  <name>mapreduce.task.userlog.limit.kb</name>
  <value>0</value>
  <description>The maximum size of user-logs of each task in KB. 0 disables the cap.
  </description>
</property>

<property>
  <name>mapreduce.job.userlog.retain.hours</name>
  <value>24</value>
  <description>The maximum time, in hours, for which the user-logs are to be
  retained after the job completion.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.hosts.filename</name>
  <value></value>
  <description>Names a file that contains the list of nodes that may
  connect to the jobtracker. If the value is empty, all hosts are
  permitted.</description>
</property>

<property>
  <name>mapreduce.jobtracker.hosts.exclude.filename</name>
  <value></value>
  <description>Names a file that contains the list of hosts that
  should be excluded by the jobtracker. If the value is empty, no
  hosts are excluded.</description>
</property>
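
<!-- Illustrative only: wiring up the include/exclude host files described
     above. The paths are made-up examples:

     <property>
       <name>mapreduce.jobtracker.hosts.filename</name>
       <value>/etc/hadoop/mapred.include</value>
     </property>
     <property>
       <name>mapreduce.jobtracker.hosts.exclude.filename</name>
       <value>/etc/hadoop/mapred.exclude</value>
     </property>
-->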

<property>
  <name>mapreduce.jobtracker.heartbeats.in.second</name>
  <value>100</value>
  <description>Expert: Approximate number of heart-beats that could arrive
  at the JobTracker in a second. Assuming each RPC can be processed
  in 10msec, the default value is set to 100 RPCs per second.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.tasktracker.maxblacklists</name>
  <value>4</value>
  <description>The number of blacklistings of a tasktracker by various jobs
  after which the tasktracker could be blacklisted across
  all jobs. The tracker will be given tasks again later
  (after a day). The tracker will become a healthy
  tracker after a restart.
  </description>
</property>

<property>
  <name>mapreduce.job.maxtaskfailures.per.tracker</name>
  <value>4</value>
  <description>The number of task-failures on a tasktracker of a given job
  after which new tasks of that job aren't assigned to it.
  </description>
</property>

<property>
  <name>mapreduce.client.output.filter</name>
  <value>FAILED</value>
  <description>The filter for controlling the output of the task's userlogs sent
  to the console of the JobClient.
  The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and
  ALL.
  </description>
</property>

<property>
  <name>mapreduce.client.completion.pollinterval</name>
  <value>5000</value>
  <description>The interval (in milliseconds) at which the JobClient
  polls the JobTracker for updates about job status. You may want to set this
  to a lower value to make tests run faster on a single node system. Adjusting
  this value in production may lead to unwanted client-server traffic.
  </description>
</property>

<property>
  <name>mapreduce.client.progressmonitor.pollinterval</name>
  <value>1000</value>
  <description>The interval (in milliseconds) at which the JobClient
  reports status to the console and checks for job completion. You may want
  to set this to a lower value to make tests run faster on a single node
  system. Adjusting this value in production may lead to unwanted
  client-server traffic.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.persist.jobstatus.active</name>
  <value>true</value>
  <description>Indicates whether persistence of job status information is
  active.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.persist.jobstatus.hours</name>
  <value>1</value>
  <description>The number of hours job status information is persisted in DFS.
  The job status information will be available after it drops out of the memory
  queue and between jobtracker restarts. With a zero value the job status
  information is not persisted at all in DFS.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.persist.jobstatus.dir</name>
  <value>/jobtracker/jobsInfo</value>
  <description>The directory where the job status information is persisted
  in a file system to be available after it drops out of the memory queue and
  between jobtracker restarts.
  </description>
</property>

<property>
  <name>mapreduce.task.profile</name>
  <value>false</value>
  <description>Whether the system should collect profiler
  information for some of the tasks in this job. The information is stored
  in the user log directory. The value is "true" if task profiling
  is enabled.</description>
</property>

<property>
  <name>mapreduce.task.profile.maps</name>
  <value>0-2</value>
  <description>Sets the ranges of map tasks to profile.
  mapreduce.task.profile has to be set to true for this value to be taken
  into account.
  </description>
</property>

<property>
  <name>mapreduce.task.profile.reduces</name>
  <value>0-2</value>
  <description>Sets the ranges of reduce tasks to profile.
  mapreduce.task.profile has to be set to true for this value to be taken
  into account.
  </description>
</property>
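
<!-- Illustrative only: enabling profiling for the first map and reduce
     attempts, using the range syntax shown in the defaults above:

     <property>
       <name>mapreduce.task.profile</name>
       <value>true</value>
     </property>
     <property>
       <name>mapreduce.task.profile.maps</name>
       <value>0-1</value>
     </property>
     <property>
       <name>mapreduce.task.profile.reduces</name>
       <value>0</value>
     </property>
-->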

<property>
  <name>mapreduce.task.skip.start.attempts</name>
  <value>2</value>
  <description>The number of task attempts AFTER which skip mode
  will be kicked off. When skip mode is kicked off, the
  task reports to the TaskTracker the range of records which it will
  process next, so that on failures the TT knows which
  records are possibly the bad ones. On further executions,
  those are skipped.
  </description>
</property>

<property>
  <name>mapreduce.map.skip.proc.count.autoincr</name>
  <value>true</value>
  <description>The flag which, if set to true, causes
  SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS to be incremented
  by MapRunner after invoking the map function. This value must be set to
  false for applications which process the records asynchronously
  or buffer the input records; for example, streaming.
  In such cases applications should increment this counter on their own.
  </description>
</property>

<property>
  <name>mapreduce.reduce.skip.proc.count.autoincr</name>
  <value>true</value>
  <description>The flag which, if set to true, causes
  SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS to be incremented
  by the framework after invoking the reduce function. This value must be set
  to false for applications which process the records asynchronously
  or buffer the input records; for example, streaming.
  In such cases applications should increment this counter on their own.
  </description>
</property>

<property>
  <name>mapreduce.job.skip.outdir</name>
  <value></value>
  <description>If no value is specified here, the skipped records are
  written to the output directory at _logs/skip.
  User can stop writing skipped records by giving the value "none".
  </description>
</property>

<property>
  <name>mapreduce.map.skip.maxrecords</name>
  <value>0</value>
  <description>The number of acceptable skip records surrounding the bad
  record PER bad record in mapper. The number includes the bad record as well.
  To turn off the feature of detection/skipping of bad records, set the
  value to 0.
  The framework tries to narrow down the skipped range by retrying
  until this threshold is met OR all attempts get exhausted for this task.
  Set the value to Long.MAX_VALUE to indicate that the framework need not try
  to narrow down. Whatever records (application-dependent) get skipped are
  acceptable.
  </description>
</property>

<property>
  <name>mapreduce.reduce.skip.maxgroups</name>
  <value>0</value>
  <description>The number of acceptable skip groups surrounding the bad
  group PER bad group in reducer. The number includes the bad group as well.
  To turn off the feature of detection/skipping of bad groups, set the
  value to 0.
  The framework tries to narrow down the skipped range by retrying
  until this threshold is met OR all attempts get exhausted for this task.
  Set the value to Long.MAX_VALUE to indicate that the framework need not try
  to narrow down. Whatever groups (application-dependent) get skipped are
  acceptable.
  </description>
</property>

<!-- Job Notification Configuration -->

<!--
<property>
  <name>mapreduce.job.end-notification.url</name>
  <value>http://localhost:8080/jobstatus.php?jobId=$jobId&jobStatus=$jobStatus</value>
  <description>Indicates url which will be called on completion of job to inform
  end status of job.
  User can give at most 2 variables with URI : $jobId and $jobStatus.
  If they are present in URI, then they will be replaced by their
  respective values.
  </description>
</property>
-->

<property>
  <name>mapreduce.job.end-notification.retry.attempts</name>
  <value>0</value>
  <description>Indicates how many times hadoop should attempt to contact the
  notification URL.</description>
</property>

<property>
  <name>mapreduce.job.end-notification.retry.interval</name>
  <value>30000</value>
  <description>Indicates the time in milliseconds between notification URL
  retry calls.</description>
</property>
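
<!-- Illustrative only: retrying the (commented-out) notification URL above
     three times, thirty seconds apart:

     <property>
       <name>mapreduce.job.end-notification.retry.attempts</name>
       <value>3</value>
     </property>
     <property>
       <name>mapreduce.job.end-notification.retry.interval</name>
       <value>30000</value>
     </property>
-->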

<!-- Proxy Configuration -->

<property>
  <name>mapreduce.jobtracker.taskcache.levels</name>
  <value>2</value>
  <description>This is the max level of the task cache. For example, if
  the level is 2, the tasks cached are at the host level and at the rack
  level.
  </description>
</property>

<property>
  <name>mapreduce.job.queuename</name>
  <value>default</value>
  <description>Queue to which a job is submitted. This must match one of the
  queues defined in mapred-queues.xml for the system. Also, the ACL setup
  for the queue must allow the current user to submit a job to the queue.
  Before specifying a queue, ensure that the system is configured with
  the queue, and access is allowed for submitting jobs to the queue.
  </description>
</property>

<property>
  <name>mapreduce.cluster.acls.enabled</name>
  <value>false</value>
  <description>Specifies whether ACLs should be checked
  for authorization of users for doing various queue and job level operations.
  ACLs are disabled by default. If enabled, access control checks are made by
  the JobTracker and TaskTracker when requests are made by users for queue
  operations like submitting a job to a queue and killing a job in the queue,
  and job operations like viewing the job-details (see
  mapreduce.job.acl-view-job) or modifying the job (see
  mapreduce.job.acl-modify-job) using Map/Reduce APIs, RPCs or via the
  console and web user interfaces.
  To enable this flag (mapreduce.cluster.acls.enabled), set it to true in
  mapred-site.xml on the JobTracker node and on all TaskTracker nodes.
  </description>
</property>
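
<!-- Illustrative only: enabling ACL checks and granting view access with the
     "user1,user2 group1,group2" format described below. The user and group
     names are made-up examples:

     <property>
       <name>mapreduce.cluster.acls.enabled</name>
       <value>true</value>
     </property>
     <property>
       <name>mapreduce.job.acl-view-job</name>
       <value>alice,bob opsgroup</value>
     </property>
-->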

<property>
  <name>mapreduce.job.acl-modify-job</name>
  <value> </value>
  <description>Job-specific access-control list for 'modifying' the job. It
  is only used if authorization is enabled in Map/Reduce by setting the
  configuration property mapreduce.cluster.acls.enabled to true.
  This specifies the list of users and/or groups who can do modification
  operations on the job. For specifying a list of users and groups the
  format to use is "user1,user2 group1,group2". If set to '*', it allows all
  users/groups to modify this job. If set to ' ' (i.e. space), it allows
  none. This configuration is used to guard all the modifications with respect
  to this job and takes care of all the following operations:
    o killing this job
    o killing a task of this job, failing a task of this job
    o setting the priority of this job
  Each of these operations is also protected by the per-queue level ACL
  "acl-administer-jobs" configured via mapred-queues.xml. So a caller should
  have the authorization to satisfy either the queue-level ACL or the
  job-level ACL.

  Irrespective of this ACL configuration, (a) the job-owner, (b) the user who
  started the cluster, (c) cluster administrators
  configured via mapreduce.cluster.administrators and (d) queue
  administrators of the queue to which this job was submitted, configured
  via acl-administer-jobs for the specific queue in mapred-queues.xml, can
  do all the modification operations on a job.

  By default, nobody else besides the job-owner, the user who started the
  cluster, cluster administrators and queue administrators can perform
  modification operations on a job.
  </description>
</property>

<property>
  <name>mapreduce.job.acl-view-job</name>
  <value> </value>
  <description>Job-specific access-control list for 'viewing' the job. It is
  only used if authorization is enabled in Map/Reduce by setting the
  configuration property mapreduce.cluster.acls.enabled to true.
  This specifies the list of users and/or groups who can view private details
  about the job. For specifying a list of users and groups the
  format to use is "user1,user2 group1,group2". If set to '*', it allows all
  users/groups to view this job. If set to ' ' (i.e. space), it allows
  none. This configuration is used to guard some of the job-views and at
  present only protects APIs that can return possibly sensitive information
  of the job-owner like
    o job-level counters
    o task-level counters
    o tasks' diagnostic information
    o task-logs displayed on the TaskTracker web-UI and
    o job.xml showed by the JobTracker's web-UI
  Every other piece of information about jobs is still accessible by any
  other user, e.g., JobStatus, JobProfile, list of jobs in the queue, etc.

  Irrespective of this ACL configuration, (a) the job-owner, (b) the user who
  started the cluster, (c) cluster administrators
  configured via mapreduce.cluster.administrators and (d) queue
  administrators of the queue to which this job was submitted, configured
  via acl-administer-jobs for the specific queue in mapred-queues.xml, can
  do all the view operations on a job.

  By default, nobody else besides the job-owner, the user who started the
  cluster, cluster administrators and queue administrators can perform
  view operations on a job.
  </description>
</property>

<property>
  <name>mapreduce.jobtracker.webinterface.trusted</name>
  <value>false</value>
  <description>If set to true, the web interface of the JobTracker
  will include actions such as kill job that are security sensitive.
  Leave this option as false if untrusted users have access to the web interface.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.indexcache.mb</name>
  <value>10</value>
  <description>The maximum memory that a task tracker allows for the
  index cache that is used when serving map outputs to reducers.
  </description>
</property>

<!-- TaskTracker DistributedCache configuration -->
<property>
  <name>mapreduce.tasktracker.cache.local.size</name>
  <value>10737418240</value>
  <description>The number of bytes to allocate in each local TaskTracker
  directory for holding Distributed Cache data.</description>
</property>

<property>
  <name>mapreduce.tasktracker.cache.local.numberdirectories</name>
  <value>10000</value>
  <description>
  The maximum number of subdirectories that should be created in any particular
  distributed cache store. After this many directories have been created,
  cache items will be expunged regardless of whether the total size threshold
  has been exceeded.
  </description>
</property>
<!-- End of TaskTracker DistributedCache configuration -->

<property>
  <name>mapreduce.task.combine.progress.records</name>
  <value>10000</value>
  <description>The number of records to process during combine output collection
  before sending a progress notification to the TaskTracker.
  </description>
</property>

<property>
  <name>mapreduce.task.merge.progress.records</name>
  <value>10000</value>
  <description>The number of records to process during merge before
  sending a progress notification to the TaskTracker.
  </description>
</property>

<property>
  <name>mapreduce.job.reduce.slowstart.completedmaps</name>
  <value>0.05</value>
  <description>Fraction of the number of maps in the job which should be
  complete before reduces are scheduled for the job.
  </description>
</property>

<property>
  <name>mapreduce.job.complete.cancel.delegation.tokens</name>
  <value>true</value>
  <description>If false, do not unregister/cancel delegation tokens from
  renewal, because the same tokens may be used by spawned jobs.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.taskcontroller</name>
  <value>org.apache.hadoop.mapred.DefaultTaskController</value>
  <description>TaskController which is used to launch and manage task execution.
  </description>
</property>

<property>
  <name>mapreduce.tasktracker.group</name>
  <value></value>
  <description>Expert: Group to which the TaskTracker belongs. If
  LinuxTaskController is configured via mapreduce.tasktracker.taskcontroller,
  the group owner of the task-controller binary should be the same as this group.
  </description>
</property>

<!-- Node health script variables -->

<property>
  <name>mapreduce.tasktracker.healthchecker.script.path</name>
  <value></value>
  <description>Absolute path to the script which is
  periodically run by the node health monitoring service to determine if
  the node is healthy or not. If the value of this key is empty or the
  file does not exist in the location configured here, the node health
  monitoring service is not started.</description>
</property>

<property>
  <name>mapreduce.tasktracker.healthchecker.interval</name>
  <value>60000</value>
  <description>Frequency at which the node health script is run,
  in milliseconds.</description>
</property>

<property>
  <name>mapreduce.tasktracker.healthchecker.script.timeout</name>
  <value>600000</value>
  <description>Time, in milliseconds, after which the node health script is
  killed if unresponsive, and the script is considered to have failed.</description>
</property>

<property>
  <name>mapreduce.tasktracker.healthchecker.script.args</name>
  <value></value>
  <description>Comma-separated list of arguments to pass to the
  node health script when it is launched.
  </description>
</property>
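
<!-- Illustrative only: a sketch of enabling the node health check. The
     script path and argument are made-up examples:

     <property>
       <name>mapreduce.tasktracker.healthchecker.script.path</name>
       <value>/usr/local/bin/node_health.sh</value>
     </property>
     <property>
       <name>mapreduce.tasktracker.healthchecker.script.args</name>
       <value>-v</value>
     </property>
-->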

<!-- end of node health script variables -->

</configuration>