YARN-1306. Clean up hadoop-sls sample-conf according to YARN-1228 (Wei Yan via Sandy Ryza)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1536982 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Sanford Ryza 2013-10-30 06:32:53 +00:00
parent 68a79b0d3f
commit 87adffe877
5 changed files with 51 additions and 82 deletions

View File

@@ -57,11 +57,4 @@
<name>yarn.scheduler.capacity.root.sls_queue_3.maximum-capacity</name> <name>yarn.scheduler.capacity.root.sls_queue_3.maximum-capacity</name>
<value>100</value> <value>100</value>
</property> </property>
<property>
<name>yarn.scheduler.capacity.maximum-applications</name>
<value>1000</value>
<description>Maximum number of applications in the system which
can be concurrently active both running and pending</description>
</property>
</configuration> </configuration>

View File

@@ -1,50 +0,0 @@
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!--
This file contains pool and user allocations for the Fair Scheduler.
Its format is explained in the Fair Scheduler documentation at
http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/FairScheduler.html
The documentation also includes a sample config file.
-->
<allocations>
<user name="jenkins">
<!-- Limit on running jobs for the user across all pools. If more
jobs than this are submitted, only the first <maxRunningJobs> will
be scheduled at any given time. Defaults to infinity or the
userMaxJobsDefault value set below. -->
<maxRunningJobs>1000</maxRunningJobs>
</user>
<userMaxAppsDefault>1000</userMaxAppsDefault>
<queue name="sls_queue_1">
<minResources>1024 mb, 1 vcores</minResources>
<schedulingMode>fair</schedulingMode>
<weight>0.25</weight>
<minSharePreemptionTimeout>2</minSharePreemptionTimeout>
</queue>
<queue name="sls_queue_2">
<minResources>1024 mb, 1 vcores</minResources>
<schedulingMode>fair</schedulingMode>
<weight>0.25</weight>
<minSharePreemptionTimeout>2</minSharePreemptionTimeout>
</queue>
<queue name="sls_queue_3">
<minResources>1024 mb, 1 vcores</minResources>
<weight>0.5</weight>
<schedulingMode>fair</schedulingMode>
<minSharePreemptionTimeout>2</minSharePreemptionTimeout>
</queue>
</allocations>

View File

@@ -20,28 +20,31 @@
The documentation also includes a sample config file. The documentation also includes a sample config file.
--> -->
<configuration> <allocations>
<property> <user name="jenkins">
<description>Absolute path to allocation file. An allocation file is an XML <!-- Limit on running jobs for the user across all pools. If more
manifest describing queues and their properties, in addition to certain jobs than this are submitted, only the first <maxRunningJobs> will
policy defaults. This file must be in XML format as described in be scheduled at any given time. Defaults to infinity or the
http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/FairScheduler.html. userMaxJobsDefault value set below. -->
</description> <maxRunningJobs>1000</maxRunningJobs>
<name>yarn.scheduler.fair.allocation.file</name> </user>
<value>fair-scheduler-allocation.xml</value> <userMaxAppsDefault>1000</userMaxAppsDefault>
</property> <queue name="sls_queue_1">
<minResources>1024 mb, 1 vcores</minResources>
<property> <schedulingMode>fair</schedulingMode>
<description>Whether to use preemption. Note that preemption is experimental <weight>0.25</weight>
in the current version. Defaults to false.</description> <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
<name>yarn.scheduler.fair.preemption</name> </queue>
<value>true</value> <queue name="sls_queue_2">
</property> <minResources>1024 mb, 1 vcores</minResources>
<schedulingMode>fair</schedulingMode>
<property> <weight>0.25</weight>
<description>Whether to allow multiple container assignments in one <minSharePreemptionTimeout>2</minSharePreemptionTimeout>
heartbeat. Defaults to false.</description> </queue>
<name>yarn.scheduler.fair.assignmultiple</name> <queue name="sls_queue_3">
<value>true</value> <minResources>1024 mb, 1 vcores</minResources>
</property> <weight>0.5</weight>
</configuration> <schedulingMode>fair</schedulingMode>
<minSharePreemptionTimeout>2</minSharePreemptionTimeout>
</queue>
</allocations>

View File

@@ -57,4 +57,24 @@
<value>false</value> <value>false</value>
</property> </property>
<property>
<name>yarn.scheduler.capacity.maximum-applications</name>
<value>1000</value>
<description>Maximum number of applications in the system which
can be concurrently active both running and pending</description>
</property>
<property>
<description>Whether to use preemption. Note that preemption is experimental
in the current version. Defaults to false.</description>
<name>yarn.scheduler.fair.preemption</name>
<value>true</value>
</property>
<property>
<description>Whether to allow multiple container assignments in one
heartbeat. Defaults to false.</description>
<name>yarn.scheduler.fair.assignmultiple</name>
<value>true</value>
</property>
</configuration> </configuration>

View File

@@ -70,6 +70,9 @@ Release 2.3.0 - UNRELEASED
HADOOP-9598. Improve code coverage of RMAdminCLI (Aleksey Gorshkov and HADOOP-9598. Improve code coverage of RMAdminCLI (Aleksey Gorshkov and
Andrey Klochkov via jeagles) Andrey Klochkov via jeagles)
YARN-1306. Clean up hadoop-sls sample-conf according to YARN-1228 (Wei Yan
via Sandy Ryza)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES