YARN-8122. Added component health monitoring for YARN service.

Contributed by Gour Saha
Eric Yang 2018-04-26 19:17:32 -04:00
parent 09c9cf1d43
commit 93979db840
7 changed files with 434 additions and 9 deletions

@@ -48,6 +48,7 @@
import org.apache.hadoop.yarn.service.api.records.ServiceState;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
import org.apache.hadoop.yarn.service.monitor.ComponentHealthThresholdMonitor;
import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils;
import org.apache.hadoop.yarn.service.monitor.probe.Probe;
import org.apache.hadoop.yarn.service.containerlaunch.ContainerLaunchService;
@@ -73,6 +74,7 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
@@ -83,10 +85,7 @@
import static org.apache.hadoop.yarn.service.component.ComponentEventType.*;
import static org.apache.hadoop.yarn.service.component.ComponentState.*;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.*;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_THRESHOLD;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_FAILURE_THRESHOLD;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_ENABLED;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_ENABLED_DEFAULT;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*;
public class Component implements EventHandler<ComponentEvent> {
private static final Logger LOG = LoggerFactory.getLogger(Component.class);
@@ -112,6 +111,7 @@ public class Component implements EventHandler<ComponentEvent> {
// The number of containers failed since last reset. This excludes preempted,
// disk_failed containers etc. This will be reset to 0 periodically.
public AtomicInteger currentContainerFailure = new AtomicInteger(0);
private boolean healthThresholdMonitorEnabled = false;
private AtomicBoolean upgradeInProgress = new AtomicBoolean(false);
private ComponentEvent upgradeEvent;
@@ -205,6 +205,7 @@ public Component(
componentSpec.getConfiguration(), scheduler.getConfig());
createNumCompInstances(component.getNumberOfContainers());
setDesiredContainers(component.getNumberOfContainers().intValue());
checkAndScheduleHealthThresholdMonitor();
}
private void createNumCompInstances(long count) {
@@ -222,6 +223,73 @@ private void createOneCompInstance() {
pendingInstances.add(instance);
}
private void checkAndScheduleHealthThresholdMonitor() {
// Determine health threshold percent
int healthThresholdPercent = YarnServiceConf.getInt(
CONTAINER_HEALTH_THRESHOLD_PERCENT,
DEFAULT_CONTAINER_HEALTH_THRESHOLD_PERCENT,
componentSpec.getConfiguration(), scheduler.getConfig());
// Validations
if (healthThresholdPercent == CONTAINER_HEALTH_THRESHOLD_PERCENT_DISABLED) {
LOG.info("No health threshold monitor enabled for component {}",
componentSpec.getName());
return;
}
// If threshold is set to outside acceptable range then don't enable monitor
if (healthThresholdPercent <= 0 || healthThresholdPercent > 100) {
LOG.error(
"Invalid health threshold percent {}% for component {}. Monitor not "
+ "enabled.",
healthThresholdPercent, componentSpec.getName());
return;
}
// Determine the threshold properties
long window = YarnServiceConf.getLong(CONTAINER_HEALTH_THRESHOLD_WINDOW_SEC,
DEFAULT_CONTAINER_HEALTH_THRESHOLD_WINDOW_SEC,
componentSpec.getConfiguration(), scheduler.getConfig());
long initDelay = YarnServiceConf.getLong(
CONTAINER_HEALTH_THRESHOLD_INIT_DELAY_SEC,
DEFAULT_CONTAINER_HEALTH_THRESHOLD_INIT_DELAY_SEC,
componentSpec.getConfiguration(), scheduler.getConfig());
long pollFrequency = YarnServiceConf.getLong(
CONTAINER_HEALTH_THRESHOLD_POLL_FREQUENCY_SEC,
DEFAULT_CONTAINER_HEALTH_THRESHOLD_POLL_FREQUENCY_SEC,
componentSpec.getConfiguration(), scheduler.getConfig());
// Validations
if (window <= 0) {
LOG.error(
"Invalid health monitor window {} secs for component {}. Monitor not "
+ "enabled.",
window, componentSpec.getName());
return;
}
if (initDelay < 0) {
LOG.error("Invalid health monitor init delay {} secs for component {}. "
+ "Monitor not enabled.", initDelay, componentSpec.getName());
return;
}
if (pollFrequency <= 0) {
LOG.error(
"Invalid health monitor poll frequency {} secs for component {}. "
+ "Monitor not enabled.",
pollFrequency, componentSpec.getName());
return;
}
LOG.info(
"Scheduling the health threshold monitor for component {} with percent "
+ "= {}%, window = {} secs, poll freq = {} secs, init-delay = {} "
+ "secs",
componentSpec.getName(), healthThresholdPercent, window, pollFrequency,
initDelay);
// Add 3 extra seconds to initial delay to account for the time taken to
// request containers before the monitor starts calculating health.
this.scheduler.executorService.scheduleAtFixedRate(
new ComponentHealthThresholdMonitor(this, healthThresholdPercent,
window),
initDelay + 3, pollFrequency, TimeUnit.SECONDS);
setHealthThresholdMonitorEnabled(true);
}
private static class FlexComponentTransition implements
MultipleArcTransition<Component, ComponentEvent, ComponentState> {
// For flex up, go to FLEXING state
@@ -872,4 +940,13 @@ public ServiceContext getContext() {
public List<ComponentInstance> getPendingInstances() {
return pendingInstances;
}
public boolean isHealthThresholdMonitorEnabled() {
return healthThresholdMonitorEnabled;
}
public void setHealthThresholdMonitorEnabled(
boolean healthThresholdMonitorEnabled) {
this.healthThresholdMonitorEnabled = healthThresholdMonitorEnabled;
}
}

@@ -250,8 +250,11 @@ public void transition(ComponentInstance compInstance,
}
compInstance.component.decRunningContainers();
boolean shouldExit = false;
// check if it exceeds the failure threshold
if (comp.currentContainerFailure.get() > comp.maxContainerFailurePerComp) {
// Check if it exceeds the failure threshold, but only if health threshold
// monitor is not enabled
if (!comp.isHealthThresholdMonitorEnabled()
&& comp.currentContainerFailure
.get() > comp.maxContainerFailurePerComp) {
String exitDiag = MessageFormat.format(
"[COMPONENT {0}]: Failed {1} times, exceeded the limit - {2}. Shutting down now... "
+ System.lineSeparator(),

@@ -124,6 +124,57 @@ public class YarnServiceConf {
public static final String DEPENDENCY_TARBALL_PATH = YARN_SERVICE_PREFIX
+ "framework.path";
public static final String YARN_SERVICE_CONTAINER_HEALTH_THRESHOLD_PREFIX =
YARN_SERVICE_PREFIX + "container-health-threshold.";
/**
 * When the container health threshold percent is set explicitly for a
 * specific component, or globally for all components, a health check monitor
 * is scheduled to periodically check the percentage of healthy containers.
 * The check runs at a specified/default poll frequency. A component is
 * allowed to be below the health threshold for a specified/default window,
 * after which the service is considered unhealthy and a service stop is
 * triggered. When the health threshold percent is enabled,
 * CONTAINER_FAILURE_THRESHOLD is ignored.
 */
public static final String CONTAINER_HEALTH_THRESHOLD_PERCENT =
YARN_SERVICE_CONTAINER_HEALTH_THRESHOLD_PREFIX + "percent";
/**
 * Health check monitor poll frequency. This is an advanced setting that does
 * not need to be set unless the service owner understands the implications
 * and does not want the default.
*/
public static final String CONTAINER_HEALTH_THRESHOLD_POLL_FREQUENCY_SEC =
YARN_SERVICE_CONTAINER_HEALTH_THRESHOLD_PREFIX + "poll-frequency-secs";
/**
* The amount of time the health check monitor allows a specific component to
* be below the health threshold after which it considers the service to be
* unhealthy.
*/
public static final String CONTAINER_HEALTH_THRESHOLD_WINDOW_SEC =
YARN_SERVICE_CONTAINER_HEALTH_THRESHOLD_PREFIX + "window-secs";
/**
 * The initial amount of time the health check monitor waits before its first
 * check kicks in. It gives the service containers lead time to come up for
 * the first time.
 */
public static final String CONTAINER_HEALTH_THRESHOLD_INIT_DELAY_SEC =
YARN_SERVICE_CONTAINER_HEALTH_THRESHOLD_PREFIX + "init-delay-secs";
/**
* By default the health threshold percent does not come into play until it is
* explicitly set in resource config for a specific component or globally for
* all components. -1 signifies disabled.
*/
public static final int CONTAINER_HEALTH_THRESHOLD_PERCENT_DISABLED = -1;
public static final int DEFAULT_CONTAINER_HEALTH_THRESHOLD_PERCENT =
CONTAINER_HEALTH_THRESHOLD_PERCENT_DISABLED;
public static final long DEFAULT_CONTAINER_HEALTH_THRESHOLD_POLL_FREQUENCY_SEC = 10;
public static final long DEFAULT_CONTAINER_HEALTH_THRESHOLD_WINDOW_SEC = 600;
// The default for initial delay is the same as the default health window
public static final long DEFAULT_CONTAINER_HEALTH_THRESHOLD_INIT_DELAY_SEC =
DEFAULT_CONTAINER_HEALTH_THRESHOLD_WINDOW_SEC;
/**
 * Get long value for the property. First get from the userConf; if not
 * present, get from systemConf.

@@ -0,0 +1,151 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service.monitor;
import java.util.Date;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.yarn.service.component.Component;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Monitors the health of containers of a specific component at a regular
* interval. It takes necessary actions when the health of a component drops
* below a desired threshold.
*/
public class ComponentHealthThresholdMonitor implements Runnable {
private static final Logger LOG = LoggerFactory
.getLogger(ComponentHealthThresholdMonitor.class);
private final Component component;
private final int healthThresholdPercent;
private final long healthThresholdWindowSecs;
private final long healthThresholdWindowNanos;
private long firstOccurrenceTimestamp = 0;
// Sufficient logging happens when component health is below threshold.
// However, there has to be some logging when it is above threshold, otherwise
// service owners have no idea how the health is fluctuating. So let's log
// whenever there is a change in component health, thereby preventing
// excessive logging on every poll.
private float prevReadyContainerFraction = 0;
public ComponentHealthThresholdMonitor(Component component,
int healthThresholdPercent, long healthThresholdWindowSecs) {
this.component = component;
this.healthThresholdPercent = healthThresholdPercent;
this.healthThresholdWindowSecs = healthThresholdWindowSecs;
this.healthThresholdWindowNanos = TimeUnit.NANOSECONDS
.convert(healthThresholdWindowSecs, TimeUnit.SECONDS);
}
@Override
public void run() {
LOG.debug("ComponentHealthThresholdMonitor run method");
// Perform container health checks against desired threshold
long desiredContainerCount = component.getNumDesiredInstances();
// If desired container count for this component is 0 then nothing to do
if (desiredContainerCount == 0) {
return;
}
long readyContainerCount = component.getNumReadyInstances();
float thresholdFraction = (float) healthThresholdPercent / 100;
// No possibility of div by 0 since desiredContainerCount won't be 0 here
float readyContainerFraction = (float) readyContainerCount
/ desiredContainerCount;
boolean healthChanged = false;
if (Math.abs(
readyContainerFraction - prevReadyContainerFraction) > .0000001) {
prevReadyContainerFraction = readyContainerFraction;
healthChanged = true;
}
String readyContainerPercentStr = String.format("%.2f",
readyContainerFraction * 100);
// Check if the current ready container percent is less than the
// threshold percent
if (readyContainerFraction < thresholdFraction) {
// Check if it is the first occurrence and if yes set the timestamp
long currentTimestamp = System.nanoTime();
if (firstOccurrenceTimestamp == 0) {
firstOccurrenceTimestamp = currentTimestamp;
Date date = new Date();
LOG.info(
"[COMPONENT {}] Health has gone below threshold. Starting health "
+ "threshold timer at ts = {} ({})",
component.getName(), date.getTime(), date);
}
long elapsedTime = currentTimestamp - firstOccurrenceTimestamp;
long elapsedTimeSecs = TimeUnit.SECONDS.convert(elapsedTime,
TimeUnit.NANOSECONDS);
LOG.warn(
"[COMPONENT {}] Current health {}% is below health threshold of "
+ "{}% for {} secs (threshold window = {} secs)",
component.getName(), readyContainerPercentStr,
healthThresholdPercent, elapsedTimeSecs, healthThresholdWindowSecs);
if (elapsedTime > healthThresholdWindowNanos) {
LOG.warn(
"[COMPONENT {}] Current health {}% has been below health "
+ "threshold of {}% for {} secs (threshold window = {} secs)",
component.getName(), readyContainerPercentStr,
healthThresholdPercent, elapsedTimeSecs, healthThresholdWindowSecs);
// Trigger service stop
String exitDiag = String.format(
"Service is being killed because container health for component "
+ "%s was %s%% (health threshold = %d%%) for %d secs "
+ "(threshold window = %d secs)",
component.getName(), readyContainerPercentStr,
healthThresholdPercent, elapsedTimeSecs, healthThresholdWindowSecs);
// Append to global diagnostics that will be reported to RM.
component.getScheduler().getDiagnostics().append(exitDiag);
LOG.warn(exitDiag);
// Sleep for 5 seconds in the hope that the state can be recorded in ATS.
// In case there's a client polling the component state, it can be
// notified.
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
LOG.error("Interrupted on sleep while exiting.", e);
}
ExitUtil.terminate(-1);
}
} else {
String logMsg = "[COMPONENT {}] Health threshold = {}%, Current health "
+ "= {}% (Current Ready count = {}, Desired count = {})";
if (healthChanged) {
LOG.info(logMsg, component.getName(), healthThresholdPercent,
readyContainerPercentStr, readyContainerCount,
desiredContainerCount);
} else {
LOG.debug(logMsg, component.getName(), healthThresholdPercent,
readyContainerPercentStr, readyContainerCount,
desiredContainerCount);
}
// The container health might have recovered above the threshold after
// being below it for less than the threshold window. So we need to reset
// firstOccurrenceTimestamp to 0.
if (firstOccurrenceTimestamp != 0) {
Date date = new Date();
LOG.info(
"[COMPONENT {}] Health recovered above threshold at ts = {} ({})",
component.getName(), date.getTime(), date);
firstOccurrenceTimestamp = 0;
}
}
}
}

@@ -31,9 +31,10 @@
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.service.api.records.ComponentState;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.ComponentState;
import org.apache.hadoop.yarn.service.api.records.Configuration;
import org.apache.hadoop.yarn.service.api.records.Container;
import org.apache.hadoop.yarn.service.api.records.ContainerState;
import org.apache.hadoop.yarn.service.api.records.PlacementConstraint;
@@ -62,7 +63,7 @@
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.yarn.api.records.YarnApplicationState.FINISHED;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*;
import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.EXIT_COMMAND_ARGUMENT_ERROR;
import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes.EXIT_NOT_FOUND;
@@ -582,6 +583,109 @@ private static List<String> getSortedContainerIds(Service s) {
return containerIds;
}
// Test to verify the component health threshold monitor. It uses an
// anti-affinity placement policy to make it easier to simulate container
// failure by allocating more containers than the number of NMs.
// 1. Start mini cluster with 3 NMs and scheduler placement-constraint handler
// 2. Create an example service of 3 containers with anti-affinity placement
// policy and health threshold = 65%, window = 3 secs, init-delay = 0 secs,
// poll-frequency = 1 sec
// 3. Flex the component to 4 containers. This makes health = 75%, so based on
// threshold the service will continue to run beyond the window of 3 secs.
// 4. Flex the component to 5 containers. This makes health = 60%, so based on
// threshold the service will be stopped after the window of 3 secs.
@Test (timeout = 200000)
public void testComponentHealthThresholdMonitor() throws Exception {
// We need to enable the scheduler placement-constraint handler at the
// cluster level to let apps use placement policies.
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
setConf(conf);
setupInternal(3);
ServiceClient client = createClient(getConf());
Service exampleApp = new Service();
exampleApp.setName("example-app");
exampleApp.setVersion("v1");
Component comp = createComponent("compa", 3L, "sleep 1000");
PlacementPolicy pp = new PlacementPolicy();
PlacementConstraint pc = new PlacementConstraint();
pc.setName("CA1");
pc.setTargetTags(Collections.singletonList("compa"));
pc.setScope(PlacementScope.NODE);
pc.setType(PlacementType.ANTI_AFFINITY);
pp.setConstraints(Collections.singletonList(pc));
comp.setPlacementPolicy(pp);
Configuration config = new Configuration();
config.setProperty(CONTAINER_HEALTH_THRESHOLD_PERCENT, "65");
config.setProperty(CONTAINER_HEALTH_THRESHOLD_WINDOW_SEC, "3");
config.setProperty(CONTAINER_HEALTH_THRESHOLD_INIT_DELAY_SEC, "0");
config.setProperty(CONTAINER_HEALTH_THRESHOLD_POLL_FREQUENCY_SEC, "1");
config.setProperty(DEFAULT_READINESS_CHECK_ENABLED, "false");
comp.setConfiguration(config);
exampleApp.addComponent(comp);
// Make sure AM does not come up after service is killed for this test
Configuration serviceConfig = new Configuration();
serviceConfig.setProperty(AM_RESTART_MAX, "1");
exampleApp.setConfiguration(serviceConfig);
client.actionCreate(exampleApp);
waitForServiceToBeStable(client, exampleApp);
// Check service is stable and all 3 containers are running
Service service = client.getStatus(exampleApp.getName());
Component component = service.getComponent("compa");
Assert.assertEquals("Service state should be STABLE", ServiceState.STABLE,
service.getState());
Assert.assertEquals("3 containers are expected to be running", 3,
component.getContainers().size());
// Flex compa up to 4 - will make health 75% (3 out of 4 running), but still
// above threshold of 65%, so service will continue to run.
Map<String, Long> compCounts = new HashMap<>();
compCounts.put("compa", 4L);
exampleApp.getComponent("compa").setNumberOfContainers(4L);
client.flexByRestService(exampleApp.getName(), compCounts);
try {
// Wait for 6 secs (window 3 secs + 1 for next poll + 2 for buffer). Since
// the service will never go to stable state (because of anti-affinity the
// 4th container will never be allocated) it will timeout. However, after
// the timeout the service should continue to run since health is 75%
// which is above the threshold of 65%.
waitForServiceToBeStable(client, exampleApp, 6000);
Assert.fail("Service should not be in a stable state. It should throw "
+ "a timeout exception.");
} catch (Exception e) {
// Check that service state is STARTED and only 3 containers are running
service = client.getStatus(exampleApp.getName());
component = service.getComponent("compa");
Assert.assertEquals("Service state should be STARTED",
ServiceState.STARTED, service.getState());
Assert.assertEquals("Component state should be FLEXING",
ComponentState.FLEXING, component.getState());
Assert.assertEquals("3 containers are expected to be running", 3,
component.getContainers().size());
}
// Flex compa up to 5 - will make health 60% (3 out of 5 running), so
// service will stop since it is below threshold of 65%.
compCounts.put("compa", 5L);
exampleApp.getComponent("compa").setNumberOfContainers(5L);
client.flexByRestService(exampleApp.getName(), compCounts);
try {
// Wait for 14 secs (window 3 secs + 1 for next poll + 2 for buffer + 5
// secs of service wait before shutting down + 3 secs app cleanup so that
// API returns that service is in FAILED state). Note, because of
// anti-affinity the 4th and 5th container will never be allocated.
waitForServiceToBeInState(client, exampleApp, ServiceState.FAILED,
14000);
} catch (Exception e) {
Assert.fail("Should not have thrown exception");
}
LOG.info("Destroy service {}", exampleApp);
client.actionDestroy(exampleApp.getName());
}
// Check containers launched are in dependency order
// Get all containers into a list and sort based on container launch time e.g.
// compa-c1, compa-c2, compb-c1, compb-c2;

@@ -140,9 +140,13 @@ Component-level service AM configuration properties can be specified either in t
|yarn.service.container-failure.retry.max | Max number of retries for the container to be auto restarted if it fails (default -1, which means forever).|
|yarn.service.container-failure.retry-interval-ms | Retry interval in milliseconds for the container to be restarted (default 30000, i.e. 30 seconds).|
|yarn.service.container-failure.validity-interval-ms | Failure validity interval in milliseconds. When set to a value greater than 0, the container retry policy will not take the failures that happened outside of this interval into the failure count (default -1, which means that all the failures so far will be included in the failure count).|
|yarn.service.container-failure-per-component.threshold | Max number of container failures (not including retries) for a given component before the AM stops the service (default 10).|
|yarn.service.container-failure-per-component.threshold | Max absolute number of container failures (not including retries) for a given component before the AM stops the service (default 10).|
|yarn.service.node-blacklist.threshold | Maximum number of container failures on a node (not including retries) before the node is blacklisted by the AM (default 3).|
|yarn.service.default-readiness-check.enabled | Whether or not the default readiness check is enabled (default true).|
|yarn.service.container-health-threshold.percent | When the container health threshold percent is set explicitly for a specific component, or globally for all components, a health check monitor is scheduled to periodically check the percentage of healthy containers. A container is healthy if it is in READY state. The check runs at a specified/default poll frequency. A component is allowed to be below the health threshold for a specified/default window, after which the service is considered unhealthy and a service stop is triggered. When the health threshold percent is enabled, yarn.service.container-failure-per-component.threshold is ignored.|
|yarn.service.container-health-threshold.poll-frequency-secs | Health check monitor poll frequency. This is an advanced setting that does not need to be set unless the service owner understands the implications and does not want the default. The default is 10 secs.|
|yarn.service.container-health-threshold.window-secs | The amount of time the health check monitor allows a specific component to be below the health threshold, after which it considers the service unhealthy. The default is 600 secs (10 mins).|
|yarn.service.container-health-threshold.init-delay-secs | The initial amount of time the health check monitor waits before its first check kicks in. It gives the service containers lead time to come up for the first time. The default is 600 secs (10 mins).|
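For example, the health threshold monitor can be enabled for a single component by setting these properties in that component's `configuration` in the service specification (a minimal sketch; the component name and the values are illustrative):

```json
{
  "name": "compa",
  "number_of_containers": 10,
  "configuration": {
    "properties": {
      "yarn.service.container-health-threshold.percent": "65",
      "yarn.service.container-health-threshold.window-secs": "300",
      "yarn.service.container-health-threshold.poll-frequency-secs": "5"
    }
  }
}
```

With these values, the AM stops the service if fewer than 65% of the component's containers are in READY state for more than 300 consecutive secs, checking every 5 secs.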
There is one component-level configuration property that is set differently in the `yarn-site.xml` file than it is in the service specification.
To select the docker network type that will be used for docker containers, `docker.network` may be set in the service `Configuration` `properties` or the component `Configuration` `properties`.

@@ -823,3 +823,38 @@ fulfilled and the service will be in non-STABLE state.
"quicklinks": {}
}
```
### Create a service with health threshold monitor enabled for a component
POST URL - http://localhost:8088/app/v1/services
##### POST Request JSON
```json
{
"name": "hello-world",
"version": "1.0.0",
"description": "hello world example with health threshold monitor",
"components" :
[
{
"name": "hello",
"number_of_containers": 100,
"artifact": {
"id": "nginx:latest",
"type": "DOCKER"
},
"launch_command": "./start_nginx.sh",
"resource": {
"cpus": 1,
"memory": "256"
},
"configuration": {
"properties": {
"yarn.service.container-health-threshold.percent": "90",
"yarn.service.container-health-threshold.window-secs": "400",
"yarn.service.container-health-threshold.init-delay-secs": "800"
}
}
}
]
}
```
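In this example, the health check monitor waits 800 secs before its first check. After that, if fewer than 90% of the 100 `hello` containers stay in READY state for more than 400 consecutive secs, the service is stopped. Since no poll frequency is set, the checks run at the default frequency of 10 secs.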