HDDS-259. Implement ContainerReportPublisher and NodeReportPublisher. Contributed by Nanda kumar.

Committed by Xiaoyu Yao on 2018-07-20 09:07:58 -07:00
parent e9c44ecfc6
commit 68b57ad32c

16 changed files with 162 additions and 74 deletions
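In short, the commit consolidates the datanode heartbeat and report intervals under new hdds.* keys in HddsConfigKeys and removes the old ozone.scm.heartbeat.interval and ozone.container.report.interval keys. A minimal, hypothetical sketch of configuring the new keys (key names are taken from the diff below; the values simply mirror the shipped defaults):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdds.HddsConfigKeys;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    // Sketch only, not part of the commit: the new hdds.* interval keys and
    // the settings they replace.
    public class HddsReportIntervalSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // was ozone.scm.heartbeat.interval
        conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL,
            30, TimeUnit.SECONDS);
        // new key; the interval was previously hard-coded to 90s in
        // NodeReportPublisher
        conf.setTimeDuration(HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL,
            60, TimeUnit.SECONDS);
        // was ozone.container.report.interval
        conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL,
            60, TimeUnit.SECONDS);
        // key unchanged; its default is now the literal "60s"
        conf.setTimeDuration(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL,
            60, TimeUnit.SECONDS);
      }
    }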


@@ -17,15 +17,35 @@
  */
 package org.apache.hadoop.hdds;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 /**
- * Config class for HDDS.
+ * This class contains constants for configuration keys and default values
+ * used in hdds.
  */
 public final class HddsConfigKeys {
+  /**
+   * Do not instantiate.
+   */
   private HddsConfigKeys() {
   }
+  public static final String HDDS_HEARTBEAT_INTERVAL =
+      "hdds.heartbeat.interval";
+  public static final String HDDS_HEARTBEAT_INTERVAL_DEFAULT =
+      "30s";
+  public static final String HDDS_NODE_REPORT_INTERVAL =
+      "hdds.node.report.interval";
+  public static final String HDDS_NODE_REPORT_INTERVAL_DEFAULT =
+      "60s";
+  public static final String HDDS_CONTAINER_REPORT_INTERVAL =
+      "hdds.container.report.interval";
+  public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT =
+      "60s";
   public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL =
       "hdds.command.status.report.interval";
   public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =
-      ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT;
+      "60s";
 }


@@ -156,11 +156,6 @@ public final class ScmConfigKeys {
       "ozone.scm.handler.count.key";
   public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10;
-  public static final String OZONE_SCM_HEARTBEAT_INTERVAL =
-      "ozone.scm.heartbeat.interval";
-  public static final String OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT =
-      "30s";
   public static final String OZONE_SCM_DEADNODE_INTERVAL =
       "ozone.scm.dead.node.interval";
   public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =


@@ -200,11 +200,6 @@ public final class OzoneConfigKeys {
   public static final int
       OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
-  public static final String OZONE_CONTAINER_REPORT_INTERVAL =
-      "ozone.container.report.interval";
-  public static final String OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT =
-      "60s";
   public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
       = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
   public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT


@@ -153,13 +153,29 @@
     <description>The timeout duration for ratis server request.</description>
   </property>
   <property>
-    <name>ozone.container.report.interval</name>
+    <name>hdds.node.report.interval</name>
+    <value>60000ms</value>
+    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
+    <description>Time interval of the datanode to send node report. Each
+      datanode periodically sends its node report to SCM. Unit could be
+      defined with postfix (ns,ms,s,m,h,d)</description>
+  </property>
+  <property>
+    <name>hdds.container.report.interval</name>
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>Time interval of the datanode to send container report. Each
-      datanode periodically send container report upon receive
-      sendContainerReport from SCM. Unit could be defined with
-      postfix (ns,ms,s,m,h,d)</description>
+      datanode periodically sends its container report to SCM. Unit could be
+      defined with postfix (ns,ms,s,m,h,d)</description>
+  </property>
+  <property>
+    <name>hdds.command.status.report.interval</name>
+    <value>60000ms</value>
+    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
+    <description>Time interval of the datanode to send the status of command
+      execution. Each datanode periodically sends the execution status of
+      commands received from SCM back to SCM. Unit could be defined with
+      postfix (ns,ms,s,m,h,d)</description>
   </property>
   <!--Ozone Settings-->
   <property>
@@ -677,7 +693,7 @@
     </description>
   </property>
   <property>
-    <name>ozone.scm.heartbeat.interval</name>
+    <name>hdds.heartbeat.interval</name>
     <value>30s</value>
     <tag>OZONE, MANAGEMENT</tag>
     <description>
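As the descriptions above note, these interval values accept the usual Hadoop duration postfixes. A small, hypothetical sketch of how such a value is parsed (the "2m" value is only an example; getTimeDuration is the standard Hadoop Configuration API this patch uses elsewhere):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    // Sketch only: duration values such as "2m" or "60000ms" are parsed by
    // Configuration.getTimeDuration into the requested unit.
    public class ReportIntervalUnitsSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.set("hdds.node.report.interval", "2m");
        long intervalMs = conf.getTimeDuration(
            "hdds.node.report.interval", "60s", TimeUnit.MILLISECONDS);
        System.out.println(intervalMs); // prints 120000
      }
    }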


@@ -29,12 +29,14 @@
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_HEARTBEAT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_DEADNODE_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_DEADNODE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@@ -181,9 +183,8 @@ public static long getScmheartbeatCheckerInterval(Configuration conf) {
    * @return - HB interval in seconds.
    */
   public static long getScmHeartbeatInterval(Configuration conf) {
-    return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL,
-        ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT,
-        TimeUnit.SECONDS);
+    return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL,
+        HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
   }
   /**
@@ -225,7 +226,7 @@ public static long getStaleNodeInterval(Configuration conf) {
       sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000);
     } catch (IllegalArgumentException ex) {
       LOG.error("Stale Node Interval MS cannot be honored due to " +
-          "mis-configured {}. ex: {}", OZONE_SCM_HEARTBEAT_INTERVAL, ex);
+          "mis-configured {}. ex: {}", HDDS_HEARTBEAT_INTERVAL, ex);
       throw ex;
     }
     return staleNodeIntervalMs;


@@ -19,12 +19,20 @@
 import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_COMMAND_STATUS_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT;
 /**
  * Publishes CommandStatusReport which will be sent to SCM as part of
  * heartbeat. CommandStatusReport consists of the following information:
@@ -42,9 +50,17 @@ public class CommandStatusReportPublisher extends
   protected long getReportFrequency() {
     if (cmdStatusReportInterval == -1) {
       cmdStatusReportInterval = getConf().getTimeDuration(
-          HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL,
-          HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT,
+          HDDS_COMMAND_STATUS_REPORT_INTERVAL,
+          HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT,
           TimeUnit.MILLISECONDS);
+      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
+          getConf());
+      Preconditions.checkState(
+          heartbeatFrequency < cmdStatusReportInterval,
+          HDDS_COMMAND_STATUS_REPORT_INTERVAL +
+              " cannot be configured lower than heartbeat frequency.");
     }
     return cmdStatusReportInterval;
   }
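Note that the same Preconditions guard is added to all three publishers: reports piggyback on heartbeats, so a report interval shorter than the heartbeat interval cannot be honored. A hypothetical sketch of the misconfiguration the new check rejects (key names come from this patch; the comment states the expected outcome):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdds.HddsConfigKeys;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    // Sketch only: a command status report interval below the heartbeat
    // interval trips the Preconditions.checkState above, so
    // getReportFrequency() would fail with an IllegalStateException.
    public class ReportIntervalGuardSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL,
            30, TimeUnit.SECONDS);
        conf.setTimeDuration(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL,
            10, TimeUnit.SECONDS); // deliberately lower than the heartbeat
      }
    }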


@@ -17,13 +17,20 @@
 package org.apache.hadoop.ozone.container.common.report;
+import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+import java.io.IOException;
 import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT;
 /**
  * Publishes ContainerReport which will be sent to SCM as part of heartbeat.
@@ -49,9 +56,17 @@ public class ContainerReportPublisher extends
   protected long getReportFrequency() {
     if (containerReportInterval == null) {
       containerReportInterval = getConf().getTimeDuration(
-          OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL,
-          OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT,
+          HDDS_CONTAINER_REPORT_INTERVAL,
+          HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT,
           TimeUnit.MILLISECONDS);
+      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
+          getConf());
+      Preconditions.checkState(
+          heartbeatFrequency < containerReportInterval,
+          HDDS_CONTAINER_REPORT_INTERVAL +
+              " cannot be configured lower than heartbeat frequency.");
     }
     // Add a random delay (0~30s) on top of the container report
     // interval (60s) so that the SCM is not overwhelmed by the container reports
@@ -64,7 +79,7 @@ private long getRandomReportDelay() {
   }
   @Override
-  protected ContainerReportsProto getReport() {
-    return ContainerReportsProto.getDefaultInstance();
+  protected ContainerReportsProto getReport() throws IOException {
+    return getContext().getParent().getContainer().getContainerReport();
   }
 }


@@ -17,8 +17,18 @@
 package org.apache.hadoop.ozone.container.common.report;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_NODE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_NODE_REPORT_INTERVAL_DEFAULT;
 /**
  * Publishes NodeReport which will be sent to SCM as part of heartbeat.
@@ -28,13 +38,29 @@
  */
 public class NodeReportPublisher extends ReportPublisher<NodeReportProto> {
+  private Long nodeReportInterval;
   @Override
   protected long getReportFrequency() {
-    return 90000L;
+    if (nodeReportInterval == null) {
+      nodeReportInterval = getConf().getTimeDuration(
+          HDDS_NODE_REPORT_INTERVAL,
+          HDDS_NODE_REPORT_INTERVAL_DEFAULT,
+          TimeUnit.MILLISECONDS);
+      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
+          getConf());
+      Preconditions.checkState(
+          heartbeatFrequency < nodeReportInterval,
+          HDDS_NODE_REPORT_INTERVAL +
+              " cannot be configured lower than heartbeat frequency.");
+    }
+    return nodeReportInterval;
   }
   @Override
-  protected NodeReportProto getReport() {
-    return NodeReportProto.getDefaultInstance();
+  protected NodeReportProto getReport() throws IOException {
+    return getContext().getParent().getContainer().getNodeReport();
   }
 }


@@ -23,7 +23,10 @@
 import org.apache.hadoop.ozone.container.common.statemachine
     .DatanodeStateMachine.DatanodeStates;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.io.IOException;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -34,6 +37,9 @@
 public abstract class ReportPublisher<T extends GeneratedMessage>
     implements Configurable, Runnable {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ReportPublisher.class);
   private Configuration config;
   private StateContext context;
   private ScheduledExecutorService executor;
@@ -76,7 +82,11 @@ public void run() {
    * Generates and publishes the report to datanode state context.
    */
   private void publishReport() {
-    context.addReport(getReport());
+    try {
+      context.addReport(getReport());
+    } catch (IOException e) {
+      LOG.error("Exception while publishing report.", e);
+    }
   }
   /**
@@ -91,7 +101,7 @@ private void publishReport() {
    *
    * @return datanode report
    */
-  protected abstract T getReport();
+  protected abstract T getReport() throws IOException;
   /**
    * Returns {@link StateContext}.
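For orientation, the publisher contract after this change (getReportFrequency returns the publish interval in milliseconds, and getReport may now throw IOException) can be sketched with a hypothetical subclass; it is not part of the commit and only illustrates the abstract API:

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.ozone.container.common.report.ReportPublisher;

    // Hypothetical example: a minimal publisher that emits an empty
    // NodeReportProto once a minute, just to show the two abstract methods.
    public class ExampleNodeReportPublisher
        extends ReportPublisher<NodeReportProto> {

      @Override
      protected long getReportFrequency() {
        return 60000L; // publish interval in milliseconds
      }

      @Override
      protected NodeReportProto getReport() throws IOException {
        return NodeReportProto.getDefaultInstance(); // placeholder payload
      }
    }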


@@ -180,14 +180,9 @@ public void testCommandStatusPublisher() throws InterruptedException {
   @Test
   public void testAddingReportToHeartbeat() {
-    Configuration conf = new OzoneConfiguration();
-    ReportPublisherFactory factory = new ReportPublisherFactory(conf);
-    ReportPublisher nodeReportPublisher = factory.getPublisherFor(
-        NodeReportProto.class);
-    ReportPublisher containerReportPubliser = factory.getPublisherFor(
-        ContainerReportsProto.class);
-    GeneratedMessage nodeReport = nodeReportPublisher.getReport();
-    GeneratedMessage containerReport = containerReportPubliser.getReport();
+    GeneratedMessage nodeReport = NodeReportProto.getDefaultInstance();
+    GeneratedMessage containerReport = ContainerReportsProto
+        .getDefaultInstance();
     SCMHeartbeatRequestProto.Builder heartbeatBuilder =
         SCMHeartbeatRequestProto.newBuilder();
     heartbeatBuilder.setDatanodeDetails(


@@ -37,10 +37,10 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT;
 /**
  * A class that manages closing of containers. This allows transition from a
@@ -75,8 +75,8 @@ public ContainerCloser(NodeManager nodeManager, Configuration conf) {
     this.threadRunCount = new AtomicInteger(0);
     this.isRunning = new AtomicBoolean(false);
     this.reportInterval = this.configuration.getTimeDuration(
-        OZONE_CONTAINER_REPORT_INTERVAL,
-        OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
+        HDDS_CONTAINER_REPORT_INTERVAL,
+        HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
     Preconditions.checkState(this.reportInterval > 0,
         "report interval has to be greater than 0");
   }


@@ -44,6 +44,8 @@
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@@ -52,8 +54,6 @@
     .CREATE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent
     .CREATED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CONTAINER_REPORT_INTERVAL;
 /**
  * Test class for Closing Container.
@@ -72,7 +72,7 @@ public static void setUp() throws Exception {
     configuration = SCMTestUtils.getConf();
     size = configuration.getLong(OZONE_SCM_CONTAINER_SIZE_GB,
         OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
-    configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL,
+    configuration.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL,
         1, TimeUnit.SECONDS);
     testDir = GenericTestUtils
         .getTestDir(TestContainerMapping.class.getSimpleName());
@@ -137,7 +137,7 @@ public void testRepeatedClose() throws IOException,
     // second report is discarded by the system if it lands in the 3 * report
     // frequency window.
-    configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL, 1,
+    configuration.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1,
         TimeUnit.SECONDS);
     ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(


@@ -58,10 +58,9 @@
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@@ -359,7 +358,7 @@ public void testScmSanityOfUserConfig1() throws IOException,
     final int interval = 100;
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
     // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
@@ -388,7 +387,7 @@ public void testScmSanityOfUserConfig2() throws IOException,
     final int interval = 100;
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
         TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
     // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL
     // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL
@@ -413,7 +412,7 @@ public void testScmDetectStaleAndDeadNode() throws IOException,
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
@@ -551,7 +550,7 @@ public void testScmClusterIsInExpectedState1() throws IOException,
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
@@ -729,7 +728,7 @@ public void testScmClusterIsInExpectedState2() throws IOException,
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
@@ -820,7 +819,7 @@ public void testScmCanHandleScale() throws IOException,
     OzoneConfiguration conf = getConf();
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1,
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1,
         SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000,
         MILLISECONDS);
@@ -985,7 +984,7 @@ public void testScmNodeReportUpdate() throws IOException,
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval,
         MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);


@@ -59,6 +59,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
     .HEALTHY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
@@ -392,11 +393,11 @@ private void configureSCM() {
   private void configureSCMheartbeat() {
     if (hbInterval.isPresent()) {
-      conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
+      conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL,
           hbInterval.get(), TimeUnit.MILLISECONDS);
     } else {
-      conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
+      conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL,
           DEFAULT_HB_INTERVAL_MS,
           TimeUnit.MILLISECONDS);
     }


@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone;
-import static org.junit.Assert.fail;
 import java.io.IOException;
 import org.apache.commons.lang3.RandomUtils;
@@ -68,6 +67,9 @@
 import org.mockito.Mockito;
 import org.apache.hadoop.test.GenericTestUtils;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
+import static org.junit.Assert.fail;
 /**
  * Test class that exercises the StorageContainerManager.
  */
@@ -186,9 +188,7 @@ private void verifyPermissionDeniedException(Exception e, String userName) {
   public void testBlockDeletionTransactions() throws Exception {
     int numKeys = 5;
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL,
-        5,
-        TimeUnit.SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 5, TimeUnit.SECONDS);
     conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         3000,
         TimeUnit.MILLISECONDS);


@@ -31,14 +31,13 @@
 import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@@ -61,7 +60,7 @@ public void setUp() throws Exception {
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         interval, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
+    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);