diff --git a/hadoop-dist/src/main/compose/cblock/docker-config b/hadoop-dist/src/main/compose/cblock/docker-config
index 4690de0fdc..f69bef0771 100644
--- a/hadoop-dist/src/main/compose/cblock/docker-config
+++ b/hadoop-dist/src/main/compose/cblock/docker-config
@@ -27,7 +27,8 @@ OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock
OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm
OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
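Note on the change above: the OZONE-SITE.XML_ / HDFS-SITE.XML_ prefixes mark which config file each key lands in when the container boots (assuming the compose images' usual env-to-XML conversion), so the added ozone-site line should materialize roughly as:

    <property>
      <name>hdds.datanode.plugins</name>
      <value>org.apache.hadoop.ozone.web.OzoneHddsDatanodeService</value>
    </property>

The split matters: the REST plug-in is no longer listed in hdfs-site.xml because, after this patch, it is started by HddsDatanodeService rather than directly by the DataNode.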
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
index 8e5efa961f..c693db0428 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -23,11 +23,12 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index ef96f37939..72531a26f1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -230,6 +230,9 @@ public final class OzoneConfigKeys {
public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
"ozone.web.authentication.kerberos.principal";
+ public static final String HDDS_DATANODE_PLUGINS_KEY =
+ "hdds.datanode.plugins";
+
/**
* There is no need to instantiate this class.
*/
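A minimal sketch of how the new key is consumed, mirroring the startPlugins() change further down (PluginListDemo is a made-up name for illustration):

    import java.util.List;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;
    import org.apache.hadoop.util.ServicePlugin;

    public class PluginListDemo {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // getInstances() reflectively instantiates every class named under the
        // key and verifies each one implements ServicePlugin.
        List<ServicePlugin> plugins = conf.getInstances(
            OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
        plugins.forEach(p -> System.out.println("Loaded plug-in: " + p));
      }
    }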
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 8018d294d1..78525a60a3 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1027,5 +1027,13 @@
+  <property>
+    <name>hdds.datanode.plugins</name>
+    <value>org.apache.hadoop.ozone.web.OzoneHddsDatanodeService</value>
+    <description>
+      Comma-separated list of HDDS datanode plug-ins to be activated when
+      HDDS service starts as part of datanode.
+    </description>
+  </property>
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 7213e7e2e3..1c30f2e46d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -17,65 +17,71 @@
*/
package org.apache.hadoop.ozone;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeServicePlugin;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.statemachine
.DatanodeStateMachine;
+import org.apache.hadoop.util.ServicePlugin;
+import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.net.InetAddress;
+import java.util.List;
import java.util.UUID;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
/**
* Datanode service plugin to start the HDDS container services.
*/
-public class HddsDatanodeService implements DataNodeServicePlugin {
+public class HddsDatanodeService implements ServicePlugin {
private static final Logger LOG = LoggerFactory.getLogger(
HddsDatanodeService.class);
- private final boolean isOzoneEnabled;
private Configuration conf;
private DatanodeDetails datanodeDetails;
private DatanodeStateMachine datanodeStateMachine;
-
- public HddsDatanodeService() {
- try {
- OzoneConfiguration.activate();
- this.conf = new OzoneConfiguration();
- this.isOzoneEnabled = HddsUtils.isHddsEnabled(conf);
- if (isOzoneEnabled) {
- this.datanodeDetails = getDatanodeDetails(conf);
- String hostname = DataNode.getHostName(conf);
- String ip = InetAddress.getByName(hostname).getHostAddress();
- this.datanodeDetails.setHostName(hostname);
- this.datanodeDetails.setIpAddress(ip);
- }
- } catch (IOException e) {
- throw new RuntimeException("Can't start the HDDS datanode plugin", e);
- }
- }
+  private List<ServicePlugin> plugins;
@Override
public void start(Object service) {
- if (isOzoneEnabled) {
+ OzoneConfiguration.activate();
+ if (service instanceof Configurable) {
+ conf = new OzoneConfiguration(((Configurable) service).getConf());
+ } else {
+ conf = new OzoneConfiguration();
+ }
+ if (HddsUtils.isHddsEnabled(conf)) {
try {
- DataNode dataNode = (DataNode) service;
- datanodeDetails.setInfoPort(dataNode.getInfoPort());
- datanodeDetails.setInfoSecurePort(dataNode.getInfoSecurePort());
+ String hostname = DataNode.getHostName(conf);
+ String ip = InetAddress.getByName(hostname).getHostAddress();
+ datanodeDetails = initializeDatanodeDetails();
+ datanodeDetails.setHostName(hostname);
+ datanodeDetails.setIpAddress(ip);
+
+      // Below block should be removed as part of HDFS-13324
+ if (service != null) {
+ DataNode dataNode = (DataNode) service;
+ datanodeDetails.setInfoPort(dataNode.getInfoPort());
+ datanodeDetails.setInfoSecurePort(dataNode.getInfoSecurePort());
+ }
datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf);
+ startPlugins();
+ // Starting HDDS Daemons
datanodeStateMachine.startDaemon();
} catch (IOException e) {
throw new RuntimeException("Can't start the HDDS datanode plugin", e);
@@ -84,11 +90,11 @@ public void start(Object service) {
}
/**
- * Returns ContainerNodeIDProto or null in case of Error.
+   * Returns DatanodeDetails or null in case of error.
*
- * @return ContainerNodeIDProto
+ * @return DatanodeDetails
*/
- private static DatanodeDetails getDatanodeDetails(Configuration conf)
+ private DatanodeDetails initializeDatanodeDetails()
throws IOException {
String idFilePath = HddsUtils.getDatanodeIdFilePath(conf);
if (idFilePath == null || idFilePath.isEmpty()) {
@@ -111,24 +117,62 @@ private static DatanodeDetails getDatanodeDetails(Configuration conf)
return DatanodeDetails.newBuilder().setUuid(datanodeUuid).build();
}
}
+ private void startPlugins() {
+ try {
+ plugins = conf.getInstances(HDDS_DATANODE_PLUGINS_KEY,
+ ServicePlugin.class);
+ } catch (RuntimeException e) {
+ String pluginsValue = conf.get(HDDS_DATANODE_PLUGINS_KEY);
+ LOG.error("Unable to load HDDS DataNode plugins. " +
+ "Specified list of plugins: {}",
+ pluginsValue, e);
+ throw e;
+ }
+ for (ServicePlugin plugin : plugins) {
+ try {
+ plugin.start(this);
+ LOG.info("Started plug-in {}", plugin);
+ } catch (Throwable t) {
+ LOG.warn("ServicePlugin {} could not be started", plugin, t);
+ }
+ }
+ }
+ public Configuration getConf() {
+ return conf;
+ }
/**
*
* Return DatanodeDetails if set, return null otherwise.
*
* @return DatanodeDetails
*/
+ @VisibleForTesting
public DatanodeDetails getDatanodeDetails() {
return datanodeDetails;
}
- @InterfaceAudience.Private
+ @VisibleForTesting
public DatanodeStateMachine getDatanodeStateMachine() {
return datanodeStateMachine;
}
+ public void join() throws InterruptedException {
+ datanodeStateMachine.join();
+ }
+
@Override
public void stop() {
+ if (plugins != null) {
+ for (ServicePlugin plugin : plugins) {
+ try {
+ plugin.stop();
+ LOG.info("Stopped plug-in {}", plugin);
+ } catch (Throwable t) {
+ LOG.warn("ServicePlugin {} could not be stopped", plugin, t);
+ }
+ }
+ }
if (datanodeStateMachine != null) {
datanodeStateMachine.stopDaemon();
}
@@ -137,4 +181,20 @@ public void stop() {
@Override
public void close() throws IOException {
}
+
+  public static HddsDatanodeService createHddsDatanodeService(String[] args) {
+ StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
+ return new HddsDatanodeService();
+ }
+
+  public static void main(String[] args) {
+ try {
+ HddsDatanodeService hddsDatanodeService = createHddsDatanodeService(args);
+ hddsDatanodeService.start(null);
+ hddsDatanodeService.join();
+ } catch (Throwable e) {
+ LOG.error("Exception in while starting HddsDatanodeService.", e);
+ terminate(1, e);
+ }
+ }
}
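With this rewrite, any class listed under hdds.datanode.plugins is handed the owning HddsDatanodeService instance in start(), not a DataNode. A sketch of a plug-in conforming to that contract (ExampleHddsPlugin is a made-up name; OzoneHddsDatanodeService, renamed later in this patch, is the real in-tree example):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.HddsDatanodeService;
    import org.apache.hadoop.util.ServicePlugin;

    public class ExampleHddsPlugin implements ServicePlugin {
      @Override
      public void start(Object service) {
        // startPlugins() passes the owning HddsDatanodeService as 'service'.
        if (service instanceof HddsDatanodeService) {
          Configuration conf = ((HddsDatanodeService) service).getConf();
          // ... bring up plug-in resources from conf ...
        }
      }

      @Override
      public void stop() {
        // Invoked from HddsDatanodeService.stop(), before the state machine stops.
      }

      @Override
      public void close() throws IOException {
        // Final cleanup; ServicePlugin extends Closeable.
      }
    }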
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 8e9482f565..ef1ba59534 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -296,6 +296,16 @@ public void startDaemon() {
stateMachineThread.start();
}
+ /**
+ * Waits for DatanodeStateMachine to exit.
+ *
+   * @throws InterruptedException if the join is interrupted.
+ */
+ public void join() throws InterruptedException {
+ stateMachineThread.join();
+ cmdProcessThread.join();
+ }
+
/**
* Stop the daemon thread of the datanode state machine.
*/
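This join() is what makes the new standalone main() in HddsDatanodeService block instead of exiting immediately. A sketch of that lifecycle (StandaloneRunner is a made-up wrapper; it assumes HDDS is enabled in the effective configuration, otherwise the state machine is never created):

    import org.apache.hadoop.ozone.HddsDatanodeService;

    public class StandaloneRunner {
      public static void main(String[] args) throws InterruptedException {
        HddsDatanodeService service =
            HddsDatanodeService.createHddsDatanodeService(args);
        service.start(null); // null: no hosting DataNode, the HDFS-13324 block is skipped
        service.join();      // returns once both state machine threads have exited
      }
    }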
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeServicePlugin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeServicePlugin.java
deleted file mode 100644
index 08aae8ba0b..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeServicePlugin.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode;
-
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.util.ServicePlugin;
-
-/**
- * Datanode specific service plugin with additional hooks.
- */
-public interface DataNodeServicePlugin extends ServicePlugin{
-
- /**
- * Extension point to modify the datanode id.
- *
- * @param dataNodeId
- */
- default void onDatanodeIdCreation(DatanodeID dataNodeId) {
- //NOOP
- }
-
- /**
- * Extension point to modify the datanode id.
- *
- * @param dataNodeId
- */
- default void onDatanodeSuccessfulNamenodeRegisration(
- DatanodeRegistration dataNodeId) {
- //NOOP
- }
-}
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/docker-config
index 8e5efa961f..c693db0428 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/compose/docker-config
@@ -23,11 +23,12 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java
index 9335c7c9b8..6a79cd988a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java
@@ -77,6 +77,7 @@
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
.HEALTHY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
import static org.junit.Assert.assertFalse;
/**
@@ -501,8 +502,9 @@ public MiniOzoneClassicCluster build() throws IOException {
conf.set(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "127.0.0.1:0");
conf.set(DFS_DATANODE_PLUGINS_KEY,
- "org.apache.hadoop.ozone.web.ObjectStoreRestPlugin," +
"org.apache.hadoop.ozone.HddsDatanodeService");
+ conf.set(HDDS_DATANODE_PLUGINS_KEY,
+ "org.apache.hadoop.ozone.web.OzoneHddsDatanodeService");
// Configure KSM and SCM handlers
conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java
index 2ab427c24b..fc6380ab52 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
import org.apache.hadoop.ozone.container.common.statemachine
@@ -32,6 +33,10 @@ public class MiniOzoneTestHelper {
private MiniOzoneTestHelper() {
}
+ public static DatanodeDetails getDatanodeDetails(DataNode dataNode) {
+ return findHddsPlugin(dataNode).getDatanodeDetails();
+ }
+
public static OzoneContainer getOzoneContainer(DataNode dataNode) {
return findHddsPlugin(dataNode).getDatanodeStateMachine()
.getContainer();
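findHddsPlugin() itself lies outside this hunk; a plausible shape, mirroring the plugin-list lookup that this patch deletes from ObjectStoreRestPlugin below (the wrapper class is illustrative only):

    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.ozone.HddsDatanodeService;
    import org.apache.hadoop.util.ServicePlugin;

    final class HddsPluginLookup {
      static HddsDatanodeService findHddsPlugin(DataNode dataNode) {
        // Scan the DataNode's loaded service plug-ins for the HDDS service.
        for (ServicePlugin plugin : dataNode.getPlugins()) {
          if (plugin instanceof HddsDatanodeService) {
            return (HddsDatanodeService) plugin;
          }
        }
        throw new IllegalStateException(
            "No HddsDatanodeService found among DataNode plugins");
      }
    }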
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreRestPlugin.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java
similarity index 56%
rename from hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreRestPlugin.java
rename to hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java
index 2128b806cf..2283ba6523 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreRestPlugin.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java
@@ -18,17 +18,11 @@
package org.apache.hadoop.ozone.web;
import java.io.IOException;
-import java.nio.channels.ServerSocketChannel;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeServicePlugin;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.web.netty.ObjectStoreRestHttpServer;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.util.ServicePlugin;
@@ -38,57 +32,39 @@
/**
* DataNode service plugin implementation to start ObjectStore rest server.
*/
-public class ObjectStoreRestPlugin implements DataNodeServicePlugin {
+public class OzoneHddsDatanodeService implements ServicePlugin {
private static final Logger LOG =
- LoggerFactory.getLogger(ObjectStoreRestPlugin.class);
-
- private final boolean isOzoneEnabled;
+ LoggerFactory.getLogger(OzoneHddsDatanodeService.class);
private Configuration conf;
private ObjectStoreHandler handler;
private ObjectStoreRestHttpServer objectStoreRestHttpServer;
- public ObjectStoreRestPlugin() {
- OzoneConfiguration.activate();
- this.conf = new OzoneConfiguration();
- this.isOzoneEnabled = OzoneUtils.isOzoneEnabled(conf);
- }
-
@Override
public void start(Object service) {
- DataNode dataNode = (DataNode) service;
- if (isOzoneEnabled) {
+ if (service instanceof HddsDatanodeService) {
try {
- handler = new ObjectStoreHandler(dataNode.getConf());
- ServerSocketChannel httpServerChannel =
- dataNode.getSecureResources() != null ?
- dataNode.getSecureResources().getHttpServerChannel() :
- null;
-
- objectStoreRestHttpServer =
- new ObjectStoreRestHttpServer(dataNode.getConf(), httpServerChannel,
- handler);
-
+ HddsDatanodeService hddsDatanodeService = (HddsDatanodeService) service;
+ conf = hddsDatanodeService.getConf();
+ handler = new ObjectStoreHandler(conf);
+ objectStoreRestHttpServer = new ObjectStoreRestHttpServer(
+ conf, null, handler);
objectStoreRestHttpServer.start();
- getDatanodeDetails(dataNode).setOzoneRestPort(
+ hddsDatanodeService.getDatanodeDetails().setOzoneRestPort(
objectStoreRestHttpServer.getHttpAddress().getPort());
+
} catch (IOException e) {
throw new RuntimeException("Can't start the Object Store Rest server",
e);
}
+ } else {
+ LOG.error("Not starting {}, as the plugin is not invoked through {}",
+ OzoneHddsDatanodeService.class.getSimpleName(),
+ HddsDatanodeService.class.getSimpleName());
}
}
- public static DatanodeDetails getDatanodeDetails(DataNode dataNode) {
- for (ServicePlugin plugin : dataNode.getPlugins()) {
- if (plugin instanceof HddsDatanodeService) {
- return ((HddsDatanodeService) plugin).getDatanodeDetails();
- }
- }
- throw new RuntimeException("Not able to find HddsDatanodeService in the" +
- " list of plugins loaded by DataNode.");
- }
@Override
public void stop() {
@@ -100,7 +76,7 @@ public void stop() {
}
@Override
- public void close() throws IOException {
+ public void close() {
IOUtils.closeQuietly(objectStoreRestHttpServer);
IOUtils.closeQuietly(handler);
}
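Taken together, the plug-in chain is now two-level: dfs.datanode.plugins makes the HDFS DataNode start HddsDatanodeService, and hdds.datanode.plugins makes HddsDatanodeService start OzoneHddsDatanodeService. A sketch of wiring both levels programmatically, as MiniOzoneClassicCluster now does (PluginWiringDemo is a made-up name; the class lists match the docker-config changes above):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    public class PluginWiringDemo {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Level 1: the HDFS DataNode hosts the HDDS service.
        conf.set("dfs.datanode.plugins",
            "org.apache.hadoop.ozone.HddsDatanodeService");
        // Level 2: the HDDS service hosts the Ozone REST endpoint.
        conf.set(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY,
            "org.apache.hadoop.ozone.web.OzoneHddsDatanodeService");
        System.out.println(conf.get(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY));
      }
    }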