HDFS-13395. Ozone: Plugins support in HDSL Datanode Service. Contributed by Nanda Kumar.

Xiaoyu Yao 2018-04-10 11:28:52 -07:00 committed by Owen O'Malley
parent 0ec88ea42b
commit 25f2398bbd
11 changed files with 139 additions and 120 deletions

View File

@@ -27,7 +27,8 @@ OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock
 OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm
 OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true

View File

@@ -23,11 +23,12 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
 LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

View File

@@ -230,6 +230,9 @@ public final class OzoneConfigKeys {
   public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
       "ozone.web.authentication.kerberos.principal";
 
+  public static final String HDDS_DATANODE_PLUGINS_KEY =
+      "hdds.datanode.plugins";
+
   /**
    * There is no need to instantiate this class.
    */

View File

@@ -1027,5 +1027,13 @@
     </description>
   </property>
 
+  <property>
+    <name>hdds.datanode.plugins</name>
+    <value>org.apache.hadoop.ozone.web.OzoneHddsDatanodeService</value>
+    <description>
+      Comma-separated list of HDDS datanode plug-ins to be activated when
+      HDDS service starts as part of datanode.
+    </description>
+  </property>
 
 </configuration>
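
For orientation: a class named in hdds.datanode.plugins only has to implement the generic org.apache.hadoop.util.ServicePlugin contract, and, as the HddsDatanodeService diff below shows, each plug-in's start() receives the owning HddsDatanodeService instance. A minimal sketch of a custom plug-in follows; the package and class name are hypothetical, not part of this change:

package org.apache.hadoop.ozone.example;  // hypothetical package, for illustration

import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.util.ServicePlugin;

/**
 * Hypothetical plug-in, activated by appending its class name to the
 * comma-separated hdds.datanode.plugins value.
 */
public class ExampleHddsPlugin implements ServicePlugin {

  @Override
  public void start(Object service) {
    // HddsDatanodeService calls plugin.start(this), so the host service
    // and its effective configuration are available here.
    if (service instanceof HddsDatanodeService) {
      HddsDatanodeService hdds = (HddsDatanodeService) service;
      System.out.println("HDDS conf loaded: " + (hdds.getConf() != null));
    }
  }

  @Override
  public void stop() {
    // Release resources acquired in start(); called on service shutdown.
  }

  @Override
  public void close() {
    // ServicePlugin extends Closeable; final cleanup hook.
  }
}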

View File

@@ -17,65 +17,71 @@
  */
 package org.apache.hadoop.ozone;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeServicePlugin;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine
     .DatanodeStateMachine;
+import org.apache.hadoop.util.ServicePlugin;
+import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
 import java.net.InetAddress;
+import java.util.List;
 import java.util.UUID;
 
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
 /**
  * Datanode service plugin to start the HDDS container services.
  */
-public class HddsDatanodeService implements DataNodeServicePlugin {
+public class HddsDatanodeService implements ServicePlugin {
 
   private static final Logger LOG = LoggerFactory.getLogger(
       HddsDatanodeService.class);
 
-  private final boolean isOzoneEnabled;
-
   private Configuration conf;
   private DatanodeDetails datanodeDetails;
   private DatanodeStateMachine datanodeStateMachine;
-
-  public HddsDatanodeService() {
-    try {
-      OzoneConfiguration.activate();
-      this.conf = new OzoneConfiguration();
-      this.isOzoneEnabled = HddsUtils.isHddsEnabled(conf);
-      if (isOzoneEnabled) {
-        this.datanodeDetails = getDatanodeDetails(conf);
-        String hostname = DataNode.getHostName(conf);
-        String ip = InetAddress.getByName(hostname).getHostAddress();
-        this.datanodeDetails.setHostName(hostname);
-        this.datanodeDetails.setIpAddress(ip);
-      }
-    } catch (IOException e) {
-      throw new RuntimeException("Can't start the HDDS datanode plugin", e);
-    }
-  }
+  private List<ServicePlugin> plugins;
 
   @Override
   public void start(Object service) {
-    if (isOzoneEnabled) {
+    OzoneConfiguration.activate();
+    if (service instanceof Configurable) {
+      conf = new OzoneConfiguration(((Configurable) service).getConf());
+    } else {
+      conf = new OzoneConfiguration();
+    }
+    if (HddsUtils.isHddsEnabled(conf)) {
       try {
-        DataNode dataNode = (DataNode) service;
-        datanodeDetails.setInfoPort(dataNode.getInfoPort());
-        datanodeDetails.setInfoSecurePort(dataNode.getInfoSecurePort());
+        String hostname = DataNode.getHostName(conf);
+        String ip = InetAddress.getByName(hostname).getHostAddress();
+        datanodeDetails = initializeDatanodeDetails();
+        datanodeDetails.setHostName(hostname);
+        datanodeDetails.setIpAddress(ip);
+        //Below block should be removed as part of HDFS-13324
+        if (service != null) {
+          DataNode dataNode = (DataNode) service;
+          datanodeDetails.setInfoPort(dataNode.getInfoPort());
+          datanodeDetails.setInfoSecurePort(dataNode.getInfoSecurePort());
+        }
         datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf);
+        startPlugins();
+        // Starting HDDS Daemons
         datanodeStateMachine.startDaemon();
       } catch (IOException e) {
         throw new RuntimeException("Can't start the HDDS datanode plugin", e);
@@ -84,11 +90,11 @@ public void start(Object service) {
     }
   }
 
   /**
-   * Returns ContainerNodeIDProto or null in case of Error.
+   * Returns DatanodeDetails or null in case of Error.
    *
-   * @return ContainerNodeIDProto
+   * @return DatanodeDetails
    */
-  private static DatanodeDetails getDatanodeDetails(Configuration conf)
+  private DatanodeDetails initializeDatanodeDetails()
       throws IOException {
     String idFilePath = HddsUtils.getDatanodeIdFilePath(conf);
     if (idFilePath == null || idFilePath.isEmpty()) {
@@ -111,24 +117,62 @@ private static DatanodeDetails getDatanodeDetails(Configuration conf)
       return DatanodeDetails.newBuilder().setUuid(datanodeUuid).build();
     }
   }
 
+  private void startPlugins() {
+    try {
+      plugins = conf.getInstances(HDDS_DATANODE_PLUGINS_KEY,
+          ServicePlugin.class);
+    } catch (RuntimeException e) {
+      String pluginsValue = conf.get(HDDS_DATANODE_PLUGINS_KEY);
+      LOG.error("Unable to load HDDS DataNode plugins. " +
+              "Specified list of plugins: {}",
+          pluginsValue, e);
+      throw e;
+    }
+    for (ServicePlugin plugin : plugins) {
+      try {
+        plugin.start(this);
+        LOG.info("Started plug-in {}", plugin);
+      } catch (Throwable t) {
+        LOG.warn("ServicePlugin {} could not be started", plugin, t);
+      }
+    }
+  }
+
+  public Configuration getConf() {
+    return conf;
+  }
+
   /**
    *
    * Return DatanodeDetails if set, return null otherwise.
    *
    * @return DatanodeDetails
    */
+  @VisibleForTesting
   public DatanodeDetails getDatanodeDetails() {
     return datanodeDetails;
   }
 
-  @InterfaceAudience.Private
+  @VisibleForTesting
   public DatanodeStateMachine getDatanodeStateMachine() {
     return datanodeStateMachine;
   }
 
+  public void join() throws InterruptedException {
+    datanodeStateMachine.join();
+  }
+
   @Override
   public void stop() {
+    if (plugins != null) {
+      for (ServicePlugin plugin : plugins) {
+        try {
+          plugin.stop();
+          LOG.info("Stopped plug-in {}", plugin);
+        } catch (Throwable t) {
+          LOG.warn("ServicePlugin {} could not be stopped", plugin, t);
+        }
+      }
+    }
     if (datanodeStateMachine != null) {
       datanodeStateMachine.stopDaemon();
     }
@@ -137,4 +181,20 @@ public void stop() {
   @Override
   public void close() throws IOException {
   }
+
+  public static HddsDatanodeService createHddsDatanodeService(String args[]) {
+    StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
+    return new HddsDatanodeService();
+  }
+
+  public static void main(String args[]) {
+    try {
+      HddsDatanodeService hddsDatanodeService = createHddsDatanodeService(args);
+      hddsDatanodeService.start(null);
+      hddsDatanodeService.join();
+    } catch (Throwable e) {
+      LOG.error("Exception while starting HddsDatanodeService.", e);
+      terminate(1, e);
+    }
+  }
 }
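
The new createHddsDatanodeService/main pair means the service can also be driven without a DataNode. A minimal sketch of that standalone path, assuming an ozone-enabled configuration is on the classpath (the launcher class itself is hypothetical):

import org.apache.hadoop.ozone.HddsDatanodeService;

public class HddsStandaloneLauncher {
  public static void main(String[] args) throws InterruptedException {
    HddsDatanodeService service =
        HddsDatanodeService.createHddsDatanodeService(args);
    // start(null) takes the non-DataNode branch above, so the info-port
    // block slated for removal in HDFS-13324 is skipped entirely.
    service.start(null);
    service.join();  // blocks until the datanode state machine exits
  }
}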

View File

@ -296,6 +296,16 @@ public void startDaemon() {
stateMachineThread.start(); stateMachineThread.start();
} }
/**
* Waits for DatanodeStateMachine to exit.
*
* @throws InterruptedException
*/
public void join() throws InterruptedException {
stateMachineThread.join();
cmdProcessThread.join();
}
/** /**
* Stop the daemon thread of the datanode state machine. * Stop the daemon thread of the datanode state machine.
*/ */

View File

@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode;
-
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.util.ServicePlugin;
-
-/**
- * Datanode specific service plugin with additional hooks.
- */
-public interface DataNodeServicePlugin extends ServicePlugin{
-
-  /**
-   * Extension point to modify the datanode id.
-   *
-   * @param dataNodeId
-   */
-  default void onDatanodeIdCreation(DatanodeID dataNodeId) {
-    //NOOP
-  }
-
-  /**
-   * Extension point to modify the datanode id.
-   *
-   * @param dataNodeId
-   */
-  default void onDatanodeSuccessfulNamenodeRegisration(
-      DatanodeRegistration dataNodeId) {
-    //NOOP
-  }
-}
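
The deleted interface has no one-for-one replacement: both HddsDatanodeService above and the renamed OzoneHddsDatanodeService at the end of this commit now implement the generic plug-in contract from hadoop-common directly, which is essentially just:

// org.apache.hadoop.util.ServicePlugin, paraphrased here for reference:
import java.io.Closeable;

public interface ServicePlugin extends Closeable {
  /** Started by the owning service, which passes itself as the argument. */
  void start(Object service);

  /** Stopped when the owning service shuts down. */
  void stop();
}

Nothing in this patch replaces the two onDatanode* hooks; the DatanodeDetails accessor on HddsDatanodeService appears to cover the id-related use case instead.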

View File

@@ -23,11 +23,12 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
 LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

View File

@@ -77,6 +77,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
     .HEALTHY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
 import static org.junit.Assert.assertFalse;
 
 /**
@@ -501,8 +502,9 @@ public MiniOzoneClassicCluster build() throws IOException {
       conf.set(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
       conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "127.0.0.1:0");
       conf.set(DFS_DATANODE_PLUGINS_KEY,
-          "org.apache.hadoop.ozone.web.ObjectStoreRestPlugin," +
           "org.apache.hadoop.ozone.HddsDatanodeService");
+      conf.set(HDDS_DATANODE_PLUGINS_KEY,
+          "org.apache.hadoop.ozone.web.OzoneHddsDatanodeService");
 
       // Configure KSM and SCM handlers
       conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers);
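
Any test that builds its own configuration can reproduce the same two-level wiring. A minimal sketch using the key constants referenced in this hunk (DFS_DATANODE_PLUGINS_KEY is presumably statically imported from DFSConfigKeys; the wrapper class here is illustrative):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;

public class PluginWiringExample {
  public static OzoneConfiguration newConf() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // The DataNode-level list now carries only the HDDS service...
    conf.set(DFS_DATANODE_PLUGINS_KEY,
        "org.apache.hadoop.ozone.HddsDatanodeService");
    // ...which in turn activates the Ozone REST plug-in at the HDDS level.
    conf.set(HDDS_DATANODE_PLUGINS_KEY,
        "org.apache.hadoop.ozone.web.OzoneHddsDatanodeService");
    return conf;
  }
}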

View File

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone;
 
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.container.common.statemachine
@@ -32,6 +33,10 @@ public class MiniOzoneTestHelper {
 
   private MiniOzoneTestHelper() {
   }
 
+  public static DatanodeDetails getDatanodeDetails(DataNode dataNode) {
+    return findHddsPlugin(dataNode).getDatanodeDetails();
+  }
+
   public static OzoneContainer getOzoneContainer(DataNode dataNode) {
     return findHddsPlugin(dataNode).getDatanodeStateMachine()
         .getContainer();
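
findHddsPlugin itself lies outside this hunk; judging by the plug-in scan removed from ObjectStoreRestPlugin in the last file of this commit, it presumably walks the DataNode's plug-in list along these lines (a sketch, not the committed body):

// Hedged reconstruction; DataNode.getPlugins() is the same accessor the
// removed ObjectStoreRestPlugin.getDatanodeDetails() helper relied on.
private static HddsDatanodeService findHddsPlugin(DataNode dataNode) {
  for (ServicePlugin plugin : dataNode.getPlugins()) {
    if (plugin instanceof HddsDatanodeService) {
      return (HddsDatanodeService) plugin;
    }
  }
  throw new IllegalStateException(
      "No HddsDatanodeService plugin loaded by this DataNode");
}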

View File

@@ -18,17 +18,11 @@
 package org.apache.hadoop.ozone.web;
 
 import java.io.IOException;
-import java.nio.channels.ServerSocketChannel;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeServicePlugin;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.web.netty.ObjectStoreRestHttpServer;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.util.ServicePlugin;
@@ -38,57 +32,39 @@
 /**
  * DataNode service plugin implementation to start ObjectStore rest server.
  */
-public class ObjectStoreRestPlugin implements DataNodeServicePlugin {
+public class OzoneHddsDatanodeService implements ServicePlugin {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(ObjectStoreRestPlugin.class);
+      LoggerFactory.getLogger(OzoneHddsDatanodeService.class);
 
-  private final boolean isOzoneEnabled;
   private Configuration conf;
   private ObjectStoreHandler handler;
   private ObjectStoreRestHttpServer objectStoreRestHttpServer;
 
-  public ObjectStoreRestPlugin() {
-    OzoneConfiguration.activate();
-    this.conf = new OzoneConfiguration();
-    this.isOzoneEnabled = OzoneUtils.isOzoneEnabled(conf);
-  }
-
   @Override
   public void start(Object service) {
-    DataNode dataNode = (DataNode) service;
-    if (isOzoneEnabled) {
+    if (service instanceof HddsDatanodeService) {
       try {
-        handler = new ObjectStoreHandler(dataNode.getConf());
-        ServerSocketChannel httpServerChannel =
-            dataNode.getSecureResources() != null ?
-                dataNode.getSecureResources().getHttpServerChannel() :
-                null;
-        objectStoreRestHttpServer =
-            new ObjectStoreRestHttpServer(dataNode.getConf(), httpServerChannel,
-                handler);
+        HddsDatanodeService hddsDatanodeService = (HddsDatanodeService) service;
+        conf = hddsDatanodeService.getConf();
+        handler = new ObjectStoreHandler(conf);
+        objectStoreRestHttpServer = new ObjectStoreRestHttpServer(
+            conf, null, handler);
         objectStoreRestHttpServer.start();
-        getDatanodeDetails(dataNode).setOzoneRestPort(
+        hddsDatanodeService.getDatanodeDetails().setOzoneRestPort(
             objectStoreRestHttpServer.getHttpAddress().getPort());
       } catch (IOException e) {
         throw new RuntimeException("Can't start the Object Store Rest server",
             e);
       }
+    } else {
+      LOG.error("Not starting {}, as the plugin is not invoked through {}",
+          OzoneHddsDatanodeService.class.getSimpleName(),
+          HddsDatanodeService.class.getSimpleName());
     }
   }
 
-  public static DatanodeDetails getDatanodeDetails(DataNode dataNode) {
-    for (ServicePlugin plugin : dataNode.getPlugins()) {
-      if (plugin instanceof HddsDatanodeService) {
-        return ((HddsDatanodeService) plugin).getDatanodeDetails();
-      }
-    }
-    throw new RuntimeException("Not able to find HddsDatanodeService in the" +
-        " list of plugins loaded by DataNode.");
-  }
-
   @Override
   public void stop() {
@@ -100,7 +76,7 @@ public void stop() {
   }
 
   @Override
-  public void close() throws IOException {
+  public void close() {
     IOUtils.closeQuietly(objectStoreRestHttpServer);
     IOUtils.closeQuietly(handler);
   }