HDDS-94. Change ozone datanode command to start the standalone datanode plugin.

Contributed by Sandeep Nemuri.
Author: Anu Engineer 2018-06-26 18:28:47 -07:00
parent 1e30547642
commit 18932717c4
11 changed files with 15 additions and 77 deletions
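
In effect, the ozone datanode subcommand now launches org.apache.hadoop.ozone.HddsDatanodeService directly instead of an HDFS DataNode that loads the HDDS service through the dfs.datanode.plugins setting shown in the docker-config hunks below. A minimal sketch of the new invocation, assuming a built Ozone tree with the stock bin/ozone launcher:

    # Start the standalone HDDS datanode service in the foreground.
    # (Previously this role ran as a plugin inside the HDFS DataNode.)
    bin/ozone datanode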


@@ -16,18 +16,6 @@
version: "3"
services:
namenode:
image: apache/hadoop-runner
hostname: namenode
volumes:
- ../../ozone:/opt/hadoop
ports:
- 9870:9870
environment:
ENSURE_NAMENODE_DIR: /data/namenode
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:
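
With the namenode service removed from the compose definition above, the cluster consists only of the Ozone roles (ksm, scm and the standalone datanode). A hedged sketch of bringing it up, assuming the compose directory layout of this branch and a docker-compose release that supports --scale:

    # Directory is an assumption; use wherever this docker-compose.yaml lives.
    cd hadoop-dist/src/main/compose/ozone
    docker-compose up -d
    # Optionally run several standalone HDDS datanodes.
    docker-compose up -d --scale datanode=3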


@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
@@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
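
The docker-config entries above follow the apache/hadoop-runner convention: each CONFIGFILE_key=value environment variable is expanded by the image's entrypoint into a property of the matching configuration file, so OZONE-SITE.XML_ozone.scm.names=scm ends up in ozone-site.xml. A hedged way to inspect the result inside a running container (the /opt/hadoop/etc/hadoop path and the availability of jps are assumptions about the stock image):

    # Show the ozone-site.xml generated from the OZONE-SITE.XML_* variables above.
    docker-compose exec datanode cat /opt/hadoop/etc/hadoop/ozone-site.xml
    # The Java process list should now show HddsDatanodeService rather than DataNode.
    docker-compose exec datanode jps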


@@ -16,19 +16,6 @@
version: "3"
services:
namenode:
image: apache/hadoop-runner
hostname: namenode
volumes:
- ../../ozone:/opt/hadoop
- ./jmxpromo.jar:/opt/jmxpromo.jar
ports:
- 9870:9870
environment:
ENSURE_NAMENODE_DIR: /data/namenode
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:


@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
@@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout


@@ -25,9 +25,11 @@
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.statemachine
.DatanodeStateMachine;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
@@ -229,9 +231,18 @@ public static HddsDatanodeService createHddsDatanodeService(
public static void main(String[] args) {
try {
if (DFSUtil.parseHelpArgument(args, "Starts HDDS Datanode", System.out, false)) {
System.exit(0);
}
Configuration conf = new OzoneConfiguration();
GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
if (!hParser.isParseSuccessful()) {
GenericOptionsParser.printGenericCommandUsage(System.err);
System.exit(1);
}
StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
HddsDatanodeService hddsDatanodeService =
createHddsDatanodeService(new OzoneConfiguration());
createHddsDatanodeService(conf);
hddsDatanodeService.start(null);
hddsDatanodeService.join();
} catch (Throwable e) {
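
Because main() above now routes its arguments through GenericOptionsParser before the service is created, the standalone datanode accepts the standard Hadoop generic options, and anything the parser rejects prints the generic usage and exits with status 1. A rough usage sketch (launcher path and config locations are assumptions):

    # Point the standalone datanode at an explicit configuration file ...
    bin/ozone datanode -conf /etc/hadoop/conf/ozone-site.xml
    # ... or override individual properties on the command line.
    bin/ozone datanode -D ozone.metadata.dirs=/data/metadata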


@@ -16,18 +16,6 @@
version: "3"
services:
namenode:
image: apache/hadoop-runner
hostname: namenode
volumes:
- ${OZONEDIR}:/opt/hadoop
ports:
- 9870
environment:
ENSURE_NAMENODE_DIR: /data/namenode
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:


@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
OZONE-SITE.XML_ozone.scm.names=scm
@@ -24,13 +23,9 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender


@@ -28,7 +28,6 @@ Startup Ozone cluster with size
Daemons are running without error
Is daemon running without error ksm
Is daemon running without error scm
Is daemon running without error namenode
Is daemon running without error datanode
Check if datanode is connected to the scm


@@ -16,18 +16,6 @@
version: "3"
services:
namenode:
image: apache/hadoop-runner
hostname: namenode
volumes:
- ${OZONEDIR}:/opt/hadoop
ports:
- 9870
environment:
ENSURE_NAMENODE_DIR: /data/namenode
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:


@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
@@ -25,13 +24,9 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender


@@ -34,7 +34,7 @@ function hadoop_usage
hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
hadoop_add_subcommand "datanode" daemon "run a HDDS datanode"
hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
hadoop_add_subcommand "freon" client "runs an ozone data generator"
hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning."
@@ -45,7 +45,7 @@ function hadoop_usage
hadoop_add_subcommand "o3" client "command line interface for ozone"
hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data"
hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager "
hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager"
hadoop_add_subcommand "version" client "print the version"
hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path"
@@ -68,10 +68,7 @@ function ozonecmd_case
;;
datanode)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR
hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR
HADOOP_CLASSNAME=org.apache.hadoop.ozone.HddsDatanodeService
;;
envvars)
echo "JAVA_HOME='${JAVA_HOME}'"