HDDS-146. Refactor the structure of the acceptance tests.
Contributed by Elek, Marton.
parent 5d7449d2b8
commit 020dd61988
@@ -15,4 +15,4 @@
# limitations under the License.

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
$DIR/robot.sh $DIR/../../src/test/robotframework/acceptance
$DIR/robot.sh $DIR/../../src/test/acceptance
hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh (new executable file, 63 lines)
@@ -0,0 +1,63 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -x

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

#Dir of the definition of the dind-based test execution container
DOCKERDIR="$DIR/../docker"

#Dir to save the results
TARGETDIR="$DIR/../../target/dnd"

#Dir to mount the distribution from
OZONEDIST="$DIR/../../../../hadoop-dist/target/ozone"

#Name and image name of the temporary, dind-based test containers
DOCKER_IMAGE_NAME=ozoneacceptance
DOCKER_INSTANCE_NAME="${DOCKER_INSTANCE_NAME:-ozoneacceptance}"

teardown() {
   docker stop "$DOCKER_INSTANCE_NAME"
}

trap teardown EXIT

#Make sure it will work even if Ozone was built by another user: we
#enable other users to run the distribution.
mkdir -p "$TARGETDIR"
mkdir -p "$OZONEDIST/logs"
chmod o+w "$OZONEDIST/logs" || true
chmod -R o+w "$OZONEDIST/etc/hadoop" || true
chmod o+w "$OZONEDIST" || true

rm "$TARGETDIR/docker-compose.log"
docker rm "$DOCKER_INSTANCE_NAME" || true
docker build -t "$DOCKER_IMAGE_NAME" $DIR/../docker

#Starting the dind-based environment
docker run --rm -v $DIR/../../../..:/opt/hadoop --privileged -d --name "$DOCKER_INSTANCE_NAME" $DOCKER_IMAGE_NAME
sleep 5

#Starting the tests
docker exec "$DOCKER_INSTANCE_NAME" /opt/hadoop/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
RESULT=$?

docker cp "$DOCKER_INSTANCE_NAME:/root/log.html" "$TARGETDIR/"
docker cp "$DOCKER_INSTANCE_NAME:/root/junit-results.xml" "$TARGETDIR/"
docker cp "$DOCKER_INSTANCE_NAME:/root/docker-compose.log" "$TARGETDIR/"
exit $RESULT
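For reference, a rough local-usage sketch for this new Docker-in-Docker runner (not a definitive recipe: it assumes a prior full Ozone build, using the -Phdds and -Pdist profiles hinted at by the runner script further below; the instance name override is optional):

# build the distribution that gets mounted into the container at /opt/hadoop
mvn install -DskipTests -Phdds -Pdist

# run the whole acceptance suite in a throw-away DinD container; log.html,
# junit-results.xml and docker-compose.log are copied to target/dnd afterwards
cd hadoop-ozone/acceptance-test
DOCKER_INSTANCE_NAME=ozoneacceptance-local ./dev-support/bin/robot-dnd-all.sh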
@@ -14,10 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

set -x

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#basedir is the directory of the whole hadoop project. Used to calculate the
#exact path to the hadoop-dist project
BASEDIR=${DIR}/../../../..

if [ ! "$(which robot)" ] ; then
    echo ""
@@ -29,10 +28,10 @@ if [ ! "$(which robot)" ] ; then
    exit -1
fi

OZONEDISTDIR="$BASEDIR/hadoop-dist/target/ozone"
OZONEDISTDIR="$DIR/../../../../hadoop-dist/target/ozone"
if [ ! -d "$OZONEDISTDIR" ]; then
    echo "Ozone can't be found in the $OZONEDISTDIR."
    echo "You may need a full build with -Phdds and -Pdist profiles"
    exit -1
fi
robot -v basedir:$BASEDIR $@
robot -x junit-results.xml "$@"
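The suites can still be run without the DinD wrapper, directly through this runner, when Robot Framework is available on the host; a hedged sketch (the exact suite path is illustrative, and docker-compose is needed by the suites themselves):

pip install robotframework docker-compose

cd hadoop-ozone/acceptance-test
# run a single suite; junit-results.xml is now also written for CI consumption
./dev-support/bin/robot.sh src/test/acceptance/ozone-shell.robot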
hadoop-ozone/acceptance-test/dev-support/docker/Dockerfile (new file, 21 lines)
@@ -0,0 +1,21 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM docker:18-dind
RUN apk add --update python3 bash curl jq sudo
RUN pip3 install robotframework docker-compose
WORKDIR /root
USER root
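This is the image that robot-dnd-all.sh builds and starts; done by hand it would look roughly like the following (image and container names mirror the script's defaults):

cd hadoop-ozone/acceptance-test
docker build -t ozoneacceptance dev-support/docker

# DinD requires --privileged; the repository root is mounted at /opt/hadoop
docker run --rm --privileged -d --name ozoneacceptance \
   -v "$(pwd)/../..:/opt/hadoop" ozoneacceptance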
@@ -0,0 +1,23 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

version: "3"
services:
   robotenv:
      build: .
      privileged: true
      volumes:
         - ../../../..:/opt/hadoop
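The same environment can also be brought up through this compose file instead of the wrapper script; a hedged sketch using nothing but standard docker-compose commands:

cd hadoop-ozone/acceptance-test/dev-support/docker
docker-compose up -d                 # builds the image and starts the privileged robotenv service
docker-compose exec robotenv bash    # the mounted source tree is available under /opt/hadoop
docker-compose down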
@@ -43,6 +43,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <goal>run</goal>
          </goals>
          <configuration>
            <testCasesDirectory>src/test/acceptance</testCasesDirectory>
            <variables>
              <variable>basedir:${project.basedir}/../..</variable>
            </variables>
@@ -14,4 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.

OZONEDIR=../../../hadoop-dist/target/ozone
OZONEDIR=../../../../../../hadoop-dist/target/ozone
@@ -0,0 +1,50 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
Documentation       Smoketest ozone cluster startup
Library             OperatingSystem
Suite Setup         Startup Ozone cluster with size     5
Suite Teardown      Teardown Ozone cluster
Resource            ../commonlib.robot

*** Variables ***
${COMMON_REST_HEADER}   -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root"
${COMPOSEFILE}          ${CURDIR}/docker-compose.yaml
${PROJECTDIR}           ${CURDIR}/../../../../../..


*** Test Cases ***

Test rest interface
    ${result} =     Execute on      datanode        curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
    Should contain      ${result}       201 Created
    ${result} =     Execute on      datanode        curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
    Should contain      ${result}       201 Created
    ${result} =     Execute on      datanode        curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
    Should contain      ${result}       200 OK
    ${result} =     Execute on      datanode        curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
    Should contain      ${result}       200 OK

Check webui static resources
    ${result} =     Execute on      scm     curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
    Should contain      ${result}       200
    ${result} =     Execute on      ksm     curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js
    Should contain      ${result}       200

Start freon testing
    ${result} =     Execute on      ksm     ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
    Wait Until Keyword Succeeds     3min    10sec   Should contain      ${result}       Number of Keys added: 125
    Should Not Contain      ${result}       ERROR
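The "Test rest interface" case above reduces to four curl calls against the datanode REST port; a sketch for manual debugging from inside the datanode container, with the headers copied from ${COMMON_REST_HEADER}:

H=(-H "x-ozone-user: bilbo" -H "x-ozone-version: v1"
   -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root")
curl -i -X POST   "${H[@]}" http://localhost:9880/volume1           # expect 201 Created
curl -i -X POST   "${H[@]}" http://localhost:9880/volume1/bucket1   # expect 201 Created
curl -i -X DELETE "${H[@]}" http://localhost:9880/volume1/bucket1   # expect 200 OK
curl -i -X DELETE "${H[@]}" http://localhost:9880/volume1           # expect 200 OK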
@@ -25,12 +25,14 @@ OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s
HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
LOG4J.PROPERTIES_log4j.category.org.apache.hadoop.util.NativeCodeLoader=ERROR
@@ -0,0 +1,85 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
Documentation       Test ozone shell CLI usage
Library             OperatingSystem
Suite Setup         Startup Ozone cluster with size     5
Suite Teardown      Teardown Ozone cluster
Resource            ../commonlib.robot
Test Timeout        2 minute

*** Variables ***
${basedir}
${COMPOSEFILE}      ${CURDIR}/docker-compose.yaml
${PROJECTDIR}       ${CURDIR}/../../../../../..

*** Test Cases ***
RestClient without http port
    Test ozone shell    http://     ksm         restwoport      True

RestClient with http port
    Test ozone shell    http://     ksm:9874    restwport       True

RestClient without host name
    Test ozone shell    http://     ${EMPTY}    restwohost      True

RpcClient with port
    Test ozone shell    o3://       ksm:9862    rpcwoport       False

RpcClient without host
    Test ozone shell    o3://       ${EMPTY}    rpcwport        False

RpcClient without scheme
    Test ozone shell    ${EMPTY}    ${EMPTY}    rpcwoscheme     False


*** Keywords ***
Test ozone shell
    [arguments]     ${protocol}     ${server}       ${volume}       ${withkeytest}
    ${result} =     Execute on      datanode        ozone oz -createVolume ${protocol}${server}/${volume} -user bilbo -quota 100TB -root
    Should not contain      ${result}       Failed
    Should contain          ${result}       Creating Volume: ${volume}
    ${result} =     Execute on      datanode        ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
    Should contain          ${result}       createdOn
    Execute on      datanode        ozone oz -updateVolume ${protocol}${server}/${volume} -user bill -quota 10TB
    ${result} =     Execute on      datanode        ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
    Should Be Equal         ${result}       bill
    ${result} =     Execute on      datanode        ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size'
    Should Be Equal         ${result}       10
    Execute on      datanode        ozone oz -createBucket ${protocol}${server}/${volume}/bb1
    ${result} =     Execute on      datanode        ozone oz -infoBucket ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
    Should Be Equal         ${result}       DISK
    ${result} =     Execute on      datanode        ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
    Should Be Equal         ${result}       GROUP
    ${result} =     Execute on      datanode        ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
    Should Be Equal         ${result}       USER
    ${result} =     Execute on      datanode        ozone oz -listBucket o3://ksm/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal         ${result}       ${volume}
    Run Keyword and Return If       ${withkeytest}      Test key handling       ${protocol}     ${server}       ${volume}
    Execute on      datanode        ozone oz -deleteBucket ${protocol}${server}/${volume}/bb1
    Execute on      datanode        ozone oz -deleteVolume ${protocol}${server}/${volume} -user bilbo

Test key handling
    [arguments]     ${protocol}     ${server}       ${volume}
    Execute on      datanode        ozone oz -putKey ${protocol}${server}/${volume}/bb1/key1 -file NOTICE.txt
    Execute on      datanode        rm -f NOTICE.txt.1
    Execute on      datanode        ozone oz -getKey ${protocol}${server}/${volume}/bb1/key1 -file NOTICE.txt.1
    Execute on      datanode        ls -l NOTICE.txt.1
    ${result} =     Execute on      datanode        ozone oz -infoKey ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
    Should contain          ${result}       createdOn
    ${result} =     Execute on      datanode        ozone oz -listKey o3://ksm/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
    Should Be Equal         ${result}       key1
    Execute on      datanode        ozone oz -deleteKey ${protocol}${server}/${volume}/bb1/key1 -v
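Outside Robot, the lifecycle exercised by the "Test ozone shell" keyword corresponds to a CLI session along these lines (flags copied from the test steps above; vol1 is an arbitrary volume name used only for illustration):

ozone oz -createVolume o3://ksm/vol1 -user bilbo -quota 100TB -root
ozone oz -updateVolume o3://ksm/vol1 -user bill -quota 10TB
ozone oz -createBucket o3://ksm/vol1/bb1
ozone oz -updateBucket o3://ksm/vol1/bb1 -addAcl user:frodo:rw,group:samwise:r
ozone oz -putKey       o3://ksm/vol1/bb1/key1 -file NOTICE.txt
ozone oz -getKey       o3://ksm/vol1/bb1/key1 -file NOTICE.txt.1
ozone oz -deleteKey    o3://ksm/vol1/bb1/key1 -v
ozone oz -deleteBucket o3://ksm/vol1/bb1
ozone oz -deleteVolume o3://ksm/vol1 -user bilbo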
@@ -13,16 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
Documentation       Smoke test to start cluster with docker-compose environments.
Library             OperatingSystem
Suite Setup         Startup Ozone Cluster
Suite Teardown      Teardown Ozone Cluster
*** Keywords ***

*** Variables ***
${COMMON_REST_HEADER}   -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root"
${basedir}
*** Test Cases ***
Startup Ozone cluster with size
    [arguments]     ${datanodeno}
    ${rc}       ${output} =     Run docker compose      down
    Run     echo "Starting new docker-compose environment" >> docker-compose.log
    ${rc}       ${output} =     Run docker compose      up -d
    Should Be Equal As Integers     ${rc}       0
    Wait Until Keyword Succeeds     1min    5sec    Is Daemon started   ksm     HTTP server of KSM is listening
    Daemons are running without error
    Scale datanodes up      5

Daemons are running without error
    Is daemon running without error     ksm
@@ -37,37 +38,14 @@ Scale it up to 5 datanodes
    Scale datanodes up      5
    Wait Until Keyword Succeeds     3min    5sec    Have healthy datanodes      5

Test rest interface
    ${result} =     Execute on      datanode        curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
    Should contain      ${result}       201 Created
    ${result} =     Execute on      datanode        curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
    Should contain      ${result}       201 Created
    ${result} =     Execute on      datanode        curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
    Should contain      ${result}       200 OK
    ${result} =     Execute on      datanode        curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
    Should contain      ${result}       200 OK
Scale datanodes up
    [arguments]     ${datanodeno}
    Run docker compose      scale datanode=${datanodeno}
    Wait Until Keyword Succeeds     3min    5sec    Have healthy datanodes      ${datanodeno}

Check webui static resources
    ${result} =     Execute on      scm     curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
    Should contain      ${result}       200
    ${result} =     Execute on      ksm     curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js
    Should contain      ${result}       200

Start freon testing
    ${result} =     Execute on      ksm     ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
    Wait Until Keyword Succeeds     3min    10sec   Should contain      ${result}       Number of Keys added: 125
    Should Not Contain      ${result}       ERROR

*** Keywords ***

Startup Ozone Cluster
    ${rc}       ${output} =     Run docker compose      down
    ${rc}       ${output} =     Run docker compose      up -d
    Should Be Equal As Integers     ${rc}       0
    Wait Until Keyword Succeeds     1min    5sec    Is Daemon started   ksm     HTTP server of KSM is listening

Teardown Ozone Cluster
Teardown Ozone cluster
    Run docker compose      down
    Run docker compose      logs >> docker-compose.log

Is daemon running without error
    [arguments]     ${name}
@@ -86,19 +64,16 @@ Have healthy datanodes
    ${result} =     Execute on      scm     curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
    Should Be Equal     ${result}       ${requirednodes}

Scale datanodes up
    [arguments]     ${requirednodes}
    Run docker compose      scale datanode=${requirednodes}

Execute on
    [arguments]     ${componentname}    ${command}
    ${rc}       ${return} =     Run docker compose      exec ${componentname} ${command}
    ${rc}       ${return} =     Run docker compose      exec -T ${componentname} ${command}
    [return]    ${return}

Run docker compose
    [arguments]     ${command}
    Set Environment Variable    OZONEDIR    ${basedir}/hadoop-dist/target/ozone
    ${rc}       ${output} =     Run And Return Rc And Output    docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml ${command}
    Set Environment Variable    COMPOSE_INTERACTIVE_NO_CLI      1
    Set Environment Variable    OZONEDIR    ${PROJECTDIR}/hadoop-dist/target/ozone
    ${rc}       ${output} =     Run And Return Rc And Output    docker-compose -f ${COMPOSEFILE} ${command}
    Log     ${output}
    Should Be Equal As Integers     ${rc}       0
    [return]    ${rc}       ${output}
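Stripped of Robot syntax, the reworked "Run docker compose" keyword boils down to the following shell steps (COMPOSEFILE and PROJECTDIR here stand in for the suite variables of the same names):

export COMPOSE_INTERACTIVE_NO_CLI=1        # lets docker-compose exec work without a TTY
export OZONEDIR="$PROJECTDIR/hadoop-dist/target/ozone"
docker-compose -f "$COMPOSEFILE" up -d     # likewise: down, logs, scale datanode=5, exec -T ...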
@@ -0,0 +1,17 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

OZONEDIR=../../../../../../hadoop-dist/target/ozone
@@ -1,256 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
Documentation       Smoke test to start cluster with docker-compose environments.
Library             OperatingSystem
Suite Setup         Startup Ozone Cluster
Suite Teardown      Teardown Ozone Cluster

*** Variables ***
${basedir}
*** Test Cases ***

Daemons are running without error
    Is daemon running without error     ksm
    Is daemon running without error     scm
    Is daemon running without error     namenode
    Is daemon running without error     datanode

Check if datanode is connected to the scm
    Wait Until Keyword Succeeds     3min    5sec    Have healthy datanodes      1

Scale it up to 5 datanodes
    Scale datanodes up      5
    Wait Until Keyword Succeeds     3min    5sec    Have healthy datanodes      5

Test ozone shell (RestClient without http port)
    Execute on      datanode        ozone oz -createVolume http://ksm/hive -user bilbo -quota 100TB -root
    ${result} =     Execute on      datanode        ozone oz -listVolume http://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
    Should contain      ${result}       createdOn
    Execute on      datanode        ozone oz -updateVolume http://ksm/hive -user bill -quota 10TB
    ${result} =     Execute on      datanode        ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
    Should Be Equal     ${result}       bill
    ${result} =     Execute on      datanode        ozone oz -infoVolume http://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
    Should Be Equal     ${result}       10
    Execute on      datanode        ozone oz -createBucket http://ksm/hive/bb1
    ${result} =     Execute on      datanode        ozone oz -infoBucket http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
    Should Be Equal     ${result}       DISK
    ${result} =     Execute on      datanode        ozone oz -updateBucket http://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
    Should Be Equal     ${result}       GROUP
    ${result} =     Execute on      datanode        ozone oz -updateBucket http://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
    Should Be Equal     ${result}       USER
    ${result} =     Execute on      datanode        ozone oz -listBucket http://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal     ${result}       hive
    Execute on      datanode        ozone oz -putKey http://ksm/hive/bb1/key1 -file NOTICE.txt
    Execute on      datanode        rm -f NOTICE.txt.1
    Execute on      datanode        ozone oz -getKey http://ksm/hive/bb1/key1 -file NOTICE.txt.1
    Execute on      datanode        ls -l NOTICE.txt.1
    ${result} =     Execute on      datanode        ozone oz -infoKey http://ksm/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
    Should contain      ${result}       createdOn
    ${result} =     Execute on      datanode        ozone oz -listKey http://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
    Should Be Equal     ${result}       key1
    Execute on      datanode        ozone oz -deleteKey http://ksm/hive/bb1/key1 -v
    Execute on      datanode        ozone oz -deleteBucket http://ksm/hive/bb1
    Execute on      datanode        ozone oz -deleteVolume http://ksm/hive -user bilbo

Test ozone shell (RestClient with http port)
    Execute on      datanode        ozone oz -createVolume http://ksm:9874/hive -user bilbo -quota 100TB -root
    ${result} =     Execute on      datanode        ozone oz -listVolume http://ksm:9874 -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
    Should contain      ${result}       createdOn
    Execute on      datanode        ozone oz -updateVolume http://ksm:9874/hive -user bill -quota 10TB
    ${result} =     Execute on      datanode        ozone oz -infoVolume http://ksm:9874/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
    Should Be Equal     ${result}       bill
    ${result} =     Execute on      datanode        ozone oz -infoVolume http://ksm:9874/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
    Should Be Equal     ${result}       10
    Execute on      datanode        ozone oz -createBucket http://ksm:9874/hive/bb1
    ${result} =     Execute on      datanode        ozone oz -infoBucket http://ksm:9874/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
    Should Be Equal     ${result}       DISK
    ${result} =     Execute on      datanode        ozone oz -updateBucket http://ksm:9874/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
    Should Be Equal     ${result}       GROUP
    ${result} =     Execute on      datanode        ozone oz -updateBucket http://ksm:9874/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
    Should Be Equal     ${result}       USER
    ${result} =     Execute on      datanode        ozone oz -listBucket http://ksm:9874/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal     ${result}       hive
    Execute on      datanode        ozone oz -putKey http://ksm:9874/hive/bb1/key1 -file NOTICE.txt
    Execute on      datanode        rm -f NOTICE.txt.1
    Execute on      datanode        ozone oz -getKey http://ksm:9874/hive/bb1/key1 -file NOTICE.txt.1
    Execute on      datanode        ls -l NOTICE.txt.1
    ${result} =     Execute on      datanode        ozone oz -infoKey http://ksm:9874/hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
    Should contain      ${result}       createdOn
    ${result} =     Execute on      datanode        ozone oz -listKey http://ksm:9874/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
    Should Be Equal     ${result}       key1
    Execute on      datanode        ozone oz -deleteKey http://ksm:9874/hive/bb1/key1 -v
    Execute on      datanode        ozone oz -deleteBucket http://ksm:9874/hive/bb1
    Execute on      datanode        ozone oz -deleteVolume http://ksm:9874/hive -user bilbo

Test ozone shell (RestClient without hostname)
    Execute on      datanode        ozone oz -createVolume http:///hive -user bilbo -quota 100TB -root
    ${result} =     Execute on      datanode        ozone oz -listVolume http:/// -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
    Should contain      ${result}       createdOn
    Execute on      datanode        ozone oz -updateVolume http:///hive -user bill -quota 10TB
    ${result} =     Execute on      datanode        ozone oz -infoVolume http:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
    Should Be Equal     ${result}       bill
    ${result} =     Execute on      datanode        ozone oz -infoVolume http:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
    Should Be Equal     ${result}       10
    Execute on      datanode        ozone oz -createBucket http:///hive/bb1
    ${result} =     Execute on      datanode        ozone oz -infoBucket http:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
    Should Be Equal     ${result}       DISK
    ${result} =     Execute on      datanode        ozone oz -updateBucket http:///hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
    Should Be Equal     ${result}       GROUP
    ${result} =     Execute on      datanode        ozone oz -updateBucket http:///hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
    Should Be Equal     ${result}       USER
    ${result} =     Execute on      datanode        ozone oz -listBucket http:///hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal     ${result}       hive
    Execute on      datanode        ozone oz -putKey http:///hive/bb1/key1 -file NOTICE.txt
    Execute on      datanode        rm -f NOTICE.txt.1
    Execute on      datanode        ozone oz -getKey http:///hive/bb1/key1 -file NOTICE.txt.1
    Execute on      datanode        ls -l NOTICE.txt.1
    ${result} =     Execute on      datanode        ozone oz -infoKey http:///hive/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
    Should contain      ${result}       createdOn
    ${result} =     Execute on      datanode        ozone oz -listKey http:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
    Should Be Equal     ${result}       key1
    Execute on      datanode        ozone oz -deleteKey http:///hive/bb1/key1 -v
    Execute on      datanode        ozone oz -deleteBucket http:///hive/bb1
    Execute on      datanode        ozone oz -deleteVolume http:///hive -user bilbo

Test ozone shell (RpcClient without http port)
    Execute on      datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
    ${result} =     Execute on      datanode        ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
    Should contain      ${result}       createdOn
    Execute on      datanode        ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB
    ${result} =     Execute on      datanode        ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
    Should Be Equal     ${result}       bill
    ${result} =     Execute on      datanode        ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
    Should Be Equal     ${result}       10
    Execute on      datanode        ozone oz -createBucket o3://ksm/hive/bb1
    ${result} =     Execute on      datanode        ozone oz -infoBucket o3://ksm/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
    Should Be Equal     ${result}       DISK
    ${result} =     Execute on      datanode        ozone oz -updateBucket o3://ksm/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
    Should Be Equal     ${result}       GROUP
    ${result} =     Execute on      datanode        ozone oz -updateBucket o3://ksm/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
    Should Be Equal     ${result}       USER
    ${result} =     Execute on      datanode        ozone oz -listBucket o3://ksm/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal     ${result}       hive
    Execute on      datanode        ozone oz -deleteBucket o3://ksm/hive/bb1
    Execute on      datanode        ozone oz -deleteVolume o3://ksm/hive -user bilbo

Test ozone shell (RpcClient with http port)
    Execute on      datanode        ozone oz -createVolume o3://ksm:9862/hive -user bilbo -quota 100TB -root
    ${result} =     Execute on      datanode        ozone oz -listVolume o3://ksm:9862 -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
    Should contain      ${result}       createdOn
    Execute on      datanode        ozone oz -updateVolume o3://ksm:9862/hive -user bill -quota 10TB
    ${result} =     Execute on      datanode        ozone oz -infoVolume o3://ksm:9862/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
    Should Be Equal     ${result}       bill
    ${result} =     Execute on      datanode        ozone oz -infoVolume o3://ksm:9862/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
    Should Be Equal     ${result}       10
    Execute on      datanode        ozone oz -createBucket o3://ksm:9862/hive/bb1
    ${result} =     Execute on      datanode        ozone oz -infoBucket o3://ksm:9862/hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
    Should Be Equal     ${result}       DISK
    ${result} =     Execute on      datanode        ozone oz -updateBucket o3://ksm:9862/hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
    Should Be Equal     ${result}       GROUP
    ${result} =     Execute on      datanode        ozone oz -updateBucket o3://ksm:9862/hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
    Should Be Equal     ${result}       USER
    ${result} =     Execute on      datanode        ozone oz -listBucket o3://ksm:9862/hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal     ${result}       hive
    Execute on      datanode        ozone oz -deleteBucket o3://ksm:9862/hive/bb1
    Execute on      datanode        ozone oz -deleteVolume o3://ksm:9862/hive -user bilbo

Test ozone shell (RpcClient without hostname)
    Execute on      datanode        ozone oz -createVolume o3:///hive -user bilbo -quota 100TB -root
    ${result} =     Execute on      datanode        ozone oz -listVolume o3:/// -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
    Should contain      ${result}       createdOn
    Execute on      datanode        ozone oz -updateVolume o3:///hive -user bill -quota 10TB
    ${result} =     Execute on      datanode        ozone oz -infoVolume o3:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
    Should Be Equal     ${result}       bill
    ${result} =     Execute on      datanode        ozone oz -infoVolume o3:///hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
    Should Be Equal     ${result}       10
    Execute on      datanode        ozone oz -createBucket o3:///hive/bb1
    ${result} =     Execute on      datanode        ozone oz -infoBucket o3:///hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
    Should Be Equal     ${result}       DISK
    ${result} =     Execute on      datanode        ozone oz -updateBucket o3:///hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
    Should Be Equal     ${result}       GROUP
    ${result} =     Execute on      datanode        ozone oz -updateBucket o3:///hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
    Should Be Equal     ${result}       USER
    ${result} =     Execute on      datanode        ozone oz -listBucket o3:///hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal     ${result}       hive
    Execute on      datanode        ozone oz -deleteBucket o3:///hive/bb1
    Execute on      datanode        ozone oz -deleteVolume o3:///hive -user bilbo

Test ozone shell (no scheme - RpcClient used by default)
    Execute on      datanode        ozone oz -createVolume /hive -user bilbo -quota 100TB -root
    ${result} =     Execute on      datanode        ozone oz -listVolume / -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
    Should contain      ${result}       createdOn
    Execute on      datanode        ozone oz -updateVolume /hive -user bill -quota 10TB
    ${result} =     Execute on      datanode        ozone oz -infoVolume /hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
    Should Be Equal     ${result}       bill
    ${result} =     Execute on      datanode        ozone oz -infoVolume /hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .quota | .size'
    Should Be Equal     ${result}       10
    Execute on      datanode        ozone oz -createBucket /hive/bb1
    ${result} =     Execute on      datanode        ozone oz -infoBucket /hive/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
    Should Be Equal     ${result}       DISK
    ${result} =     Execute on      datanode        ozone oz -updateBucket /hive/bb1 -addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
    Should Be Equal     ${result}       GROUP
    ${result} =     Execute on      datanode        ozone oz -updateBucket /hive/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
    Should Be Equal     ${result}       USER
    ${result} =     Execute on      datanode        ozone oz -listBucket /hive/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
    Should Be Equal     ${result}       hive
    Execute on      datanode        ozone oz -deleteBucket /hive/bb1
    Execute on      datanode        ozone oz -deleteVolume /hive -user bilbo

*** Keywords ***

Startup Ozone Cluster
    ${rc}       ${output} =     Run docker compose      down
    ${rc}       ${output} =     Run docker compose      up -d
    Should Be Equal As Integers     ${rc}       0
    Wait Until Keyword Succeeds     1min    5sec    Is Daemon started   ksm     HTTP server of KSM is listening

Teardown Ozone Cluster
    Run docker compose      down

Is daemon running without error
    [arguments]     ${name}
    ${result} =     Run     docker ps
    Should contain      ${result}       _${name}_1
    ${rc}       ${result} =     Run docker compose      logs ${name}
    Should not contain      ${result}       ERROR

Is Daemon started
    [arguments]     ${name}     ${expression}
    ${rc}       ${result} =     Run docker compose      logs
    Should contain      ${result}       ${expression}

Have healthy datanodes
    [arguments]     ${requirednodes}
    ${result} =     Execute on      scm     curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
    Should Be Equal     ${result}       ${requirednodes}

Scale datanodes up
    [arguments]     ${requirednodes}
    Run docker compose      scale datanode=${requirednodes}

Execute on
    [arguments]     ${componentname}    ${command}
    ${rc}       ${return} =     Run docker compose      exec ${componentname} ${command}
    [return]    ${return}

Run docker compose
    [arguments]     ${command}
    Set Environment Variable    OZONEDIR    ${basedir}/hadoop-dist/target/ozone
    ${rc}       ${output} =     Run And Return Rc And Output    docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml ${command}
    Log     ${output}
    Should Be Equal As Integers     ${rc}       0
    [return]    ${rc}       ${output}
@@ -66,13 +66,17 @@ ENV HOME /home/${USER_NAME}

UserSpecificDocker

#If this env variable is empty, docker will be started
#in non-interactive mode
DOCKER_INTERACTIVE_RUN=${DOCKER_INTERACTIVE_RUN-"-i -t"}

# By mapping the .m2 directory you can do an mvn install from
# within the container and use the result on your normal
# system. And this also is a significant speedup in subsequent
# builds because the dependencies are downloaded only once.
docker run --rm=true -t -i \
docker run --rm=true $DOCKER_INTERACTIVE_RUN \
  -v "${PWD}:/home/${USER_NAME}/hadoop${V_OPTS:-}" \
  -w "/home/${USER_NAME}/hadoop" \
  -v "${HOME}/.m2:/home/${USER_NAME}/.m2${V_OPTS:-}" \
  -u "${USER_NAME}" \
  "hadoop-build-${USER_ID}"
  "hadoop-build-${USER_ID}" "$@"
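The new DOCKER_INTERACTIVE_RUN switch plus the trailing "$@" let CI drive the build container non-interactively; a hedged example against the build-environment script patched here (presumably start-build-env.sh at the root of the Hadoop tree):

# interactive developer use, unchanged default: allocates a TTY and drops into a shell
./start-build-env.sh

# CI-style use: no TTY, the given command is passed straight through to the container
DOCKER_INTERACTIVE_RUN='' ./start-build-env.sh mvn clean install -DskipTests -Phdds -Pdist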