HDDS-764. Run S3 smoke tests with replication STANDARD. (#462)

HDDS-764. Run S3 smoke tests with replication STANDARD. Contributed by Elek, Marton.
Elek, Márton 2019-01-23 20:37:49 +01:00 committed by Bharat Viswanadham
parent 221e308cb5
commit 0b91329ed6
7 changed files with 62 additions and 29 deletions
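The practical effect: the smoke tests no longer force the REDUCED_REDUNDANCY storage class (which the Ozone S3 gateway maps to single-replica writes) and instead rely on the default STANDARD class, i.e. regular three-way replication. A rough before/after sketch of one affected call; the endpoint URL and bucket name here are only placeholders:

    # before: every smoke-test write forced single-replica storage
    aws s3api put-object --endpoint-url http://localhost:9878 \
        --storage-class REDUCED_REDUNDANCY \
        --bucket bucket1 --key putobject/f1 --body /tmp/testfile

    # after: no --storage-class flag, so the default STANDARD class is used
    aws s3api put-object --endpoint-url http://localhost:9878 \
        --bucket bucket1 --key putobject/f1 --body /tmp/testfile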


@@ -36,13 +36,13 @@ ${BUCKET} generated
*** Test Cases ***
Test Multipart Upload
- ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey --storage-class REDUCED_REDUNDANCY
+ ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey
${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0
Should contain ${result} ${BUCKET}
Should contain ${result} multipartKey
Should contain ${result} UploadId
# initiate again
- ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey --storage-class REDUCED_REDUNDANCY
+ ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey
${nextUploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0
Should contain ${result} ${BUCKET}
Should contain ${result} multipartKey
@@ -67,7 +67,7 @@ Test Multipart Upload
Test Multipart Upload Complete
- ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey1 --storage-class REDUCED_REDUNDANCY
+ ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey1
${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0
Should contain ${result} ${BUCKET}
Should contain ${result} multipartKey
@@ -101,7 +101,7 @@ Test Multipart Upload Complete
Test Multipart Upload Complete Entity too small
- ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey2 --storage-class REDUCED_REDUNDANCY
+ ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey2
${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0
Should contain ${result} ${BUCKET}
Should contain ${result} multipartKey
@@ -124,7 +124,7 @@ Test Multipart Upload Complete Entity too small
Test Multipart Upload Complete Invalid part
- ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey3 --storage-class REDUCED_REDUNDANCY
+ ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey3
${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0
Should contain ${result} ${BUCKET}
Should contain ${result} multipartKey
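The UploadId handling above is plain shell-and-jq plumbing: the CLI's JSON response is echoed into jq and the UploadId field is pulled out. A minimal standalone sketch of that extraction, using a made-up response:

    # fabricated create-multipart-upload response, for illustration only
    result='{"Bucket":"bucket1","Key":"multipartKey","UploadId":"48c18e0e8f"}'
    uploadID=$(echo "$result" | jq -r '.UploadId')
    echo "$uploadID"    # prints 48c18e0e8f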


@@ -29,9 +29,9 @@ ${BUCKET} generated
File upload and directory list
Execute date > /tmp/testfile
- ${result} = Execute AWSS3Cli cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://${BUCKET}
+ ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}
Should contain ${result} upload
- ${result} = Execute AWSS3Cli cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://${BUCKET}/dir1/dir2/file
+ ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/dir1/dir2/file
Should contain ${result} upload
${result} = Execute AWSS3Cli ls s3://${BUCKET}
Should contain ${result} testfile


@@ -39,22 +39,22 @@ Create Dest Bucket
Copy Object Happy Scenario
Run Keyword if '${DESTBUCKET}' == 'generated1' Create Dest Bucket
Execute date > /tmp/copyfile
- ${result} = Execute AWSS3ApiCli put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key copyobject/f1 --body /tmp/copyfile
+ ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key copyobject/f1 --body /tmp/copyfile
${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix copyobject/
Should contain ${result} f1
- ${result} = Execute AWSS3ApiCli copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
+ ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix copyobject/
Should contain ${result} f1
#copying again will not throw error
- ${result} = Execute AWSS3ApiCli copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
+ ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix copyobject/
Should contain ${result} f1
Copy Object Where Bucket is not available
- ${result} = Execute AWSS3APICli and checkrc copy-object --storage-class REDUCED_REDUNDANCY --bucket dfdfdfdfdfnonexistent --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1 255
+ ${result} = Execute AWSS3APICli and checkrc copy-object --bucket dfdfdfdfdfnonexistent --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1 255
Should contain ${result} NoSuchBucket
- ${result} = Execute AWSS3APICli and checkrc copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source dfdfdfdfdfnonexistent/copyobject/f1 255
+ ${result} = Execute AWSS3APICli and checkrc copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source dfdfdfdfdfnonexistent/copyobject/f1 255
Should contain ${result} NoSuchBucket
Copy Object Where both source and dest are same with change to storageclass
@@ -62,5 +62,5 @@ Copy Object Where both source and dest are same with change to storageclass
Should contain ${result} ETag
Copy Object Where Key not available
- ${result} = Execute AWSS3APICli and checkrc copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/nonnonexistentkey 255
+ ${result} = Execute AWSS3APICli and checkrc copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/nonnonexistentkey 255
Should contain ${result} NoSuchKey
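The "and checkrc ... 255" pattern asserts on the AWS CLI's exit code as well as its output. A hedged manual equivalent, where the endpoint and bucket names are placeholders:

    set +e
    aws s3api copy-object --endpoint-url http://localhost:9878 \
        --bucket no-such-bucket-xyz --key copyobject/f1 \
        --copy-source bucket1/copyobject/f1
    echo "exit code: $?"    # the smoke test expects 255 here, with NoSuchBucket in the error output
    set -e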


@@ -28,7 +28,7 @@ ${BUCKET} generated
*** Test Cases ***
Delete file with s3api
Execute date > /tmp/testfile
- ${result} = Execute AWSS3ApiCli put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key deletetestapi/f1 --body /tmp/testfile
+ ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key deletetestapi/f1 --body /tmp/testfile
${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix deletetestapi/
Should contain ${result} f1
${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapi/f1
@@ -46,7 +46,7 @@ Delete file with s3api, file doesn't exist
Delete dir with s3api
Execute date > /tmp/testfile
- ${result} = Execute AWSS3Cli cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://${BUCKET}/deletetestapidir/f1
+ ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/deletetestapidir/f1
${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapidir/
Should contain ${result} f1
${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapidir/
@@ -57,7 +57,7 @@ Delete dir with s3api
Delete file with s3api, file doesn't exist, prefix of a real file
Execute date > /tmp/testfile
- ${result} = Execute AWSS3Cli cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://${BUCKET}/deletetestapiprefix/filefile
+ ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/deletetestapiprefix/filefile
${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapiprefix/
Should contain ${result} filefile
${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapiprefix/file


@@ -29,20 +29,20 @@ ${BUCKET} generated
Delete file with multi delete
Execute date > /tmp/testfile
- ${result} = Execute AWSS3ApiCli put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key multidelete/f1 --body /tmp/testfile
- ${result} = Execute AWSS3ApiCli put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key multidelete/f2 --body /tmp/testfile
- ${result} = Execute AWSS3ApiCli put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key multidelete/f3 --body /tmp/testfile
+ ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key multidelete/f1 --body /tmp/testfile
+ ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key multidelete/f2 --body /tmp/testfile
+ ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key multidelete/f3 --body /tmp/testfile
${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix multidelete/
Should contain ${result} multidelete/f1
Should contain ${result} multidelete/f2
Should contain ${result} multidelete/f3
- Should contain ${result} REDUCED_REDUNDANCY
- Should not contain ${result} STANDARD
+ Should contain ${result} STANDARD
+ Should not contain ${result} REDUCED_REDUNDANCY
${result} = Execute AWSS3APICli delete-objects --bucket ${BUCKET} --delete 'Objects=[{Key=multidelete/f1},{Key=multidelete/f2},{Key=multidelete/f4}]'
Should not contain ${result} Error
${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix multidelete/
Should not contain ${result} multidelete/f1
Should not contain ${result} multidelete/f2
Should contain ${result} multidelete/f3
- Should contain ${result} REDUCED_REDUNDANCY
- Should not contain ${result} STANDARD
+ Should contain ${result} STANDARD
+ Should not contain ${result} REDUCED_REDUNDANCY
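The STANDARD / REDUCED_REDUNDANCY assertions above look at the StorageClass field that list-objects reports for each key. Checking it by hand would look roughly like this, with the endpoint and bucket as placeholders:

    aws s3api list-objects --endpoint-url http://localhost:9878 \
        --bucket bucket1 --prefix multidelete/ \
      | jq -r '.Contents[].StorageClass'
    # after this change, every key under the prefix is expected to report STANDARD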


@@ -30,12 +30,12 @@ ${BUCKET} generated
Put object to s3
Execute echo "Randomtext" > /tmp/testfile
- ${result} = Execute AWSS3ApiCli put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key putobject/f1 --body /tmp/testfile
+ ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key putobject/f1 --body /tmp/testfile
${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix putobject/
Should contain ${result} f1
Execute touch -f /tmp/zerobyte
- ${result} = Execute AWSS3ApiCli put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key putobject/zerobyte --body /tmp/zerobyte
+ ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key putobject/zerobyte --body /tmp/zerobyte
${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix putobject/
Should contain ${result} zerobyte


@@ -14,7 +14,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
RESULT_DIR=result
@@ -24,6 +23,41 @@ mkdir -p "$DIR/$RESULT_DIR"
#Should be writeable from the docker containers where user is different.
chmod ogu+w "$DIR/$RESULT_DIR"
+ ## @description wait until 3 datanodes are up (or give up after 30 seconds)
+ ## @param the docker-compose file
+ wait_for_datanodes(){
+   #Reset the timer
+   SECONDS=0
+   #Keep trying for up to 30 seconds
+   while [[ $SECONDS -lt 30 ]]; do
+     #This line checks the number of HEALTHY datanodes registered in scm over the
+     # jmx HTTP servlet
+     datanodes=$(docker-compose -f "$1" exec scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value')
+     if [[ "$datanodes" == "3" ]]; then
+       #It's up and running. Let's return from the function.
+       echo "$datanodes datanodes are up and registered to the scm"
+       return
+     else
+       #Print the count only if we got a value. It may not be a number if scm has not started yet.
+       if [[ "$datanodes" ]]; then
+         echo "$datanodes datanode(s) are up and healthy (so far)"
+       fi
+     fi
+     sleep 2
+   done
+   echo "WARNING! Datanodes did not start successfully. Please check the docker-compose files."
+ }
+ ## @description Execute selected test suites in a specified docker-compose environment
+ ## @param the name of the docker-compose env relative to ../compose
+ ## @param the name of the tests (array of subdirectory names under this script's directory)
execute_tests(){
COMPOSE_DIR=$1
COMPOSE_FILE=$DIR/../compose/$COMPOSE_DIR/docker-compose.yaml
@@ -37,9 +71,8 @@ execute_tests(){
echo " Command to rerun: ./test.sh --keep --env $COMPOSE_DIR $TESTS"
echo "-------------------------------------------------"
docker-compose -f "$COMPOSE_FILE" down
- docker-compose -f "$COMPOSE_FILE" up -d
- echo "Waiting 30s for cluster start up..."
- sleep 30
+ docker-compose -f "$COMPOSE_FILE" up -d --scale datanode=3
+ wait_for_datanodes "$COMPOSE_FILE"
for TEST in "${TESTS[@]}"; do
TITLE="Ozone $TEST tests with $COMPOSE_DIR cluster"
set +e
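
For reference, the health check that wait_for_datanodes loops over can also be run by hand against a started cluster. A sketch, where the compose file path is only a placeholder for whichever environment was brought up:

    COMPOSE_FILE=../compose/ozone/docker-compose.yaml   # placeholder path
    docker-compose -f "$COMPOSE_FILE" exec scm curl -s \
      'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' \
      | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
    # prints the number of HEALTHY datanodes; the script waits until this reaches 3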