HDDS-290. putKey is failing with KEY_ALLOCATION_ERROR. Contributed by Xiaoyu Yao.
parent 7c368575a3
commit e83719c830
@@ -22,6 +22,7 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_ozone.replication=1
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
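
Note (not part of the commit): the OZONE-SITE.XML_-prefixed entries in docker-config are rendered into ozone-site.xml by the compose base image's configuration tooling, so ozone.replication=1 becomes the cluster-wide default for this single-node setup. A minimal sketch of how a client would pick that default up, using the OzoneConfigKeys constants imported later in this diff; package names are assumed from the 0.2.x layout:

    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    public class ReplicationDefaultSketch {
      public static void main(String[] args) {
        // Sketch only: with ozone.replication=1 rendered into ozone-site.xml,
        // the configured default resolves to ReplicationFactor.ONE.
        OzoneConfiguration conf = new OzoneConfiguration();
        ReplicationFactor factor = ReplicationFactor.valueOf(
            conf.getInt(OzoneConfigKeys.OZONE_REPLICATION,
                OzoneConfigKeys.OZONE_REPLICATION_DEFAULT));
        System.out.println("default replication factor: " + factor);
      }
    }
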
@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Ozonefs Single Node Test
+Library             OperatingSystem
+Suite Setup         Startup Ozone cluster with size          1
+Suite Teardown      Teardown Ozone cluster
+Resource            ../commonlib.robot
+
+*** Variables ***
+${COMPOSEFILE}          ${CURDIR}/docker-compose.yaml
+${PROJECTDIR}           ${CURDIR}/../../../../../..
+
+
+*** Test Cases ***
+Create volume and bucket
+    Execute on          datanode        ozone oz -createVolume http://ozoneManager/fstest -user bilbo -quota 100TB -root
+    Execute on          datanode        ozone oz -createBucket http://ozoneManager/fstest/bucket1
+
+Check volume from ozonefs
+    ${result} =         Execute on          hadooplast      hdfs dfs -ls o3://bucket1.fstest/
+
+Create directory from ozonefs
+    Execute on          hadooplast      hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep
+    ${result} =         Execute on          ozoneManager    ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    Should contain      ${result}           testdir/deep
+Test key handling
+    Execute on          datanode        ozone oz -putKey o3://ozoneManager/fstest/bucket1/key1 -file NOTICE.txt -replicationFactor 1
+    Execute on          datanode        rm -f NOTICE.txt.1
+    Execute on          datanode        ozone oz -getKey o3://ozoneManager/fstest/bucket1/key1 -file NOTICE.txt.1
+    Execute on          datanode        ls -l NOTICE.txt.1
+    ${result} =         Execute on          datanode        ozone oz -infoKey o3://ozoneManager/fstest/bucket1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
+    Should contain      ${result}           createdOn
+    ${result} =         Execute on          datanode        ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+    Should Be Equal     ${result}           key1
+    Execute on          datanode        ozone oz -deleteKey o3://ozoneManager/fstest/bucket1/key1 -v
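
For reference, the same put/get round trip that the new acceptance test drives through "ozone oz" can be expressed against the Java client. This is a hedged sketch, not part of the commit; the OzoneClientFactory, createKey and readKey calls and the package names are assumed from the 0.2.x client API:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;
    import org.apache.hadoop.ozone.client.io.OzoneInputStream;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    public class PutGetKeySketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        try (OzoneClient client = OzoneClientFactory.getClient(conf)) {
          ObjectStore store = client.getObjectStore();
          OzoneBucket bucket = store.getVolume("fstest").getBucket("bucket1");

          // putKey with an explicit factor of 1, like the -replicationFactor 1
          // flag exercised by the robot test above.
          byte[] data = "test payload".getBytes(StandardCharsets.UTF_8);
          try (OzoneOutputStream out = bucket.createKey(
              "key1", data.length, ReplicationType.RATIS, ReplicationFactor.ONE)) {
            out.write(data);
          }

          // getKey: read the key back, mirroring the -getKey step of the test.
          byte[] read = new byte[data.length];
          try (OzoneInputStream in = bucket.readKey("key1")) {
            in.read(read);
          }
        }
      }
    }
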
@@ -92,6 +92,7 @@ public class Shell extends Configured implements Tool {
   public static final String DELETE_KEY = "deleteKey";
   public static final String LIST_KEY = "listKey";
   public static final String FILE = "file";
+  public static final String REPLICATION_FACTOR = "replicationFactor";
 
   // Listing related command line arguments
   public static final String LIST_LENGTH = "length";
@@ -292,6 +293,9 @@ private void addKeyCommands(Options opts) {
         new Option(FILE, true, "Data file path");
     opts.addOption(fileArgument);
 
+    Option repFactor =
+        new Option(REPLICATION_FACTOR, true, "Replication factor (1 or 3)");
+    opts.addOption(repFactor);
   }
 
   /**
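
A small, hedged illustration (not in the commit) of how the new option flows through commons-cli, mirroring Shell's Options/CommandLine usage; the option strings match the constants added above and the argv values are made up for the example:

    import org.apache.commons.cli.BasicParser;
    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.Options;

    public class ReplicationFactorOptionSketch {
      public static void main(String[] args) throws Exception {
        Options opts = new Options();
        opts.addOption("file", true, "Data file path");
        opts.addOption("replicationFactor", true, "Replication factor (1 or 3)");

        // Roughly: ozone oz -putKey ... -file NOTICE.txt -replicationFactor 1
        CommandLine cmd = new BasicParser().parse(opts,
            new String[] {"-file", "NOTICE.txt", "-replicationFactor", "1"});

        if (cmd.hasOption("replicationFactor")) {
          int factor = Integer.parseInt(cmd.getOptionValue("replicationFactor"));
          System.out.println("requested replication factor: " + factor); // 1
        }
      }
    }
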
@@ -44,7 +44,9 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT;
 
 /**
  * Puts a file into an ozone bucket.
@@ -103,11 +105,17 @@ protected void execute(CommandLine cmd)
     }
 
     Configuration conf = new OzoneConfiguration();
-    ReplicationFactor replicationFactor = ReplicationFactor.valueOf(
-        conf.getInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()));
-    ReplicationType replicationType = ReplicationType.valueOf(
-        conf.get(OZONE_REPLICATION_TYPE, ReplicationType.RATIS.toString()));
+    ReplicationFactor replicationFactor;
+    if (cmd.hasOption(Shell.REPLICATION_FACTOR)) {
+      replicationFactor = ReplicationFactor.valueOf(Integer.parseInt(cmd
+          .getOptionValue(Shell.REPLICATION_FACTOR)));
+    } else {
+      replicationFactor = ReplicationFactor.valueOf(
+          conf.getInt(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT));
+    }
 
+    ReplicationType replicationType = ReplicationType.valueOf(
+        conf.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT));
     OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
     OzoneBucket bucket = vol.getBucket(bucketName);
     OzoneOutputStream outputStream = bucket
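
The gist of the fix, restated outside the diff: previously the shell always fell back to a hard-coded ReplicationFactor.THREE, which a single-datanode compose cluster presumably cannot satisfy and which surfaced as KEY_ALLOCATION_ERROR on putKey; now an explicit -replicationFactor wins, and otherwise ozone.replication from ozone-site.xml (set to 1 in the compose cluster above) is used. A condensed restatement of the two resolution paths, using the same identifiers as the hunk above (not a standalone program):

    // Before: configuration value, else a hard-coded fallback of THREE.
    ReplicationFactor before = ReplicationFactor.valueOf(
        conf.getInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()));

    // After: -replicationFactor on the command line first, then the
    // ozone.replication site default.
    ReplicationFactor after = cmd.hasOption(Shell.REPLICATION_FACTOR)
        ? ReplicationFactor.valueOf(
            Integer.parseInt(cmd.getOptionValue(Shell.REPLICATION_FACTOR)))
        : ReplicationFactor.valueOf(
            conf.getInt(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT));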