HDDS-317. Use new StorageSize API for reading ozone.scm.container.size.gb. Contributed by Junjie Chen.

Márton Elek 2018-08-24 13:54:38 +02:00
parent 96c4575d73
commit 55b6931059
14 changed files with 54 additions and 45 deletions
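The pattern applied across these files: instead of reading an integer number of GB (ozone.scm.container.size.gb) and multiplying by OzoneConsts.GB or 1024 * 1024 * 1024 by hand, each call site now reads ozone.scm.container.size through Configuration.getStorageSize, which parses a unit-suffixed value and converts it to the caller's StorageUnit. A minimal standalone sketch of the before/after read pattern (the demo class is hypothetical; the key and default mirror ScmConfigKeys after this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.StorageUnit;

    public final class ContainerSizeReadDemo {
      // Hypothetical demo constants mirroring ScmConfigKeys after this patch.
      private static final String OZONE_SCM_CONTAINER_SIZE =
          "ozone.scm.container.size";
      private static final String OZONE_SCM_CONTAINER_SIZE_DEFAULT = "5GB";

      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Before: an int in GB, converted to bytes by hand.
        // long bytes = 1024L * 1024 * 1024
        //     * conf.getInt("ozone.scm.container.size.gb", 5);

        // After: getStorageSize parses values such as "5GB" or "512MB" and
        // converts them to the requested unit, returning a double.
        long bytes = (long) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
            OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
        System.out.println(bytes); // 5368709120 with the "5GB" default
      }
    }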

View File

@@ -215,9 +215,9 @@ public final class ScmConfigKeys {
       "ozone.scm.db.cache.size.mb";
   public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128;
-  public static final String OZONE_SCM_CONTAINER_SIZE_GB =
-      "ozone.scm.container.size.gb";
-  public static final int OZONE_SCM_CONTAINER_SIZE_DEFAULT = 5;
+  public static final String OZONE_SCM_CONTAINER_SIZE =
+      "ozone.scm.container.size";
+  public static final String OZONE_SCM_CONTAINER_SIZE_DEFAULT = "5GB";
   public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY =
       "ozone.scm.container.placement.impl";

View File

@@ -611,12 +611,11 @@
     </description>
   </property>
   <property>
-    <name>ozone.scm.container.size.gb</name>
-    <value>5</value>
+    <name>ozone.scm.container.size</name>
+    <value>5GB</value>
     <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
     <description>
-      Default container size used by Ozone. This value is specified
-      in GB.
+      Default container size used by Ozone.
       There are two considerations while picking this number. The speed at which
       a container can be replicated, determined by the network speed and the
       metadata that each container generates. So selecting a large number
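Operators now express the size as one unit-suffixed string instead of a bare GB count; for instance, an override in ozone-site.xml might read (the 2GB figure is purely illustrative):

    <property>
      <name>ozone.scm.container.size</name>
      <value>2GB</value>
    </property>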

View File

@@ -111,7 +111,7 @@ protected ContainerData(ContainerType type, long containerId, int size) {
    * @param type - ContainerType
    * @param containerId - ContainerId
    * @param layOutVersion - Container layOutVersion
-   * @param size - Container maximum size
+   * @param size - Container maximum size in GB
    */
   protected ContainerData(ContainerType type, long containerId,
       int layOutVersion, int size) {

View File

@@ -21,6 +21,8 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import java.util.Collections;
+import org.apache.hadoop.conf.StorageSize;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -82,7 +84,7 @@ public class KeyValueContainerData extends ContainerData {
   /**
    * Constructs KeyValueContainerData object.
    * @param id - ContainerId
-   * @param size - maximum size of the container
+   * @param size - maximum size in GB of the container
    */
   public KeyValueContainerData(long id, int size) {
     super(ContainerProtos.ContainerType.KeyValueContainer, id, size);
@@ -94,7 +96,7 @@ public KeyValueContainerData(long id, int size) {
   /**
    * Constructs KeyValueContainerData object.
    * @param id - ContainerId
    * @param layOutVersion
-   * @param size - maximum size of the container
+   * @param size - maximum size in GB of the container
    */
   public KeyValueContainerData(long id, int layOutVersion, int size) {
     super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion,
@@ -266,9 +268,11 @@ public static List<String> getYamlFields() {
   public static KeyValueContainerData getFromProtoBuf(
       ContainerProtos.ContainerData protoData) throws IOException {
     // TODO: Add containerMaxSize to ContainerProtos.ContainerData
+    StorageSize storageSize = StorageSize.parse(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
     KeyValueContainerData data = new KeyValueContainerData(
         protoData.getContainerID(),
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+        (int)storageSize.getUnit().toBytes(storageSize.getValue()));
     for (int x = 0; x < protoData.getMetadataCount(); x++) {
       data.addMetadata(protoData.getMetadata(x).getKey(),
           protoData.getMetadata(x).getValue());
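Here the fallback size is derived by parsing the default string rather than using an int constant. A small hypothetical sketch of what StorageSize.parse yields for the "5GB" default (expected values shown as comments):

    import org.apache.hadoop.conf.StorageSize;
    import org.apache.hadoop.conf.StorageUnit;

    public final class StorageSizeParseDemo {
      public static void main(String[] args) {
        // Parse the same default string used by getFromProtoBuf above.
        StorageSize parsed = StorageSize.parse("5GB");
        System.out.println(parsed.getValue());                  // 5.0
        System.out.println(parsed.getUnit() == StorageUnit.GB); // true
        // The unit converts its own value to bytes, returning a double.
        System.out.println(
            parsed.getUnit().toBytes(parsed.getValue()));       // 5.36870912E9
      }
    }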

View File

@@ -22,6 +22,7 @@
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -149,9 +150,9 @@ public KeyValueHandler(Configuration config, ContainerSet contSet,
     volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass(
         HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy
             .class, VolumeChoosingPolicy.class), conf);
-    maxContainerSizeGB = config.getInt(ScmConfigKeys
-        .OZONE_SCM_CONTAINER_SIZE_GB, ScmConfigKeys
-        .OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    maxContainerSizeGB = (int)config.getStorageSize(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.GB);
     // this handler lock is used for synchronizing createContainer Requests,
     // so using a fair lock here.
     handlerLock = new AutoCloseableLock(new ReentrantLock(true));
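Since getStorageSize converts to whichever StorageUnit the caller passes, sites that want GB (maxContainerSizeGB here) and sites that want bytes (the SCM managers below) share one key. A minimal hypothetical sketch, assuming the "5GB" default:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.StorageUnit;

    public final class UnitChoiceDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Same key, two target units; only the conversion differs.
        int gb = (int) conf.getStorageSize(
            "ozone.scm.container.size", "5GB", StorageUnit.GB);    // 5
        long bytes = (long) conf.getStorageSize(
            "ozone.scm.container.size", "5GB", StorageUnit.BYTES); // 5368709120
        System.out.println(gb + " GB = " + bytes + " bytes");
      }
    }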

View File

@@ -18,6 +18,7 @@
 import java.util.UUID;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
@@ -30,7 +31,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -98,9 +98,10 @@ public BlockManagerImpl(final Configuration conf,
     this.nodeManager = nodeManager;
     this.containerManager = containerManager;
-    this.containerSize = OzoneConsts.GB * conf.getInt(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    this.containerSize = (long)conf.getStorageSize(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
+        StorageUnit.BYTES);
     this.containerProvisionBatchSize =
         conf.getInt(

View File

@@ -20,6 +20,7 @@
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.SCMContainerInfo;
@@ -66,7 +67,7 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_GB;
+    .OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
     .FAILED_TO_CHANGE_CONTAINER_STATE;
 import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
@@ -129,9 +130,8 @@ public ContainerMapping(
     this.lock = new ReentrantLock();
-    // To be replaced with code getStorageSize once it is committed.
-    size = conf.getLong(OZONE_SCM_CONTAINER_SIZE_GB,
-        OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
+    size = (long)conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
+        OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
     this.containerStateManager =
         new ContainerStateManager(conf, this);
     LOG.trace("Container State Manager created.");

View File

@@ -20,6 +20,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -35,7 +36,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.statemachine
     .InvalidStateTransitionException;
 import org.apache.hadoop.ozone.common.statemachine.StateMachine;
@@ -148,9 +148,10 @@ public ContainerStateManager(Configuration configuration,
         finalStates);
     initializeStateMachine();
-    this.containerSize = OzoneConsts.GB * configuration.getInt(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    this.containerSize =(long)configuration.getStorageSize(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
+        StorageUnit.BYTES);
     lastUsedMap = new ConcurrentHashMap<>();
     containerCount = new AtomicLong(0);

View File

@@ -18,6 +18,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
@@ -38,7 +39,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.statemachine
     .InvalidStateTransitionException;
 import org.apache.hadoop.ozone.common.statemachine.StateMachine;
@@ -94,9 +94,10 @@ public PipelineSelector(NodeManager nodeManager,
     this.conf = conf;
     this.eventPublisher = eventPublisher;
     this.placementPolicy = createContainerPlacementPolicy(nodeManager, conf);
-    this.containerSize = OzoneConsts.GB * this.conf.getInt(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    this.containerSize = (long)this.conf.getStorageSize(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
+        StorageUnit.BYTES);
     node2PipelineMap = new Node2PipelineMap();
     this.standaloneManager =
         new StandaloneManagerImpl(this.nodeManager, placementPolicy,

View File

@@ -19,6 +19,7 @@
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -39,7 +40,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATED;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
@@ -58,9 +59,8 @@ public class TestCloseContainerEventHandler {
   @BeforeClass
   public static void setUp() throws Exception {
     configuration = SCMTestUtils.getConf();
-    size = configuration
-        .getLong(OZONE_SCM_CONTAINER_SIZE_GB, OZONE_SCM_CONTAINER_SIZE_DEFAULT)
-        * 1024 * 1024 * 1024;
+    size = (long)configuration.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
+        OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
     testDir = GenericTestUtils
         .getTestDir(TestCloseContainerEventHandler.class.getSimpleName());
     configuration

View File

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.container.closer;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
@@ -50,7 +51,7 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_GB;
+    .OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent
     .CREATE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent
@@ -71,8 +72,8 @@ public class TestContainerCloser {
   @BeforeClass
   public static void setUp() throws Exception {
     configuration = SCMTestUtils.getConf();
-    size = configuration.getLong(OZONE_SCM_CONTAINER_SIZE_GB,
-        OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
+    size = (long)configuration.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
+        OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
     configuration.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL,
         1, TimeUnit.SECONDS);
     testDir = GenericTestUtils

View File

@@ -23,6 +23,7 @@
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.cli.container.ContainerCommandHandler;
 import org.apache.hadoop.hdds.scm.cli.container.CreateContainerHandler;
@@ -49,7 +50,7 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_GB;
+    .OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
 import static org.apache.hadoop.hdds.scm.cli.ResultCode.EXECUTION_ERROR;
 import static org.apache.hadoop.hdds.scm.cli.ResultCode.SUCCESS;
@@ -107,8 +108,9 @@ private static ScmClient getScmClient(OzoneConfiguration ozoneConf)
         StorageContainerLocationProtocolPB.class);
     InetSocketAddress scmAddress =
         getScmAddressForClients(ozoneConf);
-    int containerSizeGB = ozoneConf.getInt(OZONE_SCM_CONTAINER_SIZE_GB,
-        OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    int containerSizeGB = (int)ozoneConf.getStorageSize(
+        OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT,
+        StorageUnit.GB);
     ContainerOperationClient.setContainerSizeB(containerSizeGB*OzoneConsts.GB);
     RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,

View File

@@ -33,7 +33,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -52,7 +52,7 @@ public void test() throws IOException, TimeoutException, InterruptedException,
     //setup a cluster (1G free space is enough for a unit test)
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CONTAINER_SIZE_GB, "1");
+    conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
     MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();

View File

@@ -23,14 +23,13 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.test.GenericTestUtils;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_GB;
+    .OZONE_SCM_CONTAINER_SIZE;
 import org.junit.Test;
 /**
@@ -47,7 +46,7 @@ public void test() throws IOException, TimeoutException, InterruptedException,
         .captureLogs(ReplicateContainerCommandHandler.LOG);
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CONTAINER_SIZE_GB, "1");
+    conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
     MiniOzoneCluster cluster =
         MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();