diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index e337d2fb49..283488367f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -215,9 +215,9 @@ public final class ScmConfigKeys {
       "ozone.scm.db.cache.size.mb";
   public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128;
 
-  public static final String OZONE_SCM_CONTAINER_SIZE_GB =
-      "ozone.scm.container.size.gb";
-  public static final int OZONE_SCM_CONTAINER_SIZE_DEFAULT = 5;
+  public static final String OZONE_SCM_CONTAINER_SIZE =
+      "ozone.scm.container.size";
+  public static final String OZONE_SCM_CONTAINER_SIZE_DEFAULT = "5GB";
 
   public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY =
       "ozone.scm.container.placement.impl";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 568e38d040..37a845e0af 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -611,12 +611,11 @@
   <property>
-    <name>ozone.scm.container.size.gb</name>
-    <value>5</value>
+    <name>ozone.scm.container.size</name>
+    <value>5GB</value>
     <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
-    <description>Default container size used by Ozone. This value is specified
-      in GB.
+    <description>Default container size used by Ozone.
       There are two considerations while picking this number. The speed at
       which a container can be replicated, determined by the network speed
       and the metadata that each container generates. So selecting a large
       number
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 47894dcde3..afd140781a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -111,7 +111,7 @@ protected ContainerData(ContainerType type, long containerId, int size) {
    * @param type - ContainerType
    * @param containerId - ContainerId
    * @param layOutVersion - Container layOutVersion
-   * @param size - Container maximum size
+   * @param size - Container maximum size in GB
    */
   protected ContainerData(ContainerType type, long containerId,
       int layOutVersion, int size) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 1d37437187..e4cb5f35de 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -21,6 +21,8 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import java.util.Collections;
+
+import org.apache.hadoop.conf.StorageSize;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -82,7 +84,7 @@ public class KeyValueContainerData extends ContainerData {
   /**
    * Constructs KeyValueContainerData object.
    * @param id - ContainerId
-   * @param size - maximum size of the container
+   * @param size - maximum size in GB of the container
    */
   public KeyValueContainerData(long id, int size) {
     super(ContainerProtos.ContainerType.KeyValueContainer, id, size);
@@ -94,7 +96,7 @@ public KeyValueContainerData(long id, int size) {
    * Constructs KeyValueContainerData object.
    * @param id - ContainerId
    * @param layOutVersion
-   * @param size - maximum size of the container
+   * @param size - maximum size in GB of the container
    */
   public KeyValueContainerData(long id, int layOutVersion, int size) {
     super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion,
@@ -266,9 +268,11 @@ public static List<String> getYamlFields() {
   public static KeyValueContainerData getFromProtoBuf(
       ContainerProtos.ContainerData protoData) throws IOException {
     // TODO: Add containerMaxSize to ContainerProtos.ContainerData
+    StorageSize storageSize = StorageSize.parse(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
     KeyValueContainerData data = new KeyValueContainerData(
         protoData.getContainerID(),
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+        (int)storageSize.getUnit().toBytes(storageSize.getValue()));
     for (int x = 0; x < protoData.getMetadataCount(); x++) {
       data.addMetadata(protoData.getMetadata(x).getKey(),
           protoData.getMetadata(x).getValue());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 29c359e15c..8409561742 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -22,6 +22,7 @@
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -149,9 +150,9 @@ public KeyValueHandler(Configuration config, ContainerSet contSet,
     volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass(
         HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy
             .class, VolumeChoosingPolicy.class), conf);
-    maxContainerSizeGB = config.getInt(ScmConfigKeys
-        .OZONE_SCM_CONTAINER_SIZE_GB, ScmConfigKeys
-        .OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    maxContainerSizeGB = (int)config.getStorageSize(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.GB);
     // this handler lock is used for synchronizing createContainer Requests,
     // so using a fair lock here.
     handlerLock = new AutoCloseableLock(new ReentrantLock(true));
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index f3a111fa57..ca2a6a0f74 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -18,6 +18,7 @@
 import java.util.UUID;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
@@ -30,7 +31,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -98,9 +98,10 @@ public BlockManagerImpl(final Configuration conf,
     this.nodeManager = nodeManager;
     this.containerManager = containerManager;
 
-    this.containerSize = OzoneConsts.GB * conf.getInt(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    this.containerSize = (long)conf.getStorageSize(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
+        StorageUnit.BYTES);
 
     this.containerProvisionBatchSize =
         conf.getInt(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 4076dad623..ba957642b0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -20,6 +20,7 @@
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.SCMContainerInfo;
@@ -66,7 +67,7 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_GB;
+    .OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
     .FAILED_TO_CHANGE_CONTAINER_STATE;
 import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
@@ -129,9 +130,8 @@ public ContainerMapping(
 
     this.lock = new ReentrantLock();
 
-    // To be replaced with code getStorageSize once it is committed.
-    size = conf.getLong(OZONE_SCM_CONTAINER_SIZE_GB,
-        OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
+    size = (long)conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
+        OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
     this.containerStateManager = new ContainerStateManager(conf, this);
     LOG.trace("Container State Manager created.");
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 5df7dc7e68..5eb81951f5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -20,6 +20,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -35,7 +36,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.statemachine
     .InvalidStateTransitionException;
 import org.apache.hadoop.ozone.common.statemachine.StateMachine;
@@ -148,9 +148,10 @@ public ContainerStateManager(Configuration configuration,
         finalStates);
     initializeStateMachine();
 
-    this.containerSize = OzoneConsts.GB * configuration.getInt(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    this.containerSize =(long)configuration.getStorageSize(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
+        StorageUnit.BYTES);
 
     lastUsedMap = new ConcurrentHashMap<>();
     containerCount = new AtomicLong(0);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index 028d14bd6d..5343bce10e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -18,6 +18,7 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
@@ -38,7 +39,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.statemachine
     .InvalidStateTransitionException;
 import org.apache.hadoop.ozone.common.statemachine.StateMachine;
@@ -94,9 +94,10 @@ public PipelineSelector(NodeManager nodeManager,
     this.conf = conf;
     this.eventPublisher = eventPublisher;
     this.placementPolicy = createContainerPlacementPolicy(nodeManager, conf);
-    this.containerSize = OzoneConsts.GB * this.conf.getInt(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    this.containerSize = (long)this.conf.getStorageSize(
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
+        StorageUnit.BYTES);
     node2PipelineMap = new Node2PipelineMap();
     this.standaloneManager =
         new StandaloneManagerImpl(this.nodeManager, placementPolicy,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 543cad3c97..4790c82906 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -19,6 +19,7 @@
 
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -39,7 +40,7 @@
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATED;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
@@ -58,9 +59,8 @@ public class TestCloseContainerEventHandler {
   @BeforeClass
   public static void setUp() throws Exception {
     configuration = SCMTestUtils.getConf();
-    size = configuration
-        .getLong(OZONE_SCM_CONTAINER_SIZE_GB, OZONE_SCM_CONTAINER_SIZE_DEFAULT)
-        * 1024 * 1024 * 1024;
+    size = (long)configuration.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
+        OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
     testDir = GenericTestUtils
         .getTestDir(TestCloseContainerEventHandler.class.getSimpleName());
     configuration
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index 0c0f25d0c1..608bb9242b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.container.closer;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
@@ -50,7 +51,7 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_GB;
+    .OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent
     .CREATE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent
@@ -71,8 +72,8 @@ public class TestContainerCloser {
   @BeforeClass
   public static void setUp() throws Exception {
     configuration = SCMTestUtils.getConf();
-    size = configuration.getLong(OZONE_SCM_CONTAINER_SIZE_GB,
-        OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
+    size = (long)configuration.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
+        OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
     configuration.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1,
         TimeUnit.SECONDS);
     testDir = GenericTestUtils
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
index 8d71d00551..f54322c523 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
@@ -23,6 +23,7 @@
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.cli.container.ContainerCommandHandler;
 import org.apache.hadoop.hdds.scm.cli.container.CreateContainerHandler;
@@ -49,7 +50,7 @@
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_GB;
+    .OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
 import static org.apache.hadoop.hdds.scm.cli.ResultCode.EXECUTION_ERROR;
 import static org.apache.hadoop.hdds.scm.cli.ResultCode.SUCCESS;
@@ -107,8 +108,9 @@ private static ScmClient getScmClient(OzoneConfiguration ozoneConf)
         StorageContainerLocationProtocolPB.class);
     InetSocketAddress scmAddress =
         getScmAddressForClients(ozoneConf);
-    int containerSizeGB = ozoneConf.getInt(OZONE_SCM_CONTAINER_SIZE_GB,
-        OZONE_SCM_CONTAINER_SIZE_DEFAULT);
+    int containerSizeGB = (int)ozoneConf.getStorageSize(
+        OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT,
+        StorageUnit.GB);
     ContainerOperationClient.setContainerSizeB(containerSizeGB*OzoneConsts.GB);
     RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index 3d39dbb6dd..84b7b764b9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -33,7 +33,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -52,7 +52,7 @@ public void test() throws IOException, TimeoutException, InterruptedException,
 
     //setup a cluster (1G free space is enough for a unit test)
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CONTAINER_SIZE_GB, "1");
+    conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
     MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java
index a5b101fa70..9e08212629 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerHandler.java
@@ -23,14 +23,13 @@
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CONTAINER_SIZE_GB;
+    .OZONE_SCM_CONTAINER_SIZE;
 import org.junit.Test;
 
 /**
@@ -47,7 +46,7 @@ public void test() throws IOException, TimeoutException, InterruptedException,
         .captureLogs(ReplicateContainerCommandHandler.LOG);
 
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_SCM_CONTAINER_SIZE_GB, "1");
+    conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
     MiniOzoneCluster cluster =
         MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();
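
For reviewers who want to try the new key locally, the standalone sketch below (not part of the patch; the class name is made up for illustration) pulls together the calls this change relies on: values such as "5GB" or "512MB" are read with Configuration.getStorageSize in whatever unit the caller needs, and the default string can be parsed with StorageSize.parse, as KeyValueContainerData.getFromProtoBuf does above.

// Illustrative sketch only, not part of the patch.
import org.apache.hadoop.conf.StorageSize;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class ContainerSizeConfigExample {

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // The renamed key takes a value with a unit suffix instead of a bare
    // number of gigabytes.
    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, "5GB");

    // getStorageSize converts the configured value (or the default string
    // "5GB") into the requested unit and returns a double.
    long sizeInBytes = (long) conf.getStorageSize(
        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
        StorageUnit.BYTES);

    // The default string can also be parsed directly, which is how
    // KeyValueContainerData.getFromProtoBuf derives a size in this patch.
    StorageSize parsed = StorageSize.parse(
        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
    long defaultInBytes = (long) parsed.getUnit().toBytes(parsed.getValue());

    System.out.println(sizeInBytes + " bytes (configured), "
        + defaultInBytes + " bytes (default)");
  }
}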