HDDS-374. Support to configure container size in units lesser than GB. Contributed by Nanda kumar.

Nanda kumar 2018-08-27 18:29:32 +05:30
parent 91836f0f81
commit 12b2f362cc
24 changed files with 79 additions and 66 deletions
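The patch threads the container size through as a byte count (a long) rather than a whole number of gigabytes, which is what makes sub-GB sizes configurable. A minimal sketch of reading the size after this change, using the key and default from ScmConfigKeys as they appear in the diff below (the 512MB setting is illustrative):

    import org.apache.hadoop.conf.StorageUnit;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    public class ContainerSizeConfigSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Any storage unit now works; a value below 1 GB was previously impossible.
        conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, "512MB");
        long maxContainerSize = (long) conf.getStorageSize(
            ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
            ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
            StorageUnit.BYTES);
        System.out.println(maxContainerSize); // 536870912
      }
    }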

View File

@@ -185,7 +185,7 @@ private OzoneConsts() {
   public static final String CONTAINER_TYPE = "containerType";
   public static final String STATE = "state";
   public static final String METADATA = "metadata";
-  public static final String MAX_SIZE_GB = "maxSizeGB";
+  public static final String MAX_SIZE = "maxSize";
   public static final String METADATA_PATH = "metadataPath";
   public static final String CHUNKS_PATH = "chunksPath";
   public static final String CONTAINER_DB_TYPE = "containerDBType";

View File

@@ -40,7 +40,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE;
 import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION;
-import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE_GB;
+import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE;
 import static org.apache.hadoop.ozone.OzoneConsts.METADATA;
 import static org.apache.hadoop.ozone.OzoneConsts.STATE;
@@ -67,7 +67,7 @@ public abstract class ContainerData {
   // State of the Container
   private ContainerLifeCycleState state;
-  private final int maxSizeGB;
+  private final long maxSize;
   /** parameters for read/write statistics on the container. **/
   private final AtomicLong readBytes;
@@ -92,16 +92,16 @@
       LAYOUTVERSION,
       STATE,
       METADATA,
-      MAX_SIZE_GB,
+      MAX_SIZE,
       CHECKSUM));
   /**
    * Creates a ContainerData Object, which holds metadata of the container.
    * @param type - ContainerType
    * @param containerId - ContainerId
-   * @param size - container maximum size
+   * @param size - container maximum size in bytes
    */
-  protected ContainerData(ContainerType type, long containerId, int size) {
+  protected ContainerData(ContainerType type, long containerId, long size) {
     this(type, containerId,
         ChunkLayOutVersion.getLatestVersion().getVersion(), size);
   }
@@ -111,10 +111,10 @@ protected ContainerData(ContainerType type, long containerId, int size) {
    * @param type - ContainerType
    * @param containerId - ContainerId
    * @param layOutVersion - Container layOutVersion
-   * @param size - Container maximum size in GB
+   * @param size - Container maximum size in bytes
    */
   protected ContainerData(ContainerType type, long containerId,
-      int layOutVersion, int size) {
+      int layOutVersion, long size) {
     Preconditions.checkNotNull(type);
     this.containerType = type;
@@ -128,7 +128,7 @@ protected ContainerData(ContainerType type, long containerId,
     this.writeBytes = new AtomicLong(0L);
     this.bytesUsed = new AtomicLong(0L);
     this.keyCount = new AtomicLong(0L);
-    this.maxSizeGB = size;
+    this.maxSize = size;
     setChecksumTo0ByteArray();
   }
@@ -171,11 +171,11 @@ public synchronized void setState(ContainerLifeCycleState state) {
   }
   /**
-   * Return's maximum size of the container in GB.
-   * @return maxSizeGB
+   * Return's maximum size of the container in bytes.
+   * @return maxSize in bytes
    */
-  public int getMaxSizeGB() {
-    return maxSizeGB;
+  public long getMaxSize() {
+    return maxSize;
   }
   /**

View File

@@ -236,12 +236,11 @@ public Object construct(Node node) {
       long layOutVersion = (long) nodes.get(OzoneConsts.LAYOUTVERSION);
       int lv = (int) layOutVersion;
-      long size = (long) nodes.get(OzoneConsts.MAX_SIZE_GB);
-      int maxSize = (int) size;
+      long size = (long) nodes.get(OzoneConsts.MAX_SIZE);
       //When a new field is added, it needs to be added here.
       KeyValueContainerData kvData = new KeyValueContainerData(
-          (long) nodes.get(OzoneConsts.CONTAINER_ID), lv, maxSize);
+          (long) nodes.get(OzoneConsts.CONTAINER_ID), lv, size);
       kvData.setContainerDBType((String)nodes.get(
           OzoneConsts.CONTAINER_DB_TYPE));
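Dropping the int narrowing in the old code is the substantive fix here: once the YAML field holds a byte count, any value of 2 GiB or more would overflow an int. A self-contained demonstration of the overflow that keeping the value as a long avoids (plain Java semantics, no Ozone code involved):

    public class NarrowingOverflowSketch {
      public static void main(String[] args) {
        long size = 5368709120L;       // 5 GB expressed in bytes
        int truncated = (int) size;    // cast keeps only the low 32 bits
        System.out.println(truncated); // 1073741824 (1 GB): silently wrong
      }
    }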

View File

@ -21,12 +21,8 @@
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerInfo;
import org.apache.hadoop.hdds.protocol.proto import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerAction; .StorageContainerDatanodeProtocolProtos.ContainerAction;
import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@ -166,7 +162,7 @@ private void sendCloseContainerActionIfNeeded(Container container) {
if (isOpen) { if (isOpen) {
ContainerData containerData = container.getContainerData(); ContainerData containerData = container.getContainerData();
double containerUsedPercentage = 1.0f * containerData.getBytesUsed() / double containerUsedPercentage = 1.0f * containerData.getBytesUsed() /
StorageUnit.GB.toBytes(containerData.getMaxSizeGB()); containerData.getMaxSize();
if (containerUsedPercentage >= containerCloseThreshold) { if (containerUsedPercentage >= containerCloseThreshold) {
ContainerAction action = ContainerAction.newBuilder() ContainerAction action = ContainerAction.newBuilder()
.setContainerID(containerData.getContainerID()) .setContainerID(containerData.getContainerID())
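With getMaxSize() already in bytes, the dispatcher's fill-ratio check drops the StorageUnit.GB.toBytes conversion. A standalone sketch of the same arithmetic (the 0.9 threshold and byte counts are illustrative, not values from this diff):

    public class CloseActionSketch {
      public static void main(String[] args) {
        long bytesUsed = 4900000000L;         // bytes written so far (illustrative)
        long maxSize = 5368709120L;           // 5 GB container limit, in bytes
        double containerCloseThreshold = 0.9; // illustrative close threshold

        double containerUsedPercentage = 1.0f * bytesUsed / maxSize;
        if (containerUsedPercentage >= containerCloseThreshold) {
          // ~0.913 >= 0.9, so a real dispatcher would queue a close action here.
          System.out.println("container nearly full: send ContainerAction.CLOSE");
        }
      }
    }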

View File

@@ -109,7 +109,7 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy
     File containerMetaDataPath = null;
     //acquiring volumeset lock and container lock
     volumeSet.acquireLock();
-    long maxSize = (containerData.getMaxSizeGB() * 1024L * 1024L * 1024L);
+    long maxSize = containerData.getMaxSize();
     try {
       HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
           .getVolumesList(), maxSize);

View File

@@ -84,9 +84,9 @@ public class KeyValueContainerData extends ContainerData {
   /**
    * Constructs KeyValueContainerData object.
    * @param id - ContainerId
-   * @param size - maximum size in GB of the container
+   * @param size - maximum size of the container in bytes
    */
-  public KeyValueContainerData(long id, int size) {
+  public KeyValueContainerData(long id, long size) {
     super(ContainerProtos.ContainerType.KeyValueContainer, id, size);
     this.numPendingDeletionBlocks = new AtomicInteger(0);
     this.deleteTransactionId = 0;
@@ -96,9 +96,9 @@ public KeyValueContainerData(long id, int size) {
    * Constructs KeyValueContainerData object.
    * @param id - ContainerId
    * @param layOutVersion
-   * @param size - maximum size in GB of the container
+   * @param size - maximum size of the container in bytes
    */
-  public KeyValueContainerData(long id, int layOutVersion, int size) {
+  public KeyValueContainerData(long id, int layOutVersion, long size) {
     super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion,
         size);
     this.numPendingDeletionBlocks = new AtomicInteger(0);
@@ -272,7 +272,7 @@ public static KeyValueContainerData getFromProtoBuf(
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
     KeyValueContainerData data = new KeyValueContainerData(
         protoData.getContainerID(),
-        (int)storageSize.getUnit().toBytes(storageSize.getValue()));
+        (long)storageSize.getUnit().toBytes(storageSize.getValue()));
     for (int x = 0; x < protoData.getMetadataCount(); x++) {
       data.addMetadata(protoData.getMetadata(x).getKey(),
           protoData.getMetadata(x).getValue());
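getFromProtoBuf obtains the size from a unit-suffixed string via StorageSize, so only the cast target changes: an int caps at 2^31 - 1 bytes (2 GiB), while a long comfortably holds the 5 GB default. A hedged sketch of that conversion path (StorageSize.parse mirrors how the surrounding code parses the configured default; the "5GB" literal is illustrative):

    import org.apache.hadoop.conf.StorageSize;

    public class StorageSizeParseSketch {
      public static void main(String[] args) {
        // Parse a unit-suffixed size string, then normalize it to bytes.
        StorageSize storageSize = StorageSize.parse("5GB"); // illustrative value
        long bytes = (long) storageSize.getUnit().toBytes(storageSize.getValue());
        System.out.println(bytes); // 5368709120 -- too large for an (int) cast
      }
    }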

View File

@@ -80,8 +80,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CLOSED_CONTAINER_RETRY;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.CONTAINER_INTERNAL_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -124,8 +122,8 @@ public class KeyValueHandler extends Handler {
   private final KeyManager keyManager;
   private final ChunkManager chunkManager;
   private final BlockDeletingService blockDeletingService;
-  private VolumeChoosingPolicy volumeChoosingPolicy;
-  private final int maxContainerSizeGB;
+  private final VolumeChoosingPolicy volumeChoosingPolicy;
+  private final long maxContainerSize;
   private final AutoCloseableLock handlerLock;
   private final OpenContainerBlockMap openContainerBlockMap;
@@ -150,9 +148,9 @@ public KeyValueHandler(Configuration config, ContainerSet contSet,
     volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass(
         HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy
             .class, VolumeChoosingPolicy.class), conf);
-    maxContainerSizeGB = (int)config.getStorageSize(
+    maxContainerSize = (long)config.getStorageSize(
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.GB);
+        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
     // this handler lock is used for synchronizing createContainer Requests,
     // so using a fair lock here.
     handlerLock = new AutoCloseableLock(new ReentrantLock(true));
@@ -215,9 +213,10 @@ public ContainerCommandResponseProto handle(
       return handleGetSmallFile(request, kvContainer);
     case GetCommittedBlockLength:
       return handleGetCommittedBlockLength(request, kvContainer);
-    }
+    default:
       return null;
     }
+  }
   @VisibleForTesting
   public ChunkManager getChunkManager() {
@@ -247,7 +246,7 @@ ContainerCommandResponseProto handleCreateContainer(
     long containerID = request.getContainerID();
     KeyValueContainerData newContainerData = new KeyValueContainerData(
-        containerID, maxContainerSizeGB);
+        containerID, maxContainerSize);
     // TODO: Add support to add metadataList to ContainerData. Add metadata
     // to container during creation.
     KeyValueContainer newContainer = new KeyValueContainer(

View File

@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.common;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.junit.Test;
@@ -31,7 +32,7 @@
  */
 public class TestKeyValueContainerData {
-  private static final int MAXSIZE = 5;
+  private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
   @Test
   public void testKeyValueData() {
     long containerId = 1L;
@@ -58,7 +59,7 @@ public void testKeyValueData() {
     assertEquals(val.get(), kvData.getWriteCount());
     assertEquals(val.get(), kvData.getKeyCount());
     assertEquals(val.get(), kvData.getNumPendingDeletionBlocks());
-    assertEquals(MAXSIZE, kvData.getMaxSizeGB());
+    assertEquals(MAXSIZE, kvData.getMaxSize());
     kvData.setState(state);
     kvData.setContainerDBType(containerDBType);

View File

@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.common.impl;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -42,7 +43,7 @@ public class TestContainerDataYaml {
   private static String testRoot = new FileSystemTestHelper().getTestRootDir();
-  private static final int MAXSIZE = 5;
+  private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
   /**
    * Creates a .container file. cleanup() should be called at the end of the
@@ -94,7 +95,7 @@ public void testCreateContainerFile() throws IOException {
         .getState());
     assertEquals(1, kvData.getLayOutVersion());
     assertEquals(0, kvData.getMetadata().size());
-    assertEquals(MAXSIZE, kvData.getMaxSizeGB());
+    assertEquals(MAXSIZE, kvData.getMaxSize());
     // Update ContainerData.
     kvData.addMetadata("VOLUME", "hdfs");
@@ -122,7 +123,7 @@ public void testCreateContainerFile() throws IOException {
     assertEquals(2, kvData.getMetadata().size());
     assertEquals("hdfs", kvData.getMetadata().get("VOLUME"));
     assertEquals("ozone", kvData.getMetadata().get("OWNER"));
-    assertEquals(MAXSIZE, kvData.getMaxSizeGB());
+    assertEquals(MAXSIZE, kvData.getMaxSize());
   }
   @Test

View File

@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.container.common.impl;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto
@@ -53,7 +54,8 @@ public void testAddGetRemoveContainer() throws StorageContainerException {
     ContainerProtos.ContainerLifeCycleState state = ContainerProtos
         .ContainerLifeCycleState.CLOSED;
-    KeyValueContainerData kvData = new KeyValueContainerData(containerId, 5);
+    KeyValueContainerData kvData = new KeyValueContainerData(containerId,
+        (long) StorageUnit.GB.toBytes(5));
     kvData.setState(state);
     KeyValueContainer keyValueContainer = new KeyValueContainer(kvData, new
         OzoneConfiguration());
@@ -163,7 +165,8 @@ public void testListContainer() throws StorageContainerException {
   private ContainerSet createContainerSet() throws StorageContainerException {
     ContainerSet containerSet = new ContainerSet();
     for (int i=0; i<10; i++) {
-      KeyValueContainerData kvData = new KeyValueContainerData(i, 5);
+      KeyValueContainerData kvData = new KeyValueContainerData(i,
+          (long) StorageUnit.GB.toBytes(5));
       if (i%2 == 0) {
         kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
       } else {

View File

@@ -70,7 +70,8 @@ public void testContainerCloseActionWhenFull() throws IOException {
     ContainerSet containerSet = new ContainerSet();
     VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
     StateContext context = Mockito.mock(StateContext.class);
-    KeyValueContainerData containerData = new KeyValueContainerData(1L, 1);
+    KeyValueContainerData containerData = new KeyValueContainerData(1L,
+        (long) StorageUnit.GB.toBytes(1));
     Container container = new KeyValueContainer(containerData, conf);
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
         scmId.toString());

View File

@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.keyvalue;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -79,7 +80,8 @@ public void setUp() throws Exception {
     Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
         .thenReturn(hddsVolume);
-    keyValueContainerData = new KeyValueContainerData(1L, 5);
+    keyValueContainerData = new KeyValueContainerData(1L,
+        (long) StorageUnit.GB.toBytes(5));
     keyValueContainer = new KeyValueContainer(keyValueContainerData, config);

View File

@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.keyvalue;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -79,7 +80,8 @@ public void setUp() throws Exception {
     Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
         .thenReturn(hddsVolume);
-    keyValueContainerData = new KeyValueContainerData(1L, 5);
+    keyValueContainerData = new KeyValueContainerData(1L,
+        (long) StorageUnit.GB.toBytes(5));
     keyValueContainer = new KeyValueContainer(
         keyValueContainerData, config);

View File

@@ -20,6 +20,7 @@
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -244,7 +245,8 @@ public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws
   private void createContainerWithBlocks(long containerId, int
       normalBlocks, int deletedBlocks) throws
       Exception {
-    containerData = new KeyValueContainerData(containerId, 1);
+    containerData = new KeyValueContainerData(containerId,
+        (long) StorageUnit.GB.toBytes(1));
     container = new KeyValueContainer(containerData, conf);
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
         .randomUUID().toString());

View File

@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.container.keyvalue;
 import com.google.common.primitives.Longs;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -94,7 +95,8 @@ public void setUp() throws Exception {
     Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
         .thenReturn(hddsVolume);
-    keyValueContainerData = new KeyValueContainerData(1L, 5);
+    keyValueContainerData = new KeyValueContainerData(1L,
+        (long) StorageUnit.GB.toBytes(5));
     keyValueContainer = new KeyValueContainer(
         keyValueContainerData, conf);
@@ -103,7 +105,8 @@ public void setUp() throws Exception {
   @Test
   public void testBlockIterator() throws Exception{
-    keyValueContainerData = new KeyValueContainerData(100L, 1);
+    keyValueContainerData = new KeyValueContainerData(100L,
+        (long) StorageUnit.GB.toBytes(1));
     keyValueContainer = new KeyValueContainer(
         keyValueContainerData, conf);
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
@@ -213,7 +216,7 @@ public void testContainerImportExport() throws Exception {
     //create a new one
     KeyValueContainerData containerData =
         new KeyValueContainerData(containerId, 1,
-            keyValueContainerData.getMaxSizeGB());
+            keyValueContainerData.getMaxSize());
     KeyValueContainer container = new KeyValueContainer(containerData, conf);
     HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
@@ -234,8 +237,8 @@ public void testContainerImportExport() throws Exception {
         containerData.getKeyCount());
     Assert.assertEquals(keyValueContainerData.getLayOutVersion(),
         containerData.getLayOutVersion());
-    Assert.assertEquals(keyValueContainerData.getMaxSizeGB(),
-        containerData.getMaxSizeGB());
+    Assert.assertEquals(keyValueContainerData.getMaxSize(),
+        containerData.getMaxSize());
     Assert.assertEquals(keyValueContainerData.getBytesUsed(),
         containerData.getBytesUsed());

View File

@@ -20,6 +20,7 @@
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -262,7 +263,8 @@ private ContainerCommandRequestProto getDummyCommandRequestProto(
   public void testCloseInvalidContainer() {
     long containerID = 1234L;
     Configuration conf = new Configuration();
-    KeyValueContainerData kvData = new KeyValueContainerData(containerID, 1);
+    KeyValueContainerData kvData = new KeyValueContainerData(containerID,
+        (long) StorageUnit.GB.toBytes(1));
     KeyValueContainer container = new KeyValueContainer(kvData, conf);
     kvData.setState(ContainerProtos.ContainerLifeCycleState.INVALID);

View File

@@ -19,9 +19,9 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -77,7 +77,8 @@ public void testBuildContainerMap() throws Exception {
     // Add containers to disk
     for (int i=0; i<10; i++) {
-      keyValueContainerData = new KeyValueContainerData(i, 1);
+      keyValueContainerData = new KeyValueContainerData(i,
+          (long) StorageUnit.GB.toBytes(1));
       keyValueContainer = new KeyValueContainer(
           keyValueContainerData, conf);
       keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);

View File

@@ -5,8 +5,8 @@ containerID: 9223372036854775807
 containerType: KeyValueContainer
 metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
 layOutVersion: 1
-maxSizeGB: 5
+maxSize: 5368709120
 metadata: {OWNER: ozone, VOLUME: hdfs}
 state: CLOSED
 aclEnabled: true
-checksum: 1bbff32aeaa8fadc0b80c5c1e0597036e96acd8ae4bddbed188a2162762251a2
+checksum: c5b5373b8755c4e7199478dcaded9d996f9aca089704e08950259cdb0f290680
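In the test resources, the persisted key changes from a unit-bearing name to a raw byte count: maxSizeGB: 5 becomes maxSize: 5368709120, i.e. 5 × 1024³ = 5,368,709,120 bytes. The same substitution repeats in the two .container files below.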

View File

@@ -5,7 +5,7 @@ containerID: 9223372036854775807
 containerType: KeyValueContainer
 metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
 layOutVersion: 1
-maxSizeGB: 5
+maxSize: 5368709120
 metadata: {OWNER: ozone, VOLUME: hdfs}
 state: OPEN
 checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f

View File

@@ -5,7 +5,7 @@ containerID: 9223372036854775807
 containerType: KeyValueContainer
 metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
 layOutVersion: 1
-maxSizeGB: 5
+maxSize: 5368709120
 metadata: {OWNER: ozone, VOLUME: hdfs}
 state: INVALID
 checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f

View File

@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.container;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.commons.codec.binary.Hex;
@@ -59,7 +60,8 @@ public final class ContainerTestHelper {
       ContainerTestHelper.class);
   private static Random r = new Random();
-  public static final int CONTAINER_MAX_SIZE_GB = 1;
+  public static final long CONTAINER_MAX_SIZE =
+      (long) StorageUnit.GB.toBytes(1);
   /**
    * Never constructed.
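StorageUnit.GB.toBytes returns a double, which is why this constant and the test call sites above all cast the result back to long. A minimal standalone sketch (class name is illustrative):

    import org.apache.hadoop.conf.StorageUnit;

    public class StorageUnitCastSketch {
      public static void main(String[] args) {
        double asDouble = StorageUnit.GB.toBytes(1); // 1.073741824E9
        long asLong = (long) asDouble;               // 1073741824, exact for 1 GB
        System.out.println(asLong);
      }
    }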

View File

@@ -110,7 +110,7 @@ private void createToDeleteBlocks(ContainerSet containerSet,
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
     long containerID = ContainerTestHelper.getTestContainerID();
     KeyValueContainerData data = new KeyValueContainerData(containerID,
-        ContainerTestHelper.CONTAINER_MAX_SIZE_GB);
+        ContainerTestHelper.CONTAINER_MAX_SIZE);
     Container container = new KeyValueContainer(data, conf);
     container.create(new VolumeSet(scmId, clusterID, conf),
         new RoundRobinVolumeChoosingPolicy(), scmId);

View File

@@ -75,8 +75,8 @@ public void testRandomChoosingPolicy() throws IOException {
     int numContainers = 10;
     for (int i = 0; i < numContainers; i++) {
-      KeyValueContainerData data = new KeyValueContainerData(new Long(i),
-          ContainerTestHelper.CONTAINER_MAX_SIZE_GB);
+      KeyValueContainerData data = new KeyValueContainerData(i,
+          ContainerTestHelper.CONTAINER_MAX_SIZE);
       KeyValueContainer container = new KeyValueContainer(data, conf);
       containerSet.addContainer(container);
       Assert.assertTrue(
@@ -128,8 +128,8 @@ public void testTopNOrderedChoosingPolicy() throws IOException {
     for (int i = 0; i <= numContainers; i++) {
       long containerId = RandomUtils.nextLong();
       KeyValueContainerData data =
-          new KeyValueContainerData(new Long(containerId),
-              ContainerTestHelper.CONTAINER_MAX_SIZE_GB);
+          new KeyValueContainerData(containerId,
+              ContainerTestHelper.CONTAINER_MAX_SIZE);
       if (i != numContainers) {
         int deletionBlocks = random.nextInt(numContainers) + 1;
         data.incrPendingDeletionBlocks(deletionBlocks);

View File

@@ -20,7 +20,6 @@
 import com.google.common.collect.Maps;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto
@@ -193,7 +192,7 @@ private long getTestContainerID() {
   private Container addContainer(ContainerSet containerSet, long containerID)
       throws IOException {
     KeyValueContainerData data = new KeyValueContainerData(containerID,
-        ContainerTestHelper.CONTAINER_MAX_SIZE_GB);
+        ContainerTestHelper.CONTAINER_MAX_SIZE);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
     KeyValueContainer container = new KeyValueContainer(data, conf);