HDDS-374. Support to configure container size in units lesser than GB. Contributed by Nanda kumar.

This commit is contained in:
Nanda kumar 2018-08-27 18:29:32 +05:30
parent 91836f0f81
commit 12b2f362cc
24 changed files with 79 additions and 66 deletions

View File

@ -185,7 +185,7 @@ private OzoneConsts() {
public static final String CONTAINER_TYPE = "containerType";
public static final String STATE = "state";
public static final String METADATA = "metadata";
public static final String MAX_SIZE_GB = "maxSizeGB";
public static final String MAX_SIZE = "maxSize";
public static final String METADATA_PATH = "metadataPath";
public static final String CHUNKS_PATH = "chunksPath";
public static final String CONTAINER_DB_TYPE = "containerDBType";

View File

@ -40,7 +40,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE;
import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION;
import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE_GB;
import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.METADATA;
import static org.apache.hadoop.ozone.OzoneConsts.STATE;
@ -67,7 +67,7 @@ public abstract class ContainerData {
// State of the Container
private ContainerLifeCycleState state;
private final int maxSizeGB;
private final long maxSize;
/** parameters for read/write statistics on the container. **/
private final AtomicLong readBytes;
@ -92,16 +92,16 @@ public abstract class ContainerData {
LAYOUTVERSION,
STATE,
METADATA,
MAX_SIZE_GB,
MAX_SIZE,
CHECKSUM));
/**
* Creates a ContainerData Object, which holds metadata of the container.
* @param type - ContainerType
* @param containerId - ContainerId
* @param size - container maximum size
* @param size - container maximum size in bytes
*/
protected ContainerData(ContainerType type, long containerId, int size) {
protected ContainerData(ContainerType type, long containerId, long size) {
this(type, containerId,
ChunkLayOutVersion.getLatestVersion().getVersion(), size);
}
@ -111,10 +111,10 @@ protected ContainerData(ContainerType type, long containerId, int size) {
* @param type - ContainerType
* @param containerId - ContainerId
* @param layOutVersion - Container layOutVersion
* @param size - Container maximum size in GB
* @param size - Container maximum size in bytes
*/
protected ContainerData(ContainerType type, long containerId,
int layOutVersion, int size) {
int layOutVersion, long size) {
Preconditions.checkNotNull(type);
this.containerType = type;
@ -128,7 +128,7 @@ protected ContainerData(ContainerType type, long containerId,
this.writeBytes = new AtomicLong(0L);
this.bytesUsed = new AtomicLong(0L);
this.keyCount = new AtomicLong(0L);
this.maxSizeGB = size;
this.maxSize = size;
setChecksumTo0ByteArray();
}
@ -171,11 +171,11 @@ public synchronized void setState(ContainerLifeCycleState state) {
}
/**
* Returns maximum size of the container in GB.
* @return maxSizeGB
* Returns maximum size of the container in bytes.
* @return maxSize in bytes
*/
public int getMaxSizeGB() {
return maxSizeGB;
public long getMaxSize() {
return maxSize;
}
/**

View File

@ -236,12 +236,11 @@ public Object construct(Node node) {
long layOutVersion = (long) nodes.get(OzoneConsts.LAYOUTVERSION);
int lv = (int) layOutVersion;
long size = (long) nodes.get(OzoneConsts.MAX_SIZE_GB);
int maxSize = (int) size;
long size = (long) nodes.get(OzoneConsts.MAX_SIZE);
//When a new field is added, it needs to be added here.
KeyValueContainerData kvData = new KeyValueContainerData(
(long) nodes.get(OzoneConsts.CONTAINER_ID), lv, maxSize);
(long) nodes.get(OzoneConsts.CONTAINER_ID), lv, size);
kvData.setContainerDBType((String)nodes.get(
OzoneConsts.CONTAINER_DB_TYPE));

View File

@ -21,12 +21,8 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerInfo;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerAction;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@ -166,7 +162,7 @@ private void sendCloseContainerActionIfNeeded(Container container) {
if (isOpen) {
ContainerData containerData = container.getContainerData();
double containerUsedPercentage = 1.0f * containerData.getBytesUsed() /
StorageUnit.GB.toBytes(containerData.getMaxSizeGB());
containerData.getMaxSize();
if (containerUsedPercentage >= containerCloseThreshold) {
ContainerAction action = ContainerAction.newBuilder()
.setContainerID(containerData.getContainerID())

View File

@ -109,7 +109,7 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy
File containerMetaDataPath = null;
//acquiring volumeset lock and container lock
volumeSet.acquireLock();
long maxSize = (containerData.getMaxSizeGB() * 1024L * 1024L * 1024L);
long maxSize = containerData.getMaxSize();
try {
HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
.getVolumesList(), maxSize);

View File

@ -84,9 +84,9 @@ public class KeyValueContainerData extends ContainerData {
/**
* Constructs KeyValueContainerData object.
* @param id - ContainerId
* @param size - maximum size in GB of the container
* @param size - maximum size of the container in bytes
*/
public KeyValueContainerData(long id, int size) {
public KeyValueContainerData(long id, long size) {
super(ContainerProtos.ContainerType.KeyValueContainer, id, size);
this.numPendingDeletionBlocks = new AtomicInteger(0);
this.deleteTransactionId = 0;
@ -96,9 +96,9 @@ public KeyValueContainerData(long id, int size) {
* Constructs KeyValueContainerData object.
* @param id - ContainerId
* @param layOutVersion
* @param size - maximum size in GB of the container
* @param size - maximum size of the container in bytes
*/
public KeyValueContainerData(long id, int layOutVersion, int size) {
public KeyValueContainerData(long id, int layOutVersion, long size) {
super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion,
size);
this.numPendingDeletionBlocks = new AtomicInteger(0);
@ -272,7 +272,7 @@ public static KeyValueContainerData getFromProtoBuf(
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
KeyValueContainerData data = new KeyValueContainerData(
protoData.getContainerID(),
(int)storageSize.getUnit().toBytes(storageSize.getValue()));
(long)storageSize.getUnit().toBytes(storageSize.getValue()));
for (int x = 0; x < protoData.getMetadataCount(); x++) {
data.addMetadata(protoData.getMetadata(x).getKey(),
protoData.getMetadata(x).getValue());

View File

@ -80,8 +80,6 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Result.CLOSED_CONTAINER_RETRY;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Result.CONTAINER_INTERNAL_ERROR;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@ -124,8 +122,8 @@ public class KeyValueHandler extends Handler {
private final KeyManager keyManager;
private final ChunkManager chunkManager;
private final BlockDeletingService blockDeletingService;
private VolumeChoosingPolicy volumeChoosingPolicy;
private final int maxContainerSizeGB;
private final VolumeChoosingPolicy volumeChoosingPolicy;
private final long maxContainerSize;
private final AutoCloseableLock handlerLock;
private final OpenContainerBlockMap openContainerBlockMap;
@ -150,9 +148,9 @@ public KeyValueHandler(Configuration config, ContainerSet contSet,
volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass(
HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy
.class, VolumeChoosingPolicy.class), conf);
maxContainerSizeGB = (int)config.getStorageSize(
maxContainerSize = (long)config.getStorageSize(
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.GB);
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
// this handler lock is used for synchronizing createContainer Requests,
// so using a fair lock here.
handlerLock = new AutoCloseableLock(new ReentrantLock(true));
@ -215,9 +213,10 @@ public ContainerCommandResponseProto handle(
return handleGetSmallFile(request, kvContainer);
case GetCommittedBlockLength:
return handleGetCommittedBlockLength(request, kvContainer);
}
default:
return null;
}
}
@VisibleForTesting
public ChunkManager getChunkManager() {
@ -247,7 +246,7 @@ ContainerCommandResponseProto handleCreateContainer(
long containerID = request.getContainerID();
KeyValueContainerData newContainerData = new KeyValueContainerData(
containerID, maxContainerSizeGB);
containerID, maxContainerSize);
// TODO: Add support to add metadataList to ContainerData. Add metadata
// to container during creation.
KeyValueContainer newContainer = new KeyValueContainer(

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.common;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.junit.Test;
@ -31,7 +32,7 @@
*/
public class TestKeyValueContainerData {
private static final int MAXSIZE = 5;
private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
@Test
public void testKeyValueData() {
long containerId = 1L;
@ -58,7 +59,7 @@ public void testKeyValueData() {
assertEquals(val.get(), kvData.getWriteCount());
assertEquals(val.get(), kvData.getKeyCount());
assertEquals(val.get(), kvData.getNumPendingDeletionBlocks());
assertEquals(MAXSIZE, kvData.getMaxSizeGB());
assertEquals(MAXSIZE, kvData.getMaxSize());
kvData.setState(state);
kvData.setContainerDBType(containerDBType);

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.common.impl;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@ -42,7 +43,7 @@ public class TestContainerDataYaml {
private static String testRoot = new FileSystemTestHelper().getTestRootDir();
private static final int MAXSIZE = 5;
private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
/**
* Creates a .container file. cleanup() should be called at the end of the
@ -94,7 +95,7 @@ public void testCreateContainerFile() throws IOException {
.getState());
assertEquals(1, kvData.getLayOutVersion());
assertEquals(0, kvData.getMetadata().size());
assertEquals(MAXSIZE, kvData.getMaxSizeGB());
assertEquals(MAXSIZE, kvData.getMaxSize());
// Update ContainerData.
kvData.addMetadata("VOLUME", "hdfs");
@ -122,7 +123,7 @@ public void testCreateContainerFile() throws IOException {
assertEquals(2, kvData.getMetadata().size());
assertEquals("hdfs", kvData.getMetadata().get("VOLUME"));
assertEquals("ozone", kvData.getMetadata().get("OWNER"));
assertEquals(MAXSIZE, kvData.getMaxSizeGB());
assertEquals(MAXSIZE, kvData.getMaxSize());
}
@Test

View File

@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.container.common.impl;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto
@ -53,7 +54,8 @@ public void testAddGetRemoveContainer() throws StorageContainerException {
ContainerProtos.ContainerLifeCycleState state = ContainerProtos
.ContainerLifeCycleState.CLOSED;
KeyValueContainerData kvData = new KeyValueContainerData(containerId, 5);
KeyValueContainerData kvData = new KeyValueContainerData(containerId,
(long) StorageUnit.GB.toBytes(5));
kvData.setState(state);
KeyValueContainer keyValueContainer = new KeyValueContainer(kvData, new
OzoneConfiguration());
@ -163,7 +165,8 @@ public void testListContainer() throws StorageContainerException {
private ContainerSet createContainerSet() throws StorageContainerException {
ContainerSet containerSet = new ContainerSet();
for (int i=0; i<10; i++) {
KeyValueContainerData kvData = new KeyValueContainerData(i, 5);
KeyValueContainerData kvData = new KeyValueContainerData(i,
(long) StorageUnit.GB.toBytes(5));
if (i%2 == 0) {
kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
} else {

View File

@ -70,7 +70,8 @@ public void testContainerCloseActionWhenFull() throws IOException {
ContainerSet containerSet = new ContainerSet();
VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
StateContext context = Mockito.mock(StateContext.class);
KeyValueContainerData containerData = new KeyValueContainerData(1L, 1);
KeyValueContainerData containerData = new KeyValueContainerData(1L,
(long) StorageUnit.GB.toBytes(1));
Container container = new KeyValueContainer(containerData, conf);
container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
scmId.toString());

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.keyvalue;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@ -79,7 +80,8 @@ public void setUp() throws Exception {
Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
.thenReturn(hddsVolume);
keyValueContainerData = new KeyValueContainerData(1L, 5);
keyValueContainerData = new KeyValueContainerData(1L,
(long) StorageUnit.GB.toBytes(5));
keyValueContainer = new KeyValueContainer(keyValueContainerData, config);

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.keyvalue;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@ -79,7 +80,8 @@ public void setUp() throws Exception {
Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
.thenReturn(hddsVolume);
keyValueContainerData = new KeyValueContainerData(1L, 5);
keyValueContainerData = new KeyValueContainerData(1L,
(long) StorageUnit.GB.toBytes(5));
keyValueContainer = new KeyValueContainer(
keyValueContainerData, config);

View File

@ -20,6 +20,7 @@
import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@ -244,7 +245,8 @@ public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws
private void createContainerWithBlocks(long containerId, int
normalBlocks, int deletedBlocks) throws
Exception {
containerData = new KeyValueContainerData(containerId, 1);
containerData = new KeyValueContainerData(containerId,
(long) StorageUnit.GB.toBytes(1));
container = new KeyValueContainer(containerData, conf);
container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
.randomUUID().toString());

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.container.keyvalue;
import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@ -94,7 +95,8 @@ public void setUp() throws Exception {
Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
.thenReturn(hddsVolume);
keyValueContainerData = new KeyValueContainerData(1L, 5);
keyValueContainerData = new KeyValueContainerData(1L,
(long) StorageUnit.GB.toBytes(5));
keyValueContainer = new KeyValueContainer(
keyValueContainerData, conf);
@ -103,7 +105,8 @@ public void setUp() throws Exception {
@Test
public void testBlockIterator() throws Exception{
keyValueContainerData = new KeyValueContainerData(100L, 1);
keyValueContainerData = new KeyValueContainerData(100L,
(long) StorageUnit.GB.toBytes(1));
keyValueContainer = new KeyValueContainer(
keyValueContainerData, conf);
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
@ -213,7 +216,7 @@ public void testContainerImportExport() throws Exception {
//create a new one
KeyValueContainerData containerData =
new KeyValueContainerData(containerId, 1,
keyValueContainerData.getMaxSizeGB());
keyValueContainerData.getMaxSize());
KeyValueContainer container = new KeyValueContainer(containerData, conf);
HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
@ -234,8 +237,8 @@ public void testContainerImportExport() throws Exception {
containerData.getKeyCount());
Assert.assertEquals(keyValueContainerData.getLayOutVersion(),
containerData.getLayOutVersion());
Assert.assertEquals(keyValueContainerData.getMaxSizeGB(),
containerData.getMaxSizeGB());
Assert.assertEquals(keyValueContainerData.getMaxSize(),
containerData.getMaxSize());
Assert.assertEquals(keyValueContainerData.getBytesUsed(),
containerData.getBytesUsed());

View File

@ -20,6 +20,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@ -262,7 +263,8 @@ private ContainerCommandRequestProto getDummyCommandRequestProto(
public void testCloseInvalidContainer() {
long containerID = 1234L;
Configuration conf = new Configuration();
KeyValueContainerData kvData = new KeyValueContainerData(containerID, 1);
KeyValueContainerData kvData = new KeyValueContainerData(containerID,
(long) StorageUnit.GB.toBytes(1));
KeyValueContainer container = new KeyValueContainer(kvData, conf);
kvData.setState(ContainerProtos.ContainerLifeCycleState.INVALID);

View File

@ -19,9 +19,9 @@
package org.apache.hadoop.ozone.container.ozoneimpl;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@ -77,7 +77,8 @@ public void testBuildContainerMap() throws Exception {
// Add containers to disk
for (int i=0; i<10; i++) {
keyValueContainerData = new KeyValueContainerData(i, 1);
keyValueContainerData = new KeyValueContainerData(i,
(long) StorageUnit.GB.toBytes(1));
keyValueContainer = new KeyValueContainer(
keyValueContainerData, conf);
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);

View File

@ -5,8 +5,8 @@ containerID: 9223372036854775807
containerType: KeyValueContainer
metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
layOutVersion: 1
maxSizeGB: 5
maxSize: 5368709120
metadata: {OWNER: ozone, VOLUME: hdfs}
state: CLOSED
aclEnabled: true
checksum: 1bbff32aeaa8fadc0b80c5c1e0597036e96acd8ae4bddbed188a2162762251a2
checksum: c5b5373b8755c4e7199478dcaded9d996f9aca089704e08950259cdb0f290680

View File

@ -5,7 +5,7 @@ containerID: 9223372036854775807
containerType: KeyValueContainer
metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
layOutVersion: 1
maxSizeGB: 5
maxSize: 5368709120
metadata: {OWNER: ozone, VOLUME: hdfs}
state: OPEN
checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f

View File

@ -5,7 +5,7 @@ containerID: 9223372036854775807
containerType: KeyValueContainer
metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1
layOutVersion: 1
maxSizeGB: 5
maxSize: 5368709120
metadata: {OWNER: ozone, VOLUME: hdfs}
state: INVALID
checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f

View File

@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.container;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
import org.apache.ratis.shaded.com.google.protobuf.ByteString;
import org.apache.commons.codec.binary.Hex;
@ -59,7 +60,8 @@ public final class ContainerTestHelper {
ContainerTestHelper.class);
private static Random r = new Random();
public static final int CONTAINER_MAX_SIZE_GB = 1;
public static final long CONTAINER_MAX_SIZE =
(long) StorageUnit.GB.toBytes(1);
/**
* Never constructed.

View File

@ -110,7 +110,7 @@ private void createToDeleteBlocks(ContainerSet containerSet,
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
long containerID = ContainerTestHelper.getTestContainerID();
KeyValueContainerData data = new KeyValueContainerData(containerID,
ContainerTestHelper.CONTAINER_MAX_SIZE_GB);
ContainerTestHelper.CONTAINER_MAX_SIZE);
Container container = new KeyValueContainer(data, conf);
container.create(new VolumeSet(scmId, clusterID, conf),
new RoundRobinVolumeChoosingPolicy(), scmId);

View File

@ -75,8 +75,8 @@ public void testRandomChoosingPolicy() throws IOException {
int numContainers = 10;
for (int i = 0; i < numContainers; i++) {
KeyValueContainerData data = new KeyValueContainerData(new Long(i),
ContainerTestHelper.CONTAINER_MAX_SIZE_GB);
KeyValueContainerData data = new KeyValueContainerData(i,
ContainerTestHelper.CONTAINER_MAX_SIZE);
KeyValueContainer container = new KeyValueContainer(data, conf);
containerSet.addContainer(container);
Assert.assertTrue(
@ -128,8 +128,8 @@ public void testTopNOrderedChoosingPolicy() throws IOException {
for (int i = 0; i <= numContainers; i++) {
long containerId = RandomUtils.nextLong();
KeyValueContainerData data =
new KeyValueContainerData(new Long(containerId),
ContainerTestHelper.CONTAINER_MAX_SIZE_GB);
new KeyValueContainerData(containerId,
ContainerTestHelper.CONTAINER_MAX_SIZE);
if (i != numContainers) {
int deletionBlocks = random.nextInt(numContainers) + 1;
data.incrPendingDeletionBlocks(deletionBlocks);

View File

@ -20,7 +20,6 @@
import com.google.common.collect.Maps;
import org.apache.commons.codec.binary.Hex;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto
@ -193,7 +192,7 @@ private long getTestContainerID() {
private Container addContainer(ContainerSet containerSet, long containerID)
throws IOException {
KeyValueContainerData data = new KeyValueContainerData(containerID,
ContainerTestHelper.CONTAINER_MAX_SIZE_GB);
ContainerTestHelper.CONTAINER_MAX_SIZE);
data.addMetadata("VOLUME", "shire");
data.addMetadata("owner)", "bilbo");
KeyValueContainer container = new KeyValueContainer(data, conf);