From 869a1ab41a7c817e3f5f9bb5c74a93b68e5d2af4 Mon Sep 17 00:00:00 2001
From: sdeka
Date: Sat, 18 May 2019 16:16:33 +0530
Subject: [PATCH] HDDS-1535. Space tracking for Open Containers : Handle Node
 Startup. Contributed by Supratim Deka
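
On datanode startup, ContainerReader rebuilds the container map from the
.container files found on each volume. The space a volume commits for an
open container is derived from that container's usedBytes, so containers
found in the OPEN state now have usedBytes recomputed by iterating the
block metadata in the container DB and summing the chunk lengths of each
block. TestOzoneContainer is extended to create containers with blocks,
bring up an OzoneContainer over the same volumes, and verify that each
volume's committed bytes equal the sum of (maxSize - usedBytes) across
the open containers placed on it.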
" + @@ -215,4 +224,26 @@ public void verifyContainerData(ContainerData containerData) ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE); } } + + private void initializeUsedBytes(KeyValueContainer container) throws IOException { + KeyValueBlockIterator blockIter = new KeyValueBlockIterator( + container.getContainerData().getContainerID(), + new File(container.getContainerData().getContainerPath())); + long usedBytes = 0; + + while (blockIter.hasNext()) { + BlockData block = blockIter.nextBlock(); + long blockLen = 0; + + List chunkInfoList = block.getChunks(); + for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { + ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); + blockLen += info.getLen(); + } + + usedBytes += blockLen; + } + + container.getContainerData().setBytesUsed(usedBytes); + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 003f26e7a4..198885d0e6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -19,11 +19,17 @@ package org.apache.hadoop.ozone.container.ozoneimpl; +import com.google.common.base.Preconditions; +import com.google.common.primitives.Longs; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -32,6 +38,8 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.utils.MetadataStore; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -40,7 +48,9 @@ import java.util.Random; import java.util.UUID; - +import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; import static org.junit.Assert.assertEquals; @@ -52,7 +62,6 @@ public class TestOzoneContainer { @Rule public TemporaryFolder folder = new TemporaryFolder(); - private OzoneConfiguration conf; private String scmId = UUID.randomUUID().toString(); private VolumeSet volumeSet; @@ -60,6 +69,8 @@ public class TestOzoneContainer { private KeyValueContainerData keyValueContainerData; private KeyValueContainer keyValueContainer; private final DatanodeDetails datanodeDetails = createDatanodeDetails(); + private HashMap commitSpaceMap; //RootDir -> committed space + private final int NUM_TEST_CONTAINERS = 10; @Before public void setUp() throws Exception { @@ -68,6 +79,7 @@ public void setUp() throws Exception { .getAbsolutePath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, 
+    KeyValueBlockIterator blockIter = new KeyValueBlockIterator(
+        container.getContainerData().getContainerID(),
+        new File(container.getContainerData().getContainerPath()));
+    long usedBytes = 0;
+
+    while (blockIter.hasNext()) {
+      BlockData block = blockIter.nextBlock();
+      long blockLen = 0;
+
+      List<ContainerProtos.ChunkInfo> chunkInfoList = block.getChunks();
+      for (ContainerProtos.ChunkInfo chunk : chunkInfoList) {
+        ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk);
+        blockLen += info.getLen();
+      }
+
+      usedBytes += blockLen;
+    }
+
+    container.getContainerData().setBytesUsed(usedBytes);
+  }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 003f26e7a4..198885d0e6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -19,11 +19,17 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -32,6 +38,8 @@
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.utils.MetadataStore;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -40,7 +48,9 @@
 
 import java.util.Random;
 import java.util.UUID;
-
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
 import static org.junit.Assert.assertEquals;
@@ -52,7 +62,6 @@ public class TestOzoneContainer {
 
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
-
   private OzoneConfiguration conf;
   private String scmId = UUID.randomUUID().toString();
   private VolumeSet volumeSet;
@@ -60,6 +69,8 @@ public class TestOzoneContainer {
   private KeyValueContainerData keyValueContainerData;
   private KeyValueContainer keyValueContainer;
   private final DatanodeDetails datanodeDetails = createDatanodeDetails();
+  private HashMap<String, Long> commitSpaceMap; //RootDir -> committed space
+  private final int NUM_TEST_CONTAINERS = 10;
 
   @Before
   public void setUp() throws Exception {
@@ -68,6 +79,7 @@ public void setUp() throws Exception {
         .getAbsolutePath());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
         folder.newFolder().getAbsolutePath());
+    commitSpaceMap = new HashMap<>();
   }
 
   @Test
@@ -78,16 +90,32 @@ public void testBuildContainerMap() throws Exception {
     // Format the volumes
     for (HddsVolume volume : volumeSet.getVolumesList()) {
       volume.format(UUID.randomUUID().toString());
+      commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0));
     }
 
     // Add containers to disk
-    for (int i=0; i<10; i++) {
+    for (int i = 0; i < NUM_TEST_CONTAINERS; i++) {
+      long freeBytes = 0;
+      long volCommitBytes;
+      long maxCap = (long) StorageUnit.GB.toBytes(1);
+
+      HddsVolume myVolume;
+
       keyValueContainerData = new KeyValueContainerData(i,
-          (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
+          maxCap, UUID.randomUUID().toString(),
           datanodeDetails.getUuidString());
       keyValueContainer = new KeyValueContainer(
          keyValueContainerData, conf);
       keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+      myVolume = keyValueContainer.getContainerData().getVolume();
+
+      freeBytes = addBlocks(keyValueContainer, 2, 3);
+
+      // update our expectation of volume committed space in the map
+      volCommitBytes = commitSpaceMap.get(getVolumeKey(myVolume)).longValue();
+      Preconditions.checkState(freeBytes >= 0);
+      commitSpaceMap.put(getVolumeKey(myVolume),
+          Long.valueOf(volCommitBytes + freeBytes));
     }
 
     DatanodeStateMachine stateMachine = Mockito.mock(
@@ -97,12 +125,65 @@ public void testBuildContainerMap() throws Exception {
     Mockito.when(context.getParent()).thenReturn(stateMachine);
     // When OzoneContainer is started, the containers from disk should be
     // loaded into the containerSet.
+    // Also expected to initialize committed space for each volume.
     OzoneContainer ozoneContainer = new
         OzoneContainer(datanodeDetails, conf, context, null);
+
     ContainerSet containerset = ozoneContainer.getContainerSet();
-    assertEquals(10, containerset.containerCount());
+    assertEquals(NUM_TEST_CONTAINERS, containerset.containerCount());
+
+    verifyCommittedSpace(ozoneContainer);
   }
 
+  // verify committed space on each volume
+  private void verifyCommittedSpace(OzoneContainer oc) {
+    for (HddsVolume dnVol : oc.getVolumeSet().getVolumesList()) {
+      String key = getVolumeKey(dnVol);
+      long expectedCommit = commitSpaceMap.get(key).longValue();
+      long volumeCommitted = dnVol.getCommittedBytes();
+      assertEquals("Volume committed space not initialized correctly",
+          expectedCommit, volumeCommitted);
+    }
+  }
+
+  private long addBlocks(KeyValueContainer container,
+      int blocks, int chunksPerBlock) throws Exception {
+    String strBlock = "block";
+    String strChunk = "-chunkFile";
+    int datalen = 65536;
+    long usedBytes = 0;
+
+    long freeBytes = container.getContainerData().getMaxSize();
+    long containerId = container.getContainerData().getContainerID();
+    MetadataStore metadataStore = BlockUtils.getDB(container
+        .getContainerData(), conf);
+
+    for (int bi = 0; bi < blocks; bi++) {
+      // Creating BlockData
+      BlockID blockID = new BlockID(containerId, bi);
+      BlockData blockData = new BlockData(blockID);
+      List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
+
+      for (int ci = 0; ci < chunksPerBlock; ci++) {
+        String chunkName = strBlock + bi + strChunk + ci;
+        long offset = ci * (long) datalen;
+        ChunkInfo info = new ChunkInfo(chunkName, offset, datalen);
+        usedBytes += datalen;
+        chunkList.add(info.getProtoBufMessage());
+      }
+      blockData.setChunks(chunkList);
+      metadataStore.put(Longs.toByteArray(blockID.getLocalID()),
+          blockData.getProtoBufMessage().toByteArray());
+    }
+
+    // remaining available capacity of the container
+    return (freeBytes - usedBytes);
+  }
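+
+  // Volumes are keyed by the path of their HDDS root directory, matching
+  // the keys used to build commitSpaceMap in testBuildContainerMap.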
+  private String getVolumeKey(HddsVolume volume) {
+    return volume.getHddsRootDir().getPath();
+  }
 
   private DatanodeDetails createDatanodeDetails() {
     Random random = new Random();