From 869a1ab41a7c817e3f5f9bb5c74a93b68e5d2af4 Mon Sep 17 00:00:00 2001 From: sdeka Date: Sat, 18 May 2019 16:16:33 +0530 Subject: [PATCH 1/5] HDDS-1535. Space tracking for Open Containers : Handle Node Startup. Contributed by Supratim Deka --- .../container/ozoneimpl/ContainerReader.java | 31 +++++++ .../ozoneimpl/TestOzoneContainer.java | 91 ++++++++++++++++++- 2 files changed, 117 insertions(+), 5 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 0192fd5dd1..d704bb7b5b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -27,11 +27,14 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Storage; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; @@ -45,6 +48,7 @@ import java.io.File; import java.io.FileFilter; import java.io.IOException; +import java.util.List; /** * Class used to read .container files from Volume and build container map. @@ -201,6 +205,11 @@ public void verifyContainerData(ContainerData containerData) kvContainerData .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); } + if (kvContainer.getContainerState() + == ContainerProtos.ContainerDataProto.State.OPEN) { + // commitSpace for Open Containers relies on usedBytes + initializeUsedBytes(kvContainer); + } containerSet.addContainer(kvContainer); } else { throw new StorageContainerException("Container File is corrupted. 
" + @@ -215,4 +224,26 @@ public void verifyContainerData(ContainerData containerData) ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE); } } + + private void initializeUsedBytes(KeyValueContainer container) throws IOException { + KeyValueBlockIterator blockIter = new KeyValueBlockIterator( + container.getContainerData().getContainerID(), + new File(container.getContainerData().getContainerPath())); + long usedBytes = 0; + + while (blockIter.hasNext()) { + BlockData block = blockIter.nextBlock(); + long blockLen = 0; + + List chunkInfoList = block.getChunks(); + for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { + ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); + blockLen += info.getLen(); + } + + usedBytes += blockLen; + } + + container.getContainerData().setBytesUsed(usedBytes); + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 003f26e7a4..198885d0e6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -19,11 +19,17 @@ package org.apache.hadoop.ozone.container.ozoneimpl; +import com.google.common.base.Preconditions; +import com.google.common.primitives.Longs; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -32,6 +38,8 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.utils.MetadataStore; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -40,7 +48,9 @@ import java.util.Random; import java.util.UUID; - +import java.util.HashMap; +import java.util.List; +import java.util.ArrayList; import static org.junit.Assert.assertEquals; @@ -52,7 +62,6 @@ public class TestOzoneContainer { @Rule public TemporaryFolder folder = new TemporaryFolder(); - private OzoneConfiguration conf; private String scmId = UUID.randomUUID().toString(); private VolumeSet volumeSet; @@ -60,6 +69,8 @@ public class TestOzoneContainer { private KeyValueContainerData keyValueContainerData; private KeyValueContainer keyValueContainer; private final DatanodeDetails datanodeDetails = createDatanodeDetails(); + private HashMap commitSpaceMap; //RootDir -> committed space + private final int NUM_TEST_CONTAINERS = 10; @Before public void setUp() throws Exception { @@ -68,6 +79,7 @@ public void setUp() throws Exception { .getAbsolutePath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, 
folder.newFolder().getAbsolutePath()); + commitSpaceMap = new HashMap(); } @Test @@ -78,16 +90,32 @@ public void testBuildContainerMap() throws Exception { // Format the volumes for (HddsVolume volume : volumeSet.getVolumesList()) { volume.format(UUID.randomUUID().toString()); + commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0)); } // Add containers to disk - for (int i=0; i<10; i++) { + for (int i=0; i < NUM_TEST_CONTAINERS; i++) { + long freeBytes = 0; + long volCommitBytes; + long maxCap = (long) StorageUnit.GB.toBytes(1); + + HddsVolume myVolume; + keyValueContainerData = new KeyValueContainerData(i, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), + maxCap, UUID.randomUUID().toString(), datanodeDetails.getUuidString()); keyValueContainer = new KeyValueContainer( keyValueContainerData, conf); keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); + myVolume = keyValueContainer.getContainerData().getVolume(); + + freeBytes = addBlocks(keyValueContainer, 2, 3); + + // update our expectation of volume committed space in the map + volCommitBytes = commitSpaceMap.get(getVolumeKey(myVolume)).longValue(); + Preconditions.checkState(freeBytes >= 0); + commitSpaceMap.put(getVolumeKey(myVolume), + Long.valueOf(volCommitBytes + freeBytes)); } DatanodeStateMachine stateMachine = Mockito.mock( @@ -97,12 +125,65 @@ public void testBuildContainerMap() throws Exception { Mockito.when(context.getParent()).thenReturn(stateMachine); // When OzoneContainer is started, the containers from disk should be // loaded into the containerSet. + // Also expected to initialize committed space for each volume. OzoneContainer ozoneContainer = new OzoneContainer(datanodeDetails, conf, context, null); + ContainerSet containerset = ozoneContainer.getContainerSet(); - assertEquals(10, containerset.containerCount()); + assertEquals(NUM_TEST_CONTAINERS, containerset.containerCount()); + + verifyCommittedSpace(ozoneContainer); } + //verify committed space on each volume + private void verifyCommittedSpace(OzoneContainer oc) { + for (HddsVolume dnVol : oc.getVolumeSet().getVolumesList()) { + String key = getVolumeKey(dnVol); + long expectedCommit = commitSpaceMap.get(key).longValue(); + long volumeCommitted = dnVol.getCommittedBytes(); + assertEquals("Volume committed space not initialized correctly", + expectedCommit, volumeCommitted); + } + } + + private long addBlocks(KeyValueContainer container, + int blocks, int chunksPerBlock) throws Exception { + String strBlock = "block"; + String strChunk = "-chunkFile"; + int datalen = 65536; + long usedBytes = 0; + + long freeBytes = container.getContainerData().getMaxSize(); + long containerId = container.getContainerData().getContainerID(); + MetadataStore metadataStore = BlockUtils.getDB(container + .getContainerData(), conf); + + for (int bi = 0; bi < blocks; bi++) { + // Creating BlockData + BlockID blockID = new BlockID(containerId, bi); + BlockData blockData = new BlockData(blockID); + List chunkList = new ArrayList<>(); + + chunkList.clear(); + for (int ci = 0; ci < chunksPerBlock; ci++) { + String chunkName = strBlock + bi + strChunk + ci; + long offset = ci * datalen; + ChunkInfo info = new ChunkInfo(chunkName, offset, datalen); + usedBytes += datalen; + chunkList.add(info.getProtoBufMessage()); + } + blockData.setChunks(chunkList); + metadataStore.put(Longs.toByteArray(blockID.getLocalID()), + blockData.getProtoBufMessage().toByteArray()); + } + + // remaining available capacity of the container + return (freeBytes - usedBytes); + } + 
+ private String getVolumeKey(HddsVolume volume) { + return volume.getHddsRootDir().getPath(); + } private DatanodeDetails createDatanodeDetails() { Random random = new Random(); From 64c39856d2d3af762ab508c68057ce6eb654bd75 Mon Sep 17 00:00:00 2001 From: sdeka Date: Mon, 20 May 2019 08:13:37 +0530 Subject: [PATCH 2/5] Fixed checkstyle issues. --- .../hadoop/ozone/container/ozoneimpl/ContainerReader.java | 3 ++- .../ozone/container/ozoneimpl/TestOzoneContainer.java | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index d704bb7b5b..08a8f5d47f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -225,7 +225,8 @@ public void verifyContainerData(ContainerData containerData) } } - private void initializeUsedBytes(KeyValueContainer container) throws IOException { + private void initializeUsedBytes(KeyValueContainer container) + throws IOException { KeyValueBlockIterator blockIter = new KeyValueBlockIterator( container.getContainerData().getContainerID(), new File(container.getContainerData().getContainerPath())); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 198885d0e6..7cdb692597 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -70,7 +70,7 @@ public class TestOzoneContainer { private KeyValueContainer keyValueContainer; private final DatanodeDetails datanodeDetails = createDatanodeDetails(); private HashMap commitSpaceMap; //RootDir -> committed space - private final int NUM_TEST_CONTAINERS = 10; + private final int numTestContainers = 10; @Before public void setUp() throws Exception { @@ -94,7 +94,7 @@ public void testBuildContainerMap() throws Exception { } // Add containers to disk - for (int i=0; i < NUM_TEST_CONTAINERS; i++) { + for (int i = 0; i < numTestContainers; i++) { long freeBytes = 0; long volCommitBytes; long maxCap = (long) StorageUnit.GB.toBytes(1); @@ -130,7 +130,7 @@ public void testBuildContainerMap() throws Exception { OzoneContainer(datanodeDetails, conf, context, null); ContainerSet containerset = ozoneContainer.getContainerSet(); - assertEquals(NUM_TEST_CONTAINERS, containerset.containerCount()); + assertEquals(numTestContainers, containerset.containerCount()); verifyCommittedSpace(ozoneContainer); } From 456bb8a3e01bb41b02cc64a5cff149446668afab Mon Sep 17 00:00:00 2001 From: sdeka Date: Wed, 22 May 2019 09:34:21 +0530 Subject: [PATCH 3/5] verifyContainerData also does fixup, renamed. 
Added a Javadoc comment, both as per review discussion --- .../ozone/container/ozoneimpl/ContainerReader.java | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 08a8f5d47f..448a02f5e0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -165,14 +165,21 @@ private void verifyContainerFile(long containerID, File containerFile) { "Skipping loading of this container.", containerFile); return; } - verifyContainerData(containerData); + verifyAndFixupContainerData(containerData); } catch (IOException ex) { LOG.error("Failed to parse ContainerFile for ContainerID: {}", containerID, ex); } } - public void verifyContainerData(ContainerData containerData) + /** + * verify ContainerData loaded from disk and fix-up stale members. + * Specifically blockCommitSequenceId, delete related metadata + * and bytesUsed + * @param containerData + * @throws IOException + */ + public void verifyAndFixupContainerData(ContainerData containerData) throws IOException { switch (containerData.getContainerType()) { case KeyValueContainer: From 72bef0f6cb58bf2f237be9d65f0d1f62b08a4524 Mon Sep 17 00:00:00 2001 From: sdeka Date: Thu, 23 May 2019 11:36:40 +0530 Subject: [PATCH 4/5] fixed merge error. adapted to new signature of BlockUtils getDB --- .../ozone/container/ozoneimpl/TestOzoneContainer.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 7cdb692597..6c089021cb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -33,13 +33,13 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.utils.MetadataStore; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -155,7 +155,7 @@ private long addBlocks(KeyValueContainer container, long freeBytes = container.getContainerData().getMaxSize(); long containerId = container.getContainerData().getContainerID(); - MetadataStore metadataStore = BlockUtils.getDB(container + ContainerCache.ReferenceCountedDB db = BlockUtils.getDB(container .getContainerData(), conf); for (int bi = 0; bi < blocks; bi++) { 
@@ -173,7 +173,7 @@ private long addBlocks(KeyValueContainer container, chunkList.add(info.getProtoBufMessage()); } blockData.setChunks(chunkList); - metadataStore.put(Longs.toByteArray(blockID.getLocalID()), + db.getStore().put(Longs.toByteArray(blockID.getLocalID()), blockData.getProtoBufMessage().toByteArray()); } From ca93760504487f4e1a821585d5481b235ba9aaba Mon Sep 17 00:00:00 2001 From: sdeka Date: Thu, 23 May 2019 20:49:52 +0530 Subject: [PATCH 5/5] fixed checkstyle issue post merge --- .../hadoop/ozone/container/ozoneimpl/ContainerReader.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 2d14341653..37b726ddbb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -215,7 +215,7 @@ public void verifyAndFixupContainerData(ContainerData containerData) .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); } if (kvContainer.getContainerState() - == ContainerProtos.ContainerDataProto.State.OPEN) { + == ContainerProtos.ContainerDataProto.State.OPEN) { // commitSpace for Open Containers relies on usedBytes initializeUsedBytes(kvContainer); }
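For readers following the series, the sketch below is a simplified, self-contained model of the behaviour it adds: on datanode startup, ContainerReader recomputes usedBytes for each container found in the OPEN state by summing the chunk lengths recorded in its block metadata, and the test then expects every volume's committed bytes to equal the unwritten remainder (maxSize - usedBytes) of the open containers it hosts. The class and method names here (StartupSpaceSketch, recomputeUsedBytes, committedBytesForContainer) are hypothetical stand-ins for illustration; they are not the Ozone APIs the patch itself uses (KeyValueBlockIterator, ChunkInfo, HddsVolume#getCommittedBytes).

import java.util.Arrays;
import java.util.List;

/**
 * Hypothetical, simplified model of the space bookkeeping that HDDS-1535
 * performs at datanode startup for OPEN containers. Plain Java only; the
 * real patch walks the container's block metadata via KeyValueBlockIterator.
 */
public final class StartupSpaceSketch {

  /**
   * Mirrors initializeUsedBytes(): usedBytes is the sum of every chunk
   * length recorded in the container's block metadata.
   */
  static long recomputeUsedBytes(List<List<Long>> blocksWithChunkLengths) {
    long usedBytes = 0;
    for (List<Long> chunkLengths : blocksWithChunkLengths) {
      for (long len : chunkLengths) {
        usedBytes += len;
      }
    }
    return usedBytes;
  }

  /**
   * Mirrors what verifyCommittedSpace() asserts per open container: the
   * committed (reserved but not yet written) space is maxSize - usedBytes.
   */
  static long committedBytesForContainer(long maxSize,
      List<List<Long>> blocksWithChunkLengths) {
    return maxSize - recomputeUsedBytes(blocksWithChunkLengths);
  }

  public static void main(String[] args) {
    // Same shape as addBlocks(keyValueContainer, 2, 3) in the test:
    // 2 blocks, each with 3 chunks of 65536 bytes.
    long chunkLen = 65536L;
    List<Long> threeChunks = Arrays.asList(chunkLen, chunkLen, chunkLen);
    List<List<Long>> blocks = Arrays.asList(threeChunks, threeChunks);

    long maxSize = 1024L * 1024 * 1024;          // 1 GB container, as in the test
    long used = recomputeUsedBytes(blocks);      // 6 * 65536 = 393216
    long committed = committedBytesForContainer(maxSize, blocks);

    System.out.println("usedBytes = " + used);
    System.out.println("committedBytes = " + committed);
  }
}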