HDDS-312. Add blockIterator to Container. Contributed by Bharat Viswanadham.

Xiaoyu Yao 2018-08-02 16:48:21 -07:00
parent 889df6f194
commit 40ab8ee597
3 changed files with 69 additions and 5 deletions

Container.java

@@ -29,6 +29,7 @@
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import java.io.File;
import java.io.IOException;
import java.util.Map;
@@ -103,4 +104,11 @@ void update(Map<String, String> metaData, boolean forceUpdate)
*/
void updateDeleteTransactionId(long deleteTransactionId);
/**
* Returns a block iterator over the blocks in this container.
* @return BlockIterator for the container
* @throws IOException in case of I/O error while opening the iterator
*/
BlockIterator blockIterator() throws IOException;
}
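
For context, a minimal usage sketch of the new method. It relies only on the hasNext()/nextBlock() calls exercised in the test below; the container reference and its setup are assumptions, not part of this commit:

// Minimal sketch: iterate over every block recorded in a container.
// 'kvContainer' is assumed to be an already-created KeyValueContainer
// (see the implementation and test that follow).
KeyValueBlockIterator blockIterator = kvContainer.blockIterator();
while (blockIterator.hasNext()) {
  KeyData keyData = blockIterator.nextBlock();
  System.out.println("Found block " + keyData.getBlockID().getLocalID());
}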

KeyValueContainer.java

@@ -346,6 +346,12 @@ public void updateDeleteTransactionId(long deleteTransactionId) {
containerData.updateDeleteTransactionId(deleteTransactionId);
}
@Override
public KeyValueBlockIterator blockIterator() throws IOException {
return new KeyValueBlockIterator(containerData.getContainerID(), new File(
containerData.getContainerPath()));
}
/**
* Acquire read lock.
*/
@@ -420,7 +426,7 @@ public File getContainerFile() {
}
/**
* Returns container DB file
* Returns container DB file.
* @return container DB file
*/
public File getContainerDBFile() {

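The iterator above is constructed from the container ID and the on-disk container path. As a hedged illustration of the metadata layout it walks (not part of this commit, and assuming the KeyData.getFromProtoBuf helper used elsewhere in the codebase), one block's KeyData can be read back directly from the container's MetadataStore, mirroring what the addBlocks() helper in the test below writes:

// Sketch under the layout used in the test below: the key is the block's
// localID encoded with Longs.toByteArray(), the value is the serialized
// KeyData protobuf. 'containerData', 'conf' and 'localID' are assumed
// to be in scope.
MetadataStore store = KeyUtils.getDB(containerData, conf);
byte[] bytes = store.get(Longs.toByteArray(localID));
KeyData keyData = KeyData.getFromProtoBuf(
    ContainerProtos.KeyData.parseFrom(bytes));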
TestKeyValueContainer.java

@@ -18,22 +18,26 @@
package org.apache.hadoop.ozone.container.keyvalue;
import com.google.common.primitives.Longs;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume
.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.helpers
.KeyValueContainerLocationUtil;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.utils.MetadataStore;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -46,6 +50,8 @@
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.List;
import java.util.LinkedList;
import java.util.UUID;
import static org.apache.ratis.util.Preconditions.assertTrue;
@@ -92,6 +98,50 @@ public void setUp() throws Exception {
}
@Test
public void testBlockIterator() throws Exception {
keyValueContainerData = new KeyValueContainerData(100L, 1);
keyValueContainer = new KeyValueContainer(
keyValueContainerData, conf);
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
KeyValueBlockIterator blockIterator = keyValueContainer.blockIterator();
// As no blocks have been created yet, hasNext() should return false.
assertFalse(blockIterator.hasNext());
int blockCount = 10;
addBlocks(blockCount);
blockIterator = keyValueContainer.blockIterator();
assertTrue(blockIterator.hasNext());
KeyData keyData;
int blockCounter = 0;
while (blockIterator.hasNext()) {
keyData = blockIterator.nextBlock();
assertEquals(blockCounter++, keyData.getBlockID().getLocalID());
}
assertEquals(blockCount, blockCounter);
}
private void addBlocks(int count) throws Exception {
long containerId = keyValueContainerData.getContainerID();
MetadataStore metadataStore = KeyUtils.getDB(keyValueContainer
.getContainerData(), conf);
for (int i = 0; i < count; i++) {
// Create KeyData for each block, with sample metadata and one chunk.
BlockID blockID = new BlockID(containerId, i);
KeyData keyData = new KeyData(blockID);
keyData.addMetadata("VOLUME", "ozone");
keyData.addMetadata("OWNER", "hdfs");
List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
.getLocalID(), 0), 0, 1024);
chunkList.add(info.getProtoBufMessage());
keyData.setChunks(chunkList);
metadataStore.put(Longs.toByteArray(blockID.getLocalID()), keyData
.getProtoBufMessage().toByteArray());
}
}
@Test
public void testCreateContainer() throws Exception {
@@ -113,8 +163,8 @@ public void testCreateContainer() throws Exception {
//Check whether container file and container db file exists or not.
assertTrue(keyValueContainer.getContainerFile().exists(),
".Container File does not exist");
assertTrue(keyValueContainer.getContainerDBFile().exists(), "Container DB does " +
"not exist");
assertTrue(keyValueContainer.getContainerDBFile().exists(), "Container " +
"DB does not exist");
}
@Test