HDDS-271. Create a block iterator to iterate blocks in a container. Contributed by Bharat Viswanadham.
parent c7ebcd76bf
commit c835fc08ad
@@ -24,7 +24,7 @@
  * Iterator for MetaDataStore DB.
  * @param <T>
  */
-interface MetaStoreIterator<T> extends Iterator<T> {
+public interface MetaStoreIterator<T> extends Iterator<T> {

   /**
    * seek to first entry.
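The widened visibility above lets code outside org.apache.hadoop.utils, such as the KeyValueBlockIterator added later in this change, consume MetaStoreIterator directly. A rough, non-authoritative sketch of that raw usage follows; it uses only the calls this patch itself exercises (iterator(), seekToFirst(), hasNext(), next()), while the countEntries helper and its wrapper class are illustrative assumptions, not part of the change.

// Sketch only: drive a MetaStoreIterator directly over an already-opened
// MetadataStore and count its raw entries.
import java.io.IOException;

import org.apache.hadoop.utils.MetaStoreIterator;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStore.KeyValue;

public final class MetaStoreIteratorExample {
  private MetaStoreIteratorExample() { }

  /** Counts the raw entries in an already-opened MetadataStore. */
  public static long countEntries(MetadataStore store) throws IOException {
    MetaStoreIterator<KeyValue> it = store.iterator();
    it.seekToFirst();
    long count = 0;
    while (it.hasNext()) {
      it.next();   // each entry pairs raw key bytes with serialized value bytes
      count++;
    }
    return count;
  }
}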
@@ -0,0 +1,57 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ozone.container.common.interfaces;

import java.io.IOException;
import java.util.NoSuchElementException;

/**
 * Block iterator for a container. Each container type needs to implement
 * this interface.
 * @param <T> block type returned by the iterator
 */
public interface BlockIterator<T> {

  /**
   * Checks whether the iterator has a next element.
   * @return true if a next block exists, false otherwise
   * @throws IOException
   */
  boolean hasNext() throws IOException;

  /**
   * Seek to first entry.
   */
  void seekToFirst();

  /**
   * Seek to last entry.
   */
  void seekToLast();

  /**
   * Get the next block in the container.
   * @return next block
   * @throws IOException
   * @throws NoSuchElementException if there are no more blocks
   */
  T nextBlock() throws IOException, NoSuchElementException;

}
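A caller of this contract only sees the hasNext()/nextBlock() pair plus the two seek methods. Below is a minimal consuming-side sketch that mirrors the loops in the new test class further down; the collectLocalIDs helper and its wrapper class are illustrative assumptions, not part of this change.

// Sketch: drain any BlockIterator<KeyData> implementation into a list of
// block-local IDs, in iteration order.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;

public final class BlockIteratorExample {
  private BlockIteratorExample() { }

  /** Returns the localIDs of all blocks the iterator yields, in order. */
  public static List<Long> collectLocalIDs(BlockIterator<KeyData> iter)
      throws IOException {
    List<Long> ids = new ArrayList<>();
    iter.seekToFirst();                 // rewind in case the caller advanced it
    while (iter.hasNext()) {
      ids.add(iter.nextBlock().getLocalID());
    }
    return ids;
  }
}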
@@ -0,0 +1,148 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ozone.container.keyvalue;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
import org.apache.hadoop.utils.MetaStoreIterator;
import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStore.KeyValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.util.NoSuchElementException;

/**
 * Block iterator for a KeyValue container. This iterator returns only blocks
 * that match the supplied {@link MetadataKeyFilters.KeyPrefixFilter}. If no
 * filter is specified, the default filter
 * {@link MetadataKeyFilters#getNormalKeyFilter()} is used.
 */
@InterfaceAudience.Public
public class KeyValueBlockIterator implements BlockIterator<KeyData> {

  private static final Logger LOG = LoggerFactory.getLogger(
      KeyValueBlockIterator.class);

  private MetaStoreIterator<KeyValue> blockIterator;
  private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters
      .getNormalKeyFilter();
  private KeyPrefixFilter blockFilter;
  private KeyData nextBlock;
  private long containerId;

  /**
   * KeyValueBlockIterator to iterate blocks in a container.
   * @param id - container id
   * @param path - container base path
   * @throws IOException
   */
  public KeyValueBlockIterator(long id, File path)
      throws IOException {
    this(id, path, defaultBlockFilter);
  }

  /**
   * KeyValueBlockIterator to iterate blocks in a container.
   * @param id - container id
   * @param path - container base path
   * @param filter - block filter to be applied to the blocks
   * @throws IOException
   */
  public KeyValueBlockIterator(long id, File path, KeyPrefixFilter filter)
      throws IOException {
    containerId = id;
    File metadataPath = new File(path, OzoneConsts.METADATA);
    File containerFile = ContainerUtils.getContainerFile(metadataPath
        .getParentFile());
    ContainerData containerData = ContainerDataYaml.readContainerFile(
        containerFile);
    KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
        containerData;
    keyValueContainerData.setDbFile(KeyValueContainerLocationUtil
        .getContainerDBFile(metadataPath, containerId));
    MetadataStore metadataStore = KeyUtils.getDB(keyValueContainerData, new
        OzoneConfiguration());
    blockIterator = metadataStore.iterator();
    blockFilter = filter;
  }

  /**
   * Returns the next block that matches the filter.
   * @return next block
   * @throws IOException
   * @throws NoSuchElementException if there are no more matching blocks
   */
  @Override
  public KeyData nextBlock() throws IOException, NoSuchElementException {
    if (nextBlock != null) {
      KeyData currentBlock = nextBlock;
      nextBlock = null;
      return currentBlock;
    }
    if (hasNext()) {
      return nextBlock();
    }
    throw new NoSuchElementException("Block Iterator reached end for " +
        "ContainerID " + containerId);
  }

  @Override
  public boolean hasNext() throws IOException {
    if (nextBlock != null) {
      return true;
    }
    if (blockIterator.hasNext()) {
      KeyValue block = blockIterator.next();
      if (blockFilter.filterKey(null, block.getKey(), null)) {
        nextBlock = KeyUtils.getKeyData(block.getValue());
        LOG.trace("Block matching with filter found: blockID is : {} for " +
            "containerID {}", nextBlock.getLocalID(), containerId);
        return true;
      }
      // Current entry did not match the filter; keep scanning and propagate
      // the result of the recursive lookup.
      return hasNext();
    }
    return false;
  }

  @Override
  public void seekToFirst() {
    nextBlock = null;
    blockIterator.seekToFirst();
  }

  @Override
  public void seekToLast() {
    nextBlock = null;
    blockIterator.seekToLast();
  }
}
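Putting the pieces together, a caller points a KeyValueBlockIterator at a container's base path and drains it, optionally with a non-default key filter. The sketch below assumes a container already exists under containerPath; it uses only constructors and filters referenced in this change, while the example class itself is hypothetical.

// Sketch: list normal blocks, then deleting-prefixed blocks, of one container.
import java.io.File;
import java.io.IOException;

import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
import org.apache.hadoop.utils.MetadataKeyFilters;

public final class KeyValueBlockIteratorExample {
  private KeyValueBlockIteratorExample() { }

  public static void listBlocks(long containerID, File containerPath)
      throws IOException {
    // Default filter: only normal (non-deleted) blocks are returned.
    KeyValueBlockIterator normalBlocks =
        new KeyValueBlockIterator(containerID, containerPath);
    while (normalBlocks.hasNext()) {
      KeyData block = normalBlocks.nextBlock();
      System.out.println("normal block localID=" + block.getLocalID());
    }

    // Explicit filter: only blocks marked for deletion are returned.
    KeyValueBlockIterator deletingBlocks = new KeyValueBlockIterator(
        containerID, containerPath, MetadataKeyFilters.getDeletingKeyFilter());
    while (deletingBlocks.hasNext()) {
      KeyData block = deletingBlocks.nextBlock();
      System.out.println("deleting block localID=" + block.getLocalID());
    }
  }
}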
@@ -0,0 +1,275 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ozone.container.keyvalue;

import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.utils.MetadataKeyFilters;
import org.apache.hadoop.utils.MetadataStore;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.io.File;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.UUID;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL;
import static org.apache.hadoop.ozone.OzoneConfigKeys
    .OZONE_METADATA_STORE_IMPL_LEVELDB;
import static org.apache.hadoop.ozone.OzoneConfigKeys
    .OZONE_METADATA_STORE_IMPL_ROCKSDB;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

/**
 * This class is used to test the KeyValue container block iterator.
 */
@RunWith(Parameterized.class)
public class TestKeyValueBlockIterator {

  private KeyValueContainer container;
  private KeyValueContainerData containerData;
  private VolumeSet volumeSet;
  private Configuration conf;
  private File testRoot;

  private final String storeImpl;

  public TestKeyValueBlockIterator(String metadataImpl) {
    this.storeImpl = metadataImpl;
  }

  @Parameterized.Parameters
  public static Collection<Object[]> data() {
    return Arrays.asList(new Object[][] {
        {OZONE_METADATA_STORE_IMPL_LEVELDB},
        {OZONE_METADATA_STORE_IMPL_ROCKSDB}});
  }

  @Before
  public void setUp() throws Exception {
    testRoot = GenericTestUtils.getRandomizedTestDir();
    conf = new OzoneConfiguration();
    conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
    conf.set(OZONE_METADATA_STORE_IMPL, storeImpl);
    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
  }

  @After
  public void tearDown() {
    volumeSet.shutdown();
    FileUtil.fullyDelete(testRoot);
  }

  @Test
  public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception {

    long containerID = 100L;
    int deletedBlocks = 5;
    int normalBlocks = 5;
    createContainerWithBlocks(containerID, normalBlocks, deletedBlocks);
    String containerPath = new File(containerData.getMetadataPath())
        .getParent();
    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
        containerID, new File(containerPath));

    int counter = 0;
    while (keyValueBlockIterator.hasNext()) {
      KeyData keyData = keyValueBlockIterator.nextBlock();
      assertEquals(keyData.getLocalID(), counter++);
    }

    assertFalse(keyValueBlockIterator.hasNext());

    keyValueBlockIterator.seekToFirst();
    counter = 0;
    while (keyValueBlockIterator.hasNext()) {
      KeyData keyData = keyValueBlockIterator.nextBlock();
      assertEquals(keyData.getLocalID(), counter++);
    }
    assertFalse(keyValueBlockIterator.hasNext());

    try {
      keyValueBlockIterator.nextBlock();
    } catch (NoSuchElementException ex) {
      GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
          "for ContainerID " + containerID, ex);
    }
  }

  @Test
  public void testKeyValueBlockIteratorWithNextBlock() throws Exception {
    long containerID = 101L;
    createContainerWithBlocks(containerID, 2, 0);
    String containerPath = new File(containerData.getMetadataPath())
        .getParent();
    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
        containerID, new File(containerPath));
    long blockID = 0L;
    assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());

    try {
      keyValueBlockIterator.nextBlock();
    } catch (NoSuchElementException ex) {
      GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
          "for ContainerID " + containerID, ex);
    }
  }

  @Test
  public void testKeyValueBlockIteratorWithHasNext() throws Exception {
    long containerID = 102L;
    createContainerWithBlocks(containerID, 2, 0);
    String containerPath = new File(containerData.getMetadataPath())
        .getParent();
    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
        containerID, new File(containerPath));
    long blockID = 0L;

    // Calling hasNext() multiple times should not advance the iterator.
    assertTrue(keyValueBlockIterator.hasNext());
    assertTrue(keyValueBlockIterator.hasNext());
    assertTrue(keyValueBlockIterator.hasNext());
    assertTrue(keyValueBlockIterator.hasNext());
    assertTrue(keyValueBlockIterator.hasNext());
    assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());

    assertTrue(keyValueBlockIterator.hasNext());
    assertTrue(keyValueBlockIterator.hasNext());
    assertTrue(keyValueBlockIterator.hasNext());
    assertTrue(keyValueBlockIterator.hasNext());
    assertTrue(keyValueBlockIterator.hasNext());
    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());

    keyValueBlockIterator.seekToLast();
    assertTrue(keyValueBlockIterator.hasNext());
    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());

    keyValueBlockIterator.seekToFirst();
    blockID = 0L;
    assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID());
    assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID());

    try {
      keyValueBlockIterator.nextBlock();
    } catch (NoSuchElementException ex) {
      GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
          "for ContainerID " + containerID, ex);
    }
  }

  @Test
  public void testKeyValueBlockIteratorWithFilter() throws Exception {
    long containerId = 103L;
    int deletedBlocks = 5;
    int normalBlocks = 5;
    createContainerWithBlocks(containerId, normalBlocks, deletedBlocks);
    String containerPath = new File(containerData.getMetadataPath())
        .getParent();
    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
        containerId, new File(containerPath), MetadataKeyFilters
        .getDeletingKeyFilter());

    // Deleted blocks are inserted after the normal blocks, so their localIDs
    // start at normalBlocks.
    int counter = 5;
    while (keyValueBlockIterator.hasNext()) {
      KeyData keyData = keyValueBlockIterator.nextBlock();
      assertEquals(keyData.getLocalID(), counter++);
    }
  }

  @Test
  public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws
      Exception {
    long containerId = 104L;
    createContainerWithBlocks(containerId, 0, 5);
    String containerPath = new File(containerData.getMetadataPath())
        .getParent();
    KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
        containerId, new File(containerPath));
    // Since all blocks are marked as deleted, none of them match the normal
    // key filter.
    assertFalse(keyValueBlockIterator.hasNext());
  }

  /**
   * Creates a container with the specified number of normal blocks and
   * deleted blocks. Normal blocks are inserted first, followed by the
   * deleted blocks.
   * @param containerId
   * @param normalBlocks
   * @param deletedBlocks
   * @throws Exception
   */
  private void createContainerWithBlocks(long containerId, int
      normalBlocks, int deletedBlocks) throws Exception {
    containerData = new KeyValueContainerData(containerId, 1);
    container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
        .randomUUID().toString());
    MetadataStore metadataStore = KeyUtils.getDB(containerData, conf);

    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
    ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
    chunkList.add(info.getProtoBufMessage());

    for (int i = 0; i < normalBlocks; i++) {
      BlockID blockID = new BlockID(containerId, i);
      KeyData keyData = new KeyData(blockID);
      keyData.setChunks(chunkList);
      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), keyData
          .getProtoBufMessage().toByteArray());
    }

    // Deleted blocks follow the normal blocks, with localIDs running from
    // normalBlocks to normalBlocks + deletedBlocks - 1.
    for (int i = normalBlocks; i < normalBlocks + deletedBlocks; i++) {
      BlockID blockID = new BlockID(containerId, i);
      KeyData keyData = new KeyData(blockID);
      keyData.setChunks(chunkList);
      metadataStore.put(DFSUtil.string2Bytes(OzoneConsts
          .DELETING_KEY_PREFIX + blockID.getLocalID()), keyData
          .getProtoBufMessage().toByteArray());
    }
  }

}