HDDS-176. Add keyCount and container maximum size to ContainerData. Contributed by Bharat Viswanadham.

Hanisha Koneru 2018-07-03 09:53:41 -07:00
parent 44b091a8d7
commit e1f4b3b560
19 changed files with 147 additions and 41 deletions
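At a glance, ContainerData now records the container's maximum size in GB and keeps a running key count. A minimal sketch of the changed surface, using the KeyValueContainerData constructor and accessors added in this commit (the class name KeyCountSketch, the container id 1L, and the size 5 are hypothetical illustration values):

import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

public class KeyCountSketch {
  public static void main(String[] args) {
    // Hypothetical values: container id 1, maximum container size 5 GB.
    KeyValueContainerData data = new KeyValueContainerData(1L, 5);
    data.incrKeyCount();                      // a key was written
    data.decrKeyCount();                      // that key was deleted again
    System.out.println(data.getKeyCount());   // 0
    System.out.println(data.getMaxSizeGB());  // 5
  }
}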

View File

@ -53,12 +53,15 @@ public class ContainerData {
// State of the Container
private ContainerLifeCycleState state;
private final int maxSizeGB;
/** parameters for read/write statistics on the container. **/
private final AtomicLong readBytes;
private final AtomicLong writeBytes;
private final AtomicLong readCount;
private final AtomicLong writeCount;
private final AtomicLong bytesUsed;
private final AtomicLong keyCount;
private HddsVolume volume;
@ -67,8 +70,9 @@ public class ContainerData {
* Creates a ContainerData Object, which holds metadata of the container.
* @param type - ContainerType
* @param containerId - ContainerId
* @param size - container maximum size
*/
public ContainerData(ContainerType type, long containerId) {
public ContainerData(ContainerType type, long containerId, int size) {
this.containerType = type;
this.containerId = containerId;
this.layOutVersion = ChunkLayOutVersion.getLatestVersion().getVersion();
@ -79,6 +83,8 @@ public ContainerData(ContainerType type, long containerId) {
this.writeCount = new AtomicLong(0L);
this.writeBytes = new AtomicLong(0L);
this.bytesUsed = new AtomicLong(0L);
this.keyCount = new AtomicLong(0L);
this.maxSizeGB = size;
}
/**
@ -86,9 +92,10 @@ public ContainerData(ContainerType type, long containerId) {
* @param type - ContainerType
* @param containerId - ContainerId
* @param layOutVersion - Container layOutVersion
* @param size - Container maximum size
*/
public ContainerData(ContainerType type, long containerId, int
layOutVersion) {
layOutVersion, int size) {
this.containerType = type;
this.containerId = containerId;
this.layOutVersion = layOutVersion;
@ -99,6 +106,8 @@ public ContainerData(ContainerType type, long containerId, int
this.writeCount = new AtomicLong(0L);
this.writeBytes = new AtomicLong(0L);
this.bytesUsed = new AtomicLong(0L);
this.keyCount = new AtomicLong(0L);
this.maxSizeGB = size;
}
/**
@ -133,6 +142,14 @@ public synchronized void setState(ContainerLifeCycleState state) {
this.state = state;
}
/**
* Returns the maximum size of the container in GB.
* @return maxSizeGB
*/
public int getMaxSizeGB() {
return maxSizeGB;
}
/**
* Returns the layOutVersion of the actual container data format.
* @return layOutVersion
@ -309,5 +326,34 @@ public HddsVolume getVolume() {
return volume;
}
/**
* Increments the number of keys in the container.
*/
public void incrKeyCount() {
this.keyCount.incrementAndGet();
}
/**
* Decrements the number of keys in the container.
*/
public void decrKeyCount() {
this.keyCount.decrementAndGet();
}
/**
* Returns the number of keys in the container.
* @return key count
*/
public long getKeyCount() {
return this.keyCount.get();
}
/**
* Sets the number of keys in the container.
* @param count - number of keys
*/
public void setKeyCount(long count) {
this.keyCount.set(count);
}
}
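Since keyCount is an AtomicLong, increments and decrements from concurrent key writers stay consistent without any external locking. A small sketch under that assumption (the class name, thread count, and iteration count are arbitrary):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

public class KeyCountConcurrencySketch {
  public static void main(String[] args) throws InterruptedException {
    // Hypothetical container: id 1, maximum size 5 GB.
    KeyValueContainerData data = new KeyValueContainerData(1L, 5);
    ExecutorService pool = Executors.newFixedThreadPool(4);
    for (int i = 0; i < 1000; i++) {
      pool.submit(data::incrKeyCount);
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.MINUTES);
    // AtomicLong makes all 1000 increments visible, so this prints 1000.
    System.out.println(data.getKeyCount());
  }
}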

View File

@ -32,6 +32,7 @@
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;
import java.io.File;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.Map;
@ -47,7 +48,6 @@
import org.yaml.snakeyaml.nodes.Tag;
import org.yaml.snakeyaml.representer.Representer;
import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.YAML_FIELDS;
import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.YAML_TAG;
/**
@ -150,10 +150,11 @@ protected Set<Property> getProperties(Class<? extends Object> type)
// When a new Container type is added, add the fields that need to be
// filtered here.
if (type.equals(KeyValueContainerData.class)) {
List<String> yamlFields = KeyValueContainerData.getYamlFields();
// filter properties
for (Property prop : set) {
String name = prop.getName();
if (YAML_FIELDS.contains(name)) {
if (yamlFields.contains(name)) {
filtered.add(prop);
}
}
@ -183,9 +184,12 @@ public Object construct(Node node) {
long layOutVersion = (long) nodes.get("layOutVersion");
int lv = (int) layOutVersion;
long size = (long) nodes.get("maxSizeGB");
int maxSize = (int) size;
//When a new field is added, it needs to be added here.
KeyValueContainerData kvData = new KeyValueContainerData((long) nodes
.get("containerId"), lv);
.get("containerId"), lv, maxSize);
kvData.setContainerDBType((String)nodes.get("containerDBType"));
kvData.setMetadataPath((String) nodes.get(
"metadataPath"));

View File

@ -18,7 +18,6 @@
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
import org.apache.hadoop.ozone.OzoneConsts;
@ -30,7 +29,6 @@
import org.apache.hadoop.ozone.protocol.VersionResponse;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
@ -67,7 +65,6 @@ public EndpointStateMachine.EndPointStates call() throws Exception {
rpcEndPoint.setVersion(response);
VolumeSet volumeSet = ozoneContainer.getVolumeSet();
Map<String, HddsVolume> volumeMap = volumeSet.getVolumeMap();
List<HddsProtos.KeyValue> keyValues = versionResponse.getKeysList();
String scmId = response.getValue(OzoneConsts.SCM_ID);
String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);

View File

@ -25,7 +25,6 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerLifeCycleState;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.io.IOUtils;
@ -84,7 +83,6 @@ public class KeyValueContainer implements Container {
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private final KeyValueContainerData containerData;
private long containerMaxSize;
private Configuration config;
public KeyValueContainer(KeyValueContainerData containerData, Configuration
@ -95,9 +93,6 @@ public KeyValueContainer(KeyValueContainerData containerData, Configuration
"be null");
this.config = ozoneConfig;
this.containerData = containerData;
this.containerMaxSize = (long) ozoneConfig.getInt(ScmConfigKeys
.OZONE_SCM_CONTAINER_SIZE_GB, ScmConfigKeys
.OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024L * 1024L * 1024L;
}
@Override
@ -111,9 +106,10 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy
File containerMetaDataPath = null;
//acquiring volumeset lock and container lock
volumeSet.acquireLock();
long maxSize = (containerData.getMaxSizeGB() * 1024L * 1024L * 1024L);
try {
HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
.getVolumesList(), containerMaxSize);
.getVolumesList(), maxSize);
String containerBasePath = containerVolume.getHddsRootDir().toString();
long containerId = containerData.getContainerId();

View File

@ -39,9 +39,9 @@ public class KeyValueContainerData extends ContainerData {
public static final Tag YAML_TAG = new Tag("KeyValueContainerData");
// Fields that need to be stored in the .container file.
public static final List<String> YAML_FIELDS = Lists.newArrayList(
private static final List<String> YAML_FIELDS = Lists.newArrayList(
"containerType", "containerId", "layOutVersion", "state", "metadata",
"metadataPath", "chunksPath", "containerDBType");
"metadataPath", "chunksPath", "containerDBType", "maxSizeGB");
// Path to Container metadata Level DB/RocksDB Store and .container file.
private String metadataPath;
@ -60,9 +60,10 @@ public class KeyValueContainerData extends ContainerData {
/**
* Constructs KeyValueContainerData object.
* @param id - ContainerId
* @param size - maximum size of the container
*/
public KeyValueContainerData(long id) {
super(ContainerProtos.ContainerType.KeyValueContainer, id);
public KeyValueContainerData(long id, int size) {
super(ContainerProtos.ContainerType.KeyValueContainer, id, size);
this.numPendingDeletionBlocks = 0;
}
@ -70,10 +71,11 @@ public KeyValueContainerData(long id) {
* Constructs KeyValueContainerData object.
* @param id - ContainerId
* @param layOutVersion
* @param size - maximum size of the container
*/
public KeyValueContainerData(long id,
int layOutVersion) {
super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion);
public KeyValueContainerData(long id, int layOutVersion, int size) {
super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion,
size);
this.numPendingDeletionBlocks = 0;
}
@ -205,4 +207,8 @@ public ContainerProtos.ContainerData getProtoBufMessage() {
return builder.build();
}
public static List<String> getYamlFields() {
return YAML_FIELDS;
}
}

View File

@ -41,6 +41,7 @@
.PutSmallFileRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.Type;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
@ -104,6 +105,7 @@ public class KeyValueHandler extends Handler {
private final KeyManager keyManager;
private final ChunkManager chunkManager;
private VolumeChoosingPolicy volumeChoosingPolicy;
private final int maxContainerSizeGB;
// TODO: Add metrics and populate them.
@ -125,6 +127,8 @@ private KeyValueHandler(Configuration config, ContainerSet contSet,
chunkManager = new ChunkManagerImpl();
// TODO: Add support for different volumeChoosingPolicies.
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
maxContainerSizeGB = config.getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB,
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
}
@Override
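With this change the handler reads the maximum container size once from configuration and passes it to every new KeyValueContainerData, rather than each KeyValueContainer re-deriving it. A minimal sketch of that lookup, assuming a default OzoneConfiguration (the sketch class name and the OzoneConfiguration import path are assumptions; the ScmConfigKeys constants are the ones used above):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class MaxSizeConfigSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Falls back to OZONE_SCM_CONTAINER_SIZE_DEFAULT when the key is not set.
    int maxContainerSizeGB = conf.getInt(
        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB,
        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
    System.out.println("container max size (GB): " + maxContainerSizeGB);
  }
}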
@ -207,7 +211,7 @@ ContainerCommandResponseProto handleCreateContainer(
}
KeyValueContainerData newContainerData = new KeyValueContainerData(
containerID);
containerID, maxContainerSizeGB);
// TODO: Add support to add metadataList to ContainerData. Add metadata
// to container during creation.
KeyValueContainer newContainer = new KeyValueContainer(
@ -565,8 +569,8 @@ ContainerCommandResponseProto handlePutSmallFile(
try {
checkContainerOpen(kvContainer);
BlockID blockID = BlockID.getFromProtobuf(
putSmallFileReq.getKey().getKeyData().getBlockID());
BlockID blockID = BlockID.getFromProtobuf(putSmallFileReq.getKey()
.getKeyData().getBlockID());
KeyData keyData = KeyData.getFromProtoBuf(
putSmallFileReq.getKey().getKeyData());
Preconditions.checkNotNull(keyData);
@ -613,8 +617,8 @@ ContainerCommandResponseProto handleGetSmallFile(
GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile();
try {
BlockID blockID = BlockID.getFromProtobuf(
getSmallFileReq.getKey().getBlockID());
BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getKey()
.getBlockID());
KeyData responseData = keyManager.getKey(kvContainer, blockID);
ContainerProtos.ChunkInfo chunkInfo = null;

View File

@ -302,6 +302,7 @@ public static void parseKeyValueContainerData(
}
}).sum();
containerData.setBytesUsed(bytesUsed);
containerData.setKeyCount(liveKeys.size());
}
}

View File

@ -84,6 +84,9 @@ public void putKey(Container container, KeyData data) throws IOException {
Preconditions.checkNotNull(db, "DB cannot be null here");
db.put(Longs.toByteArray(data.getLocalID()), data.getProtoBufMessage()
.toByteArray());
// Increment the container's key count now that the key has been persisted.
container.getContainerData().incrKeyCount();
}
/**
@ -148,6 +151,9 @@ public void deleteKey(Container container, BlockID blockID) throws
NO_SUCH_KEY);
}
db.delete(kKey);
// Decrement the container's key count now that the key has been removed.
container.getContainerData().decrKeyCount();
}
/**

View File

@ -0,0 +1,22 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.container.keyvalue.impl;
/**
This package contains the chunk manager and key manager implementations
for the KeyValue container type.
**/

View File

@ -109,11 +109,9 @@ public boolean accept(File pathname) {
.getContainerFile(metadataPath, containerName);
File checksumFile = KeyValueContainerLocationUtil
.getContainerCheckSumFile(metadataPath, containerName);
File dbFile = KeyValueContainerLocationUtil
.getContainerDBFile(metadataPath, containerName);
if (containerFile.exists() && checksumFile.exists() &&
dbFile.exists()) {
verifyContainerFile(containerFile, checksumFile, dbFile);
if (containerFile.exists() && checksumFile.exists()) {
verifyContainerFile(containerName, containerFile,
checksumFile);
} else {
LOG.error("Missing container metadata files for Container: " +
"{}", containerName);
@ -129,8 +127,8 @@ public boolean accept(File pathname) {
}
}
private void verifyContainerFile(File containerFile, File checksumFile,
File dbFile) {
private void verifyContainerFile(String containerName, File containerFile,
File checksumFile) {
try {
ContainerData containerData = ContainerDataYaml.readContainerFile(
containerFile);
@ -139,6 +137,15 @@ private void verifyContainerFile(File containerFile, File checksumFile,
case KeyValueContainer:
KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
containerData;
File dbFile = KeyValueContainerLocationUtil
.getContainerDBFile(new File(containerFile.getParent()),
containerName);
if (!dbFile.exists()) {
LOG.error("Container DB file is missing for Container {}, skipping " +
"this", containerName);
// Do not process this container further, as its DB file is missing.
return;
}
KeyValueContainerUtil.parseKeyValueContainerData(keyValueContainerData,
containerFile, checksumFile, dbFile, config);
KeyValueContainer keyValueContainer = new KeyValueContainer(

View File

@ -31,6 +31,7 @@
*/
public class TestKeyValueContainerData {
private static final int MAXSIZE = 5;
@Test
public void testKeyValueData() {
long containerId = 1L;
@ -42,7 +43,8 @@ public void testKeyValueData() {
.ContainerLifeCycleState.CLOSED;
AtomicLong val = new AtomicLong(0);
KeyValueContainerData kvData = new KeyValueContainerData(containerId);
KeyValueContainerData kvData = new KeyValueContainerData(containerId,
MAXSIZE);
assertEquals(containerType, kvData.getContainerType());
assertEquals(containerId, kvData.getContainerId());
@ -54,6 +56,8 @@ public void testKeyValueData() {
assertEquals(val.get(), kvData.getWriteBytes());
assertEquals(val.get(), kvData.getReadCount());
assertEquals(val.get(), kvData.getWriteCount());
assertEquals(val.get(), kvData.getKeyCount());
assertEquals(MAXSIZE, kvData.getMaxSizeGB());
kvData.setState(state);
kvData.setContainerDBType(containerDBType);
@ -63,6 +67,7 @@ public void testKeyValueData() {
kvData.incrWriteBytes(10);
kvData.incrReadCount();
kvData.incrWriteCount();
kvData.incrKeyCount();
assertEquals(state, kvData.getState());
assertEquals(containerDBType, kvData.getContainerDBType());
@ -73,6 +78,7 @@ public void testKeyValueData() {
assertEquals(10, kvData.getWriteBytes());
assertEquals(1, kvData.getReadCount());
assertEquals(1, kvData.getWriteCount());
assertEquals(1, kvData.getKeyCount());
}

View File

@ -37,6 +37,7 @@
*/
public class TestContainerDataYaml {
private static final int MAXSIZE = 5;
@Test
public void testCreateContainerFile() throws IOException {
String path = new FileSystemTestHelper().getTestRootDir();
@ -45,7 +46,8 @@ public void testCreateContainerFile() throws IOException {
File filePath = new File(new FileSystemTestHelper().getTestRootDir());
filePath.mkdirs();
KeyValueContainerData keyValueContainerData = new KeyValueContainerData(Long.MAX_VALUE);
KeyValueContainerData keyValueContainerData = new KeyValueContainerData(
Long.MAX_VALUE, MAXSIZE);
keyValueContainerData.setContainerDBType("RocksDB");
keyValueContainerData.setMetadataPath(path);
keyValueContainerData.setChunksPath(path);
@ -72,6 +74,7 @@ public void testCreateContainerFile() throws IOException {
.getState());
assertEquals(1, kvData.getLayOutVersion());
assertEquals(0, kvData.getMetadata().size());
assertEquals(MAXSIZE, kvData.getMaxSizeGB());
// Update ContainerData.
kvData.addMetadata("VOLUME", "hdfs");
@ -101,6 +104,7 @@ public void testCreateContainerFile() throws IOException {
assertEquals(2, kvData.getMetadata().size());
assertEquals("hdfs", kvData.getMetadata().get("VOLUME"));
assertEquals("ozone", kvData.getMetadata().get("OWNER"));
assertEquals(MAXSIZE, kvData.getMaxSizeGB());
FileUtil.fullyDelete(filePath);

View File

@ -53,7 +53,7 @@ public void testAddGetRemoveContainer() throws StorageContainerException {
ContainerProtos.ContainerLifeCycleState state = ContainerProtos
.ContainerLifeCycleState.CLOSED;
KeyValueContainerData kvData = new KeyValueContainerData(containerId);
KeyValueContainerData kvData = new KeyValueContainerData(containerId, 5);
kvData.setState(state);
KeyValueContainer keyValueContainer = new KeyValueContainer(kvData, new
OzoneConfiguration());
@ -163,7 +163,7 @@ public void testListContainer() throws StorageContainerException {
private ContainerSet createContainerSet() throws StorageContainerException {
ContainerSet containerSet = new ContainerSet();
for (int i=0; i<10; i++) {
KeyValueContainerData kvData = new KeyValueContainerData(i);
KeyValueContainerData kvData = new KeyValueContainerData(i, 5);
if (i%2 == 0) {
kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
} else {

View File

@ -79,7 +79,7 @@ public void setUp() throws Exception {
Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
.thenReturn(hddsVolume);
keyValueContainerData = new KeyValueContainerData(1L);
keyValueContainerData = new KeyValueContainerData(1L, 5);
keyValueContainer = new KeyValueContainer(keyValueContainerData, config);

View File

@ -79,7 +79,7 @@ public void setUp() throws Exception {
Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
.thenReturn(hddsVolume);
keyValueContainerData = new KeyValueContainerData(1L);
keyValueContainerData = new KeyValueContainerData(1L, 5);
keyValueContainer = new KeyValueContainer(
keyValueContainerData, config);
@ -104,9 +104,11 @@ public void setUp() throws Exception {
@Test
public void testPutAndGetKey() throws Exception {
assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
//Put Key
keyManager.putKey(keyValueContainer, keyData);
assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
//Get Key
KeyData fromGetKeyData = keyManager.getKey(keyValueContainer,
keyData.getBlockID());
@ -123,10 +125,13 @@ public void testPutAndGetKey() throws Exception {
@Test
public void testDeleteKey() throws Exception {
try {
assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
//Put Key
keyManager.putKey(keyValueContainer, keyData);
assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
//Delete Key
keyManager.deleteKey(keyValueContainer, blockID);
assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
try {
keyManager.getKey(keyValueContainer, blockID);
fail("testDeleteKey");

View File

@ -86,7 +86,7 @@ public void setUp() throws Exception {
Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
.thenReturn(hddsVolume);
keyValueContainerData = new KeyValueContainerData(1L);
keyValueContainerData = new KeyValueContainerData(1L, 5);
keyValueContainer = new KeyValueContainer(
keyValueContainerData, conf);

View File

@ -66,7 +66,7 @@ public void setUp() throws Exception {
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
for (int i=0; i<10; i++) {
keyValueContainerData = new KeyValueContainerData(i);
keyValueContainerData = new KeyValueContainerData(i, 1);
keyValueContainer = new KeyValueContainer(
keyValueContainerData, conf);
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);

View File

@ -5,6 +5,7 @@ containerId: 9223372036854775807
containerType: KeyValueContainer
metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
layOutVersion: 1
maxSizeGB: 5
metadata: {OWNER: ozone, VOLUME: hdfs}
state: CLOSED
aclEnabled: true

View File

@ -5,5 +5,6 @@ containerId: 9223372036854775807
containerType: KeyValueContainer
metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1
layOutVersion: 1
maxSizeGB: 5
metadata: {OWNER: ozone, VOLUME: hdfs}
state: INVALID