HDDS-90: Create ContainerData, Container classes. Contributed by Bharat Viswanadham
Parent: ee1e0e2036
Commit: 6cd19b45ef
@@ -232,6 +232,14 @@ message ContainerData {
  optional string containerDBType = 11;
}

// This is used for the create container request.
message CreateContainerData {
  required int64 containerId = 1;
  repeated KeyValue metadata = 2;
  optional ContainerType containerType = 3 [default = KeyValueContainer];
}

enum ContainerType {
  KeyValueContainer = 1;
}
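For context, a minimal sketch of how the new CreateContainerData message might be populated from the generated Java bindings. The builder calls mirror the unit test added later in this commit; the literal container id and metadata values are illustrative only, not part of the patch.

// Illustrative only, not part of the patch: building a CreateContainerData
// request using the fields added above.
ContainerProtos.CreateContainerData request =
    ContainerProtos.CreateContainerData.newBuilder()
        .setContainerId(1L)
        .setContainerType(ContainerProtos.ContainerType.KeyValueContainer)
        .addMetadata(ContainerProtos.KeyValue.newBuilder()
            .setKey("VOLUME").setValue("ozone").build())
        .build();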
@@ -0,0 +1,80 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ozone.container.common.impl;

/**
 * Defines layout versions for the chunks.
 */
public final class ChunkLayOutVersion {

  private static final ChunkLayOutVersion[] CHUNK_LAYOUT_VERSION_INFOS =
      {new ChunkLayOutVersion(1, "Data without checksums.")};

  private final int version;
  private final String description;

  /**
   * Never created outside this class.
   *
   * @param version     -- version number
   * @param description -- description
   */
  private ChunkLayOutVersion(int version, String description) {
    this.version = version;
    this.description = description;
  }

  /**
   * Returns all versions.
   *
   * @return Version info array.
   */
  public static ChunkLayOutVersion[] getAllVersions() {
    return CHUNK_LAYOUT_VERSION_INFOS.clone();
  }

  /**
   * Returns the latest version.
   *
   * @return versionInfo
   */
  public static ChunkLayOutVersion getLatestVersion() {
    return CHUNK_LAYOUT_VERSION_INFOS[CHUNK_LAYOUT_VERSION_INFOS.length - 1];
  }

  /**
   * Returns the version number.
   *
   * @return int
   */
  public int getVersion() {
    return version;
  }

  /**
   * Returns the description.
   * @return String
   */
  public String getDescription() {
    return description;
  }

}
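A one-line usage sketch (illustrative only, not part of the patch): callers are expected to read the layout table through the static accessors rather than construct versions themselves.

// Illustrative only: reading the latest chunk layout version.
ChunkLayOutVersion latest = ChunkLayOutVersion.getLatestVersion();
System.out.println(latest.getVersion() + " - " + latest.getDescription());
// With the single entry above this prints "1 - Data without checksums."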
@@ -0,0 +1,234 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ozone.container.common.impl;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
    ContainerType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
    ContainerLifeCycleState;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;

/**
 * ContainerData is the in-memory representation of container metadata and is
 * represented on disk by the .container file.
 */
public class ContainerData {

  // Type of the container. For now, only KeyValueContainer is supported.
  private final ContainerType containerType;

  // Unique identifier for the container.
  private final long containerId;

  // Layout version of the container data.
  private final ChunkLayOutVersion layOutVersion;

  // Metadata of the container as key-value pairs.
  // This can hold information like volume name, owner, etc.
  private final Map<String, String> metadata;

  // State of the container.
  private ContainerLifeCycleState state;

  /** Counters for read/write statistics on the container. */
  private final AtomicLong readBytes;
  private final AtomicLong writeBytes;
  private final AtomicLong readCount;
  private final AtomicLong writeCount;

  /**
   * Creates a ContainerData object, which holds the metadata of the container.
   * @param type - ContainerType
   * @param containerId - ContainerId
   */
  public ContainerData(ContainerType type, long containerId) {
    this.containerType = type;
    this.containerId = containerId;
    this.layOutVersion = ChunkLayOutVersion.getLatestVersion();
    this.metadata = new TreeMap<>();
    this.state = ContainerLifeCycleState.OPEN;
    this.readCount = new AtomicLong(0L);
    this.readBytes = new AtomicLong(0L);
    this.writeCount = new AtomicLong(0L);
    this.writeBytes = new AtomicLong(0L);
  }

  /**
   * Returns the containerId.
   */
  public long getContainerId() {
    return containerId;
  }

  /**
   * Returns the type of the container.
   * @return ContainerType
   */
  public ContainerType getContainerType() {
    return containerType;
  }

  /**
   * Returns the state of the container.
   * @return ContainerLifeCycleState
   */
  public synchronized ContainerLifeCycleState getState() {
    return state;
  }

  /**
   * Sets the state of the container.
   * @param state - new container state
   */
  public synchronized void setState(ContainerLifeCycleState state) {
    this.state = state;
  }

  /**
   * Returns the layOutVersion of the actual container data format.
   * @return layOutVersion
   */
  public ChunkLayOutVersion getLayOutVersion() {
    return layOutVersion;
  }

  /**
   * Adds metadata.
   */
  public void addMetadata(String key, String value) throws IOException {
    synchronized (this.metadata) {
      if (this.metadata.containsKey(key)) {
        throw new IOException("This key already exists. Key " + key);
      }
      metadata.put(key, value);
    }
  }

  /**
   * Returns the metadata of the container.
   * @return metadata
   */
  public Map<String, String> getMetadata() {
    synchronized (this.metadata) {
      return Collections.unmodifiableMap(this.metadata);
    }
  }

  /**
   * Checks whether the container is open.
   * @return - boolean
   */
  public synchronized boolean isOpen() {
    return ContainerLifeCycleState.OPEN == state;
  }

  /**
   * Checks whether the container is valid, i.e. not in the INVALID state.
   * @return - boolean
   */
  public synchronized boolean isValid() {
    return !(ContainerLifeCycleState.INVALID == state);
  }

  /**
   * Checks whether the container is closed.
   * @return - boolean
   */
  public synchronized boolean isClosed() {
    return ContainerLifeCycleState.CLOSED == state;
  }

  /**
   * Marks this container as closed.
   */
  public synchronized void closeContainer() {
    // TODO: closed or closing here
    setState(ContainerLifeCycleState.CLOSED);
  }

  /**
   * Gets the number of bytes read from the container.
   * @return the number of bytes read from the container.
   */
  public long getReadBytes() {
    return readBytes.get();
  }

  /**
   * Increases the number of bytes read from the container.
   * @param bytes number of bytes read.
   */
  public void incrReadBytes(long bytes) {
    this.readBytes.addAndGet(bytes);
  }

  /**
   * Gets the number of times the container has been read.
   * @return the number of times the container has been read.
   */
  public long getReadCount() {
    return readCount.get();
  }

  /**
   * Increases the container read count by 1.
   */
  public void incrReadCount() {
    this.readCount.incrementAndGet();
  }

  /**
   * Gets the number of bytes written to the container.
   * @return the number of bytes written to the container.
   */
  public long getWriteBytes() {
    return writeBytes.get();
  }

  /**
   * Increases the number of bytes written to the container.
   * @param bytes the number of bytes written to the container.
   */
  public void incrWriteBytes(long bytes) {
    this.writeBytes.addAndGet(bytes);
  }

  /**
   * Gets the number of writes to the container.
   * @return the number of writes to the container.
   */
  public long getWriteCount() {
    return writeCount.get();
  }

  /**
   * Increases the number of writes to the container by 1.
   */
  public void incrWriteCount() {
    this.writeCount.incrementAndGet();
  }

}
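A short sketch of how a caller might drive the statistics and metadata API above (illustrative only; the dispatcher wiring is not part of this patch, and the container id used here is arbitrary). addMetadata declares IOException, so the snippet assumes an enclosing method that propagates it.

// Illustrative only: recording I/O and state changes against a container.
ContainerData data = new ContainerData(
    ContainerProtos.ContainerType.KeyValueContainer, 42L);
data.addMetadata("VOLUME", "ozone");  // rejects duplicate keys with IOException
data.incrReadCount();
data.incrReadBytes(4096L);
if (data.isOpen()) {
  data.closeContainer();              // moves the state to CLOSED
}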
@@ -0,0 +1,74 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ozone.container.common.impl;

import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;

import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.security.NoSuchAlgorithmException;

/**
 * Class to perform KeyValue Container operations.
 */
public class KeyValueContainer implements Container {

  static final Logger LOG =
      LoggerFactory.getLogger(Container.class);

  private KeyValueContainerData containerData;

  public KeyValueContainer(KeyValueContainerData containerData) {
    this.containerData = containerData;
  }

  @Override
  public void create(ContainerData cData) throws StorageContainerException {
    // Not implemented yet; this patch only introduces the class skeleton.
  }

  @Override
  public void delete(boolean forceDelete)
      throws StorageContainerException {
    // Not implemented yet.
  }

  @Override
  public void update(boolean forceUpdate)
      throws StorageContainerException {
    // Not implemented yet.
  }

  @Override
  public ContainerData getContainerData() throws StorageContainerException {
    // Not implemented yet.
    return null;
  }

  @Override
  public void close() throws StorageContainerException,
      NoSuchAlgorithmException {
    // Not implemented yet.
  }

}
@@ -0,0 +1,159 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ozone.container.common.impl;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

import java.io.IOException;

/**
 * This class represents the KeyValueContainer metadata, which is the
 * in-memory representation of container metadata and is represented on disk
 * by the .container file.
 */
public class KeyValueContainerData extends ContainerData {

  // Path to the LevelDB/RocksDB store.
  private String dbPath;

  // Path on the physical file system where the container file and checksum
  // are stored.
  private String containerFilePath;

  // Type of DB used to store the key-to-chunks mapping.
  private String containerDBType;

  // Number of pending deletion blocks in the container.
  private int numPendingDeletionBlocks;

  /**
   * Constructs a KeyValueContainerData object.
   * @param type - containerType
   * @param id - ContainerId
   */
  public KeyValueContainerData(ContainerProtos.ContainerType type, long id) {
    super(type, id);
    this.numPendingDeletionBlocks = 0;
  }

  /**
   * Returns the path to the DB store.
   *
   * @return - path
   */
  public String getDBPath() {
    return dbPath;
  }

  /**
   * Sets the path to the DB store.
   *
   * @param path - String.
   */
  public void setDBPath(String path) {
    this.dbPath = path;
  }

  /**
   * Gets the container file path.
   * @return - Physical path where the container file and checksum are stored.
   */
  public String getContainerPath() {
    return containerFilePath;
  }

  /**
   * Sets the container path.
   * @param containerPath - File path.
   */
  public void setContainerPath(String containerPath) {
    this.containerFilePath = containerPath;
  }

  /**
   * Returns the DBType used for the container.
   * @return containerDBType
   */
  public String getContainerDBType() {
    return containerDBType;
  }

  /**
   * Sets the DBType used for the container.
   * @param containerDBType - DB type name.
   */
  public void setContainerDBType(String containerDBType) {
    this.containerDBType = containerDBType;
  }

  /**
   * Returns the number of pending deletion blocks in the container.
   * @return numPendingDeletionBlocks
   */
  public int getNumPendingDeletionBlocks() {
    return numPendingDeletionBlocks;
  }

  /**
   * Increases the count of pending deletion blocks.
   *
   * @param numBlocks increment amount
   */
  public void incrPendingDeletionBlocks(int numBlocks) {
    this.numPendingDeletionBlocks += numBlocks;
  }

  /**
   * Decreases the count of pending deletion blocks.
   *
   * @param numBlocks decrement amount
   */
  public void decrPendingDeletionBlocks(int numBlocks) {
    this.numPendingDeletionBlocks -= numBlocks;
  }

  /**
   * Constructs a KeyValueContainerData object from ProtoBuf classes.
   *
   * @param protoData - ProtoBuf Message
   * @throws IOException
   */
  public static KeyValueContainerData getFromProtoBuf(
      ContainerProtos.CreateContainerData protoData) throws IOException {

    long containerID;
    ContainerProtos.ContainerType containerType;

    containerID = protoData.getContainerId();
    containerType = protoData.getContainerType();

    KeyValueContainerData keyValueContainerData = new KeyValueContainerData(
        containerType, containerID);

    for (int x = 0; x < protoData.getMetadataCount(); x++) {
      keyValueContainerData.addMetadata(protoData.getMetadata(x).getKey(),
          protoData.getMetadata(x).getValue());
    }

    return keyValueContainerData;
  }

}
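A sketch of the pending-deletion bookkeeping added here (illustrative only; the block-deleting service that would call this is not part of this patch, and the container id is arbitrary). Note that, unlike the AtomicLong read/write statistics inherited from ContainerData, this counter is a plain int and is not updated atomically.

// Illustrative only: tracking blocks queued for deletion in a container.
KeyValueContainerData kvData = new KeyValueContainerData(
    ContainerProtos.ContainerType.KeyValueContainer, 7L);
kvData.incrPendingDeletionBlocks(5);  // five blocks queued for deletion
kvData.decrPendingDeletionBlocks(2);  // two of them have been deleted
assert kvData.getNumPendingDeletionBlocks() == 3;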
@@ -0,0 +1,75 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ozone.container.common.interfaces;

import org.apache.hadoop.hdds.scm.container.common.helpers.
    StorageContainerException;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;

import java.security.NoSuchAlgorithmException;

/**
 * Interface for Container Operations.
 */
public interface Container {

  /**
   * Creates a container.
   *
   * @throws StorageContainerException
   */
  void create(ContainerData containerData) throws StorageContainerException;

  /**
   * Deletes the container.
   *
   * @param forceDelete - whether this container should be deleted forcibly.
   * @throws StorageContainerException
   */
  void delete(boolean forceDelete) throws StorageContainerException;

  /**
   * Updates the container.
   *
   * @param forceUpdate if true, update the container forcibly.
   * @throws StorageContainerException
   */
  void update(boolean forceUpdate)
      throws StorageContainerException;

  /**
   * Gets metadata about the container.
   *
   * @return ContainerData - Container Data.
   * @throws StorageContainerException
   */
  ContainerData getContainerData() throws StorageContainerException;

  /**
   * Closes an open container; if the container is already closed or does not
   * exist, a StorageContainerException is thrown.
   *
   * @throws StorageContainerException
   */
  void close() throws StorageContainerException,
      NoSuchAlgorithmException;

}
@@ -0,0 +1,42 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ozone.container.common;

import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
import org.junit.Assert;
import org.junit.Test;

/**
 * This class tests ChunkLayOutVersion.
 */
public class TestChunkLayOutVersion {

  @Test
  public void testChunkLayOutVersion() {

    // Check the latest version and its description.
    Assert.assertEquals(1, ChunkLayOutVersion.getLatestVersion().getVersion());
    Assert.assertEquals("Data without checksums.", ChunkLayOutVersion
        .getLatestVersion().getDescription());

    Assert.assertEquals(1, ChunkLayOutVersion.getAllVersions().length);
  }

}
@@ -0,0 +1,119 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.ozone.container.common;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.impl.KeyValueContainerData;
import org.junit.Test;

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/**
 * This class is used to test the KeyValueContainerData.
 */
public class TestKeyValueContainerData {

  @Test
  public void testGetFromProtoBuf() throws IOException {

    long containerId = 1L;
    ContainerProtos.ContainerType containerType = ContainerProtos
        .ContainerType.KeyValueContainer;
    String path = "/tmp";
    String containerDBType = "RocksDB";
    int layOutVersion = 1;
    ContainerProtos.ContainerLifeCycleState state = ContainerProtos
        .ContainerLifeCycleState.OPEN;

    ContainerProtos.KeyValue.Builder keyValBuilder =
        ContainerProtos.KeyValue.newBuilder();
    ContainerProtos.CreateContainerData containerData = ContainerProtos
        .CreateContainerData.newBuilder()
        .setContainerType(containerType)
        .setContainerId(containerId)
        .addMetadata(0, keyValBuilder.setKey("VOLUME").setValue("ozone")
            .build())
        .addMetadata(1, keyValBuilder.setKey("OWNER").setValue("hdfs")
            .build()).build();

    KeyValueContainerData kvData = KeyValueContainerData.getFromProtoBuf(
        containerData);

    assertEquals(containerType, kvData.getContainerType());
    assertEquals(containerId, kvData.getContainerId());
    assertEquals(layOutVersion, kvData.getLayOutVersion().getVersion());
    assertEquals(state, kvData.getState());
    assertEquals(2, kvData.getMetadata().size());
    assertEquals("ozone", kvData.getMetadata().get("VOLUME"));
    assertEquals("hdfs", kvData.getMetadata().get("OWNER"));
  }

  @Test
  public void testKeyValueData() {
    long containerId = 1L;
    ContainerProtos.ContainerType containerType = ContainerProtos
        .ContainerType.KeyValueContainer;
    String path = "/tmp";
    String containerDBType = "RocksDB";
    int layOutVersion = 1;
    ContainerProtos.ContainerLifeCycleState state = ContainerProtos
        .ContainerLifeCycleState.CLOSED;
    AtomicLong val = new AtomicLong(0);
    AtomicLong updatedVal = new AtomicLong(100);

    KeyValueContainerData kvData = new KeyValueContainerData(containerType,
        containerId);

    assertEquals(containerType, kvData.getContainerType());
    assertEquals(containerId, kvData.getContainerId());
    assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData
        .getState());
    assertEquals(0, kvData.getMetadata().size());
    assertEquals(0, kvData.getNumPendingDeletionBlocks());
    assertEquals(val.get(), kvData.getReadBytes());
    assertEquals(val.get(), kvData.getWriteBytes());
    assertEquals(val.get(), kvData.getReadCount());
    assertEquals(val.get(), kvData.getWriteCount());

    kvData.setState(state);
    kvData.setContainerDBType(containerDBType);
    kvData.setContainerPath(path);
    kvData.setDBPath(path);
    kvData.incrReadBytes(10);
    kvData.incrWriteBytes(10);
    kvData.incrReadCount();
    kvData.incrWriteCount();

    assertEquals(state, kvData.getState());
    assertEquals(containerDBType, kvData.getContainerDBType());
    assertEquals(path, kvData.getContainerPath());
    assertEquals(path, kvData.getDBPath());

    assertEquals(10, kvData.getReadBytes());
    assertEquals(10, kvData.getWriteBytes());
    assertEquals(1, kvData.getReadCount());
    assertEquals(1, kvData.getWriteCount());
  }

}